From 2f7841b5a29609c658878f6fe78fcce3b8583715 Mon Sep 17 00:00:00 2001 From: Matthias Bertschy Date: Mon, 26 Jan 2026 08:32:53 +0100 Subject: [PATCH] update policy.json testdata Signed-off-by: Matthias Bertschy --- core/cautils/getter/testdata/policy.json | 57298 ++++++++++++++------- 1 file changed, 38061 insertions(+), 19237 deletions(-) diff --git a/core/cautils/getter/testdata/policy.json b/core/cautils/getter/testdata/policy.json index 672203d8..835e75b1 100644 --- a/core/cautils/getter/testdata/policy.json +++ b/core/cautils/getter/testdata/policy.json @@ -6,19 +6,16 @@ "FrameworkControlRelations": { "Err": null }, - "Tag": "latest/download", + "Tag": "download/v2", "Owner": "kubescape", "CurGitVersion": "", "Branch": "", - "URL": "https://github.com/kubescape/regolibrary/releases/latest/download", + "URL": "https://github.com/kubescape/regolibrary/releases/download/v2", "Path": "releases", "BaseUrl": "https://github.com", "Repository": "regolibrary", "DefaultConfigInputs": { "name": "default", - "attributes": { - "armoBuiltin": true - }, "scope": { "designatorType": "attributes", "attributes": { @@ -27,37 +24,6 @@ }, "settings": { "postureControlInputs": { - "servicesNames": [ - "nifi-service", - "argo-server", - "minio", - "postgres", - "workflow-controller-metrics", - "weave-scope-app", - "kubernetes-dashboard" - ], - "cpu_request_min": [], - "k8sRecommendedLabels": [ - "app.kubernetes.io/name", - "app.kubernetes.io/instance", - "app.kubernetes.io/version", - "app.kubernetes.io/component", - "app.kubernetes.io/part-of", - "app.kubernetes.io/managed-by", - "app.kubernetes.io/created-by" - ], - "cpu_limit_max": [], - "listOfDangerousArtifacts": [ - "bin/bash", - "sbin/sh", - "bin/ksh", - "bin/tcsh", - "bin/zsh", - "usr/bin/scsh", - "bin/csh", - "bin/busybox", - "usr/bin/busybox" - ], "sensitiveInterfaces": [ "nifi", "argo-server", @@ -67,13 +33,33 @@ "jenkins", "prometheus-deployment" ], - "max_high_vulnerabilities": [ - "10" - ], "sensitiveValuesAllowed": [], - "untrustedRegistries": [], + "memory_request_min": [ + "0" + ], + "memory_limit_max": [], + "cpu_limit_max": [], + "cpu_limit_min": [ + "0" + ], + "insecureCapabilities": [ + "SETPCAP", + "NET_ADMIN", + "NET_RAW", + "SYS_MODULE", + "SYS_RAWIO", + "SYS_PTRACE", + "SYS_ADMIN", + "SYS_BOOT", + "MAC_OVERRIDE", + "MAC_ADMIN", + "PERFMON", + "ALL", + "BPF" + ], + "sensitiveKeyNamesAllowed": [], "memory_request_max": [], - "memory_limit_min": [], + "cpu_request_max": [], "wlKnownNames": [ "coredns", "kube-proxy", @@ -93,32 +79,20 @@ "ca-websocket", "clair-clair" ], - "trustedCosignPublicKeys": [], - "insecureCapabilities": [ - "SETPCAP", - "NET_ADMIN", - "NET_RAW", - "SYS_MODULE", - "SYS_RAWIO", - "SYS_PTRACE", - "SYS_ADMIN", - "SYS_BOOT", - "MAC_OVERRIDE", - "MAC_ADMIN", - "PERFMON", - "ALL", - "BPF" + "recommendedLabels": [ + "app", + "tier", + "phase", + "version", + "owner", + "env" ], - "publicRegistries": [], "max_critical_vulnerabilities": [ "5" ], "sensitiveKeyNames": [ - "aws_access_key_id", "aws_secret_access_key", - "azure_batchai_storage_account", "azure_batchai_storage_key", - "azure_batch_account", "azure_batch_key", "secret", "key", @@ -138,19 +112,37 @@ "_key_", "_secret_" ], - "cpu_limit_min": [], - "recommendedLabels": [ - "app", - "tier", - "phase", - "version", - "owner", - "env" + "memory_limit_min": [ + "0" + ], + "k8sRecommendedLabels": [ + "app.kubernetes.io/name", + "app.kubernetes.io/instance", + "app.kubernetes.io/version", + "app.kubernetes.io/component", + "app.kubernetes.io/part-of", + 
"app.kubernetes.io/managed-by", + "app.kubernetes.io/created-by" + ], + "publicRegistries": [], + "max_high_vulnerabilities": [ + "10" + ], + "servicesNames": [ + "nifi-service", + "argo-server", + "minio", + "postgres", + "workflow-controller-metrics", + "weave-scope-app", + "kubernetes-dashboard" + ], + "untrustedRegistries": [], + "cpu_request_min": [ + "0" ], "imageRepositoryAllowList": [], - "memory_request_min": [], - "memory_limit_max": [], - "cpu_request_max": [] + "trustedCosignPublicKeys": [] }, "postureScanConfig": { @@ -172,35 +164,16 @@ "apiVersion": "regolibrary.kubescape/v1alpha1", "kind": "AttackTrack", "metadata": { - "name": "kubeapi" + "name": "workload-unauthenticated-service" }, "spec": { "data": { - "name": "Initial access", + "name": "Initial Access", + "description": "The service is exposed outside the Kubernetes network.", "subSteps": [ { - "name": "Persistence" - }, - { - "name": "Privilege escalation" - }, - { - "name": "Credential access" - }, - { - "name": "Discovery" - }, - { - "name": "Lateral movement" - }, - { - "name": "Defense evasion" - }, - { - "name": "Impact - data destruction" - }, - { - "name": "Impact - service injection" + "name": "Execution", + "description": "Database access is missing authentication and it can be accessed by anyone" } ] } @@ -210,50 +183,43 @@ "apiVersion": "regolibrary.kubescape/v1alpha1", "kind": "AttackTrack", "metadata": { - "name": "container" + "name": "workload-external-track" }, "spec": { "data": { - "name": "Initial access", + "name": "Initial Access", + "description": "An attacker can access the Kubernetes environment.", "subSteps": [ { - "name": "Execution", + "name": "Execution (Vulnerable Image)", + "description": "An attacker can execute malicious code by exploiting vulnerable images.", + "checksVulnerabilities": true, "subSteps": [ { - "name": "Privilege escalation" + "name": "Data Collection", + "description": "An attacker can gather data." + }, + { + "name": "Secret Access", + "description": "An attacker can steal secrets." }, { "name": "Credential access", - "subSteps": [ - { - "name": "Impact - service access" - }, - { - "name": "Impact - K8s API access", - "subSteps": [ - { - "name": "Defense evasion - KubeAPI" - } - ] - } - ] + "description": "An attacker can steal account names and passwords." }, { - "name": "Discovery" + "name": "Privilege Escalation (Node)", + "description": "An attacker can gain permissions and access node resources." }, { - "name": "Lateral movement" + "name": "Persistence", + "description": "An attacker can create a foothold." }, { - "name": "Impact - Data access in container" - }, - { - "name": "Persistence" + "name": "Lateral Movement (Network)", + "description": "An attacker can move through the network." 
} ] - }, - { - "name": "Impact - service destruction" } ] } @@ -263,40 +229,54 @@ "apiVersion": "regolibrary.kubescape/v1alpha1", "kind": "AttackTrack", "metadata": { - "name": "node" + "name": "external-database-without-authentication" }, "spec": { "data": { - "name": "Initial access", + "name": "Initial Access", + "description": "An attacker can access the Kubernetes environment.", "subSteps": [ { - "name": "Execution", - "subSteps": [ - { - "name": "Persistence" - }, - { - "name": "Credential access" - }, - { - "name": "Defense evasion" - }, - { - "name": "Discovery" - }, - { - "name": "Lateral movement" - }, - { - "name": "Impact - data theft" - }, - { - "name": "Impact - data destruction" - }, - { - "name": "Impact - service injection" - } - ] + "name": "Unauthenticated Access", + "description": "An unauthenticated attacker can access resources." + } + ] + } + } + }, + { + "apiVersion": "regolibrary.kubescape/v1alpha1", + "kind": "AttackTrack", + "metadata": { + "name": "service-destruction" + }, + "spec": { + "data": { + "name": "Initial Access", + "description": "An attacker can access the Kubernetes environment.", + "subSteps": [ + { + "name": "Denial of service", + "description": "An attacker can overload the workload, making it unavailable." + } + ] + } + } + }, + { + "apiVersion": "regolibrary.kubescape/v1alpha1", + "kind": "AttackTrack", + "metadata": { + "name": "external-workload-with-cluster-takeover-roles" + }, + "spec": { + "data": { + "name": "Initial Access", + "description": "An attacker can access the Kubernetes environment.", + "subSteps": [ + { + "name": "Cluster Access", + "description": "An attacker has access to sensitive information and can leverage them by creating pods in the cluster." } ] } @@ -306,46 +286,437 @@ "Frameworks": [ { "guid": "", - "name": "ArmoBest", + "name": "DevOpsBest", "attributes": { - "armoBuiltin": true + "builtin": true }, "creationTime": "", "description": "", + "typeTags": [ + "compliance" + ], "controls": [ { "guid": "", - "name": "Forbidden Container Registries", + "name": "Configured readiness probe", + "attributes": { + "controlTypeTags": [ + "devops" + ] + }, + "controlID": "C-0018", + "creationTime": "", + "description": "Readiness probe is intended to ensure that workload is ready to process network traffic. It is highly recommended to define readiness probe for every worker container. This control finds all the pods where the readiness probe is not configured.", + "remediation": "Ensure Readiness probes are configured wherever possible.", + "rules": [], + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "Container hostPort", "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Initial Access" - ], "controlTypeTags": [ "security", - "compliance" + "compliance", + "devops" + ] + }, + "controlID": "C-0044", + "creationTime": "", + "description": "Configuring hostPort requires a particular port number. If two objects specify the same HostPort, they could not be deployed to the same node. It may prevent the second object from starting, even if Kubernetes will try reschedule it on another node, provided there are available nodes with sufficient amount of resources. 
Also, if the number of replicas of such workload is higher than the number of nodes, the deployment will consistently fail.", + "remediation": "Avoid usage of hostPort unless it is absolutely necessary, in which case define appropriate exception. Use NodePort / ClusterIP instead.", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Network", + "id": "Cat-4" + } + }, + { + "guid": "", + "name": "Configured liveness probe", + "attributes": { + "controlTypeTags": [ + "devops" + ] + }, + "controlID": "C-0056", + "creationTime": "", + "description": "Liveness probe is intended to ensure that workload remains healthy during its entire execution lifecycle, or otherwise restrat the container. It is highly recommended to define liveness probe for every worker container. This control finds all the pods where the Liveness probe is not configured.", + "remediation": "Ensure Liveness probes are configured wherever possible.", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "Pods in default namespace", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops" + ] + }, + "controlID": "C-0061", + "creationTime": "", + "description": "It is recommended to avoid running pods in cluster without explicit namespace assignment. This control identifies all the pods running in the default namespace.", + "remediation": "Create necessary namespaces and move all the pods from default namespace there.", + "rules": [], + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "Naked pods", + "attributes": { + "controlTypeTags": [ + "devops" + ] + }, + "controlID": "C-0073", + "creationTime": "", + "description": "It is not recommended to create pods without parental Deployment, ReplicaSet, StatefulSet etc.Manual creation if pods may lead to a configuration drifts and other untracked changes in the system. Such pods won't be automatically rescheduled by Kubernetes in case of a crash or infrastructure failure. This control identifies every pod that does not have corresponding parental object.", + "remediation": "Create necessary Deployment object for every pod making any pod a first class citizen in your IaC architecture.", + "rules": [], + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "Container runtime socket mounted", + "attributes": { + "controlTypeTags": [ + "devops", + "smartRemediation" + ] + }, + "controlID": "C-0074", + "creationTime": "", + "description": "Mounting Container runtime socket (Unix socket) enables container to access Container runtime, retrieve sensitive information and execute commands, if Container runtime is available. 
This control identifies pods that attempt to mount Container runtime socket for accessing Container runtime.", + "remediation": "Remove container runtime socket mount request or define an exception.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } + }, + { + "guid": "", + "name": "Image pull policy on latest tag", + "attributes": { + "controlTypeTags": [ + "devops" + ] + }, + "controlID": "C-0075", + "creationTime": "", + "description": "While usage of the latest tag is not generally recommended, in some cases this is necessary. If it is, the ImagePullPolicy must be set to Always, otherwise Kubernetes may run an older image with the same name that happens to be present in the node cache. Note that using Always will not cause additional image downloads because Kubernetes will check the image hash of the local local against the registry and only pull the image if this hash has changed, which is exactly what users want when use the latest tag. This control will identify all pods with latest tag that have ImagePullSecret not set to Always.", + "remediation": "Set ImagePullPolicy to Always in all pods found by this control.", + "rules": [], + "baseScore": 2, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "Label usage for resources", + "attributes": { + "actionRequired": "configuration", + "controlTypeTags": [ + "devops" + ] + }, + "controlID": "C-0076", + "creationTime": "", + "description": "It is recommended to set labels that identify semantic attributes of your application or deployment. For example, { app: myapp, tier: frontend, phase: test, deployment: v3 }. These labels can used to assign policies to logical groups of the deployments as well as for presentation and tracking purposes. This control helps you find deployments without any of the expected labels.", + "remediation": "Define labels that are most suitable to your needs of use the exceptions to prevent further notifications.", + "rules": [], + "baseScore": 2, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "K8s common labels usage", + "attributes": { + "actionRequired": "configuration", + "controlTypeTags": [ + "devops" + ] + }, + "controlID": "C-0077", + "creationTime": "", + "description": "Kubernetes common labels help manage and monitor Kubernetes cluster using different tools such as kubectl, dashboard and others in an interoperable way. Refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ for more information. This control helps you find objects that don't have any of these labels defined.", + "remediation": "Define applicable labels or use the exception mechanism to prevent further notifications.", + "rules": [], + "baseScore": 2, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "Deprecated Kubernetes image registry", + "controlID": "C-0253", + "creationTime": "", + "description": "Kubernetes team has deprecated GCR (k8s.gcr.io) registry and recommends pulling Kubernetes components from the new registry (registry.k8s.io). 
This is mandatory from 1.27", + "remediation": "Change the images to be pulled from the new registry (registry.k8s.io).", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } + }, + { + "guid": "", + "name": "Ensure CPU requests are set", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops" + ] + }, + "controlID": "C-0268", + "creationTime": "", + "description": "This control identifies all Pods for which the CPU requests are not set.", + "remediation": "Set the CPU requests or use exception mechanism to avoid unnecessary notifications.", + "rules": [], + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Resource management", + "id": "Cat-7" + } + } + }, + { + "guid": "", + "name": "Ensure memory requests are set", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops" + ] + }, + "controlID": "C-0269", + "creationTime": "", + "description": "This control identifies all Pods for which the memory requests are not set.", + "remediation": "Set the memory requests or use exception mechanism to avoid unnecessary notifications.", + "rules": [], + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Resource management", + "id": "Cat-7" + } + } + }, + { + "guid": "", + "name": "Ensure CPU limits are set", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops", + "security" ], "attackTracks": [ { - "attackTrack": "container", + "attackTrack": "service-destruction", "categories": [ - "Initial access" + "Denial of service" ] } ] }, - "controlID": "C-0001", + "controlID": "C-0270", "creationTime": "", - "description": "In cases where the Kubernetes cluster is provided by a CSP (e.g., AKS in Azure, GKE in GCP, or EKS in AWS), compromised cloud credential can lead to the cluster takeover. 
Attackers may abuse cloud account credentials or IAM mechanism to the cluster’s management layer.", - "remediation": "Limit the registries from which you pull container images from", + "description": "This control identifies all Pods for which the CPU limits are not set.", + "remediation": "Set the CPU limits or use exception mechanism to avoid unnecessary notifications.", "rules": [], - "baseScore": 7 + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Resource management", + "id": "Cat-7" + } + } }, { "guid": "", - "name": "Exec into container", + "name": "Ensure memory limits are set", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops", + "security" + ], + "attackTracks": [ + { + "attackTrack": "service-destruction", + "categories": [ + "Denial of service" + ] + } + ] + }, + "controlID": "C-0271", + "creationTime": "", + "description": "This control identifies all Pods for which the memory limits are not set.", + "remediation": "Set the memory limits or use exception mechanism to avoid unnecessary notifications.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Resource management", + "id": "Cat-7" + } + } + } + ], + "controlsIDs": [ + "C-0018", + "C-0044", + "C-0056", + "C-0061", + "C-0073", + "C-0074", + "C-0075", + "C-0076", + "C-0077", + "C-0253", + "C-0268", + "C-0269", + "C-0270", + "C-0271" + ], + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } + }, + { + "guid": "", + "name": "NSA", + "attributes": { + "builtin": true + }, + "creationTime": "", + "description": "Implement NSA security advices for K8s ", + "typeTags": [ + "compliance" + ], + "controls": [ + { + "guid": "", + "name": "Prevent containers from allowing command execution", "attributes": { - "armoBuiltin": true, "microsoftMitreColumns": [ "Execution" ], @@ -360,24 +731,25 @@ "description": "Attackers with relevant permissions can run malicious commands in the context of legitimate containers in the cluster using “kubectl exec” command. This control determines which subjects have permissions to use this command.", "remediation": "It is recommended to prohibit “kubectl exec” command in production environments. 
It is also recommended not to use subjects with this permission for daily cluster operations.", "rules": [], - "baseScore": 5 + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } }, { "guid": "", "name": "API server insecure port is enabled", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security", "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Initial access" - ] - } ] }, "controlID": "C-0005", @@ -385,36 +757,22 @@ "description": "Kubernetes control plane API is running with non-secure port enabled which allows attackers to gain unprotected access to the cluster.", "remediation": "Set the insecure-port flag of the API server to zero.", "rules": [], - "baseScore": 9 - }, - { - "guid": "", - "name": "Resource limits", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Impact - service destruction" - ] - } + "baseScore": 9, + "scanningScope": { + "matches": [ + "cluster" ] }, - "controlID": "C-0009", - "creationTime": "", - "description": "CPU and memory resources should have a limit set for every container or a namespace to prevent resource exhaustion. This control identifies all the Pods without resource limit definitions by checking their yaml definition file as well as their namespace LimitRange objects. It is also recommended to use ResourceQuota object to restrict overall namespace resources, but this is not verified by this control.", - "remediation": "Define LimitRange and Resource Limits in the namespace or in the deployment/POD yamls.", - "rules": [], - "baseScore": 7 + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", "name": "Applications credentials in configuration files", "attributes": { + "actionRequired": "configuration", "microsoftMitreColumns": [ "Credential access", "Lateral Movement" @@ -423,43 +781,29 @@ "security", "compliance", "security-impact" - ], - "attackTracks": [ - { - "categories": [ - "Credential access" - ], - "attackTrack": "kubeapi" - }, - { - "attackTrack": "container", - "categories": [ - "Credential access" - ] - } - ], - "armoBuiltin": true + ] }, "controlID": "C-0012", "creationTime": "", "description": "Attackers who have access to configuration files can steal the stored secrets and use them. This control checks if ConfigMaps or pod specifications have sensitive information in their configuration.", "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", "rules": [], - "baseScore": 8 + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Secrets", + "id": "Cat-3" + } }, { "guid": "", "name": "Non-root containers", "attributes": { - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ], - "armoBuiltin": true, "controlTypeTags": [ "security", "compliance" @@ -467,27 +811,33 @@ }, "controlID": "C-0013", "creationTime": "", - "description": "Potential attackers may gain access to a container and leverage its existing privileges to conduct an attack. Therefore, it is not recommended to deploy containers with root privileges unless it is absolutely necessary. 
This control identifies all the Pods running as root or can escalate to root.", - "remediation": "If your application does not need root privileges, make sure to define the runAsUser or runAsGroup under the PodSecurityContext and use user ID 1000 or higher. Do not turn on allowPrivlegeEscalation bit and make sure runAsNonRoot is true.", + "description": "Potential attackers may gain access to a container and leverage its existing privileges to conduct an attack. Therefore, it is not recommended to deploy containers with root privileges unless it is absolutely necessary. This control identifies all the pods running as root or can escalate to root.", + "remediation": "If your application does not need root privileges, make sure to define runAsNonRoot as true or explicitly set the runAsUser using ID 1000 or higher under the PodSecurityContext or container securityContext. In addition, set an explicit value for runAsGroup using ID 1000 or higher.", "rules": [], - "baseScore": 6 + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + } + } }, { "guid": "", "name": "Allow privilege escalation", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } + "compliance", + "smartRemediation" ] }, "controlID": "C-0016", @@ -495,7 +845,21 @@ "description": "Attackers may gain access to a container and uplift its privilege to enable excessive capabilities.", "remediation": "If your application does not need it, make sure the allowPrivilegeEscalation field of the securityContext is set to false.", "rules": [], - "baseScore": 6 + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + } + } }, { "guid": "", @@ -503,167 +867,197 @@ "attributes": { "controlTypeTags": [ "security", - "compliance" + "compliance", + "smartRemediation" ], "attackTracks": [ { - "attackTrack": "container", + "attackTrack": "workload-external-track", "categories": [ - "Execution", "Persistence" ] } - ], - "armoBuiltin": true + ] }, "controlID": "C-0017", "creationTime": "", "description": "Mutable container filesystem can be abused to inject malicious code or data into containers. Use immutable (read-only) filesystem to limit potential attacks.", - "remediation": "Set the filesystem of the container to read-only when possible (POD securityContext, readOnlyRootFilesystem: true). If containers application needs to write into the filesystem, it is recommended to mount secondary filesystems for specific directories where application require write access.", + "remediation": "Set the filesystem of the container to read-only when possible (pod securityContext, readOnlyRootFilesystem: true). 
If containers application needs to write into the filesystem, it is recommended to mount secondary filesystems for specific directories where application require write access.", "rules": [], - "baseScore": 3 + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + } + } }, { "guid": "", "name": "Ingress and Egress blocked", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "compliance" ] }, "controlID": "C-0030", "creationTime": "", - "description": "Disable Ingress and Egress traffic on all pods wherever possible. It is recommended to define restrictive network policy on all new PODs, and then enable sources/destinations that this POD must communicate with.", + "description": "Disable Ingress and Egress traffic on all pods wherever possible. It is recommended to define restrictive network policy on all new pods, and then enable sources/destinations that this pod must communicate with.", "remediation": "Define a network policy that restricts ingress and egress connections.", "rules": [], - "baseScore": 6 + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Network", + "id": "Cat-4" + } }, { "guid": "", "name": "Automatic mapping of service account", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Credential access", - "Impact - K8s API access" - ] - } + "compliance", + "smartRemediation" ] }, "controlID": "C-0034", "creationTime": "", - "description": "Potential attacker may gain access to a POD and steal its service account token. Therefore, it is recommended to disable automatic mapping of the service account tokens in service account configuration and enable it only for PODs that need to use them.", - "remediation": "Disable automatic mounting of service account tokens to PODs either at the service account level or at the individual POD level, by specifying the automountServiceAccountToken: false. Note that POD level takes precedence.", + "description": "Potential attacker may gain access to a pod and steal its service account token. Therefore, it is recommended to disable automatic mapping of the service account tokens in service account configuration and enable it only for pods that need to use them.", + "remediation": "Disable automatic mounting of service account tokens to pods either at the service account level or at the individual pod level, by specifying the automountServiceAccountToken: false. 
Note that pod level takes precedence.", "rules": [], - "baseScore": 6 + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Secrets", + "id": "Cat-3" + } }, { "guid": "", - "name": "Cluster-admin binding", + "name": "Administrative Roles", "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "rbacQuery": "Show cluster_admin", "controlTypeTags": [ "security", "compliance" ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Impact - data destruction", - "Impact - service injection" - ] - } - ] + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "rbacQuery": "Show cluster_admin" }, "controlID": "C-0035", "creationTime": "", "description": "Attackers who have cluster admin permissions (can perform any action on any resource), can take advantage of their privileges for malicious activities. This control determines which subjects have cluster admin permissions.", "remediation": "You should apply least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use subjects with such high permissions for daily operations.", "rules": [], - "baseScore": 6 + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } }, { "guid": "", "name": "Host PID/IPC privileges", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security", "compliance" - ], - "attackTracks": [ - { - "categories": [ - "Privilege escalation" - ], - "attackTrack": "container" - } ] }, "controlID": "C-0038", "creationTime": "", - "description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all PODs using hostPID or hostIPC privileges.", + "description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all pods using hostPID or hostIPC privileges.", "remediation": "Remove hostPID and hostIPC from the yaml file(s) privileges unless they are absolutely necessary.", "rules": [], - "baseScore": 7 + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + } + } }, { "guid": "", "name": "HostNetwork access", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security", "compliance" ], "attackTracks": [ { - "attackTrack": "container", + "attackTrack": "workload-external-track", "categories": [ - "Discovery", - "Lateral movement", - "Impact - service access" + "Lateral Movement (Network)" ] } ] }, "controlID": "C-0041", "creationTime": "", - "description": "Potential attackers may gain access to a POD and inherit access to the entire host network. For example, in AWS case, they will have access to the entire VPC. This control identifies all the PODs with host network access enabled.", - "remediation": "Only connect PODs to host network when it is necessary. If not, set the hostNetwork field of the pod spec to false, or completely remove it (false is the default). 
Whitelist only those PODs that must have access to host network by design.", + "description": "Potential attackers may gain access to a pod and inherit access to the entire host network. For example, in AWS case, they will have access to the entire VPC. This control identifies all the pods with host network access enabled.", + "remediation": "Only connect pods to host network when it is necessary. If not, set the hostNetwork field of the pod spec to false, or completely remove it (false is the default). Whitelist only those pods that must have access to host network by design.", "rules": [], - "baseScore": 7 + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Network", + "id": "Cat-4" + } + } }, { "guid": "", "name": "Container hostPort", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security", "compliance", "devops" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access" - ] - } ] }, "controlID": "C-0044", @@ -671,106 +1065,94 @@ "description": "Configuring hostPort requires a particular port number. If two objects specify the same HostPort, they could not be deployed to the same node. It may prevent the second object from starting, even if Kubernetes will try reschedule it on another node, provided there are available nodes with sufficient amount of resources. Also, if the number of replicas of such workload is higher than the number of nodes, the deployment will consistently fail.", "remediation": "Avoid usage of hostPort unless it is absolutely necessary, in which case define appropriate exception. Use NodePort / ClusterIP instead.", "rules": [], - "baseScore": 4 + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Network", + "id": "Cat-4" + } }, { "guid": "", "name": "Insecure capabilities", "attributes": { - "armoBuiltin": true, + "actionRequired": "configuration", "controlTypeTags": [ "security", - "compliance" + "compliance", + "smartRemediation" ], "attackTracks": [ { - "attackTrack": "container", + "attackTrack": "workload-external-track", "categories": [ - "Privilege escalation" + "Privilege Escalation (Node)" ] } ] }, "controlID": "C-0046", "creationTime": "", - "description": "Giving insecure or excessive capabilities to a container can increase the impact of the container compromise. This control identifies all the PODs with dangerous capabilities (see documentation pages for details).", + "description": "Giving insecure or excessive capabilities to a container can increase the impact of the container compromise. This control identifies all the pods with dangerous capabilities (see documentation pages for details).", "remediation": "Remove all insecure capabilities which are not necessary for the container.", "rules": [], - "baseScore": 7 - }, - { - "guid": "", - "name": "Network mapping", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Discovery" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Discovery" - ] - } + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster", + "file" ] }, - "controlID": "C-0049", - "creationTime": "", - "description": "If no network policy is defined, attackers who gain access to a single container may use it to probe the network. 
This control lists all namespaces in which no network policies are defined.", - "remediation": "Define network policies or use similar network protection mechanisms.", - "rules": [], - "baseScore": 3 + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + } + } }, { "guid": "", "name": "Cluster internal networking", "attributes": { - "microsoftMitreColumns": [ - "Lateral movement" - ], "controlTypeTags": [ "security", "compliance" ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Discovery", - "Lateral movement" - ] - } - ], - "armoBuiltin": true + "microsoftMitreColumns": [ + "Lateral movement" + ] }, "controlID": "C-0054", "creationTime": "", "description": "If no network policy is defined, attackers who gain access to a container may use it to move laterally in the cluster. This control lists namespaces in which no network policy is defined.", "remediation": "Define Kubernetes network policies or use alternative products to protect cluster network.", "rules": [], - "baseScore": 4 + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Network", + "id": "Cat-4" + } }, { "guid": "", "name": "Linux hardening", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security", "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } ] }, "controlID": "C-0055", @@ -778,26 +1160,32 @@ "description": "Containers may be given more privileges than they actually need. This can increase the potential impact of a container compromise.", "remediation": "You can use AppArmor, Seccomp, SELinux and Linux Capabilities mechanisms to restrict containers abilities to utilize unwanted privileges.", "rules": [], - "baseScore": 4 + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + } + } }, { "guid": "", "name": "Privileged container", "attributes": { - "armoBuiltin": true, "microsoftMitreColumns": [ "Privilege escalation" ], "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } + "security", + "smartRemediation" ] }, "controlID": "C-0057", @@ -805,25 +1193,29 @@ "description": "Potential attackers may gain access to privileged containers and inherit access to the host resources. Therefore, it is not recommended to deploy privileged containers unless it is absolutely necessary. This control identifies all the privileged Pods.", "remediation": "Remove privileged capabilities by setting the securityContext.privileged to false. If you must deploy a Pod as privileged, add other restriction to it, such as network policy, Seccomp etc and still remove all unnecessary capabilities. 
Use the exception mechanism to remove unnecessary notifications.", "rules": [], - "baseScore": 8 + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + } + } }, { "guid": "", "name": "CVE-2021-25741 - Using symlink for arbitrary host file system access.", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security", "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Persistence", - "Impact - Data access in container" - ] - } ] }, "controlID": "C-0058", @@ -831,7 +1223,16 @@ "description": "A user may be able to create a container with subPath or subPathExpr volume mounts to access files & directories anywhere on the host filesystem. Following Kubernetes versions are affected: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. This control checks the vulnerable versions and the actual usage of the subPath feature in all Pods in the cluster. If you want to learn more about the CVE, please refer to the CVE link: https://nvd.nist.gov/vuln/detail/CVE-2021-25741", "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, or remove any existing Pods using subPath or subPathExpr feature.", "rules": [], - "baseScore": 6 + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } }, { "guid": "", @@ -840,144 +1241,49 @@ "controlTypeTags": [ "security", "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access", - "Execution" - ] - } - ], - "armoBuiltin": true + ] }, "controlID": "C-0059", "creationTime": "", "description": "Security issue in ingress-nginx where a user that can create or update ingress objects can use the custom snippets feature to obtain all secrets in the cluster (see more at https://github.com/kubernetes/ingress-nginx/issues/7837)", "remediation": "To mitigate this vulnerability: 1. Upgrade to a version that allows mitigation (>= v0.49.1 or >= v1.0.1), 2. Set allow-snippet-annotations to false in your ingress-nginx ConfigMap based on how you deploy ingress-nginx", "rules": [], - "baseScore": 8 - }, - { - "guid": "", - "name": "Pods in default namespace", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "compliance", - "devops" + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" ] }, - "controlID": "C-0061", - "creationTime": "", - "description": "It is recommended to avoid running PODs in cluster without explicit namespace assignment. This control identifies all the PODs running in the default namespace.", - "remediation": "Create necessary namespaces and move all the PODs from default namespace there.", - "rules": [], - "baseScore": 3 + "category": { + "name": "Workload", + "id": "Cat-5" + } }, { "guid": "", - "name": "Sudo in container entrypoint", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "controlID": "C-0062", - "creationTime": "", - "description": "Adding sudo to a container entry point command may escalate process privileges and allow access to forbidden resources. 
This control checks all the entry point commands in all containers in the POD to find those that have sudo command.", - "remediation": "Remove sudo from the command line and use Kubernetes native root and capabilities controls to provide necessary privileges where they are required.", - "rules": [], - "baseScore": 5 - }, - { - "guid": "", - "name": "Portforwarding privileges", - "attributes": { - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Impact - data destruction", - "Discovery", - "Lateral movement" - ] - } - ], - "armoBuiltin": true, - "rbacQuery": "Port Forwarding", - "controlTypeTags": [ - "security-impact", - "compliance" - ] - }, - "controlID": "C-0063", - "creationTime": "", - "description": "Attackers with relevant RBAC permission can use “kubectl portforward” command to establish direct communication with PODs from within the cluster or even remotely. Such communication will most likely bypass existing security measures in the cluster. This control determines which subjects have permissions to use this command.", - "remediation": "It is recommended to prohibit “kubectl portforward” command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", - "rules": [], - "baseScore": 5 - }, - { - "guid": "", - "name": "No impersonation", - "attributes": { - "rbacQuery": "Impersonation", - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Privilege escalation" - ] - } - ], - "armoBuiltin": true - }, - "controlID": "C-0065", - "creationTime": "", - "description": "Impersonation is an explicit RBAC permission to use other roles rather than the one assigned to a user, group or service account. This is sometimes needed for testing purposes. However, it is highly recommended not to use this capability in the production environments for daily operations. This control identifies all subjects whose roles include impersonate verb.", - "remediation": "Either remove the impersonate verb from the role where it was found or make sure that this role is not bound to users, groups or service accounts used for ongoing cluster operations. If necessary, bind this role to a subject only for specific needs for limited time period.", - "rules": [], - "baseScore": 6 - }, - { - "guid": "", - "name": "Secret/ETCD encryption enabled", + "name": "Secret/etcd encryption enabled", "attributes": { "controlTypeTags": [ "security", "compliance" - ], - "attackTracks": [ - { - "attackTrack": "node", - "categories": [ - "Impact" - ] - } - ], - "armoBuiltin": true + ] }, "controlID": "C-0066", "creationTime": "", "description": "All Kubernetes Secrets are stored primarily in etcd therefore it is important to encrypt it.", "remediation": "Turn on the etcd encryption in your cluster, for more see the vendor documentation.", "rules": [], - "baseScore": 6 + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", @@ -986,40 +1292,31 @@ "controlTypeTags": [ "security", "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Defense evasion - KubeAPI" - ] - } - ], - "armoBuiltin": true + ] }, "controlID": "C-0067", "creationTime": "", "description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. 
It is important to use it so the operator has a record of events happened in Kubernetes", "remediation": "Turn on audit logging for your cluster. Look at the vendor guidelines for more details", "rules": [], - "baseScore": 5 + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", "name": "PSP enabled", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security", "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Impact - service injection" - ] - } ] }, "controlID": "C-0068", @@ -1027,21 +1324,22 @@ "description": "PSP enable fine-grained authorization of pod creation and it is important to enable it", "remediation": "Turn Pod Security Policies on in your cluster, if you use other admission controllers to control the behavior that PSP controls, exclude this control from your scans", "rules": [], - "baseScore": 1 + "baseScore": 1, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", "name": "Disable anonymous access to Kubelet service", "attributes": { - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Initial access" - ] - } - ], - "armoBuiltin": true, "controlTypeTags": [ "security", "compliance" @@ -1052,24 +1350,24 @@ "description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", "remediation": "Start the kubelet with the --anonymous-auth=false flag.", "rules": [], - "baseScore": 10 + "baseScore": 10, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", "name": "Enforce Kubelet client TLS authentication", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security", "compliance" - ], - "attackTracks": [ - { - "attackTrack": "node", - "categories": [ - "Initial access" - ] - } ] }, "controlID": "C-0070", @@ -1077,261 +1375,99 @@ "description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", "rules": [], - "baseScore": 9 - }, - { - "guid": "", - "name": "Images from allowed registry", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Collection" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access", - "Execution" - ] - } + "baseScore": 9, + "scanningScope": { + "matches": [ + "cluster" ] }, - "controlID": "C-0078", - "creationTime": "", - "description": "This control is intended to ensure that all the used container images are taken from the authorized repositories. 
It allows user to list all the approved repositories and will fail all the images taken from any repository outside of this list.", - "remediation": "You should enable all trusted repositories in the parameters of this control.", - "rules": [], - "baseScore": 5 + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", - "name": "CVE-2022-0185-linux-kernel-container-escape", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "controlID": "C-0079", - "creationTime": "", - "description": "CVE-2022-0185 is a kernel vulnerability enabling privilege escalation and it can lead attackers to escape containers and take control over nodes. This control alerts on vulnerable kernel versions of Kubernetes nodes", - "remediation": "Patch Linux kernel version to 5.16.2 or above", - "rules": [], - "baseScore": 4 - }, - { - "guid": "", - "name": "CVE-2022-24348-argocddirtraversal", - "attributes": { - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ], - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ] - }, - "controlID": "C-0081", - "creationTime": "", - "description": "CVE-2022-24348 is a major software supply chain 0-day vulnerability in the popular open source CD platform Argo CD which can lead to privilege escalation and information disclosure.", - "remediation": "Update your ArgoCD deployment to fixed versions (v2.1.9,v2.2.4 or v2.3.0)", - "rules": [], - "baseScore": 4 - }, - { - "guid": "", - "name": "Workloads with Critical vulnerabilities exposed to external traffic", + "name": "Ensure CPU limits are set", "attributes": { "controlTypeTags": [ + "compliance", + "devops", "security" ], "attackTracks": [ { - "attackTrack": "container", + "attackTrack": "service-destruction", "categories": [ - "Initial access", - "Execution" - ] - } - ], - "armoBuiltin": true - }, - "controlID": "C-0083", - "creationTime": "", - "description": "Container images with known critical vulnerabilities pose elevated risk if they are exposed to the external traffic. This control lists all images with such vulnerabilities if either LoadBalancer or NodePort service is assigned to them.", - "remediation": "Either update the container image to fix the vulnerabilities (if such fix is available) or reassess if this workload must be exposed to the outseide traffic. If no fix is available, consider periodic restart of the POD to minimize the risk of persistant intrusion. Use exception mechanism if you don't want to see this report again.", - "rules": [], - "baseScore": 8 - }, - { - "guid": "", - "name": "Workloads with RCE vulnerabilities exposed to external traffic", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access", - "Execution" - ] - } - ], - "armoBuiltin": true - }, - "controlID": "C-0084", - "creationTime": "", - "description": "Container images with known Remote Code Execution (RCE) vulnerabilities pose significantly higher risk if they are exposed to the external traffic. This control lists all images with such vulnerabilities if their POD has either LoadBalancer or NodePort service.", - "remediation": "Either update the container image to fix the vulnerabilities (if such fix is available) or reassess if this workload must be exposed to the outseide traffic. 
If no fix is available, consider periodic restart of the POD to minimize the risk of persistant intrusion. Use exception mechanism if you don't want to see this report again.", - "rules": [], - "baseScore": 8 - }, - { - "guid": "", - "name": "Workloads with excessive amount of vulnerabilities", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access", - "Execution" + "Denial of service" ] } ] }, - "controlID": "C-0085", + "controlID": "C-0270", "creationTime": "", - "description": "Container images with multiple Critical and High sevirity vulnerabilities increase the risk of potential exploit. This control lists all such images according to the threashold provided by the customer.", - "remediation": "Update your workload images as soon as possible when fixes become available.", + "description": "This control identifies all Pods for which the CPU limits are not set.", + "remediation": "Set the CPU limits or use exception mechanism to avoid unnecessary notifications.", "rules": [], - "baseScore": 6 - }, - { - "guid": "", - "name": "CVE-2022-0492-cgroups-container-escape", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" ] }, - "controlID": "C-0086", - "creationTime": "", - "description": "Linux Kernel vulnerability CVE-2022-0492 may allow malicious code running inside container to escape container isolation and gain root privileges on the entire node. When fixed Kernel version numbers will become available, this control will be modified to verify them and avoid false positive detections. This control identifies all the resources that don't deploy neither AppArmor nor SELinux, run as root or allow privileged escalation or have corresponding dangerous capabilities.", - "remediation": "Activate AppArmor or SELinux. Follow the least privilege principle and remove root privileges or privilege escalation option and CAP_DAC_OVERRIDE capability. 
Make sure you don't allow container images from potentially dangerous sources and that containers that must have high privileges are taken from protected repositories.", - "rules": [], - "baseScore": 4 + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Resource management", + "id": "Cat-7" + } + } }, { "guid": "", - "name": "CVE-2022-23648-containerd-fs-escape", + "name": "Ensure memory limits are set", "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation", - "Impact - Data access in container" - ] - } - ] - }, - "controlID": "C-0087", - "creationTime": "", - "description": "CVE-2022-23648 is a vulnerability of containerd enabling attacker to gain access to read-only copies of arbitrary files from the host using aspecially-crafted POD configuration yamls", - "remediation": "Patch containerd to 1.6.1, 1.5.10, 1.4.12 or above", - "rules": [], - "baseScore": 7 - }, - { - "guid": "", - "name": "CVE-2022-3172-aggregated-API-server-redirect", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [] - }, - "controlID": "C-0089", - "creationTime": "", - "description": "The API server allows an aggregated API to redirect client traffic to any URL. This could lead to the client performing unexpected actions as well as forwarding the client's API server credentials to third parties", - "remediation": "Upgrade the Kubernetes version to one of the following versions (or higher patchs): `v1.25.1`, `v1.24.5`, `v1.23.11`, `v1.22.14`", - "rules": [], - "baseScore": 3 - }, - { - "guid": "", - "name": "CVE-2022-47633-kyverno-signature-bypass", - "attributes": { - "armoBuiltin": true, "controlTypeTags": [ + "compliance", + "devops", "security" ], "attackTracks": [ { "categories": [ - "Initial access", - "Execution" + "Denial of service" ], - "attackTrack": "container" + "attackTrack": "service-destruction" } ] }, - "controlID": "C-0091", + "controlID": "C-0271", "creationTime": "", - "description": "CVE-2022-47633 is a high severity vulnerability in Kyverno, it enables attackers to bypass the image signature validation of policies using a malicious image repository or MITM proxy", - "remediation": "Update your Grafana to 9.2.4 or above", + "description": "This control identifies all Pods for which the memory limits are not set.", + "remediation": "Set the memory limits or use exception mechanism to avoid unnecessary notifications.", "rules": [], - "baseScore": 8 + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Resource management", + "id": "Cat-7" + } + } } ], "controlsIDs": [ - "C-0001", "C-0002", "C-0005", - "C-0009", "C-0012", "C-0013", "C-0016", @@ -1343,12 +1479,5502 @@ "C-0041", "C-0044", "C-0046", - "C-0049", "C-0054", "C-0055", "C-0057", "C-0058", "C-0059", + "C-0066", + "C-0067", + "C-0068", + "C-0069", + "C-0070", + "C-0270", + "C-0271" + ], + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } + }, + { + "guid": "", + "name": "cis-eks-t1.8.0", + "attributes": { + "builtin": true, + "version": "v1.8.0" + }, + "creationTime": "", + "description": "Testing CIS for Amazon Elastic Kubernetes Service (EKS) as suggested by CIS benchmark: https://workbench.cisecurity.org/benchmarks/20537", + "typeTags": [ + "compliance" + ], + "controls": [ + { + "guid": "", + "name": 
"CIS-5.3.1 Ensure Kubernetes Secrets are encrypted using Customer Master Keys (CMKs) managed in AWS KMS", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0066", + "creationTime": "", + "description": "Encrypt Kubernetes secrets, stored in etcd, using secrets encryption feature during Amazon EKS cluster creation.", + "remediation": "This process can only be performed during Cluster Creation.\n\n Enable 'Secrets Encryption' during Amazon EKS cluster creation as described in the links within the 'References' section.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-2.1.1 Enable audit Logs", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0067", + "creationTime": "", + "description": "Control plane logs provide visibility into operation of the EKS Control plane component systems. The API server audit logs record all accepted and rejected requests in the cluster. When enabled via EKS configuration the control plane logs for a cluster are exported to a CloudWatch Log Group for persistence.", + "remediation": "**From Console:**\n\n 1. For each EKS Cluster in each region;\n2. Go to 'Amazon EKS' > 'Clusters' > '' > 'Configuration' > 'Logging'.\n3. Click 'Manage logging'.\n4. Ensure that all options are toggled to 'Enabled'.\n\n \n```\nAPI server: Enabled\nAudit: Enabled\t\nAuthenticator: Enabled\nController manager: Enabled\nScheduler: Enabled\n\n```\n 5. Click 'Save Changes'.\n\n **From CLI:**\n\n \n```\n# For each EKS Cluster in each region;\naws eks update-cluster-config \\\n --region '${REGION_CODE}' \\\n --name '${CLUSTER_NAME}' \\\n --logging '{\"clusterLogging\":[{\"types\":[\"api\",\"audit\",\"authenticator\",\"controllerManager\",\"scheduler\"],\"enabled\":true}]}'\n\n```", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-5.1.4 Minimize Container Registries to only those approved", + "attributes": { + "actionRequired": "configuration", + "microsoftMitreColumns": [ + "Collection" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0078", + "creationTime": "", + "description": "Use approved container registries.", + "remediation": "To minimize AWS ECR container registries to only those approved, you can follow these steps:\n\n 1. Define your approval criteria: Determine the criteria that containers must meet to be considered approved. This can include factors such as security, compliance, compatibility, and other requirements.\n2. Identify all existing ECR registries: Identify all ECR registries that are currently being used in your organization.\n3. Evaluate ECR registries against approval criteria: Evaluate each ECR registry against your approval criteria to determine whether it should be approved or not. This can be done by reviewing the registry settings and configuration, as well as conducting security assessments and vulnerability scans.\n4. Establish policies and procedures: Establish policies and procedures that outline how ECR registries will be approved, maintained, and monitored. This should include guidelines for developers to follow when selecting a registry for their container images.\n5. 
Implement access controls: Implement access controls to ensure that only approved ECR registries are used to store and distribute container images. This can be done by setting up IAM policies and roles that restrict access to unapproved registries or create a whitelist of approved registries.\n6. Monitor and review: Continuously monitor and review the use of ECR registries to ensure that they continue to meet your approval criteria. This can include regularly reviewing access logs, scanning for vulnerabilities, and conducting periodic audits.\n\n By following these steps, you can minimize AWS ECR container registries to only those approved, which can help to improve security, reduce complexity, and streamline container management in your organization. Additionally, AWS provides several tools and services that can help you manage your ECR registries, such as AWS Config, AWS CloudFormation, and AWS Identity and Access Management (IAM).", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Supply chain", + "id": "Cat-6" + } + } + }, + { + "guid": "", + "name": "CIS-3.1.2 Ensure that the kubelet kubeconfig file ownership is set to root:root", + "controlID": "C-0167", + "creationTime": "", + "description": "If `kubelet` is running, ensure that the file ownership of its kubeconfig file is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on each worker node.\n\n For example,\n\n \n```\nchown root:root \n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-3.1.4 Ensure that the kubelet configuration file ownership is set to root:root", + "controlID": "C-0171", + "creationTime": "", + "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file is owned by root:root.", + "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchown root:root /etc/kubernetes/kubelet/config.json\n\n```", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-3.2.1 Ensure that the Anonymous Auth is Not Enabled", + "controlID": "C-0172", + "creationTime": "", + "description": "Disable anonymous requests to the Kubelet server.", + "remediation": "**Remediation Method 1:**\n\n If configuring via the Kubelet config file, you first need to locate the file.\n\n To do this, SSH to each node and execute the following command to find the kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command provides details of the active kubelet process, from which we can see the location of the configuration file provided to the kubelet service with the `--config` argument. 
The file can be viewed with a command such as `more` or `less`, like so:\n\n \n```\nsudo less /path/to/kubelet-config.json\n\n```\n Disable Anonymous Authentication by setting the following parameter:\n\n \n```\n\"authentication\": { \"anonymous\": { \"enabled\": false } }\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file on each worker node and ensure the below parameters are part of the `KUBELET_ARGS` variable string.\n\n For systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, then this file can be found at `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf`. Otherwise, you may need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n \n```\n--anonymous-auth=false\n\n```\n **For Both Remediation Steps:**\n\n Based on your system, restart the `kubelet` service and check the service status.\n\n The following example is for operating systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the `systemctl` command. If `systemctl` is not available then you will need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```",
+        "rules": [],
+        "baseScore": 7,
+        "scanningScope": {
+            "matches": [
+                "cluster"
+            ]
+        },
+        "category": {
+            "name": "Control plane",
+            "id": "Cat-1"
+        }
+    },
+    {
+        "guid": "",
+        "name": "CIS-3.2.2 Ensure that the --authorization-mode argument is not set to AlwaysAllow",
+        "controlID": "C-0173",
+        "creationTime": "",
+        "description": "Do not allow all requests. Enable explicit authorization.",
+        "remediation": "**Remediation Method 1:**\n\n If configuring via the Kubelet config file, you first need to locate the file.\n\n To do this, SSH to each node and execute the following command to find the kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command provides details of the active kubelet process, from which we can see the location of the configuration file provided to the kubelet service with the `--config` argument. The file can be viewed with a command such as `more` or `less`, like so:\n\n \n```\nsudo less /path/to/kubelet-config.json\n\n```\n Enable Webhook Authentication by setting the following parameter:\n\n \n```\n\"authentication\": { \"webhook\": { \"enabled\": true } }\n\n```\n Next, set the Authorization Mode to `Webhook` by setting the following parameter:\n\n \n```\n\"authorization\": { \"mode\": \"Webhook\" }\n\n```\n Finer detail of the `authentication` and `authorization` fields can be found in the [Kubelet Configuration documentation](https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/).\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file on each worker node and ensure the below parameters are part of the `KUBELET_ARGS` variable string.\n\n For systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, then this file can be found at `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf`. 
Otherwise, you may need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n \n```\n--authentication-token-webhook\n--authorization-mode=Webhook\n\n```\n **For Both Remediation Steps:**\n\n Based on your system, restart the `kubelet` service and check the service status.\n\n The following example is for operating systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the `systemctl` command. If `systemctl` is not available then you will need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-3.2.3 Ensure that a Client CA File is Configured", + "controlID": "C-0174", + "creationTime": "", + "description": "Enable Kubelet authentication using certificates.", + "remediation": "**Remediation Method 1:**\n\n If configuring via the Kubelet config file, you first need to locate the file.\n\n To do this, SSH to each node and execute the following command to find the kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command provides details of the active kubelet process, from which we can see the location of the configuration file provided to the kubelet service with the `--config` argument. The file can be viewed with a command such as `more` or `less`, like so:\n\n \n```\nsudo less /path/to/kubelet-config.json\n\n```\n Configure the client certificate authority file by setting the following parameter appropriately:\n\n \n```\n\"authentication\": { \"x509\": {\"clientCAFile\": } }\"\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file on each worker node and ensure the below parameters are part of the `KUBELET_ARGS` variable string.\n\n For systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, then this file can be found at `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf`. Otherwise, you may need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n \n```\n--client-ca-file=\n\n```\n **For Both Remediation Steps:**\n\n Based on your system, restart the `kubelet` service and check the service status.\n\n The following example is for operating systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the `systemctl` command. 
If `systemctl` is not available then you will need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-3.2.4 Ensure that the --read-only-port is disabled", + "controlID": "C-0175", + "creationTime": "", + "description": "Disable the read-only port.", + "remediation": "If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to 0\n\n \n```\n\"readOnlyPort\": 0\n\n```\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--read-only-port=0\n\n```\n For each remediation:\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-3.2.5 Ensure that the --streaming-connection-idle-timeout argument is not set to 0", + "controlID": "C-0176", + "creationTime": "", + "description": "Do not disable timeouts on streaming connections.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to a non-zero value in the format of #h#m#s\n\n \n```\n\"streamingConnectionIdleTimeout\": \"4h0m0s\"\n\n```\n You should ensure that the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not specify a `--streaming-connection-idle-timeout` argument because it would override the Kubelet config file.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--streaming-connection-idle-timeout=4h0m0s\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"streamingConnectionIdleTimeout\":` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "rules": [], + "baseScore": 3, + 
"scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-3.2.6 Ensure that the --make-iptables-util-chains argument is set to true", + "controlID": "C-0178", + "creationTime": "", + "description": "Allow Kubelet to manage iptables.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"makeIPTablesUtilChains\": true\n\n```\n Ensure that `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not set the `--make-iptables-util-chains` argument because that would override your Kubelet config file.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--make-iptables-util-chains:true\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"makeIPTablesUtilChains.: true` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "rules": [], + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-3.2.7 Ensure that the --eventRecordQPS argument is set to 0 or a level which ensures appropriate event capture", + "controlID": "C-0180", + "creationTime": "", + "description": "Security relevant information should be captured. The eventRecordQPS on the Kubelet configuration can be used to limit the rate at which events are gathered and sets the maximum event creations per second. Setting this too low could result in relevant events not being logged, however the unlimited setting of `0` could result in a denial of service on the kubelet.", + "remediation": "If using a Kubelet config file, edit the file to set `eventRecordQPS:` to an appropriate level.\n\n If using command line arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. 
For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "rules": [], + "baseScore": 2, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-3.2.8 Ensure that the --rotate-certificates argument is not present or is set to true", + "controlID": "C-0181", + "creationTime": "", + "description": "Enable kubelet client certificate rotation.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"RotateCertificate\":true\n\n```\n Additionally, ensure that the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf does not set the --RotateCertificate executable argument to false because this would override the Kubelet config file.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--RotateCertificate=true\n\n```", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-3.2.9 Ensure that the RotateKubeletServerCertificate argument is set to true", + "controlID": "C-0183", + "creationTime": "", + "description": "Enable kubelet server certificate rotation.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"featureGates\": {\n \"RotateKubeletServerCertificate\":true\n},\n\n```\n Additionally, ensure that the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not set the `--rotate-kubelet-server-certificate` executable argument to false because this would override the Kubelet config file.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--rotate-kubelet-server-certificate=true\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"RotateKubeletServerCertificate\":` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediation methods:**\nRestart the `kubelet` service and check status. 
The example below is for when using systemctl to manage services:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-4.1.1 Ensure that the cluster-admin role is only used where required", + "controlID": "C-0185", + "creationTime": "", + "description": "The RBAC role `cluster-admin` provides wide-ranging powers over the environment and should be used only where and when needed.", + "remediation": "Identify all ClusterRoleBindings to the cluster-admin role. Check if they are used and if they need this role or if they could use a role with fewer privileges.\n\n Where possible, first bind users to a lower privileged role and then remove the ClusterRoleBinding to the cluster-admin role :\n\n \n```\nkubectl delete clusterrolebinding [name]\n\n```", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "CIS-4.1.2 Minimize access to secrets", + "controlID": "C-0186", + "creationTime": "", + "description": "The Kubernetes API stores secrets, which may be service account tokens for the Kubernetes API or credentials used by workloads in the cluster. Access to these secrets should be restricted to the smallest possible group of users to reduce the risk of privilege escalation.", + "remediation": "Where possible, remove `get`, `list` and `watch` access to `secret` objects in the cluster.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "CIS-4.1.3 Minimize wildcard use in Roles and ClusterRoles", + "controlID": "C-0187", + "creationTime": "", + "description": "Kubernetes Roles and ClusterRoles provide access to resources based on sets of objects and actions that can be taken on those objects. 
It is possible to set either of these to be the wildcard \"\\*\" which matches all items.\n\n Use of wildcards is not optimal from a security perspective as it may allow for inadvertent access to be granted when new resources are added to the Kubernetes API either as CRDs or in later versions of the product.", + "remediation": "Where possible replace any use of wildcards in clusterroles and roles with specific objects or actions.", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "CIS-4.1.4 Minimize access to create pods", + "controlID": "C-0188", + "creationTime": "", + "description": "The ability to create pods in a namespace can provide a number of opportunities for privilege escalation, such as assigning privileged service accounts to these pods or mounting hostPaths with access to sensitive data (unless Pod Security Policies are implemented to restrict this access)\n\n As such, access to create new pods should be restricted to the smallest possible group of users.", + "remediation": "Where possible, remove `create` access to `pod` objects in the cluster.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "CIS-4.1.5 Ensure that default service accounts are not actively used.", + "controlID": "C-0189", + "creationTime": "", + "description": "The `default` service account should not be used to ensure that rights granted to applications can be more easily audited and reviewed.", + "remediation": "Create explicit service accounts wherever a Kubernetes workload requires specific access to the Kubernetes API server.\n\n Modify the configuration of each default service account to include this value\n\n \n```\nautomountServiceAccountToken: false\n\n```\n Automatic remediation for the default account:\n\n `kubectl patch serviceaccount default -p $'automountServiceAccountToken: false'`", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CIS-4.1.6 Ensure that Service Account Tokens are only mounted where necessary", + "controlID": "C-0190", + "creationTime": "", + "description": "Service accounts tokens should not be mounted in pods except where the workload running in the pod explicitly needs to communicate with the API server", + "remediation": "Regularly review pod and service account objects in the cluster to ensure that the `automountServiceAccountToken` setting is `false` for pods and accounts that do not explicitly require API server access.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CIS-4.1.7 Cluster Access Manager API to streamline and enhance the management of access controls within EKS clusters", + "attributes": { + "armoBuiltin": true + }, + "id": "CIS-4.1.7", + "controlID": "C-0285", + "creationTime": "", + "description": "Amazon EKS has introduced the Cluster Access Manager API to streamline and enhance the management of access controls within EKS clusters. 
This new approach is now the recommended method over the traditional `aws-auth` ConfigMap for managing Role-Based Access Control (RBAC) and Service Accounts.\n\n Key Advantages of Using the Cluster Access Manager API:\n\n 1. **Simplified Access Management:** The Cluster Access Manager API allows administrators to manage access directly through the Amazon EKS API, eliminating the need to modify the aws-auth ConfigMap manually. This reduces operational overhead and minimizes the risk of misconfigurations.\n2. **Enhanced Security Controls:** With this API, administrators can assign predefined AWS-managed Kubernetes permissions, known as \"access policies,\" to IAM principals. This provides a more secure and auditable way to manage permissions compared to manual ConfigMap edits.\n3. **Improved Visibility and Auditing:** The API offers better visibility into cluster access configurations, facilitating easier auditing and compliance checks. Administrators can list and describe access entries and policies directly through the EKS API.",
+        "remediation": "Log in to the AWS Management Console.\n\n Navigate to Amazon EKS and select your EKS cluster.\n\n Go to the Access tab and click on \"Manage Access\" in the \"Access Configuration\" section.\n\n Under the Cluster Access settings, set the Cluster Authentication Mode:\n\n * Click `EKS API` to change `cluster will source authenticated IAM principals only from EKS access entry APIs`.\n* Click `ConfigMap` to change `cluster will source authenticated IAM principals only from the aws-auth ConfigMap`.\n* Note: `EKS API and ConfigMap` must be selected during Cluster creation and cannot be changed once the Cluster is provisioned.",
+        "rules": [],
+        "baseScore": 5,
+        "scanningScope": {
+            "matches": [
+                "cluster"
+            ]
+        },
+        "category": {
+            "name": "Control plane",
+            "id": "Cat-1"
+        }
+    },
+    {
+        "guid": "",
+        "name": "CIS-4.1.8 Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster",
+        "controlID": "C-0191",
+        "creationTime": "",
+        "description": "Cluster roles and roles with the impersonate, bind or escalate permissions should not be granted unless strictly required. Each of these permissions allows a particular subject to escalate their privileges beyond those explicitly granted by cluster administrators.",
+        "remediation": "Where possible, remove the impersonate, bind and escalate rights from subjects.",
+        "rules": [],
+        "baseScore": 6,
+        "scanningScope": {
+            "matches": [
+                "cluster",
+                "file"
+            ]
+        },
+        "category": {
+            "name": "Access control",
+            "id": "Cat-2"
+        }
+    },
+    {
+        "guid": "",
+        "name": "CIS-4.2.1 Minimize the admission of privileged containers",
+        "controlID": "C-0193",
+        "creationTime": "",
+        "description": "Do not generally permit containers to be run with the `securityContext.privileged` flag set to `true`.",
+        "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of privileged containers.\n\n To enable PSA for a namespace in your cluster, set the pod-security.kubernetes.io/enforce label with the policy value you want to enforce.\n\n `kubectl label --overwrite ns NAMESPACE pod-security.kubernetes.io/enforce=restricted`\n\n The above command enforces the restricted policy for the NAMESPACE namespace.\n\n You can also enable Pod Security Admission for all your namespaces. 
For example:\n\n \n```\nkubectl label --overwrite ns --all pod-security.kubernetes.io/warn=baseline\n\n```", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CIS-4.2.2 Minimize the admission of containers wishing to share the host process ID namespace", + "controlID": "C-0194", + "creationTime": "", + "description": "Do not generally permit containers to be run with the `hostPID` flag set to true.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostPID` containers.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CIS-4.2.3 Minimize the admission of containers wishing to share the host IPC namespace", + "controlID": "C-0195", + "creationTime": "", + "description": "Do not generally permit containers to be run with the `hostIPC` flag set to true.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostIPC` containers.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CIS-4.2.4 Minimize the admission of containers wishing to share the host network namespace", + "controlID": "C-0196", + "creationTime": "", + "description": "Do not generally permit containers to be run with the `hostNetwork` flag set to true.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostNetwork` containers.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CIS-4.2.5 Minimize the admission of containers with allowPrivilegeEscalation", + "controlID": "C-0197", + "creationTime": "", + "description": "Do not generally permit containers to be run with the `allowPrivilegeEscalation` flag set to `true`. Allowing this right can lead to a process running a container getting more rights than it started with.\n\n It's important to note that these rights are still constrained by the overall container sandbox, and this setting does not relate to the use of privileged containers.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers with `.spec.allowPrivilegeEscalation` set to `true`.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CIS-4.3.1 Ensure CNI plugin supports network policies.", + "controlID": "C-0205", + "creationTime": "", + "description": "There are a variety of CNI plugins available for Kubernetes. If the CNI in use does not support Network Policies it may not be possible to effectively restrict traffic in the cluster.", + "remediation": "As with RBAC policies, network policies should adhere to the policy of least privileged access. 
Start by creating a deny all policy that restricts all inbound and outbound traffic from a namespace or create a global policy using Calico.", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Network", + "id": "Cat-4" + } + }, + { + "guid": "", + "name": "CIS-4.3.2 Ensure that all Namespaces have Network Policies defined", + "controlID": "C-0206", + "creationTime": "", + "description": "Use network policies to isolate traffic in your cluster network.", + "remediation": "Follow the documentation and create `NetworkPolicy` objects as you need them.", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Network", + "id": "Cat-4" + } + }, + { + "guid": "", + "name": "CIS-4.4.1 Prefer using secrets as files over secrets as environment variables", + "controlID": "C-0207", + "creationTime": "", + "description": "Kubernetes supports mounting secrets as data volumes or as environment variables. Minimize the use of environment variable secrets.", + "remediation": "If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables.", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Secrets", + "id": "Cat-3" + } + } + }, + { + "guid": "", + "name": "CIS-4.5.1 Create administrative boundaries between resources using namespaces", + "controlID": "C-0209", + "creationTime": "", + "description": "Use namespaces to isolate your Kubernetes objects.", + "remediation": "Follow the documentation and create namespaces for objects in your deployment as you need them.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CIS-4.5.2 The default namespace should not be used", + "controlID": "C-0212", + "creationTime": "", + "description": "Kubernetes provides a default namespace, where objects are placed if no namespace is specified for them. Placing objects in this namespace makes application of RBAC and other controls more difficult.", + "remediation": "Ensure that namespaces are created to allow for appropriate segregation of Kubernetes resources and that all new resources are created in a specific namespace.", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CIS-5.1.1 Ensure Image Vulnerability Scanning using Amazon ECR image scanning or a third party provider", + "controlID": "C-0221", + "creationTime": "", + "description": "Scan images being deployed to Amazon EKS for vulnerabilities.", + "remediation": "To utilize AWS ECR for Image scanning please follow the steps below:\n\n To create a repository configured for scan on push (AWS CLI)\n\n \n```\naws ecr create-repository --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE\n\n```\n To edit the settings of an existing repository (AWS CLI)\n\n \n```\naws ecr put-image-scanning-configuration --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE\n\n```\n Use the following steps to start a manual image scan using the AWS Management Console.\n\n 1. Open the Amazon ECR console at .\n2. 
From the navigation bar, choose the Region to create your repository in.\n3. In the navigation pane, choose Repositories.\n4. On the Repositories page, choose the repository that contains the image to scan.\n5. On the Images page, select the image to scan and then choose Scan.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } + }, + { + "guid": "", + "name": "CIS-5.1.2 Minimize user access to Amazon ECR", + "controlID": "C-0222", + "creationTime": "", + "description": "Restrict user access to Amazon ECR, limiting interaction with build images to only authorized personnel and service accounts.", + "remediation": "Before you use IAM to manage access to Amazon ECR, you should understand what IAM features are available to use with Amazon ECR. To get a high-level view of how Amazon ECR and other AWS services work with IAM, see AWS Services That Work with IAM in the IAM User Guide.\n\n **Topics**\n\n * Amazon ECR Identity-Based Policies\n* Amazon ECR Resource-Based Policies\n* Authorization Based on Amazon ECR Tags\n* Amazon ECR IAM Roles\n\n **Amazon ECR Identity-Based Policies**\n\n With IAM identity-based policies, you can specify allowed or denied actions and resources as well as the conditions under which actions are allowed or denied. Amazon ECR supports specific actions, resources, and condition keys. To learn about all of the elements that you use in a JSON policy, see IAM JSON Policy Elements Reference in the IAM User Guide.\n\n **Actions**\nThe Action element of an IAM identity-based policy describes the specific action or actions that will be allowed or denied by the policy. Policy actions usually have the same name as the associated AWS API operation. The action is used in a policy to grant permissions to perform the associated operation.\n\n Policy actions in Amazon ECR use the following prefix before the action: ecr:. For example, to grant someone permission to create an Amazon ECR repository with the Amazon ECR CreateRepository API operation, you include the ecr:CreateRepository action in their policy. Policy statements must include either an Action or NotAction element. Amazon ECR defines its own set of actions that describe tasks that you can perform with this service.\n\n To specify multiple actions in a single statement, separate them with commas as follows:\n\n `\"Action\": [ \"ecr:action1\", \"ecr:action2\"`\n\n You can specify multiple actions using wildcards (\\*). For example, to specify all actions that begin with the word Describe, include the following action:\n\n `\"Action\": \"ecr:Describe*\"`\n\n To see a list of Amazon ECR actions, see Actions, Resources, and Condition Keys for Amazon Elastic Container Registry in the IAM User Guide.\n\n **Resources**\nThe Resource element specifies the object or objects to which the action applies. Statements must include either a `Resource` or a `NotResource` element. 
You specify a resource using an ARN or using the wildcard (\\*) to indicate that the statement applies to all resources.\n\n An Amazon ECR repository resource has the following ARN:\n\n `arn:${Partition}:ecr:${Region}:${Account}:repository/${Repository-name}`\n\n For more information about the format of ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces.\n\n For example, to specify the my-repo repository in the us-east-1 Region in your statement, use the following ARN:\n\n `\"Resource\": \"arn:aws:ecr:us-east-1:123456789012:repository/my-repo\"`\n\n To specify all repositories that belong to a specific account, use the wildcard (\\*):\n\n `\"Resource\": \"arn:aws:ecr:us-east-1:123456789012:repository/*\"`\n\n To specify multiple resources in a single statement, separate the ARNs with commas.\n\n `\"Resource\": [ \"resource1\", \"resource2\"`\n\n To see a list of Amazon ECR resource types and their ARNs, see Resources Defined by Amazon Elastic Container Registry in the IAM User Guide. To learn with which actions you can specify the ARN of each resource, see Actions Defined by Amazon Elastic Container Registry.\n\n **Condition Keys**\nThe Condition element (or Condition block) lets you specify conditions in which a statement is in effect. The Condition element is optional. You can build conditional expressions that use condition operators, such as equals or less than, to match the condition in the policy with values in the request.\n\n If you specify multiple Condition elements in a statement, or multiple keys in a single Condition element, AWS evaluates them using a logical AND operation. If you specify multiple values for a single condition key, AWS evaluates the condition using a logical OR operation. All of the conditions must be met before the statement's permissions are granted.\n\n You can also use placeholder variables when you specify conditions. For example, you can grant an IAM user permission to access a resource only if it is tagged with their IAM user name. For more information, see IAM Policy Elements: Variables and Tags in the IAM User Guide.\n\n Amazon ECR defines its own set of condition keys and also supports using some global condition keys. To see all AWS global condition keys, see AWS Global Condition Context Keys in the IAM User Guide.\n\n Most Amazon ECR actions support the aws:ResourceTag and ecr:ResourceTag condition keys. For more information, see Using Tag-Based Access Control.\n\n To see a list of Amazon ECR condition keys, see Condition Keys Defined by Amazon Elastic Container Registry in the IAM User Guide. 
To learn with which actions and resources you can use a condition key, see Actions Defined by Amazon Elastic Container Registry.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "EKS" + ] + } + }, + { + "guid": "", + "name": "CIS-5.1.3 Minimize cluster access to read-only for Amazon ECR", + "controlID": "C-0223", + "creationTime": "", + "description": "Configure the Cluster Service Account with Storage Object Viewer Role to only allow read-only access to Amazon ECR.", + "remediation": "You can use your Amazon ECR images with Amazon EKS, but you need to satisfy the following prerequisites.\n\n The Amazon EKS worker node IAM role (NodeInstanceRole) that you use with your worker nodes must possess the following IAM policy permissions for Amazon ECR.\n\n \n```\n{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"ecr:BatchCheckLayerAvailability\",\n \"ecr:BatchGetImage\",\n \"ecr:GetDownloadUrlForLayer\",\n \"ecr:GetAuthorizationToken\"\n ],\n \"Resource\": \"*\"\n }\n ]\n}\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cloud" + ] + } + }, + { + "guid": "", + "name": "CIS-5.2.1 Prefer using dedicated EKS Service Accounts", + "controlID": "C-0225", + "creationTime": "", + "description": "Kubernetes workloads should not use cluster node service accounts to authenticate to Amazon EKS APIs. Each Kubernetes workload that needs to authenticate to other AWS services using AWS IAM should be provisioned with a dedicated Service account.", + "remediation": "With IAM roles for service accounts on Amazon EKS clusters, you can associate an IAM role with a Kubernetes service account. This service account can then provide AWS permissions to the containers in any pod that uses that service account. With this feature, you no longer need to provide extended permissions to the worker node IAM role so that pods on that node can call AWS APIs.\n\n Applications must sign their AWS API requests with AWS credentials. This feature provides a strategy for managing credentials for your applications, similar to the way that Amazon EC2 instance profiles provide credentials to Amazon EC2 instances. Instead of creating and distributing your AWS credentials to the containers or using the Amazon EC2 instance’s role, you can associate an IAM role with a Kubernetes service account. The applications in the pod’s containers can then use an AWS SDK or the AWS CLI to make API requests to authorized AWS services.\n\n The IAM roles for service accounts feature provides the following benefits:\n\n * Least privilege — By using the IAM roles for service accounts feature, you no longer need to provide extended permissions to the worker node IAM role so that pods on that node can call AWS APIs. You can scope IAM permissions to a service account, and only pods that use that service account have access to those permissions. This feature also eliminates the need for third-party solutions such as kiam or kube2iam.\n* Credential isolation — A container can only retrieve credentials for the IAM role that is associated with the service account to which it belongs. 
A container never has access to credentials that are intended for another container that belongs to another pod.\n* Auditability — Access and event logging is available through CloudTrail to help ensure retrospective auditing.\n\n To get started, see Enabling IAM roles for service accounts on your cluster.\n\n For an end-to-end walkthrough using eksctl, see Walkthrough: Updating a DaemonSet to use IAM for service accounts.",
+        "rules": [],
+        "baseScore": 7,
+        "scanningScope": {
+            "matches": [
+                "EKS"
+            ]
+        }
+    },
+    {
+        "guid": "",
+        "name": "CIS-5.4.1 Restrict Access to the Control Plane Endpoint",
+        "controlID": "C-0227",
+        "creationTime": "",
+        "description": "Enable Endpoint Private Access to restrict access to the cluster's control plane to only an allowlist of authorized IPs.",
+        "remediation": "By enabling private endpoint access to the Kubernetes API server, all communication between your nodes and the API server stays within your VPC. You can also limit the IP addresses that can access your API server from the internet, or completely disable internet access to the API server.\n\n With this in mind, you can update your cluster accordingly using the AWS CLI to ensure that Private Endpoint Access is enabled.\n\n If you choose to also enable Public Endpoint Access then you should also configure a list of allowable CIDR blocks, resulting in restricted access from the internet. If you specify no CIDR blocks, then the public API server endpoint is able to receive and process requests from all IP addresses by defaulting to ['0.0.0.0/0'].\n\n For example, the following command would enable private access to the Kubernetes API as well as limited public access over the internet from a single IP address (noting the /32 CIDR suffix):\n\n `aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true, endpointPublicAccess=true, publicAccessCidrs=\"203.0.113.5/32\"`\n\n Note:\n\n The CIDR blocks specified cannot include reserved addresses.\nThere is a maximum number of CIDR blocks that you can specify. 
For more information, see the EKS Service Quotas link in the references section.\nFor more detailed information, see the EKS Cluster Endpoint documentation link in the references section.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cloud" + ] + } + }, + { + "guid": "", + "name": "CIS-5.4.2 Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled", + "controlID": "C-0228", + "creationTime": "", + "description": "Disable access to the Kubernetes API from outside the node network if it is not required.", + "remediation": "By enabling private endpoint access to the Kubernetes API server, all communication between your nodes and the API server stays within your VPC.\n\n With this in mind, you can update your cluster accordingly using the AWS CLI to ensure that Private Endpoint Access is enabled.\n\n For example, the following command would enable private access to the Kubernetes API and ensure that no public access is permitted:\n\n `aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true,endpointPublicAccess=false`\n\n Note: For more detailed information, see the EKS Cluster Endpoint documentation link in the references section.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "EKS" + ] + } + }, + { + "guid": "", + "name": "CIS-5.4.3 Ensure clusters are created with Private Nodes", + "controlID": "C-0229", + "creationTime": "", + "description": "Disable public IP addresses for cluster nodes, so that they only have private IP addresses. Private Nodes are nodes with no public IP addresses.", + "remediation": "To disable public IP addresses for EKS nodegroup nodes using the AWS CLI, you must ensure the following when running create-nodegroup:\n\n * Use private subnets (that don't auto-assign public IPs).\n* Set associatePublicIpAddress to false.\n\n \n```\n\"NetworkInterfaces\": [{\n \"AssociatePublicIpAddress\": false\n}]\n\n```\n You can restrict access to the control plane endpoint using:\n\n \n```\naws eks update-cluster-config \\\n --name \\\n --region \\\n --resources-vpc-config endpointPublicAccess=false, endpointPrivateAccess=true\n\n```\n This makes the API server private, but does not affect node IPs.\n\n To ensure nodes use only private IPs:\n\n * Use aws eks create-nodegroup with only private subnets, or\n* Use a launch template with AssociatePublicIpAddress=false.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "EKS" + ] + } + }, + { + "guid": "", + "name": "CIS-5.4.4 Ensure Network Policy is Enabled and set as appropriate", + "controlID": "C-0230", + "creationTime": "", + "description": "Amazon EKS provides two ways to implement network policy. You choose a network policy option when you create an EKS cluster. The policy option can't be changed after the cluster is created:\nCalico Network Policies, an open-source network and network security solution founded by Tigera.\nBoth implementations use Linux iptables to enforce the specified policies. Policies are translated into sets of allowed and disallowed IP pairs. 
These pairs are then programmed as IPTable filter rules.",
+        "remediation": "Utilize Calico or other network policy engine to segment and isolate your traffic.",
+        "rules": [],
+        "baseScore": 6,
+        "scanningScope": {
+            "matches": [
+                "EKS"
+            ]
+        }
+    },
+    {
+        "guid": "",
+        "name": "CIS-5.4.5 Encrypt traffic to HTTPS load balancers with TLS certificates",
+        "controlID": "C-0231",
+        "creationTime": "",
+        "description": "Encrypt traffic to HTTPS load balancers using TLS certificates.",
+        "remediation": "Your load balancer vendor can provide details on configuring HTTPS with TLS.",
+        "rules": [],
+        "baseScore": 5,
+        "scanningScope": {
+            "matches": [
+                "EKS"
+            ]
+        }
+    },
+    {
+        "guid": "",
+        "name": "CIS-5.5.1 Manage Kubernetes RBAC users with AWS IAM Authenticator for Kubernetes or Upgrade to AWS CLI v1.16.156 or greater",
+        "controlID": "C-0232",
+        "creationTime": "",
+        "description": "Amazon EKS uses IAM to provide authentication to your Kubernetes cluster through the AWS IAM Authenticator for Kubernetes. You can configure the stock kubectl client to work with Amazon EKS by installing the AWS IAM Authenticator for Kubernetes and modifying your kubectl configuration file to use it for authentication.",
+        "remediation": "Refer to the '[Managing users or IAM roles for your cluster](https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html)' in Amazon EKS documentation.\n\n Note: If using AWS CLI version 1.16.156 or later there is no need to install the AWS IAM Authenticator anymore.\n\n The relevant AWS CLI commands, depending on the use case, are:\n\n \n```\naws eks update-kubeconfig\naws eks get-token\n\n```",
+        "rules": [],
+        "baseScore": 7,
+        "scanningScope": {
+            "matches": [
+                "EKS"
+            ]
+        }
+    },
+    {
+        "guid": "",
+        "name": "CIS-4.4.2 Consider external secret storage",
+        "controlID": "C-0234",
+        "creationTime": "",
+        "description": "Consider the use of an external secrets storage and management system, instead of using Kubernetes Secrets directly, if you have more complex secret management needs. Ensure the solution requires authentication to access secrets, has auditing of access to and use of secrets, and encrypts secrets. Some solutions also make it easier to rotate secrets.",
+        "remediation": "Refer to the secrets management options offered by your cloud provider or a third-party secrets management solution.",
+        "rules": [],
+        "baseScore": 6,
+        "scanningScope": {
+            "matches": [
+                "cluster"
+            ]
+        }
+    },
+    {
+        "guid": "",
+        "name": "CIS-3.1.3 Ensure that the kubelet configuration file has permissions set to 644 or more restrictive",
+        "controlID": "C-0235",
+        "creationTime": "",
+        "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file has permissions of 644 or more restrictive.",
+        "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchmod 644 /etc/kubernetes/kubelet/config.json\n\n```",
+        "rules": [],
+        "baseScore": 6,
+        "scanningScope": {
+            "matches": [
+                "EKS"
+            ]
+        }
+    },
+    {
+        "guid": "",
+        "name": "CIS-3.1.1 Ensure that the kubeconfig file permissions are set to 644 or more restrictive",
+        "controlID": "C-0238",
+        "creationTime": "",
+        "description": "If kubelet is running, and if it is configured by a kubeconfig file, ensure that the proxy kubeconfig file has permissions of 644 or more restrictive.",
+        "remediation": "Run the below command (based on the file location on your system) on each worker\nnode. 
For example,\n\n \n```\nchmod 644 \n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "EKS" + ] + } + }, + { + "guid": "", + "name": "CIS-4.1.9 Minimize access to create persistent volumes", + "controlID": "C-0278", + "creationTime": "", + "description": "The ability to create persistent volumes in a cluster can provide an opportunity for privilege escalation, via the creation of hostPath volumes. ", + "remediation": "Where possible, remove `create` access to `persistentvolume` objects in the cluster.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "CIS-4.1.10 Minimize access to the proxy sub-resource of nodes", + "controlID": "C-0279", + "creationTime": "", + "description": "Users with access to the Proxy sub-resource of Node objects automatically have permissions to use the Kubelet API, which may allow for privilege escalation or bypass cluster security controls such as audit logs.", + "remediation": "Where possible, remove access to the proxy sub-resource of node objects.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "CIS-4.1.11 Minimize access to webhook configuration objects", + "controlID": "C-0281", + "creationTime": "", + "description": "Users with rights to create/modify/delete validatingwebhookconfigurations or mutatingwebhookconfigurations can control webhooks that can read any object admitted to the cluster, and in the case of mutating webhooks, also mutate admitted objects. This could allow for privilege escalation or disruption of the operation of the cluster.", + "remediation": "Where possible, remove access to the validatingwebhookconfigurations or mutatingwebhookconfigurations objects", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "CIS-4.1.12 Minimize access to the service account token creation", + "controlID": "C-0282", + "creationTime": "", + "description": "Users with rights to create new service account tokens at a cluster level, can create long-lived privileged credentials in the cluster. 
This could allow for privilege escalation and persistent access to the cluster, even if the users account has been revoked.", + "remediation": "Where possible, remove access to the token sub-resource of serviceaccount objects.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + } + ], + "controlsIDs": [ + "C-0066", + "C-0067", + "C-0078", + "C-0167", + "C-0171", + "C-0172", + "C-0173", + "C-0174", + "C-0175", + "C-0176", + "C-0178", + "C-0180", + "C-0181", + "C-0183", + "C-0185", + "C-0186", + "C-0187", + "C-0188", + "C-0189", + "C-0190", + "C-0285", + "C-0191", + "C-0193", + "C-0194", + "C-0195", + "C-0196", + "C-0197", + "C-0205", + "C-0206", + "C-0207", + "C-0209", + "C-0212", + "C-0221", + "C-0222", + "C-0223", + "C-0225", + "C-0227", + "C-0228", + "C-0229", + "C-0230", + "C-0231", + "C-0232", + "C-0234", + "C-0235", + "C-0238", + "C-0278", + "C-0279", + "C-0281", + "C-0282" + ], + "subSections": { + "2": { + "guid": "", + "name": "Control Plane Configuration", + "id": "2", + "subSections": { + "1": { + "guid": "", + "name": "Logging", + "id": "2.1", + "controlsIDs": [ + "C-0067" + ] + } + } + }, + "3": { + "guid": "", + "name": "Worker Nodes", + "id": "3", + "subSections": { + "1": { + "guid": "", + "name": "Worker Node Configuration Files", + "id": "3.1", + "controlsIDs": [ + "C-0167", + "C-0171", + "C-0235", + "C-0238" + ] + }, + "2": { + "guid": "", + "name": "Kubelet", + "id": "3.2", + "controlsIDs": [ + "C-0172", + "C-0173", + "C-0174", + "C-0175", + "C-0176", + "C-0178", + "C-0180", + "C-0181", + "C-0183" + ] + } + } + }, + "4": { + "guid": "", + "name": "Policies", + "id": "4", + "subSections": { + "2": { + "guid": "", + "name": "Pod Security Standards", + "id": "4.2", + "controlsIDs": [ + "C-0193", + "C-0194", + "C-0195", + "C-0196", + "C-0197" + ] + }, + "3": { + "guid": "", + "name": "CNI Plugin", + "id": "4.3", + "controlsIDs": [ + "C-0205", + "C-0206" + ] + }, + "4": { + "guid": "", + "name": "Secrets Management", + "id": "4.4", + "controlsIDs": [ + "C-0207", + "C-0234" + ] + }, + "5": { + "guid": "", + "name": "General Policies", + "id": "4.5", + "controlsIDs": [ + "C-0209", + "C-0212" + ] + }, + "1": { + "guid": "", + "name": "RBAC and Service Accounts", + "id": "4.1", + "controlsIDs": [ + "C-0185", + "C-0186", + "C-0187", + "C-0188", + "C-0189", + "C-0190", + "C-0285", + "C-0191", + "C-0278", + "C-0279", + "C-0281", + "C-0282" + ] + } + } + }, + "5": { + "guid": "", + "name": "Managed services", + "id": "5", + "subSections": { + "1": { + "guid": "", + "name": "Image Registry and Image Scanning", + "id": "5.1", + "controlsIDs": [ + "C-0078", + "C-0221", + "C-0222", + "C-0223" + ] + }, + "2": { + "guid": "", + "name": "Identity and Access Management (IAM)", + "id": "5.2", + "controlsIDs": [ + "C-0225" + ] + }, + "3": { + "guid": "", + "name": "AWS EKS Key Management Service", + "id": "5.3", + "controlsIDs": [ + "C-0066" + ] + }, + "4": { + "guid": "", + "name": "Cluster Networking", + "id": "5.4", + "controlsIDs": [ + "C-0227", + "C-0228", + "C-0229", + "C-0230", + "C-0231" + ] + }, + "5": { + "guid": "", + "name": "Authentication and Authorization", + "id": "5.5", + "controlsIDs": [ + "C-0232" + ] + } + } + } + }, + "scanningScope": { + "matches": [ + "EKS" + ] + } + }, + { + "guid": "", + "name": "cis-aks-t1.2.0", + "attributes": { + "version": "v1.2.0", + "builtin": true + }, + "creationTime": "", + "description": "Testing CIS for Azure Kubernetes Service 
(AKS) as suggested by CIS benchmark: https://workbench.cisecurity.org/benchmarks/9058", + "typeTags": [ + "compliance" + ], + "controls": [ + { + "guid": "", + "name": "CIS-5.1.4 Minimize Container Registries to only those approved", + "attributes": { + "actionRequired": "configuration", + "microsoftMitreColumns": [ + "Collection" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0078", + "creationTime": "", + "description": "Use approved container registries.", + "remediation": "If you are using Azure Container Registry you have this option:\n\n For other non-AKS repos using admission controllers or Azure Policy will also work.\n\n Limiting or locking down egress traffic is also recommended:\n", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Supply chain", + "id": "Cat-6" + } + } + }, + { + "guid": "", + "name": "CIS-5.5.1 Manage Kubernetes RBAC users with Azure AD", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0088", + "creationTime": "", + "description": "Azure Kubernetes Service (AKS) can be configured to use Azure Active Directory (AD) for user authentication. In this configuration, you sign in to an AKS cluster using an Azure AD authentication token. You can also configure Kubernetes role-based access control (Kubernetes RBAC) to limit access to cluster resources based a user's identity or group membership.", + "remediation": "Enable RBAC either in the API server configuration or with the Kubernetes provider API", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-3.1.2 Ensure that the kubelet kubeconfig file ownership is set to root:root", + "controlID": "C-0167", + "creationTime": "", + "description": "If `kubelet` is running, ensure that the file ownership of its kubeconfig file is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on each worker node. 
For example,\n\n \n```\nchown root:root \n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-3.1.4 Ensure that the kubelet configuration file ownership is set to root:root", + "controlID": "C-0171", + "creationTime": "", + "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file is owned by root:root.", + "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchown root:root /etc/kubernetes/kubelet/kubelet-config.json\n\n```", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-3.2.1 Ensure that the --anonymous-auth argument is set to false", + "controlID": "C-0172", + "creationTime": "", + "description": "Disable anonymous requests to the Kubelet server.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to false\n\n \n```\n\"anonymous\": \"enabled\": false\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--anonymous-auth=false\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"authentication.*anonymous\":{\"enabled\":false}\"` by extracting the live configuration from the nodes running kubelet.\\*\\*See detailed step-by-step configmap procedures in[Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-3.2.2 Ensure that the --authorization-mode argument is not set to AlwaysAllow", + "controlID": "C-0173", + "creationTime": "", + "description": "Do not allow all requests. Enable explicit authorization.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to false\n\n \n```\n\"authentication\"... 
\"webhook\":{\"enabled\":true\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--authorization-mode=Webhook\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"authentication.*webhook\":{\"enabled\":true\"` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-3.2.3 Ensure that the --client-ca-file argument is set as appropriate", + "controlID": "C-0174", + "creationTime": "", + "description": "Enable Kubelet authentication using certificates.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to false\n\n \n```\n\"authentication\": { \"x509\": {\"clientCAFile:\" to the location of the client CA file.\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--client-ca-file=\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"authentication.*x509\":(\"clientCAFile\":\"/etc/kubernetes/pki/ca.crt\"` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-3.2.4 
Ensure that the --read-only-port is secured", + "controlID": "C-0175", + "creationTime": "", + "description": "Disable the read-only port.", + "remediation": "If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to false\n\n \n```\nreadOnlyPort to 0\n\n```\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--read-only-port=0\n\n```\n For all remediations:\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-3.2.5 Ensure that the --streaming-connection-idle-timeout argument is not set to 0", + "controlID": "C-0176", + "creationTime": "", + "description": "Do not disable timeouts on streaming connections.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to a non-zero value in the format of #h#m#s\n\n \n```\n\"streamingConnectionIdleTimeout\": \"4h0m0s\"\n\n```\n You should ensure that the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not specify a `--streaming-connection-idle-timeout` argument because it would override the Kubelet config file.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--streaming-connection-idle-timeout=4h0m0s\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"streamingConnectionIdleTimeout\":` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "rules": [], + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-3.2.6 Ensure that the --protect-kernel-defaults argument is set to true", + "controlID": "C-0177", + "creationTime": "", + "description": "Protect tuned kernel parameters from overriding kubelet default kernel parameter values.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, 
edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"protectKernelDefaults\": true\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--protect-kernel-defaults=true\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"protectKernelDefaults\":` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "rules": [], + "baseScore": 2, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-3.2.7 Ensure that the --make-iptables-util-chains argument is set to true", + "controlID": "C-0178", + "creationTime": "", + "description": "Allow Kubelet to manage iptables.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"makeIPTablesUtilChains\": true\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--make-iptables-util-chains=true\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"makeIPTablesUtilChains\": true` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "rules": [], + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": 
"CIS-3.2.8 Ensure that the --hostname-override argument is not set", + "controlID": "C-0179", + "creationTime": "", + "description": "Do not override node hostnames.", + "remediation": "**Remediation Method 1:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and remove the below parameter from the `KUBELET_ARGS` variable string.\n\n \n```\n--hostname-override\n\n```\n Based on your system, restart the `kubelet` service and check status. The example below is for systemctl:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "rules": [], + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-3.2.9 Ensure that the --eventRecordQPS argument is set to 0 or a level which ensures appropriate event capture", + "controlID": "C-0180", + "creationTime": "", + "description": "Security relevant information should be captured. The `--eventRecordQPS` flag on the Kubelet can be used to limit the rate at which events are gathered. Setting this too low could result in relevant events not being logged, however the unlimited setting of `0` could result in a denial of service on the kubelet.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to 5 or a value greater or equal to 0\n\n \n```\n\"eventRecordQPS\": 5\n\n```\n Check that `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not define an executable argument for `eventRecordQPS` because this would override your Kubelet config.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--eventRecordQPS=5\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"eventRecordQPS\"` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "rules": [], + "baseScore": 2, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-3.2.10 Ensure that the --rotate-certificates argument is not set to false", + "controlID": "C-0182", + "creationTime": "", + "description": "Enable kubelet client certificate rotation.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config 
file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"RotateCertificate\":true\n\n```\n Additionally, ensure that the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf does not set the --RotateCertificate executable argument to false because this would override the Kubelet config file.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--RotateCertificate=true\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-3.2.11 Ensure that the RotateKubeletServerCertificate argument is set to true", + "controlID": "C-0183", + "creationTime": "", + "description": "Enable kubelet server certificate rotation.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"RotateKubeletServerCertificate\":true\n\n```\n **Remediation Method 2:**\n\n If using a Kubelet config file, edit the file to set `RotateKubeletServerCertificate to true`.\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--rotate-kubelet-server-certificate=true\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"RotateKubeletServerCertificate\":` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-4.1.1 Ensure that the cluster-admin role is only used where required", + "controlID": "C-0185", + "creationTime": "", + "description": "The RBAC role `cluster-admin` provides wide-ranging powers over the environment and should be used only where and when needed.", + "remediation": "Identify all clusterrolebindings to the cluster-admin role. 
Check if they are used and if they need this role or if they could use a role with fewer privileges.\n\n Where possible, first bind users to a lower privileged role and then remove the clusterrolebinding to the cluster-admin role :\n\n \n```\nkubectl delete clusterrolebinding [name]\n\n```", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "CIS-4.1.2 Minimize access to secrets", + "controlID": "C-0186", + "creationTime": "", + "description": "The Kubernetes API stores secrets, which may be service account tokens for the Kubernetes API or credentials used by workloads in the cluster. Access to these secrets should be restricted to the smallest possible group of users to reduce the risk of privilege escalation.", + "remediation": "Where possible, remove `get`, `list` and `watch` access to `secret` objects in the cluster.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "CIS-4.1.3 Minimize wildcard use in Roles and ClusterRoles", + "controlID": "C-0187", + "creationTime": "", + "description": "Kubernetes Roles and ClusterRoles provide access to resources based on sets of objects and actions that can be taken on those objects. It is possible to set either of these to be the wildcard \"\\*\" which matches all items.\n\n Use of wildcards is not optimal from a security perspective as it may allow for inadvertent access to be granted when new resources are added to the Kubernetes API either as CRDs or in later versions of the product.", + "remediation": "Where possible replace any use of wildcards in clusterroles and roles with specific objects or actions.", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "CIS-4.1.4 Minimize access to create pods", + "controlID": "C-0188", + "creationTime": "", + "description": "The ability to create pods in a namespace can provide a number of opportunities for privilege escalation, such as assigning privileged service accounts to these pods or mounting hostPaths with access to sensitive data (unless Pod Security Policies are implemented to restrict this access)\n\n As such, access to create new pods should be restricted to the smallest possible group of users.", + "remediation": "Where possible, remove `create` access to `pod` objects in the cluster.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "CIS-4.1.5 Ensure that default service accounts are not actively used.", + "controlID": "C-0189", + "creationTime": "", + "description": "The `default` service account should not be used to ensure that rights granted to applications can be more easily audited and reviewed.", + "remediation": "Create explicit service accounts wherever a Kubernetes workload requires specific access to the Kubernetes API server.\n\n Modify the configuration of each default service account to include this value\n\n \n```\nautomountServiceAccountToken: false\n\n```\n Automatic remediation for the default account:\n\n `kubectl patch serviceaccount default -p $'automountServiceAccountToken: false'`", + "rules": [], + 
"baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CIS-4.1.6 Ensure that Service Account Tokens are only mounted where necessary", + "controlID": "C-0190", + "creationTime": "", + "description": "Service accounts tokens should not be mounted in pods except where the workload running in the pod explicitly needs to communicate with the API server", + "remediation": "Modify the definition of pods and service accounts which do not need to mount service account tokens to disable it.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CIS-4.2.8 Minimize the admission of containers with capabilities assigned", + "controlID": "C-0201", + "creationTime": "", + "description": "Do not generally permit containers with capabilities", + "remediation": "Review the use of capabilities in applications running on your cluster. Where a namespace contains applications which do not require any Linux capabilities to operate consider adding a PSP which forbids the admission of containers which do not drop all capabilities.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CIS-4.4.1 Ensure latest CNI version is used", + "controlID": "C-0205", + "creationTime": "", + "description": "There are a variety of CNI plugins available for Kubernetes. If the CNI in use does not support Network Policies it may not be possible to effectively restrict traffic in the cluster.", + "remediation": "As with RBAC policies, network policies should adhere to the policy of least privileged access. Start by creating a deny all policy that restricts all inbound and outbound traffic from a namespace or create a global policy using Calico.", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Network", + "id": "Cat-4" + } + }, + { + "guid": "", + "name": "CIS-4.4.2 Ensure that all Namespaces have Network Policies defined", + "controlID": "C-0206", + "creationTime": "", + "description": "Use network policies to isolate traffic in your cluster network.", + "remediation": "Follow the documentation and create `NetworkPolicy` objects as you need them.", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Network", + "id": "Cat-4" + } + }, + { + "guid": "", + "name": "CIS-4.5.1 Prefer using secrets as files over secrets as environment variables", + "controlID": "C-0207", + "creationTime": "", + "description": "Kubernetes supports mounting secrets as data volumes or as environment variables. 
Minimize the use of environment variable secrets.", + "remediation": "If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables.", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Secrets", + "id": "Cat-3" + } + } + }, + { + "guid": "", + "name": "CIS-4.5.2 Consider external secret storage", + "controlID": "C-0208", + "creationTime": "", + "description": "Consider the use of an external secrets storage and management system, instead of using Kubernetes Secrets directly, if you have more complex secret management needs. Ensure the solution requires authentication to access secrets, has auditing of access to and use of secrets, and encrypts secrets. Some solutions also make it easier to rotate secrets.", + "remediation": "Refer to the secrets management options offered by your cloud provider or a third-party secrets management solution.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-4.7.1 Create administrative boundaries between resources using namespaces", + "controlID": "C-0209", + "creationTime": "", + "description": "Use namespaces to isolate your Kubernetes objects.", + "remediation": "Follow the documentation and create namespaces for objects in your deployment as you need them.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CIS-4.7.2 Apply Security Context to Your Pods and Containers", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" + ] + } + ] + }, + "controlID": "C-0211", + "creationTime": "", + "description": "Apply Security Context to Your Pods and Containers", + "remediation": "As a best practice we recommend that you scope the binding for privileged pods to service accounts within a particular namespace, e.g. kube-system, and limiting access to that namespace. 
For all other serviceaccounts/namespaces, we recommend implementing a more restrictive policy such as this:\n\n \n```\napiVersion: policy/v1beta1\nkind: PodSecurityPolicy\nmetadata:\n name: restricted\n annotations:\n seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default'\n apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'\n seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default'\n apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'\nspec:\n privileged: false\n # Required to prevent escalations to root.\n allowPrivilegeEscalation: false\n # This is redundant with non-root + disallow privilege escalation,\n # but we can provide it for defense in depth.\n requiredDropCapabilities:\n - ALL\n # Allow core volume types.\n volumes:\n - 'configMap'\n - 'emptyDir'\n - 'projected'\n - 'secret'\n - 'downwardAPI'\n # Assume that persistentVolumes set up by the cluster admin are safe to use.\n - 'persistentVolumeClaim'\n hostNetwork: false\n hostIPC: false\n hostPID: false\n runAsUser:\n # Require the container to run without root privileges.\n rule: 'MustRunAsNonRoot'\n seLinux:\n # This policy assumes the nodes are using AppArmor rather than SELinux.\n rule: 'RunAsAny'\n supplementalGroups:\n rule: 'MustRunAs'\n ranges:\n # Forbid adding the root group.\n - min: 1\n max: 65535\n fsGroup:\n rule: 'MustRunAs'\n ranges:\n # Forbid adding the root group.\n - min: 1\n max: 65535\n readOnlyRootFilesystem: false\n\n```\n This policy prevents pods from running as privileged or escalating privileges. It also restricts the types of volumes that can be mounted and the root supplemental groups that can be added.\n\n Another, albeit similar, approach is to start with policy that locks everything down and incrementally add exceptions for applications that need looser restrictions such as logging agents which need the ability to mount a host path.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CIS-4.7.3 The default namespace should not be used", + "controlID": "C-0212", + "creationTime": "", + "description": "Kubernetes provides a default namespace, where objects are placed if no namespace is specified for them. 
Placing objects in this namespace makes application of RBAC and other controls more difficult.", + "remediation": "Ensure that namespaces are created to allow for appropriate segregation of Kubernetes resources and that all new resources are created in a specific namespace.", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CIS-4.2.1 Minimize the admission of privileged containers", + "controlID": "C-0213", + "creationTime": "", + "description": "Do not generally permit containers to be run with the `securityContext.privileged` flag set to `true`.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.privileged` field is omitted or set to `false`.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "EKS" + ] + } + }, + { + "guid": "", + "name": "CIS-4.2.2 Minimize the admission of containers wishing to share the host process ID namespace", + "controlID": "C-0214", + "creationTime": "", + "description": "Do not generally permit containers to be run with the `hostPID` flag set to true.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostPID` field is omitted or set to false.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } + }, + { + "guid": "", + "name": "CIS-4.2.3 Minimize the admission of containers wishing to share the host IPC namespace", + "controlID": "C-0215", + "creationTime": "", + "description": "Do not generally permit containers to be run with the `hostIPC` flag set to true.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostIPC` field is omitted or set to false.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } + }, + { + "guid": "", + "name": "CIS-4.2.4 Minimize the admission of containers wishing to share the host network namespace", + "controlID": "C-0216", + "creationTime": "", + "description": "Do not generally permit containers to be run with the `hostNetwork` flag set to true.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostNetwork` field is omitted or set to false.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } + }, + { + "guid": "", + "name": "CIS-4.2.5 Minimize the admission of containers with allowPrivilegeEscalation", + "controlID": "C-0217", + "creationTime": "", + "description": "Do not generally permit containers to be run with the `allowPrivilegeEscalation` flag set to true.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.allowPrivilegeEscalation` field is omitted or set to false.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } + }, + { + "guid": "", + "name": "CIS-4.2.6 Minimize the admission of root containers", + "controlID": "C-0218", + "creationTime": "", + "description": "Do not generally permit containers to be run as the root user.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.runAsUser.rule` is set to either `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + 
"file" + ] + } + }, + { + "guid": "", + "name": "CIS-4.2.7 Minimize the admission of containers with added capabilities", + "controlID": "C-0219", + "creationTime": "", + "description": "Do not generally permit containers with capabilities assigned beyond the default set.", + "remediation": "Ensure that `allowedCapabilities` is not present in PSPs for the cluster unless it is set to an empty array.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } + }, + { + "guid": "", + "name": "CIS-3.1.3 Ensure that the kubelet configuration file has permissions set to 644 or more restrictive", + "controlID": "C-0235", + "creationTime": "", + "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file has permissions of 644 or more restrictive.", + "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchmod 644 /etc/kubernetes/kubelet/kubelet-config.json\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "EKS" + ] + } + }, + { + "guid": "", + "name": "CIS-3.1.1 Ensure that the kubeconfig file permissions are set to 644 or more restrictive", + "controlID": "C-0238", + "creationTime": "", + "description": "If `kubelet` is running, and if it is configured by a kubeconfig file, ensure that the proxy kubeconfig file has permissions of 644 or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the each worker\nnode. For example,\n\n \n```\nchmod 644 \n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "EKS" + ] + } + }, + { + "guid": "", + "name": "CIS-5.2.1 Prefer using dedicated AKS Service Accounts", + "controlID": "C-0239", + "creationTime": "", + "description": "Kubernetes workloads should not use cluster node service accounts to authenticate to Azure AKS APIs. Each Kubernetes workload that needs to authenticate to other Azure Web Services using IAM should be provisioned with a dedicated Service account.", + "remediation": "Azure Active Directory integration\nThe security of AKS clusters can be enhanced with the integration of Azure Active Directory (AD). Built on decades of enterprise identity management, Azure AD is a multi-tenant, cloud-based directory, and identity management service that combines core directory services, application access management, and identity protection. With Azure AD, you can integrate on-premises identities into AKS clusters to provide a single source for account management and security.\n\n Azure Active Directory integration with AKS clusters\n\n With Azure AD-integrated AKS clusters, you can grant users or groups access to Kubernetes resources within a namespace or across the cluster. To obtain a kubectl configuration context, a user can run the az aks get-credentials command. When a user then interacts with the AKS cluster with kubectl, they're prompted to sign in with their Azure AD credentials. This approach provides a single source for user account management and password credentials. The user can only access the resources as defined by the cluster administrator.\n\n Azure AD authentication is provided to AKS clusters with OpenID Connect. OpenID Connect is an identity layer built on top of the OAuth 2.0 protocol. For more information on OpenID Connect, see the Open ID connect documentation. From inside of the Kubernetes cluster, Webhook Token Authentication is used to verify authentication tokens. 
Webhook token authentication is configured and managed as part of the AKS cluster.", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "AKS" + ] + } + }, + { + "guid": "", + "name": "CIS-5.4.4 Ensure Network Policy is Enabled and set as appropriate", + "controlID": "C-0240", + "creationTime": "", + "description": "When you run modern, microservices-based applications in Kubernetes, you often want to control which components can communicate with each other. The principle of least privilege should be applied to how traffic can flow between pods in an Azure Kubernetes Service (AKS) cluster. Let's say you likely want to block traffic directly to back-end applications. The Network Policy feature in Kubernetes lets you define rules for ingress and egress traffic between pods in a cluster.", + "remediation": "", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "AKS" + ] + } + }, + { + "guid": "", + "name": "CIS-5.2.2 Use Azure RBAC for Kubernetes Authorization", + "controlID": "C-0241", + "creationTime": "", + "description": "The ability to manage RBAC for Kubernetes resources from Azure gives you the choice to manage RBAC for the cluster resources either using Azure or native Kubernetes mechanisms.", + "remediation": "Set Azure RBAC as access system.", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "AKS" + ] + } + }, + { + "guid": "", + "name": "CIS-5.6.2 Hostile multi-tenant workloads", + "controlID": "C-0242", + "creationTime": "", + "description": "Currently, Kubernetes environments aren't safe for hostile multi-tenant usage. Extra security features, like Pod Security Policies or Kubernetes RBAC for nodes, efficiently block exploits. For true security when running hostile multi-tenant workloads, only trust a hypervisor. The security domain for Kubernetes becomes the entire cluster, not an individual node.\n\n For these types of hostile multi-tenant workloads, you should use physically isolated clusters. For more information on ways to isolate workloads, see Best practices for cluster isolation in AKS.", + "remediation": "", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "AKS" + ] + } + }, + { + "guid": "", + "name": "CIS-5.1.1 Ensure Image Vulnerability Scanning using Azure Defender image scanning or a third party provider", + "controlID": "C-0243", + "creationTime": "", + "description": "Scan images being deployed to Azure (AKS) for vulnerabilities.\n\n Vulnerability scanning for images stored in Azure Container Registry is generally available in Azure Security Center. This capability is powered by Qualys, a leading provider of information security.\n\n When you push an image to Container Registry, Security Center automatically scans it, then checks for known vulnerabilities in packages or dependencies defined in the file.\n\n When the scan completes (after about 10 minutes), Security Center provides details and a security classification for each vulnerability detected, along with guidance on how to remediate issues and protect vulnerable attack surfaces.", + "remediation": "", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "AKS" + ] + } + }, + { + "guid": "", + "name": "CIS-5.3.1 Ensure Kubernetes Secrets are encrypted", + "controlID": "C-0244", + "creationTime": "", + "description": "Encryption at Rest is a common security requirement. In Azure, organizations can encrypt data at rest without the risk or cost of a custom key management solution. 
Organizations have the option of letting Azure completely manage Encryption at Rest. Additionally, organizations have various options to closely manage encryption or encryption keys.", + "remediation": "", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "AKS" + ] + } + }, + { + "guid": "", + "name": "CIS-5.4.5 Encrypt traffic to HTTPS load balancers with TLS certificates", + "controlID": "C-0245", + "creationTime": "", + "description": "Encrypt traffic to HTTPS load balancers using TLS certificates.", + "remediation": "", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "AKS" + ] + } + }, + { + "guid": "", + "name": "CIS-5.4.1 Restrict Access to the Control Plane Endpoint", + "controlID": "C-0247", + "creationTime": "", + "description": "Enable Endpoint Private Access to restrict access to the cluster's control plane to only an allowlist of authorized IPs.", + "remediation": "", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "AKS" + ] + } + }, + { + "guid": "", + "name": "CIS-5.4.3 Ensure clusters are created with Private Nodes", + "controlID": "C-0248", + "creationTime": "", + "description": "Disable public IP addresses for cluster nodes, so that they only have private IP addresses. Private Nodes are nodes with no public IP addresses.", + "remediation": "\n```\naz aks create \\\n--resource-group \\\n--name \\\n--load-balancer-sku standard \\\n--enable-private-cluster \\\n--network-plugin azure \\\n--vnet-subnet-id \\\n--docker-bridge-address \\\n--dns-service-ip \\\n--service-cidr \n\n```\n Where `--enable-private-cluster` is a mandatory flag for a private cluster.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "AKS" + ] + } + }, + { + "guid": "", + "name": "CIS-5.6.1 Restrict untrusted workloads", + "attributes": { + "actionRequired": "manual review" + }, + "controlID": "C-0249", + "creationTime": "", + "description": "Restricting untrusted workloads can be achieved by using ACI along with AKS.\n\n What is ACI?\nACI lets you quickly deploy container instances without additional infrastructure overhead. When you connect with AKS, ACI becomes a secured, logical extension of your AKS cluster. The virtual nodes component, which is based on Virtual Kubelet, is installed in your AKS cluster that presents ACI as a virtual Kubernetes node. Kubernetes can then schedule pods that run as ACI instances through virtual nodes, not as pods on VM nodes directly in your AKS cluster.\n\n Your application requires no modification to use virtual nodes. Deployments can scale across AKS and ACI and with no delay as cluster autoscaler deploys new nodes in your AKS cluster.\n\n Virtual nodes are deployed to an additional subnet in the same virtual network as your AKS cluster. This virtual network configuration allows the traffic between ACI and AKS to be secured. 
Like an AKS cluster, an ACI instance is a secure, logical compute resource that is isolated from other users.", + "remediation": "", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "AKS" + ] + } + }, + { + "guid": "", + "name": "CIS-5.1.2 Minimize cluster access to read-only for Azure Container Registry (ACR)", + "controlID": "C-0250", + "creationTime": "", + "description": "Configure the Cluster Service Account with Storage Object Viewer Role to only allow read-only access to Azure Container Registry (ACR)", + "remediation": "", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "AKS" + ] + } + }, + { + "guid": "", + "name": "CIS-5.1.3 Minimize user access to Azure Container Registry (ACR)", + "controlID": "C-0251", + "creationTime": "", + "description": "Restrict user access to Azure Container Registry (ACR), limiting interaction with build images to only authorized personnel and service accounts.", + "remediation": "Azure Container Registry\nIf you use Azure Container Registry (ACR) as your container image store, you need to grant permissions to the service principal for your AKS cluster to read and pull images. Currently, the recommended configuration is to use the az aks create or az aks update command to integrate with a registry and assign the appropriate role for the service principal. For detailed steps, see Authenticate with Azure Container Registry from Azure Kubernetes Service.\n\n To avoid needing an Owner or Azure account administrator role, you can configure a service principal manually or use an existing service principal to authenticate ACR from AKS. For more information, see ACR authentication with service principals or Authenticate from Kubernetes with a pull secret.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + } + }, + { + "guid": "", + "name": "CIS-5.4.2 Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled", + "controlID": "C-0252", + "creationTime": "", + "description": "Disable access to the Kubernetes API from outside the node network if it is not required.", + "remediation": "To use a private endpoint, create a new private endpoint in your virtual network then create a link between your virtual network and a new private DNS zone", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "AKS" + ] + } + }, + { + "guid": "", + "name": "CIS-2.1.1 Enable audit Logs", + "attributes": { + "actionRequired": "manual review" + }, + "controlID": "C-0254", + "creationTime": "", + "description": "With Azure Kubernetes Service (AKS), the control plane components such as the kube-apiserver and kube-controller-manager are provided as a managed service. You create and manage the nodes that run the kubelet and container runtime, and deploy your applications through the managed Kubernetes API server. To help troubleshoot your application and services, you may need to view the logs generated by these control plane components.\n\n To help collect and review data from multiple sources, Azure Monitor logs provides a query language and analytics engine that provides insights to your environment. A workspace is used to collate and analyze the data, and can integrate with other Azure services such as Application Insights and Security Center.", + "remediation": "Azure audit logs are enabled and managed in the Azure portal. 
To enable log collection for the Kubernetes master components in your AKS cluster, open the Azure portal in a web browser and complete the following steps:\n\n 1. Select the resource group for your AKS cluster, such as myResourceGroup. Don't select the resource group that contains your individual AKS cluster resources, such as MC\\_myResourceGroup\\_myAKSCluster\\_eastus.\n2. On the left-hand side, choose Diagnostic settings.\n3. Select your AKS cluster, such as myAKSCluster, then choose to Add diagnostic setting.\n4. Enter a name, such as myAKSClusterLogs, then select the option to Send to Log Analytics.\n5. Select an existing workspace or create a new one. If you create a workspace, provide a workspace name, a resource group, and a location.\n6. In the list of available logs, select the logs you wish to enable. For this example, enable the kube-audit and kube-audit-admin logs. Common logs include the kube-apiserver, kube-controller-manager, and kube-scheduler. You can return and change the collected logs once Log Analytics workspaces are enabled.\n7. When ready, select Save to enable collection of the selected logs.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "AKS" + ] + } + } + ], + "controlsIDs": [ + "C-0078", + "C-0088", + "C-0167", + "C-0171", + "C-0172", + "C-0173", + "C-0174", + "C-0175", + "C-0176", + "C-0177", + "C-0178", + "C-0179", + "C-0180", + "C-0182", + "C-0183", + "C-0185", + "C-0186", + "C-0187", + "C-0188", + "C-0189", + "C-0190", + "C-0201", + "C-0205", + "C-0206", + "C-0207", + "C-0208", + "C-0209", + "C-0211", + "C-0212", + "C-0213", + "C-0214", + "C-0215", + "C-0216", + "C-0217", + "C-0218", + "C-0219", + "C-0235", + "C-0238", + "C-0239", + "C-0240", + "C-0241", + "C-0242", + "C-0243", + "C-0244", + "C-0245", + "C-0247", + "C-0248", + "C-0249", + "C-0250", + "C-0251", + "C-0252", + "C-0254" + ], + "subSections": { + "2": { + "guid": "", + "name": "Master (Control Plane) Configuration", + "id": "2", + "subSections": { + "1": { + "guid": "", + "name": "Logging", + "id": "2.1", + "controlsIDs": [ + "C-0254" + ] + } + } + }, + "3": { + "guid": "", + "name": "Worker Nodes", + "id": "3", + "subSections": { + "2": { + "guid": "", + "name": "Kubelet", + "id": "3.2", + "controlsIDs": [ + "C-0172", + "C-0173", + "C-0174", + "C-0175", + "C-0176", + "C-0177", + "C-0178", + "C-0179", + "C-0180", + "C-0182", + "C-0183" + ] + }, + "1": { + "guid": "", + "name": "Worker Node Configuration Files", + "id": "3.1", + "controlsIDs": [ + "C-0167", + "C-0171", + "C-0235", + "C-0238" + ] + } + } + }, + "4": { + "guid": "", + "name": "Policies", + "id": "4", + "subSections": { + "5": { + "guid": "", + "name": "Secrets Management", + "id": "4.5", + "controlsIDs": [ + "C-0207", + "C-0208" + ] + }, + "6": { + "guid": "", + "name": "Extensible Admission Control", + "id": "4.6" + }, + "7": { + "guid": "", + "name": "General Policies", + "id": "4.7", + "controlsIDs": [ + "C-0209", + "C-0211", + "C-0212" + ] + }, + "1": { + "guid": "", + "name": "RBAC and Service Accounts", + "id": "4.1", + "controlsIDs": [ + "C-0185", + "C-0186", + "C-0187", + "C-0188", + "C-0189", + "C-0190" + ] + }, + "2": { + "guid": "", + "name": "Pod Security Standards", + "id": "4.2", + "controlsIDs": [ + "C-0201", + "C-0213", + "C-0214", + "C-0215", + "C-0216", + "C-0217", + "C-0218", + "C-0219" + ] + }, + "3": { + "guid": "", + "name": "Azure Policy / OPA", + "id": "4.3" + }, + "4": { + "guid": "", + "name": "CNI Plugin", + "id": "4.4", + "controlsIDs": [ + "C-0205", + "C-0206" + ] + } + } + }, 
+ "5": { + "guid": "", + "name": "Managed services", + "id": "5", + "subSections": { + "6": { + "guid": "", + "name": "Other Cluster Configurations", + "id": "5.6", + "controlsIDs": [ + "C-0242", + "C-0249" + ] + }, + "1": { + "guid": "", + "name": "Image Registry and Image Scanning", + "id": "5.1", + "controlsIDs": [ + "C-0078", + "C-0243", + "C-0250", + "C-0251" + ] + }, + "2": { + "guid": "", + "name": "Access and identity options for Azure Kubernetes Service (AKS)", + "id": "5.2", + "controlsIDs": [ + "C-0239", + "C-0241" + ] + }, + "3": { + "guid": "", + "name": "Key Management Service (KMS)", + "id": "5.3", + "controlsIDs": [ + "C-0244" + ] + }, + "4": { + "guid": "", + "name": "Cluster Networking", + "id": "5.4", + "controlsIDs": [ + "C-0240", + "C-0245", + "C-0247", + "C-0248", + "C-0252" + ] + }, + "5": { + "guid": "", + "name": "Authentication and Authorization", + "id": "5.5", + "controlsIDs": [ + "C-0088" + ] + } + } + } + }, + "scanningScope": { + "matches": [ + "AKS" + ] + } + }, + { + "guid": "", + "name": "cis-aks-t1.8.0", + "attributes": { + "version": "v1.8.0", + "builtin": true + }, + "creationTime": "", + "description": "Testing CIS for Azure Kubernetes Service (AKS) as suggested by CIS benchmark: https://workbench.cisecurity.org/benchmarks/9058", + "typeTags": [ + "compliance" + ], + "controls": [ + { + "guid": "", + "name": "CIS-5.1.4 Minimize Container Registries to only those approved", + "attributes": { + "actionRequired": "configuration", + "microsoftMitreColumns": [ + "Collection" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0078", + "creationTime": "", + "description": "Use approved container registries.", + "remediation": "If you are using Azure Container Registry you have this option:\n\n For other non-AKS repos using admission controllers or Azure Policy will also work.\n\n Limiting or locking down egress traffic is also recommended:\n", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Supply chain", + "id": "Cat-6" + } + } + }, + { + "guid": "", + "name": "CIS-5.5.1 Manage Kubernetes RBAC users with Azure AD", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0088", + "creationTime": "", + "description": "Azure Kubernetes Service (AKS) can be configured to use Azure Active Directory (AD) for user authentication. In this configuration, you sign in to an AKS cluster using an Azure AD authentication token. You can also configure Kubernetes role-based access control (Kubernetes RBAC) to limit access to cluster resources based a user's identity or group membership.", + "remediation": "Enable RBAC either in the API server configuration or with the Kubernetes provider API", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-3.1.2 Ensure that the kubelet kubeconfig file ownership is set to root:root", + "controlID": "C-0167", + "creationTime": "", + "description": "If `kubelet` is running, ensure that the file ownership of its kubeconfig file is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on each worker node. 
For example,\n\n \n```\nchown root:root \n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-3.1.4 Ensure that the kubelet configuration file ownership is set to root:root", + "controlID": "C-0171", + "creationTime": "", + "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file is owned by root:root.", + "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchown root:root /etc/kubernetes/kubelet/kubelet-config.json\n\n```", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-3.2.1 Ensure that the --anonymous-auth argument is set to false", + "controlID": "C-0172", + "creationTime": "", + "description": "Disable anonymous requests to the Kubelet server.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to false\n\n \n```\n\"authentication\": { \"anonymous\": { \"enabled\": false } }\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--anonymous-auth=false\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"authentication.*anonymous\":{\"enabled\":false}\"` by extracting the live configuration from the nodes running kubelet.\\*\\*See detailed step-by-step configmap procedures in[Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-3.2.2 Ensure that the --authorization-mode argument is not set to AlwaysAllow", + "controlID": "C-0173", + "creationTime": "", + "description": "Do not allow all requests. 
Enable explicit authorization.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"authorization\": { \"mode\": \"Webhook\" }\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--authorization-mode=Webhook\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"authentication.*webhook\":{\"enabled\":true\"` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-3.2.3 Ensure that the --client-ca-file argument is set as appropriate", + "controlID": "C-0174", + "creationTime": "", + "description": "Enable Kubelet authentication using certificates.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to the location of the client CA file\n\n \n```\n\"authentication\": { \"x509\": { \"clientCAFile\": \"\" } }\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--client-ca-file=\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"authentication.*x509\":(\"clientCAFile\":\"/etc/kubernetes/pki/ca.crt\"` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl 
restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-3.2.4 Ensure that the --read-only-port is secured", + "controlID": "C-0175", + "creationTime": "", + "description": "Disable the read-only port.", + "remediation": "If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to false\n\n \n```\nreadOnlyPort to 0\n\n```\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--read-only-port=0\n\n```\n For all remediations:\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-3.2.5 Ensure that the --streaming-connection-idle-timeout argument is not set to 0", + "controlID": "C-0176", + "creationTime": "", + "description": "Do not disable timeouts on streaming connections.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to a non-zero value in the format of #h#m#s\n\n \n```\n\"streamingConnectionIdleTimeout\": \"4h0m0s\"\n\n```\n You should ensure that the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not specify a `--streaming-connection-idle-timeout` argument because it would override the Kubelet config file.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--streaming-connection-idle-timeout=4h0m0s\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"streamingConnectionIdleTimeout\":` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "rules": [], + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-3.2.6 Ensure that the --protect-kernel-defaults argument is 
set to true", + "controlID": "C-0177", + "creationTime": "", + "description": "Protect tuned kernel parameters from overriding kubelet default kernel parameter values.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"protectKernelDefaults\": true\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--protect-kernel-defaults=true\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"protectKernelDefaults\":` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "rules": [], + "baseScore": 2, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-3.2.7 Ensure that the --make-iptables-util-chains argument is set to true", + "controlID": "C-0178", + "creationTime": "", + "description": "Allow Kubelet to manage iptables.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"makeIPTablesUtilChains\": true\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--make-iptables-util-chains=true\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"makeIPTablesUtilChains\": true` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl 
daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "rules": [], + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-3.2.8 Ensure that the --hostname-override argument is not set", + "controlID": "C-0179", + "creationTime": "", + "description": "Do not override node hostnames.", + "remediation": "**Remediation Method 1:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and remove the below parameter from the `KUBELET_ARGS` variable string.\n\n \n```\n--hostname-override\n\n```\n Based on your system, restart the `kubelet` service and check status. The example below is for systemctl:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "rules": [], + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-3.2.9 Ensure that the --eventRecordQPS argument is set to 0 or a level which ensures appropriate event capture", + "controlID": "C-0180", + "creationTime": "", + "description": "Security relevant information should be captured. The `--eventRecordQPS` flag on the Kubelet can be used to limit the rate at which events are gathered. Setting this too low could result in relevant events not being logged, however the unlimited setting of `0` could result in a denial of service on the kubelet.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to 5 or a value greater or equal to 0\n\n \n```\n\"eventRecordQPS\": 5\n\n```\n Check that `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not define an executable argument for `eventRecordQPS` because this would override your Kubelet config.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--eventRecordQPS=5\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"eventRecordQPS\"` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "rules": [], + "baseScore": 2, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": 
"CIS-3.2.10 Ensure that the --rotate-certificates argument is not set to false", + "controlID": "C-0182", + "creationTime": "", + "description": "Enable kubelet client certificate rotation.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"rotateCertificates\": true\n\n```\n Additionally, ensure that the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf does not set the --rotate-certificates executable argument to false because this would override the Kubelet config file.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--rotate-certificates=true\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-3.2.11 Ensure that the RotateKubeletServerCertificate argument is set to true", + "controlID": "C-0183", + "creationTime": "", + "description": "Enable kubelet server certificate rotation.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"serverTLSBootstrap\": true\n\n```\n **Remediation Method 2:**\n\n If using a Kubelet config file, edit the file to set `serverTLSBootstrap` to `true`.\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--rotate-kubelet-server-certificate=true\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"serverTLSBootstrap\": true` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.aks.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-4.1.1 Ensure that the cluster-admin role is only used where required", + "controlID": "C-0185", + "creationTime": "", + "description": "The RBAC role `cluster-admin` provides wide-ranging powers over the environment and should be used only where and when needed.", + "remediation": "Identify all clusterrolebindings to the cluster-admin role. 
Check if they are used and if they need this role or if they could use a role with fewer privileges.\n\n Where possible, first bind users to a lower privileged role and then remove the clusterrolebinding to the cluster-admin role :\n\n \n```\nkubectl delete clusterrolebinding [name]\n\n```", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "CIS-4.1.2 Minimize access to secrets", + "controlID": "C-0186", + "creationTime": "", + "description": "The Kubernetes API stores secrets, which may be service account tokens for the Kubernetes API or credentials used by workloads in the cluster. Access to these secrets should be restricted to the smallest possible group of users to reduce the risk of privilege escalation.", + "remediation": "Where possible, remove `get`, `list` and `watch` access to `secret` objects in the cluster.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "CIS-4.1.3 Minimize wildcard use in Roles and ClusterRoles", + "controlID": "C-0187", + "creationTime": "", + "description": "Kubernetes Roles and ClusterRoles provide access to resources based on sets of objects and actions that can be taken on those objects. It is possible to set either of these to be the wildcard \"\\*\" which matches all items.\n\n Use of wildcards is not optimal from a security perspective as it may allow for inadvertent access to be granted when new resources are added to the Kubernetes API either as CRDs or in later versions of the product.", + "remediation": "Where possible replace any use of wildcards in clusterroles and roles with specific objects or actions.", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "CIS-4.1.4 Minimize access to create pods", + "controlID": "C-0188", + "creationTime": "", + "description": "The ability to create pods in a namespace can provide a number of opportunities for privilege escalation, such as assigning privileged service accounts to these pods or mounting hostPaths with access to sensitive data (unless Pod Security Policies are implemented to restrict this access)\n\n As such, access to create new pods should be restricted to the smallest possible group of users.", + "remediation": "Where possible, remove `create` access to `pod` objects in the cluster.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "CIS-4.1.5 Ensure that default service accounts are not actively used.", + "controlID": "C-0189", + "creationTime": "", + "description": "The `default` service account should not be used to ensure that rights granted to applications can be more easily audited and reviewed.", + "remediation": "Create explicit service accounts wherever a Kubernetes workload requires specific access to the Kubernetes API server.\n\n Modify the configuration of each default service account to include this value\n\n \n```\nautomountServiceAccountToken: false\n\n```\n Automatic remediation for the default account:\n\n `kubectl patch serviceaccount default -p $'automountServiceAccountToken: false'`", + "rules": [], + 
"baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CIS-4.1.6 Ensure that Service Account Tokens are only mounted where necessary", + "controlID": "C-0190", + "creationTime": "", + "description": "Service accounts tokens should not be mounted in pods except where the workload running in the pod explicitly needs to communicate with the API server", + "remediation": "Modify the definition of pods and service accounts which do not need to mount service account tokens to disable it.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CIS-4.2.8 Minimize the admission of containers with capabilities assigned", + "controlID": "C-0201", + "creationTime": "", + "description": "Do not generally permit containers with capabilities", + "remediation": "Review the use of capabilities in applications running on your cluster. Where a namespace contains applications which do not require any Linux capabilities to operate consider adding a PSP which forbids the admission of containers which do not drop all capabilities.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CIS-4.4.1 Ensure latest CNI version is used", + "controlID": "C-0205", + "creationTime": "", + "description": "There are a variety of CNI plugins available for Kubernetes. If the CNI in use does not support Network Policies it may not be possible to effectively restrict traffic in the cluster.", + "remediation": "As with RBAC policies, network policies should adhere to the policy of least privileged access. Start by creating a deny all policy that restricts all inbound and outbound traffic from a namespace or create a global policy using Calico.", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Network", + "id": "Cat-4" + } + }, + { + "guid": "", + "name": "CIS-4.4.2 Ensure that all Namespaces have Network Policies defined", + "controlID": "C-0206", + "creationTime": "", + "description": "Use network policies to isolate traffic in your cluster network.", + "remediation": "Follow the documentation and create `NetworkPolicy` objects as you need them.", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Network", + "id": "Cat-4" + } + }, + { + "guid": "", + "name": "CIS-4.5.1 Prefer using secrets as files over secrets as environment variables", + "controlID": "C-0207", + "creationTime": "", + "description": "Kubernetes supports mounting secrets as data volumes or as environment variables. 
Minimize the use of environment variable secrets.", + "remediation": "If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables.", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Secrets", + "id": "Cat-3" + } + } + }, + { + "guid": "", + "name": "CIS-4.5.2 Consider external secret storage", + "controlID": "C-0208", + "creationTime": "", + "description": "Consider the use of an external secrets storage and management system, instead of using Kubernetes Secrets directly, if you have more complex secret management needs. Ensure the solution requires authentication to access secrets, has auditing of access to and use of secrets, and encrypts secrets. Some solutions also make it easier to rotate secrets.", + "remediation": "Refer to the secrets management options offered by your cloud provider or a third-party secrets management solution.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-4.7.1 Create administrative boundaries between resources using namespaces", + "controlID": "C-0209", + "creationTime": "", + "description": "Use namespaces to isolate your Kubernetes objects.", + "remediation": "Follow the documentation and create namespaces for objects in your deployment as you need them.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CIS-4.7.2 Apply Security Context to Your Pods and Containers", + "attributes": { + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" + ] + } + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0211", + "creationTime": "", + "description": "Apply Security Context to Your Pods and Containers", + "remediation": "As a best practice we recommend that you scope the binding for privileged pods to service accounts within a particular namespace, e.g. kube-system, and limiting access to that namespace. 
For all other serviceaccounts/namespaces, we recommend implementing a more restrictive policy such as this:\n\n \n```\napiVersion: policy/v1beta1\nkind: PodSecurityPolicy\nmetadata:\n name: restricted\n annotations:\n seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default'\n apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'\n seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default'\n apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'\nspec:\n privileged: false\n # Required to prevent escalations to root.\n allowPrivilegeEscalation: false\n # This is redundant with non-root + disallow privilege escalation,\n # but we can provide it for defense in depth.\n requiredDropCapabilities:\n - ALL\n # Allow core volume types.\n volumes:\n - 'configMap'\n - 'emptyDir'\n - 'projected'\n - 'secret'\n - 'downwardAPI'\n # Assume that persistentVolumes set up by the cluster admin are safe to use.\n - 'persistentVolumeClaim'\n hostNetwork: false\n hostIPC: false\n hostPID: false\n runAsUser:\n # Require the container to run without root privileges.\n rule: 'MustRunAsNonRoot'\n seLinux:\n # This policy assumes the nodes are using AppArmor rather than SELinux.\n rule: 'RunAsAny'\n supplementalGroups:\n rule: 'MustRunAs'\n ranges:\n # Forbid adding the root group.\n - min: 1\n max: 65535\n fsGroup:\n rule: 'MustRunAs'\n ranges:\n # Forbid adding the root group.\n - min: 1\n max: 65535\n readOnlyRootFilesystem: false\n\n```\n This policy prevents pods from running as privileged or escalating privileges. It also restricts the types of volumes that can be mounted and the root supplemental groups that can be added.\n\n Another, albeit similar, approach is to start with policy that locks everything down and incrementally add exceptions for applications that need looser restrictions such as logging agents which need the ability to mount a host path.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CIS-4.7.3 The default namespace should not be used", + "controlID": "C-0212", + "creationTime": "", + "description": "Kubernetes provides a default namespace, where objects are placed if no namespace is specified for them. 
Placing objects in this namespace makes application of RBAC and other controls more difficult.", + "remediation": "Ensure that namespaces are created to allow for appropriate segregation of Kubernetes resources and that all new resources are created in a specific namespace.", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CIS-4.2.1 Minimize the admission of privileged containers", + "controlID": "C-0213", + "creationTime": "", + "description": "Do not generally permit containers to be run with the `securityContext.privileged` flag set to `true`.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.privileged` field is omitted or set to `false`.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "EKS" + ] + } + }, + { + "guid": "", + "name": "CIS-4.2.2 Minimize the admission of containers wishing to share the host process ID namespace", + "controlID": "C-0214", + "creationTime": "", + "description": "Do not generally permit containers to be run with the `hostPID` flag set to true.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostPID` field is omitted or set to false.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } + }, + { + "guid": "", + "name": "CIS-4.2.3 Minimize the admission of containers wishing to share the host IPC namespace", + "controlID": "C-0215", + "creationTime": "", + "description": "Do not generally permit containers to be run with the `hostIPC` flag set to true.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostIPC` field is omitted or set to false.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } + }, + { + "guid": "", + "name": "CIS-4.2.4 Minimize the admission of containers wishing to share the host network namespace", + "controlID": "C-0216", + "creationTime": "", + "description": "Do not generally permit containers to be run with the `hostNetwork` flag set to true.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostNetwork` field is omitted or set to false.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } + }, + { + "guid": "", + "name": "CIS-4.2.5 Minimize the admission of containers with allowPrivilegeEscalation", + "controlID": "C-0217", + "creationTime": "", + "description": "Do not generally permit containers to be run with the `allowPrivilegeEscalation` flag set to true.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.allowPrivilegeEscalation` field is omitted or set to false.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } + }, + { + "guid": "", + "name": "CIS-4.2.6 Minimize the admission of root containers", + "controlID": "C-0218", + "creationTime": "", + "description": "Do not generally permit containers to be run as the root user.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.runAsUser.rule` is set to either `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + 
"file" + ] + } + }, + { + "guid": "", + "name": "CIS-4.2.7 Minimize the admission of containers with added capabilities", + "controlID": "C-0219", + "creationTime": "", + "description": "Do not generally permit containers with capabilities assigned beyond the default set.", + "remediation": "Ensure that `allowedCapabilities` is not present in PSPs for the cluster unless it is set to an empty array.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } + }, + { + "guid": "", + "name": "CIS-3.1.3 Ensure that the kubelet configuration file has permissions set to 644 or more restrictive", + "controlID": "C-0235", + "creationTime": "", + "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file has permissions of 644 or more restrictive.", + "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchmod 644 /etc/kubernetes/kubelet/kubelet-config.json\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "EKS" + ] + } + }, + { + "guid": "", + "name": "CIS-3.1.1 Ensure that the kubeconfig file permissions are set to 644 or more restrictive", + "controlID": "C-0238", + "creationTime": "", + "description": "If `kubelet` is running, and if it is configured by a kubeconfig file, ensure that the proxy kubeconfig file has permissions of 644 or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the each worker\nnode. For example,\n\n \n```\nchmod 644 \n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "EKS" + ] + } + }, + { + "guid": "", + "name": "CIS-5.2.1 Prefer using dedicated AKS Service Accounts", + "controlID": "C-0239", + "creationTime": "", + "description": "Kubernetes workloads should not use cluster node service accounts to authenticate to Azure AKS APIs. Each Kubernetes workload that needs to authenticate to other Azure Web Services using IAM should be provisioned with a dedicated Service account.", + "remediation": "Azure Active Directory integration\nThe security of AKS clusters can be enhanced with the integration of Azure Active Directory (AD). Built on decades of enterprise identity management, Azure AD is a multi-tenant, cloud-based directory, and identity management service that combines core directory services, application access management, and identity protection. With Azure AD, you can integrate on-premises identities into AKS clusters to provide a single source for account management and security.\n\n Azure Active Directory integration with AKS clusters\n\n With Azure AD-integrated AKS clusters, you can grant users or groups access to Kubernetes resources within a namespace or across the cluster. To obtain a kubectl configuration context, a user can run the az aks get-credentials command. When a user then interacts with the AKS cluster with kubectl, they're prompted to sign in with their Azure AD credentials. This approach provides a single source for user account management and password credentials. The user can only access the resources as defined by the cluster administrator.\n\n Azure AD authentication is provided to AKS clusters with OpenID Connect. OpenID Connect is an identity layer built on top of the OAuth 2.0 protocol. For more information on OpenID Connect, see the Open ID connect documentation. From inside of the Kubernetes cluster, Webhook Token Authentication is used to verify authentication tokens. 
Webhook token authentication is configured and managed as part of the AKS cluster.", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "AKS" + ] + } + }, + { + "guid": "", + "name": "CIS-5.4.4 Ensure Network Policy is Enabled and set as appropriate", + "controlID": "C-0240", + "creationTime": "", + "description": "When you run modern, microservices-based applications in Kubernetes, you often want to control which components can communicate with each other. The principle of least privilege should be applied to how traffic can flow between pods in an Azure Kubernetes Service (AKS) cluster. Let's say you likely want to block traffic directly to back-end applications. The Network Policy feature in Kubernetes lets you define rules for ingress and egress traffic between pods in a cluster.", + "remediation": "", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "AKS" + ] + } + }, + { + "guid": "", + "name": "CIS-5.2.2 Use Azure RBAC for Kubernetes Authorization", + "controlID": "C-0241", + "creationTime": "", + "description": "The ability to manage RBAC for Kubernetes resources from Azure gives you the choice to manage RBAC for the cluster resources either using Azure or native Kubernetes mechanisms.", + "remediation": "Set Azure RBAC as access system.", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "AKS" + ] + } + }, + { + "guid": "", + "name": "CIS-5.6.2 Hostile multi-tenant workloads", + "controlID": "C-0242", + "creationTime": "", + "description": "Currently, Kubernetes environments aren't safe for hostile multi-tenant usage. Extra security features, like Pod Security Policies or Kubernetes RBAC for nodes, efficiently block exploits. For true security when running hostile multi-tenant workloads, only trust a hypervisor. The security domain for Kubernetes becomes the entire cluster, not an individual node.\n\n For these types of hostile multi-tenant workloads, you should use physically isolated clusters. For more information on ways to isolate workloads, see Best practices for cluster isolation in AKS.", + "remediation": "", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "AKS" + ] + } + }, + { + "guid": "", + "name": "CIS-5.1.1 Ensure Image Vulnerability Scanning using Azure Defender image scanning or a third party provider", + "controlID": "C-0243", + "creationTime": "", + "description": "Scan images being deployed to Azure (AKS) for vulnerabilities.\n\n Vulnerability scanning for images stored in Azure Container Registry is generally available in Azure Security Center. This capability is powered by Qualys, a leading provider of information security.\n\n When you push an image to Container Registry, Security Center automatically scans it, then checks for known vulnerabilities in packages or dependencies defined in the file.\n\n When the scan completes (after about 10 minutes), Security Center provides details and a security classification for each vulnerability detected, along with guidance on how to remediate issues and protect vulnerable attack surfaces.", + "remediation": "", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "AKS" + ] + } + }, + { + "guid": "", + "name": "CIS-5.3.1 Ensure Kubernetes Secrets are encrypted", + "controlID": "C-0244", + "creationTime": "", + "description": "Encryption at Rest is a common security requirement. In Azure, organizations can encrypt data at rest without the risk or cost of a custom key management solution. 
Organizations have the option of letting Azure completely manage Encryption at Rest. Additionally, organizations have various options to closely manage encryption or encryption keys.", + "remediation": "", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "AKS" + ] + } + }, + { + "guid": "", + "name": "CIS-5.4.5 Encrypt traffic to HTTPS load balancers with TLS certificates", + "controlID": "C-0245", + "creationTime": "", + "description": "Encrypt traffic to HTTPS load balancers using TLS certificates.", + "remediation": "", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "AKS" + ] + } + }, + { + "guid": "", + "name": "CIS-5.4.1 Restrict Access to the Control Plane Endpoint", + "controlID": "C-0247", + "creationTime": "", + "description": "Enable Endpoint Private Access to restrict access to the cluster's control plane to only an allowlist of authorized IPs.", + "remediation": "", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "AKS" + ] + } + }, + { + "guid": "", + "name": "CIS-5.4.3 Ensure clusters are created with Private Nodes", + "controlID": "C-0248", + "creationTime": "", + "description": "Disable public IP addresses for cluster nodes, so that they only have private IP addresses. Private Nodes are nodes with no public IP addresses.", + "remediation": "\n```\naz aks create \\\n--resource-group \\\n--name \\\n--load-balancer-sku standard \\\n--enable-private-cluster \\\n--network-plugin azure \\\n--vnet-subnet-id \\\n--docker-bridge-address \\\n--dns-service-ip \\\n--service-cidr \n\n```\n Where `--enable-private-cluster` is a mandatory flag for a private cluster.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "AKS" + ] + } + }, + { + "guid": "", + "name": "CIS-5.6.1 Restrict untrusted workloads", + "attributes": { + "actionRequired": "manual review" + }, + "controlID": "C-0249", + "creationTime": "", + "description": "Restricting untrusted workloads can be achieved by using ACI along with AKS.\n\n What is ACI?\nACI lets you quickly deploy container instances without additional infrastructure overhead. When you connect with AKS, ACI becomes a secured, logical extension of your AKS cluster. The virtual nodes component, which is based on Virtual Kubelet, is installed in your AKS cluster that presents ACI as a virtual Kubernetes node. Kubernetes can then schedule pods that run as ACI instances through virtual nodes, not as pods on VM nodes directly in your AKS cluster.\n\n Your application requires no modification to use virtual nodes. Deployments can scale across AKS and ACI and with no delay as cluster autoscaler deploys new nodes in your AKS cluster.\n\n Virtual nodes are deployed to an additional subnet in the same virtual network as your AKS cluster. This virtual network configuration allows the traffic between ACI and AKS to be secured. 
Like an AKS cluster, an ACI instance is a secure, logical compute resource that is isolated from other users.", + "remediation": "", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "AKS" + ] + } + }, + { + "guid": "", + "name": "CIS-5.1.2 Minimize cluster access to read-only for Azure Container Registry (ACR)", + "controlID": "C-0250", + "creationTime": "", + "description": "Configure the Cluster Service Account with Storage Object Viewer Role to only allow read-only access to Azure Container Registry (ACR)", + "remediation": "", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "AKS" + ] + } + }, + { + "guid": "", + "name": "CIS-5.1.3 Minimize user access to Azure Container Registry (ACR)", + "controlID": "C-0251", + "creationTime": "", + "description": "Restrict user access to Azure Container Registry (ACR), limiting interaction with build images to only authorized personnel and service accounts.", + "remediation": "Azure Container Registry\nIf you use Azure Container Registry (ACR) as your container image store, you need to grant permissions to the service principal for your AKS cluster to read and pull images. Currently, the recommended configuration is to use the az aks create or az aks update command to integrate with a registry and assign the appropriate role for the service principal. For detailed steps, see Authenticate with Azure Container Registry from Azure Kubernetes Service.\n\n To avoid needing an Owner or Azure account administrator role, you can configure a service principal manually or use an existing service principal to authenticate ACR from AKS. For more information, see ACR authentication with service principals or Authenticate from Kubernetes with a pull secret.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + } + }, + { + "guid": "", + "name": "CIS-5.4.2 Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled", + "controlID": "C-0252", + "creationTime": "", + "description": "Disable access to the Kubernetes API from outside the node network if it is not required.", + "remediation": "To use a private endpoint, create a new private endpoint in your virtual network then create a link between your virtual network and a new private DNS zone", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "AKS" + ] + } + }, + { + "guid": "", + "name": "CIS-2.1.1 Enable audit Logs", + "attributes": { + "actionRequired": "manual review" + }, + "controlID": "C-0254", + "creationTime": "", + "description": "With Azure Kubernetes Service (AKS), the control plane components such as the kube-apiserver and kube-controller-manager are provided as a managed service. You create and manage the nodes that run the kubelet and container runtime, and deploy your applications through the managed Kubernetes API server. To help troubleshoot your application and services, you may need to view the logs generated by these control plane components.\n\n To help collect and review data from multiple sources, Azure Monitor logs provides a query language and analytics engine that provides insights to your environment. A workspace is used to collate and analyze the data, and can integrate with other Azure services such as Application Insights and Security Center.", + "remediation": "Azure audit logs are enabled and managed in the Azure portal. 
To enable log collection for the Kubernetes master components in your AKS cluster, open the Azure portal in a web browser and complete the following steps:\n\n 1. Select the resource group for your AKS cluster, such as myResourceGroup. Don't select the resource group that contains your individual AKS cluster resources, such as MC\\_myResourceGroup\\_myAKSCluster\\_eastus.\n2. On the left-hand side, choose Diagnostic settings.\n3. Select your AKS cluster, such as myAKSCluster, then choose to Add diagnostic setting.\n4. Enter a name, such as myAKSClusterLogs, then select the option to Send to Log Analytics.\n5. Select an existing workspace or create a new one. If you create a workspace, provide a workspace name, a resource group, and a location.\n6. In the list of available logs, select the logs you wish to enable. For this example, enable the kube-audit and kube-audit-admin logs. Common logs include the kube-apiserver, kube-controller-manager, and kube-scheduler. You can return and change the collected logs once Log Analytics workspaces are enabled.\n7. When ready, select Save to enable collection of the selected logs.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "AKS" + ] + } + }, + { + "guid": "", + "name": "CIS-4.1.9 Minimize access to the proxy sub-resource of nodes", + "controlID": "C-0279", + "creationTime": "", + "description": "Users with access to the Proxy sub-resource of Node objects automatically have permissions to use the Kubelet API, which may allow for privilege escalation or bypass cluster security controls such as audit logs.", + "remediation": "Where possible, remove access to the proxy sub-resource of node objects.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "CIS-4.1.10 Minimize access to the approval sub-resource of certificatesigningrequests objects", + "controlID": "C-0280", + "creationTime": "", + "description": "Users with access to the update the approval sub-resource of certificatesigningrequests objects can approve new client certificates for the Kubernetes API effectively allowing them to create new high-privileged user accounts.", + "remediation": "Where possible, remove access to the approval sub-resource of certificatesigningrequests objects.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "CIS-4.1.11 Minimize access to webhook configuration objects", + "controlID": "C-0281", + "creationTime": "", + "description": "Users with rights to create/modify/delete validatingwebhookconfigurations or mutatingwebhookconfigurations can control webhooks that can read any object admitted to the cluster, and in the case of mutating webhooks, also mutate admitted objects. 
This could allow for privilege escalation or disruption of the operation of the cluster.", + "remediation": "Where possible, remove access to the validatingwebhookconfigurations or mutatingwebhookconfigurations objects", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "CIS-4.1.12 Minimize access to the service account token creation", + "controlID": "C-0282", + "creationTime": "", + "description": "Users with rights to create new service account tokens at a cluster level, can create long-lived privileged credentials in the cluster. This could allow for privilege escalation and persistent access to the cluster, even if the users account has been revoked.", + "remediation": "Where possible, remove access to the token sub-resource of serviceaccount objects.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + } + ], + "controlsIDs": [ + "C-0078", + "C-0088", + "C-0167", + "C-0171", + "C-0172", + "C-0173", + "C-0174", + "C-0175", + "C-0176", + "C-0177", + "C-0178", + "C-0179", + "C-0180", + "C-0182", + "C-0183", + "C-0185", + "C-0186", + "C-0187", + "C-0188", + "C-0189", + "C-0190", + "C-0201", + "C-0205", + "C-0206", + "C-0207", + "C-0208", + "C-0209", + "C-0211", + "C-0212", + "C-0213", + "C-0214", + "C-0215", + "C-0216", + "C-0217", + "C-0218", + "C-0219", + "C-0235", + "C-0238", + "C-0239", + "C-0240", + "C-0241", + "C-0242", + "C-0243", + "C-0244", + "C-0245", + "C-0247", + "C-0248", + "C-0249", + "C-0250", + "C-0251", + "C-0252", + "C-0254", + "C-0279", + "C-0280", + "C-0281", + "C-0282" + ], + "subSections": { + "2": { + "guid": "", + "name": "Master (Control Plane) Configuration", + "id": "2", + "subSections": { + "1": { + "guid": "", + "name": "Logging", + "id": "2.1", + "controlsIDs": [ + "C-0254" + ] + } + } + }, + "3": { + "guid": "", + "name": "Worker Nodes", + "id": "3", + "subSections": { + "2": { + "guid": "", + "name": "Kubelet", + "id": "3.2", + "controlsIDs": [ + "C-0172", + "C-0173", + "C-0174", + "C-0175", + "C-0176", + "C-0177", + "C-0178", + "C-0179", + "C-0180", + "C-0182", + "C-0183" + ] + }, + "1": { + "guid": "", + "name": "Worker Node Configuration Files", + "id": "3.1", + "controlsIDs": [ + "C-0167", + "C-0171", + "C-0235", + "C-0238" + ] + } + } + }, + "4": { + "guid": "", + "name": "Policies", + "id": "4", + "subSections": { + "3": { + "guid": "", + "name": "Azure Policy / OPA", + "id": "4.3" + }, + "4": { + "guid": "", + "name": "CNI Plugin", + "id": "4.4", + "controlsIDs": [ + "C-0205", + "C-0206" + ] + }, + "5": { + "guid": "", + "name": "Secrets Management", + "id": "4.5", + "controlsIDs": [ + "C-0207", + "C-0208" + ] + }, + "6": { + "guid": "", + "name": "Extensible Admission Control", + "id": "4.6" + }, + "7": { + "guid": "", + "name": "General Policies", + "id": "4.7", + "controlsIDs": [ + "C-0209", + "C-0211", + "C-0212" + ] + }, + "1": { + "guid": "", + "name": "RBAC and Service Accounts", + "id": "4.1", + "controlsIDs": [ + "C-0185", + "C-0186", + "C-0187", + "C-0188", + "C-0189", + "C-0190", + "C-0279", + "C-0280", + "C-0281", + "C-0282" + ] + }, + "2": { + "guid": "", + "name": "Pod Security Standards", + "id": "4.2", + "controlsIDs": [ + "C-0201", + "C-0213", + "C-0214", + "C-0215", + "C-0216", + "C-0217", + "C-0218", + "C-0219" + ] + } + } + }, + "5": { + "guid": "", + "name": "Managed 
services", + "id": "5", + "subSections": { + "4": { + "guid": "", + "name": "Cluster Networking", + "id": "5.4", + "controlsIDs": [ + "C-0240", + "C-0245", + "C-0247", + "C-0248", + "C-0252" + ] + }, + "5": { + "guid": "", + "name": "Authentication and Authorization", + "id": "5.5", + "controlsIDs": [ + "C-0088" + ] + }, + "6": { + "guid": "", + "name": "Other Cluster Configurations", + "id": "5.6", + "controlsIDs": [ + "C-0242", + "C-0249" + ] + }, + "1": { + "guid": "", + "name": "Image Registry and Image Scanning", + "id": "5.1", + "controlsIDs": [ + "C-0078", + "C-0243", + "C-0250", + "C-0251" + ] + }, + "2": { + "guid": "", + "name": "Access and identity options for Azure Kubernetes Service (AKS)", + "id": "5.2", + "controlsIDs": [ + "C-0239", + "C-0241" + ] + }, + "3": { + "guid": "", + "name": "Key Management Service (KMS)", + "id": "5.3", + "controlsIDs": [ + "C-0244" + ] + } + } + } + }, + "scanningScope": { + "matches": [ + "AKS" + ] + } + }, + { + "guid": "", + "name": "AllControls", + "attributes": { + "builtin": true + }, + "creationTime": "", + "description": "Contains all the controls from all the frameworks", + "typeTags": [ + "compliance" + ], + "controls": [ + { + "guid": "", + "name": "Prevent containers from allowing command execution", + "attributes": { + "microsoftMitreColumns": [ + "Execution" + ], + "rbacQuery": "Show who can access into pods", + "controlTypeTags": [ + "compliance", + "security-impact" + ] + }, + "controlID": "C-0002", + "creationTime": "", + "description": "Attackers with relevant permissions can run malicious commands in the context of legitimate containers in the cluster using “kubectl exec” command. This control determines which subjects have permissions to use this command.", + "remediation": "It is recommended to prohibit “kubectl exec” command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "API server insecure port is enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0005", + "creationTime": "", + "description": "Kubernetes control plane API is running with non-secure port enabled which allows attackers to gain unprotected access to the cluster.", + "remediation": "Set the insecure-port flag of the API server to zero.", + "rules": [], + "baseScore": 9, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Roles with delete capabilities", + "attributes": { + "rbacQuery": "Data destruction", + "controlTypeTags": [ + "compliance" + ], + "microsoftMitreColumns": [ + "Impact" + ] + }, + "controlID": "C-0007", + "creationTime": "", + "description": "Attackers may attempt to destroy data and resources in the cluster. This includes deleting deployments, configurations, storage, and compute resources. 
This control identifies all subjects that can delete resources.", + "remediation": "You should follow the least privilege principle and minimize the number of subjects that can delete resources.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "Applications credentials in configuration files", + "attributes": { + "actionRequired": "configuration", + "microsoftMitreColumns": [ + "Credential access", + "Lateral Movement" + ], + "controlTypeTags": [ + "security", + "compliance", + "security-impact" + ] + }, + "controlID": "C-0012", + "creationTime": "", + "description": "Attackers who have access to configuration files can steal the stored secrets and use them. This control checks if ConfigMaps or pod specifications have sensitive information in their configuration.", + "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Secrets", + "id": "Cat-3" + } + }, + { + "guid": "", + "name": "Non-root containers", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0013", + "creationTime": "", + "description": "Potential attackers may gain access to a container and leverage its existing privileges to conduct an attack. Therefore, it is not recommended to deploy containers with root privileges unless it is absolutely necessary. This control identifies all the pods running as root or can escalate to root.", + "remediation": "If your application does not need root privileges, make sure to define runAsNonRoot as true or explicitly set the runAsUser using ID 1000 or higher under the PodSecurityContext or container securityContext. In addition, set an explicit value for runAsGroup using ID 1000 or higher.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + } + } + }, + { + "guid": "", + "name": "Access Kubernetes dashboard", + "attributes": { + "microsoftMitreColumns": [ + "Discovery", + "Lateral Movement" + ], + "rbacQuery": "Access k8s Dashboard", + "controlTypeTags": [ + "compliance" + ] + }, + "controlID": "C-0014", + "creationTime": "", + "description": "Attackers who gain access to the dashboard service account or have its RBAC permissions can use its network access to retrieve information about resources in the cluster or change them. 
This control checks if a subject that is not the dashboard service account is bound to the dashboard role/clusterrole, or if anyone that is not the dashboard pod is associated with the dashboard service account.", + "remediation": "Make sure that the “Kubernetes Dashboard” service account is only bound to the Kubernetes dashboard following the least privilege principle.", + "rules": [], + "baseScore": 2, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "List Kubernetes secrets", + "attributes": { + "microsoftMitreColumns": [ + "Credential access" + ], + "rbacQuery": "Show who can access secrets", + "controlTypeTags": [ + "security-impact", + "compliance" + ] + }, + "controlID": "C-0015", + "creationTime": "", + "description": "Attackers who have permissions to access secrets can access sensitive information that might include credentials to various services. This control determines which user, group or service account can list/get secrets.", + "remediation": "Monitor and approve the list of users, groups and service accounts that can access secrets. Use the exception mechanism to prevent repetitive notifications.", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "Allow privilege escalation", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ] + }, + "controlID": "C-0016", + "creationTime": "", + "description": "Attackers may gain access to a container and uplift its privilege to enable excessive capabilities.", + "remediation": "If your application does not need it, make sure the allowPrivilegeEscalation field of the securityContext is set to false.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + } + } + }, + { + "guid": "", + "name": "Immutable container filesystem", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Persistence" + ] + } + ] + }, + "controlID": "C-0017", + "creationTime": "", + "description": "Mutable container filesystem can be abused to inject malicious code or data into containers. Use immutable (read-only) filesystem to limit potential attacks.", + "remediation": "Set the filesystem of the container to read-only when possible (pod securityContext, readOnlyRootFilesystem: true). If the container's application needs to write into the filesystem, it is recommended to mount secondary filesystems for specific directories where the application requires write access.", + "rules": [], + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + } + } + }, + { + "guid": "", + "name": "Configured readiness probe", + "attributes": { + "controlTypeTags": [ + "devops" + ] + }, + "controlID": "C-0018", + "creationTime": "", + "description": "Readiness probe is intended to ensure that the workload is ready to process network traffic. It is highly recommended to define a readiness probe for every worker container. 
This control finds all the pods where the readiness probe is not configured.", + "remediation": "Ensure Readiness probes are configured wherever possible.", + "rules": [], + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "Mount service principal", + "attributes": { + "microsoftMitreColumns": [ + "Credential Access" + ], + "controlTypeTags": [ + "compliance" + ] + }, + "controlID": "C-0020", + "creationTime": "", + "description": "When a cluster is deployed in the cloud, in some cases attackers can leverage their access to a container in the cluster to gain cloud credentials. This control determines if any workload contains a volume with potential access to cloud credential.", + "remediation": "Refrain from using path mount to known cloud credentials folders or files .", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "Exposed sensitive interfaces", + "attributes": { + "actionRequired": "configuration", + "microsoftMitreColumns": [ + "Initial access" + ], + "controlTypeTags": [ + "compliance" + ] + }, + "controlID": "C-0021", + "creationTime": "", + "description": "Exposing a sensitive interface to the internet poses a security risk. It might enable attackers to run malicious code or deploy containers in the cluster. This control checks if known components (e.g. Kubeflow, Argo Workflows, etc.) are deployed and exposed services externally.", + "remediation": "Consider blocking external interfaces or protect them with appropriate security tools.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "Kubernetes CronJob", + "attributes": { + "microsoftMitreColumns": [ + "Persistence" + ], + "controlTypeTags": [ + "compliance" + ] + }, + "controlID": "C-0026", + "creationTime": "", + "description": "Attackers may use Kubernetes CronJob for scheduling execution of malicious code that would run as a pod in the cluster. This control lists all the CronJobs that exist in the cluster for the user to approve.", + "remediation": "Watch Kubernetes CronJobs and make sure they are legitimate.", + "rules": [], + "baseScore": 1, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "Ingress and Egress blocked", + "attributes": { + "controlTypeTags": [ + "compliance" + ] + }, + "controlID": "C-0030", + "creationTime": "", + "description": "Disable Ingress and Egress traffic on all pods wherever possible. 
It is recommended to define restrictive network policy on all new pods, and then enable sources/destinations that this pod must communicate with.", + "remediation": "Define a network policy that restricts ingress and egress connections.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Network", + "id": "Cat-4" + } + }, + { + "guid": "", + "name": "Delete Kubernetes events", + "attributes": { + "microsoftMitreColumns": [ + "Defense evasion" + ], + "rbacQuery": "Show who can delete k8s events", + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0031", + "creationTime": "", + "description": "Attackers may delete Kubernetes events to avoid detection of their activity in the cluster. This control identifies all the subjects that can delete Kubernetes events.", + "remediation": "You should follow the least privilege principle. Minimize the number of subjects who can delete Kubernetes events. Avoid using these subjects in the daily operations.", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "Automatic mapping of service account", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ] + }, + "controlID": "C-0034", + "creationTime": "", + "description": "Potential attacker may gain access to a pod and steal its service account token. Therefore, it is recommended to disable automatic mapping of the service account tokens in service account configuration and enable it only for pods that need to use them.", + "remediation": "Disable automatic mounting of service account tokens to pods either at the service account level or at the individual pod level, by specifying the automountServiceAccountToken: false. Note that pod level takes precedence.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Secrets", + "id": "Cat-3" + } + }, + { + "guid": "", + "name": "Administrative Roles", + "attributes": { + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "rbacQuery": "Show cluster_admin", + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0035", + "creationTime": "", + "description": "Attackers who have cluster admin permissions (can perform any action on any resource), can take advantage of their privileges for malicious activities. This control determines which subjects have cluster admin permissions.", + "remediation": "You should apply least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use subjects with such high permissions for daily operations.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "Validate admission controller (validating)", + "attributes": { + "microsoftMitreColumns": [ + "Credential access" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0036", + "creationTime": "", + "description": "Attackers can use validating webhooks to intercept and discover all the resources in the cluster. This control lists all the validating webhook configurations that must be verified.", + "remediation": "Ensure all the webhooks are necessary. 
Use the exception mechanism to prevent repetitive notifications.", + "rules": [], + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "Host PID/IPC privileges", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0038", + "creationTime": "", + "description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all pods using hostPID or hostIPC privileges.", + "remediation": "Remove the hostPID and hostIPC privileges from the yaml file(s) unless they are absolutely necessary.", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + } + } + }, + { + "guid": "", + "name": "Validate admission controller (mutating)", + "attributes": { + "microsoftMitreColumns": [ + "Persistence" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0039", + "creationTime": "", + "description": "Attackers may use mutating webhooks to intercept and modify all the resources in the cluster. This control lists all mutating webhook configurations that must be verified.", + "remediation": "Ensure all the webhooks are necessary. Use the exception mechanism to prevent repetitive notifications.", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "HostNetwork access", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Lateral Movement (Network)" + ] + } + ] + }, + "controlID": "C-0041", + "creationTime": "", + "description": "Potential attackers may gain access to a pod and inherit access to the entire host network. For example, in the case of AWS, they will have access to the entire VPC. This control identifies all the pods with host network access enabled.", + "remediation": "Only connect pods to the host network when it is necessary. If not, set the hostNetwork field of the pod spec to false, or completely remove it (false is the default). Whitelist only those pods that must have access to the host network by design.", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Network", + "id": "Cat-4" + } + } + }, + { + "guid": "", + "name": "SSH server running inside container", + "attributes": { + "microsoftMitreColumns": [ + "Execution" + ], + "controlTypeTags": [ + "compliance" + ] + }, + "controlID": "C-0042", + "creationTime": "", + "description": "An SSH server that is running inside a container may be used by attackers to get remote access to the container. 
This control checks if pods have an open SSH port (22/2222).", + "remediation": "Remove SSH from the container image or limit the access to the SSH server using network policies.", + "rules": [], + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "Container hostPort", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "devops" + ] + }, + "controlID": "C-0044", + "creationTime": "", + "description": "Configuring hostPort requires a particular port number. If two objects specify the same HostPort, they could not be deployed to the same node. It may prevent the second object from starting, even if Kubernetes will try reschedule it on another node, provided there are available nodes with sufficient amount of resources. Also, if the number of replicas of such workload is higher than the number of nodes, the deployment will consistently fail.", + "remediation": "Avoid usage of hostPort unless it is absolutely necessary, in which case define appropriate exception. Use NodePort / ClusterIP instead.", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Network", + "id": "Cat-4" + } + }, + { + "guid": "", + "name": "Writable hostPath mount", + "attributes": { + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" + ] + } + ], + "microsoftMitreColumns": [ + "Persistence", + "Lateral Movement" + ], + "controlTypeTags": [ + "security", + "compliance", + "devops", + "security-impact", + "smartRemediation" + ] + }, + "controlID": "C-0045", + "creationTime": "", + "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host and gain persistence.", + "remediation": "Refrain from using the hostPath mount or use the exception mechanism to remove unnecessary notifications.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Storage", + "id": "Cat-8" + } + } + }, + { + "guid": "", + "name": "Insecure capabilities", + "attributes": { + "actionRequired": "configuration", + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ], + "attackTracks": [ + { + "categories": [ + "Privilege Escalation (Node)" + ], + "attackTrack": "workload-external-track" + } + ] + }, + "controlID": "C-0046", + "creationTime": "", + "description": "Giving insecure or excessive capabilities to a container can increase the impact of the container compromise. 
This control identifies all the pods with dangerous capabilities (see documentation pages for details).", + "remediation": "Remove all insecure capabilities which are not necessary for the container.", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + } + } + }, + { + "guid": "", + "name": "HostPath mount", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" + ] + } + ], + "microsoftMitreColumns": [ + "Privilege escalation" + ] + }, + "controlID": "C-0048", + "creationTime": "", + "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host. This control identifies all the pods using hostPath mount.", + "remediation": "Remove hostPath mounts unless they are absolutely necessary and use exception mechanism to remove notifications.", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Storage", + "id": "Cat-8" + } + } + }, + { + "guid": "", + "name": "Network mapping", + "attributes": { + "microsoftMitreColumns": [ + "Discovery" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0049", + "creationTime": "", + "description": "If no network policy is defined, attackers who gain access to a single container may use it to probe the network. This control lists all namespaces in which no network policies are defined.", + "remediation": "Define network policies or use similar network protection mechanisms.", + "rules": [], + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Network", + "id": "Cat-4" + } + }, + { + "guid": "", + "name": "Instance Metadata API", + "attributes": { + "microsoftMitreColumns": [ + "Discovery" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0052", + "creationTime": "", + "description": "Attackers who gain access to a container, may query the metadata API service for getting information about the underlying node. This control checks if there is access from the nodes to cloud providers instance metadata services.", + "remediation": "Disable metadata services for pods in cloud provider settings.", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Access container service account", + "attributes": { + "microsoftMitreColumns": [ + "Credential access" + ], + "rbacQuery": "Container service account mapping", + "controlTypeTags": [ + "compliance", + "security-impact" + ] + }, + "controlID": "C-0053", + "creationTime": "", + "description": "Attackers who obtain access to a pod can use its SA token to communicate with the KubeAPI server. All pods with an SA token mounted (if such token has a Role or a ClusterRole binding) are considered potentially dangerous.", + "remediation": "Verify that RBAC is enabled. 
Follow the least privilege principle and ensure that only necessary pods have an SA token mounted into them.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "Cluster internal networking", + "attributes": { + "microsoftMitreColumns": [ + "Lateral movement" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0054", + "creationTime": "", + "description": "If no network policy is defined, attackers who gain access to a container may use it to move laterally in the cluster. This control lists namespaces in which no network policy is defined.", + "remediation": "Define Kubernetes network policies or use alternative products to protect cluster network.", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Network", + "id": "Cat-4" + } + }, + { + "guid": "", + "name": "Linux hardening", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0055", + "creationTime": "", + "description": "Containers may be given more privileges than they actually need. This can increase the potential impact of a container compromise.", + "remediation": "You can use AppArmor, Seccomp, SELinux and Linux Capabilities mechanisms to restrict containers' abilities to utilize unwanted privileges.", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + } + } + }, + { + "guid": "", + "name": "Configured liveness probe", + "attributes": { + "controlTypeTags": [ + "devops" + ] + }, + "controlID": "C-0056", + "creationTime": "", + "description": "Liveness probe is intended to ensure that the workload remains healthy during its entire execution lifecycle, or otherwise restart the container. It is highly recommended to define a liveness probe for every worker container. This control finds all the pods where the Liveness probe is not configured.", + "remediation": "Ensure Liveness probes are configured wherever possible.", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "Privileged container", + "attributes": { + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "controlTypeTags": [ + "security", + "smartRemediation" + ] + }, + "controlID": "C-0057", + "creationTime": "", + "description": "Potential attackers may gain access to privileged containers and inherit access to the host resources. Therefore, it is not recommended to deploy privileged containers unless it is absolutely necessary. This control identifies all the privileged Pods.", + "remediation": "Remove privileged capabilities by setting the securityContext.privileged to false. If you must deploy a Pod as privileged, add other restrictions to it, such as network policy, Seccomp etc and still remove all unnecessary capabilities. 
Use the exception mechanism to remove unnecessary notifications.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + } + } + }, + { + "guid": "", + "name": "CVE-2021-25741 - Using symlink for arbitrary host file system access.", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0058", + "creationTime": "", + "description": "A user may be able to create a container with subPath or subPathExpr volume mounts to access files & directories anywhere on the host filesystem. Following Kubernetes versions are affected: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. This control checks the vulnerable versions and the actual usage of the subPath feature in all Pods in the cluster. If you want to learn more about the CVE, please refer to the CVE link: https://nvd.nist.gov/vuln/detail/CVE-2021-25741", + "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, or remove any existing Pods using subPath or subPathExpr feature.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0059", + "creationTime": "", + "description": "Security issue in ingress-nginx where a user that can create or update ingress objects can use the custom snippets feature to obtain all secrets in the cluster (see more at https://github.com/kubernetes/ingress-nginx/issues/7837)", + "remediation": "To mitigate this vulnerability: 1. Upgrade to a version that allows mitigation (>= v0.49.1 or >= v1.0.1), 2. Set allow-snippet-annotations to false in your ingress-nginx ConfigMap based on how you deploy ingress-nginx", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "Pods in default namespace", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops" + ] + }, + "controlID": "C-0061", + "creationTime": "", + "description": "It is recommended to avoid running pods in cluster without explicit namespace assignment. This control identifies all the pods running in the default namespace.", + "remediation": "Create necessary namespaces and move all the pods from default namespace there.", + "rules": [], + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "Sudo in container entrypoint", + "attributes": { + "controlTypeTags": [ + "security" + ] + }, + "controlID": "C-0062", + "creationTime": "", + "description": "Adding sudo to a container entry point command may escalate process privileges and allow access to forbidden resources. 
This control checks all the entry point commands in all containers in the pod to find those that have sudo command.", + "remediation": "Remove sudo from the command line and use Kubernetes native root and capabilities controls to provide necessary privileges where they are required.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "Portforwarding privileges", + "attributes": { + "rbacQuery": "Port Forwarding", + "controlTypeTags": [ + "security-impact", + "compliance" + ] + }, + "controlID": "C-0063", + "creationTime": "", + "description": "Attackers with relevant RBAC permission can use “kubectl portforward” command to establish direct communication with pods from within the cluster or even remotely. Such communication will most likely bypass existing security measures in the cluster. This control determines which subjects have permissions to use this command.", + "remediation": "It is recommended to prohibit “kubectl portforward” command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "No impersonation", + "attributes": { + "rbacQuery": "Impersonation", + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0065", + "creationTime": "", + "description": "Impersonation is an explicit RBAC permission to use other roles rather than the one assigned to a user, group or service account. This is sometimes needed for testing purposes. However, it is highly recommended not to use this capability in the production environments for daily operations. This control identifies all subjects whose roles include impersonate verb.", + "remediation": "Either remove the impersonate verb from the role where it was found or make sure that this role is not bound to users, groups or service accounts used for ongoing cluster operations. If necessary, bind this role to a subject only for specific needs for limited time period.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "Secret/etcd encryption enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0066", + "creationTime": "", + "description": "All Kubernetes Secrets are stored primarily in etcd therefore it is important to encrypt it.", + "remediation": "Turn on the etcd encryption in your cluster, for more see the vendor documentation.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Audit logs enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0067", + "creationTime": "", + "description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. It is important to use it so the operator has a record of events happened in Kubernetes", + "remediation": "Turn on audit logging for your cluster. 
Look at the vendor guidelines for more details.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "PSP enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0068", + "creationTime": "", + "description": "PSP enables fine-grained authorization of pod creation and it is important to enable it.", + "remediation": "Turn Pod Security Policies on in your cluster. If you use other admission controllers to control the behavior that PSP controls, exclude this control from your scans.", + "rules": [], + "baseScore": 1, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Disable anonymous access to Kubelet service", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0069", + "creationTime": "", + "description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", + "remediation": "Start the kubelet with the --anonymous-auth=false flag.", + "rules": [], + "baseScore": 10, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Enforce Kubelet client TLS authentication", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0070", + "creationTime": "", + "description": "Kubelets are the node-level orchestrators in the Kubernetes control plane. They publish service port 10250, where they accept commands from the API server. The operator must make sure that only the API server is allowed to submit commands to the Kubelet. This is done through client certificate verification: the Kubelet must be configured with a client CA file to use for this purpose.", + "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", + "rules": [], + "baseScore": 9, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Naked pods", + "attributes": { + "controlTypeTags": [ + "devops" + ] + }, + "controlID": "C-0073", + "creationTime": "", + "description": "It is not recommended to create pods without a parental Deployment, ReplicaSet, StatefulSet etc. Manual creation of pods may lead to configuration drift and other untracked changes in the system. Such pods won't be automatically rescheduled by Kubernetes in case of a crash or infrastructure failure. 
This control identifies every pod that does not have a corresponding parental object.", + "remediation": "Create the necessary Deployment object for every pod, making any pod a first-class citizen in your IaC architecture.", + "rules": [], + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "Container runtime socket mounted", + "attributes": { + "controlTypeTags": [ + "devops", + "smartRemediation" + ] + }, + "controlID": "C-0074", + "creationTime": "", + "description": "Mounting the container runtime socket (Unix socket) enables a container to access the container runtime, retrieve sensitive information and execute commands, if the container runtime is available. This control identifies pods that attempt to mount the container runtime socket for accessing the container runtime.", + "remediation": "Remove container runtime socket mount request or define an exception.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } + }, + { + "guid": "", + "name": "Image pull policy on latest tag", + "attributes": { + "controlTypeTags": [ + "devops" + ] + }, + "controlID": "C-0075", + "creationTime": "", + "description": "While usage of the latest tag is not generally recommended, in some cases this is necessary. If it is, the ImagePullPolicy must be set to Always, otherwise Kubernetes may run an older image with the same name that happens to be present in the node cache. Note that using Always will not cause additional image downloads because Kubernetes will check the image hash of the local image against the registry and only pull the image if this hash has changed, which is exactly what users want when using the latest tag. This control will identify all pods with the latest tag that have ImagePullPolicy not set to Always.", + "remediation": "Set ImagePullPolicy to Always in all pods found by this control.", + "rules": [], + "baseScore": 2, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "Label usage for resources", + "attributes": { + "actionRequired": "configuration", + "controlTypeTags": [ + "devops" + ] + }, + "controlID": "C-0076", + "creationTime": "", + "description": "It is recommended to set labels that identify semantic attributes of your application or deployment. For example, { app: myapp, tier: frontend, phase: test, deployment: v3 }. These labels can be used to assign policies to logical groups of the deployments as well as for presentation and tracking purposes. This control helps you find deployments without any of the expected labels.", + "remediation": "Define labels that are most suitable to your needs or use the exceptions to prevent further notifications.", + "rules": [], + "baseScore": 2, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "K8s common labels usage", + "attributes": { + "actionRequired": "configuration", + "controlTypeTags": [ + "devops" + ] + }, + "controlID": "C-0077", + "creationTime": "", + "description": "Kubernetes common labels help manage and monitor Kubernetes cluster using different tools such as kubectl, dashboard and others in an interoperable way. Refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ for more information. 
This control helps you find objects that don't have any of these labels defined.", + "remediation": "Define applicable labels or use the exception mechanism to prevent further notifications.", + "rules": [], + "baseScore": 2, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "Images from allowed registry", + "attributes": { + "actionRequired": "configuration", + "microsoftMitreColumns": [ + "Collection" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0078", + "creationTime": "", + "description": "This control is intended to ensure that all the used container images are taken from the authorized repositories. It allows user to list all the approved repositories and will fail all the images taken from any repository outside of this list.", + "remediation": "You should enable all trusted repositories in the parameters of this control.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Supply chain", + "id": "Cat-6" + } + } + }, + { + "guid": "", + "name": "CVE-2022-0185-linux-kernel-container-escape", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0079", + "creationTime": "", + "description": "CVE-2022-0185 is a kernel vulnerability enabling privilege escalation and it can lead attackers to escape containers and take control over nodes. This control alerts on vulnerable kernel versions of Kubernetes nodes", + "remediation": "Patch Linux kernel version to 5.16.2 or above", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CVE-2022-24348-argocddirtraversal", + "attributes": { + "controlTypeTags": [ + "security" + ] + }, + "controlID": "C-0081", + "creationTime": "", + "description": "CVE-2022-24348 is a major software supply chain 0-day vulnerability in the popular open source CD platform Argo CD which can lead to privilege escalation and information disclosure.", + "remediation": "Update your ArgoCD deployment to fixed versions (v2.1.9,v2.2.4 or v2.3.0)", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CVE-2022-23648-containerd-fs-escape", + "attributes": { + "controlTypeTags": [ + "security" + ] + }, + "controlID": "C-0087", + "creationTime": "", + "description": "CVE-2022-23648 is a vulnerability of containerd enabling attacker to gain access to read-only copies of arbitrary files from the host using specially-crafted manifests", + "remediation": "Patch containerd to 1.6.1, 1.5.10, 1.4.12 or above", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "RBAC enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0088", + "creationTime": "", + "description": "RBAC is the most advanced and well accepted mode of authorizing users of the Kubernetes API", + "remediation": "Enable RBAC either in the API server configuration or with the Kubernetes provider API", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": 
[ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CVE-2022-39328-grafana-auth-bypass", + "attributes": { + "controlTypeTags": [ + "security" + ] + }, + "controlID": "C-0090", + "creationTime": "", + "description": "CVE-2022-39328 is a critical vulnerability in Grafana, it might enable attacker to access unauthorized endpoints under heavy load.", + "remediation": "Update your Grafana to 9.2.4 or above", + "rules": [], + "baseScore": 9, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CVE-2022-47633-kyverno-signature-bypass", + "attributes": { + "controlTypeTags": [ + "security" + ] + }, + "controlID": "C-0091", + "creationTime": "", + "description": "CVE-2022-47633 is a high severity vulnerability in Kyverno, it enables attackers to bypass the image signature validation of policies using a malicious image repository or MITM proxy", + "remediation": "Update your Grafana to 9.2.4 or above", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "Anonymous access enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0262", + "creationTime": "", + "description": "Granting permissions to the system:unauthenticated or system:anonymous user is generally not recommended and can introduce security risks. Allowing unauthenticated access to your Kubernetes cluster can lead to unauthorized access, potential data breaches, and abuse of cluster resources.", + "remediation": "Review and modify your cluster's RBAC configuration to ensure that only authenticated and authorized users have appropriate permissions based on their roles and responsibilities within your system.", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1", + "subCategory": { + "name": "Supply chain", + "id": "Cat-6" + } + } + }, + { + "guid": "", + "name": "Authenticated user has sensitive permissions", + "controlID": "C-0265", + "creationTime": "", + "description": "Granting permissions to the system:authenticated group is generally not recommended and can introduce security risks. 
This control ensures that system:authenticated users do not have cluster risking permissions.", + "remediation": "Review and modify your cluster's RBAC configuration to ensure that system:authenticated will have minimal permissions.", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1", + "subCategory": { + "name": "Supply chain", + "id": "Cat-6" + } + } + }, + { + "guid": "", + "name": "Ensure CPU limits are set", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops", + "security" + ], + "attackTracks": [ + { + "categories": [ + "Denial of service" + ], + "attackTrack": "service-destruction" + } + ] + }, + "controlID": "C-0270", + "creationTime": "", + "description": "This control identifies all Pods for which the CPU limits are not set.", + "remediation": "Set the CPU limits or use exception mechanism to avoid unnecessary notifications.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Resource management", + "id": "Cat-7" + } + } + }, + { + "guid": "", + "name": "Ensure memory limits are set", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops", + "security" + ], + "attackTracks": [ + { + "attackTrack": "service-destruction", + "categories": [ + "Denial of service" + ] + } + ] + }, + "controlID": "C-0271", + "creationTime": "", + "description": "This control identifies all Pods for which the memory limits are not set.", + "remediation": "Set the memory limits or use exception mechanism to avoid unnecessary notifications.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Resource management", + "id": "Cat-7" + } + } + } + ], + "controlsIDs": [ + "C-0002", + "C-0005", + "C-0007", + "C-0012", + "C-0013", + "C-0014", + "C-0015", + "C-0016", + "C-0017", + "C-0018", + "C-0020", + "C-0021", + "C-0026", + "C-0030", + "C-0031", + "C-0034", + "C-0035", + "C-0036", + "C-0038", + "C-0039", + "C-0041", + "C-0042", + "C-0044", + "C-0045", + "C-0046", + "C-0048", + "C-0049", + "C-0052", + "C-0053", + "C-0054", + "C-0055", + "C-0056", + "C-0057", + "C-0058", + "C-0059", "C-0061", "C-0062", "C-0063", @@ -1358,1603 +6984,2598 @@ "C-0068", "C-0069", "C-0070", + "C-0073", + "C-0074", + "C-0075", + "C-0076", + "C-0077", "C-0078", "C-0079", "C-0081", - "C-0083", - "C-0084", - "C-0085", - "C-0086", "C-0087", - "C-0089", - "C-0091" - ] + "C-0088", + "C-0090", + "C-0091", + "C-0262", + "C-0265", + "C-0270", + "C-0271" + ], + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } }, { "guid": "", - "name": "cis-v1.23-t1.0.1", + "name": "cis-v1.12.0", "attributes": { - "version": "v1.0.1", - "armoBuiltin": true + "version": "v1.12.0", + "builtin": true }, "creationTime": "", - "description": "Testing CIS for Kubernetes as suggested by CIS in https://workbench.cisecurity.org/benchmarks/8973", + "description": "CIS Kubernetes Benchmark v1.12.0 - https://workbench.cisecurity.org/benchmarks", + "typeTags": [ + "compliance" + ], "controls": [ { "guid": "", - "name": "CIS-1.1.1 Ensure that the API server pod specification file permissions are set to 600 or more restrictive", + "name": "CIS-5.2.5 Minimize the admission of containers wishing to share the host network namespace", "attributes": { - "armoBuiltin": true + 
"controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "categories": [ + "Lateral Movement (Network)" + ], + "attackTrack": "workload-external-track" + } + ] }, + "controlID": "C-0041", + "creationTime": "", + "description": "Do not generally permit containers to be run with the `hostNetwork` flag set to true.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostNetwork` containers.", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Network", + "id": "Cat-4" + } + } + }, + { + "guid": "", + "name": "CIS-1.1.1 Ensure that the API server pod specification file permissions are set to 600 or more restrictive", "controlID": "C-0092", "creationTime": "", "description": "Ensure that the API server pod specification file has permissions of `600` or more restrictive.", "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```", "rules": [], - "baseScore": 6 + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", "name": "CIS-1.1.2 Ensure that the API server pod specification file ownership is set to root:root", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0093", "creationTime": "", "description": "Ensure that the API server pod specification file ownership is set to `root:root`.", "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```", "rules": [], - "baseScore": 6 + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", "name": "CIS-1.1.3 Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0094", "creationTime": "", "description": "Ensure that the controller manager pod specification file has permissions of `600` or more restrictive.", "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```", "rules": [], - "baseScore": 6 + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", "name": "CIS-1.1.4 Ensure that the controller manager pod specification file ownership is set to root:root", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0095", "creationTime": "", "description": "Ensure that the controller manager pod specification file ownership is set to `root:root`.", "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```", "rules": [], - "baseScore": 6 + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", "name": "CIS-1.1.5 Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0096", "creationTime": "", "description": "Ensure that the scheduler pod specification file has permissions of `600` or more restrictive.", "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```", "rules": [], - "baseScore": 6 + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", "name": "CIS-1.1.6 Ensure that the scheduler pod specification file ownership is set to root:root", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0097", "creationTime": "", "description": "Ensure that the scheduler pod specification file ownership is set to `root:root`.", "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```", "rules": [], - "baseScore": 6 + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", "name": "CIS-1.1.7 Ensure that the etcd pod specification file permissions are set to 600 or more restrictive", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0098", "creationTime": "", "description": "Ensure that the `/etc/kubernetes/manifests/etcd.yaml` file has permissions of `600` or more restrictive.", "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/etcd.yaml\n\n```", "rules": [], - "baseScore": 6 + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", "name": "CIS-1.1.8 Ensure that the etcd pod specification file ownership is set to root:root", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0099", "creationTime": "", "description": "Ensure that the `/etc/kubernetes/manifests/etcd.yaml` file ownership is set to `root:root`.", "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/etcd.yaml\n\n```", "rules": [], - "baseScore": 6 + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", "name": "CIS-1.1.9 Ensure that the Container Network Interface file permissions are set to 600 or more restrictive", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0100", "creationTime": "", "description": "Ensure that the Container Network Interface files have permissions of `600` or more restrictive.", "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchmod 600 \n\n```", "rules": [], - "baseScore": 6 + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Network", + "id": "Cat-4" + } }, { "guid": "", "name": "CIS-1.1.10 Ensure that the Container Network Interface file ownership is set to root:root", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0101", "creationTime": "", "description": "Ensure that the Container Network Interface files have ownership set to `root:root`.", "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root \n\n```", "rules": [], - "baseScore": 6 + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Network", + "id": "Cat-4" + } }, { "guid": "", "name": "CIS-1.1.11 Ensure that the etcd data directory permissions are set to 700 or more restrictive", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0102", "creationTime": "", "description": "Ensure that the etcd data directory has permissions of `700` or more restrictive.", "remediation": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). For example,\n\n \n```\nchmod 700 /var/lib/etcd\n\n```", "rules": [], - "baseScore": 7 + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", "name": "CIS-1.1.12 Ensure that the etcd data directory ownership is set to etcd:etcd", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0103", "creationTime": "", "description": "Ensure that the etcd data directory ownership is set to `etcd:etcd`.", "remediation": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). For example,\n\n \n```\nchown etcd:etcd /var/lib/etcd\n\n```", "rules": [], - "baseScore": 7 + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", - "name": "CIS-1.1.13 Ensure that the admin.conf file permissions are set to 600", - "attributes": { - "armoBuiltin": true - }, + "name": "CIS-1.1.13 Ensure that the default administrative credential file permissions are set to 600", "controlID": "C-0104", "creationTime": "", - "description": "Ensure that the `admin.conf` file has permissions of `600`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/admin.conf\n\n```", + "description": "Ensure that the `admin.conf` file (and `super-admin.conf` file, where it exists) have permissions of `600`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/admin.conf\n\n```\n On Kubernetes 1.29+ the `super-admin.conf` file should also be modified, if present. 
For example,\n\n \n```\nchmod 600 /etc/kubernetes/super-admin.conf\n\n```", "rules": [], - "baseScore": 7 + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", - "name": "CIS-1.1.14 Ensure that the admin.conf file ownership is set to root:root", - "attributes": { - "armoBuiltin": true - }, + "name": "CIS-1.1.14 Ensure that the default administrative credential file ownership is set to root:root", "controlID": "C-0105", "creationTime": "", - "description": "Ensure that the `admin.conf` file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/admin.conf\n\n```", + "description": "Ensure that the `admin.conf` (and `super-admin.conf` file, where it exists) file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/admin.conf\n\n```\n On Kubernetes 1.29+ the super-admin.conf file should also be modified, if present. For example,\n\n \n```\nchown root:root /etc/kubernetes/super-admin.conf\n\n```", "rules": [], - "baseScore": 7 + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", "name": "CIS-1.1.15 Ensure that the scheduler.conf file permissions are set to 600 or more restrictive", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0106", "creationTime": "", "description": "Ensure that the `scheduler.conf` file has permissions of `600` or more restrictive.", "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/scheduler.conf\n\n```", "rules": [], - "baseScore": 6 + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", "name": "CIS-1.1.16 Ensure that the scheduler.conf file ownership is set to root:root", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0107", "creationTime": "", "description": "Ensure that the `scheduler.conf` file ownership is set to `root:root`.", "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/scheduler.conf\n\n```", "rules": [], - "baseScore": 6 + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", "name": "CIS-1.1.17 Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0108", "creationTime": "", "description": "Ensure that the `controller-manager.conf` file has permissions of 600 or more restrictive.", "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchmod 600 /etc/kubernetes/controller-manager.conf\n\n```", "rules": [], - "baseScore": 6 + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", "name": "CIS-1.1.18 Ensure that the controller-manager.conf file ownership is set to root:root", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0109", "creationTime": "", "description": "Ensure that the `controller-manager.conf` file ownership is set to `root:root`.", "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/controller-manager.conf\n\n```", "rules": [], - "baseScore": 6 + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", "name": "CIS-1.1.19 Ensure that the Kubernetes PKI directory and file ownership is set to root:root", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0110", "creationTime": "", "description": "Ensure that the Kubernetes PKI directory and file ownership is set to `root:root`.", "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown -R root:root /etc/kubernetes/pki/\n\n```", "rules": [], - "baseScore": 8 + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", - "name": "CIS-1.1.20 Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive", - "attributes": { - "armoBuiltin": true - }, + "name": "CIS-1.1.20 Ensure that the Kubernetes PKI certificate file permissions are set to 644 or more restrictive", "controlID": "C-0111", "creationTime": "", - "description": "Ensure that Kubernetes PKI certificate files have permissions of `600` or more restrictive.", + "description": "Ensure that Kubernetes PKI certificate files have permissions of `644` or more restrictive.", "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod -R 600 /etc/kubernetes/pki/*.crt\n\n```", "rules": [], - "baseScore": 8 + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", "name": "CIS-1.1.21 Ensure that the Kubernetes PKI key file permissions are set to 600", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0112", "creationTime": "", "description": "Ensure that Kubernetes PKI key files have permissions of `600`.", "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchmod -R 600 /etc/kubernetes/pki/*.key\n\n```", "rules": [], - "baseScore": 8 + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", - "name": "CIS-1.2.1 Ensure that the API Server --anonymous-auth argument is set to false", - "attributes": { - "armoBuiltin": true - }, + "name": "CIS-1.2.1 Ensure that the --anonymous-auth argument is set to false", "controlID": "C-0113", "creationTime": "", "description": "Disable anonymous requests to the API server.", "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--anonymous-auth=false\n\n```", "rules": [], - "baseScore": 8 + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", - "name": "CIS-1.2.2 Ensure that the API Server --token-auth-file parameter is not set", - "attributes": { - "armoBuiltin": true - }, + "name": "CIS-1.2.2 Ensure that the --token-auth-file parameter is not set", "controlID": "C-0114", "creationTime": "", "description": "Do not use token based authentication.", "remediation": "Follow the documentation and configure alternate mechanisms for authentication. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and remove the `--token-auth-file=` parameter.", "rules": [], - "baseScore": 8 + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", - "name": "CIS-1.2.3 Ensure that the API Server --DenyServiceExternalIPs is not set", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0115", - "creationTime": "", - "description": "This admission controller rejects all net-new usage of the Service field externalIPs.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and remove the `--DenyServiceExternalIPs'parameter\n\n or\n\n The Kubernetes API server flag disable-admission-plugins takes a comma-delimited list of admission control plugins to be disabled, even if they are in the list of plugins enabled by default.\n\n `kube-apiserver --disable-admission-plugins=DenyServiceExternalIPs,AlwaysDeny ...`", - "rules": [], - "baseScore": 4 - }, - { - "guid": "", - "name": "CIS-1.2.4 Ensure that the API Server --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate", - "attributes": { - "armoBuiltin": true - }, + "name": "CIS-1.2.4 Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate", "controlID": "C-0116", "creationTime": "", "description": "Enable certificate based kubelet authentication.", "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and kubelets. 
Then, edit API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the kubelet client certificate and key parameters as below.\n\n \n```\n--kubelet-client-certificate=\n--kubelet-client-key=\n\n```", "rules": [], - "baseScore": 7 + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", - "name": "CIS-1.2.5 Ensure that the API Server --kubelet-certificate-authority argument is set as appropriate", - "attributes": { - "armoBuiltin": true - }, + "name": "CIS-1.2.5 Ensure that the --kubelet-certificate-authority argument is set as appropriate", "controlID": "C-0117", "creationTime": "", "description": "Verify kubelet's certificate before establishing connection.", "remediation": "Follow the Kubernetes documentation and setup the TLS connection between the apiserver and kubelets. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--kubelet-certificate-authority` parameter to the path to the cert file for the certificate authority.\n\n \n```\n--kubelet-certificate-authority=\n\n```", "rules": [], - "baseScore": 8 + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", - "name": "CIS-1.2.6 Ensure that the API Server --authorization-mode argument is not set to AlwaysAllow", - "attributes": { - "armoBuiltin": true - }, + "name": "CIS-1.2.6 Ensure that the --authorization-mode argument is not set to AlwaysAllow", "controlID": "C-0118", "creationTime": "", "description": "Do not always authorize all requests.", "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to values other than `AlwaysAllow`. 
One such example could be as below.\n\n \n```\n--authorization-mode=RBAC\n\n```", "rules": [], - "baseScore": 7 + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", - "name": "CIS-1.2.7 Ensure that the API Server --authorization-mode argument includes Node", - "attributes": { - "armoBuiltin": true - }, + "name": "CIS-1.2.7 Ensure that the --authorization-mode argument includes Node", "controlID": "C-0119", "creationTime": "", "description": "Restrict kubelet nodes to reading only objects associated with them.", "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to a value that includes `Node`.\n\n \n```\n--authorization-mode=Node,RBAC\n\n```", "rules": [], - "baseScore": 5 + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", - "name": "CIS-1.2.8 Ensure that the API Server --authorization-mode argument includes RBAC", - "attributes": { - "armoBuiltin": true - }, + "name": "CIS-1.2.8 Ensure that the --authorization-mode argument includes RBAC", "controlID": "C-0120", "creationTime": "", "description": "Turn on Role Based Access Control.", "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to a value that includes `RBAC`, for example:\n\n \n```\n--authorization-mode=Node,RBAC\n\n```", "rules": [], - "baseScore": 8 + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", "name": "CIS-1.2.9 Ensure that the admission control plugin EventRateLimit is set", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0121", "creationTime": "", "description": "Limit the rate at which the API server accepts requests.", "remediation": "Follow the Kubernetes documentation and set the desired limits in a configuration file.\n\n Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` and set the below parameters.\n\n \n```\n--enable-admission-plugins=...,EventRateLimit,...\n--admission-control-config-file=\n\n```", "rules": [], - "baseScore": 4 + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", "name": "CIS-1.2.10 Ensure that the admission control plugin AlwaysAdmit is not set", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0122", "creationTime": "", "description": "Do not allow all requests.", "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and either remove the `--enable-admission-plugins` parameter, or set it to a value that does not include `AlwaysAdmit`.", "rules": [], - "baseScore": 8 + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", "name": "CIS-1.2.11 Ensure that the admission control plugin AlwaysPullImages is set", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0123", "creationTime": "", "description": "Always pull images.", "remediation": "Edit the API server pod specification file 
`/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--enable-admission-plugins` parameter to include `AlwaysPullImages`.\n\n \n```\n--enable-admission-plugins=...,AlwaysPullImages,...\n\n```", "rules": [], - "baseScore": 4 + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", - "name": "CIS-1.2.12 Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0124", - "creationTime": "", - "description": "The SecurityContextDeny admission controller can be used to deny pods which make use of some SecurityContext fields which could allow for privilege escalation in the cluster. This should be used where PodSecurityPolicy is not in place within the cluster.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--enable-admission-plugins` parameter to include `SecurityContextDeny`, unless `PodSecurityPolicy` is already in place.\n\n \n```\n--enable-admission-plugins=...,SecurityContextDeny,...\n\n```", - "rules": [], - "baseScore": 4 - }, - { - "guid": "", - "name": "CIS-1.2.13 Ensure that the admission control plugin ServiceAccount is set", - "attributes": { - "armoBuiltin": true - }, + "name": "CIS-1.2.12 Ensure that the admission control plugin ServiceAccount is set", "controlID": "C-0125", "creationTime": "", "description": "Automate service accounts management.", "remediation": "Follow the documentation and create `ServiceAccount` objects as per your environment. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and ensure that the `--disable-admission-plugins` parameter is set to a value that does not include `ServiceAccount`.", "rules": [], - "baseScore": 3 + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", - "name": "CIS-1.2.14 Ensure that the admission control plugin NamespaceLifecycle is set", - "attributes": { - "armoBuiltin": true - }, + "name": "CIS-1.2.13 Ensure that the admission control plugin NamespaceLifecycle is set", "controlID": "C-0126", "creationTime": "", "description": "Reject creating objects in a namespace that is undergoing termination.", "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--disable-admission-plugins` parameter to ensure it does not include `NamespaceLifecycle`.", "rules": [], - "baseScore": 3 + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", - "name": "CIS-1.2.15 Ensure that the admission control plugin NodeRestriction is set", - "attributes": { - "armoBuiltin": true - }, + "name": "CIS-1.2.14 Ensure that the admission control plugin NodeRestriction is set", "controlID": "C-0127", "creationTime": "", "description": "Limit the `Node` and `Pod` objects that a kubelet could modify.", "remediation": "Follow the Kubernetes documentation and configure `NodeRestriction` plug-in on kubelets. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the `--enable-admission-plugins` parameter to a value that includes `NodeRestriction`.\n\n \n```\n--enable-admission-plugins=...,NodeRestriction,...\n\n```", "rules": [], - "baseScore": 4 + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", - "name": "CIS-1.2.16 Ensure that the API Server --secure-port argument is not set to 0", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0128", - "creationTime": "", - "description": "Do not disable the secure port.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and either remove the `--secure-port` parameter or set it to a different (non-zero) desired port.", - "rules": [], - "baseScore": 8 - }, - { - "guid": "", - "name": "CIS-1.2.17 Ensure that the API Server --profiling argument is set to false", - "attributes": { - "armoBuiltin": true - }, + "name": "CIS-1.2.15 Ensure that the --profiling argument is set to false", "controlID": "C-0129", "creationTime": "", "description": "Disable profiling, if not needed.", "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```", "rules": [], - "baseScore": 3 + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", - "name": "CIS-1.2.18 Ensure that the API Server --audit-log-path argument is set", - "attributes": { - "armoBuiltin": true - }, + "name": "CIS-1.2.16 Ensure that the --audit-log-path argument is set", "controlID": "C-0130", "creationTime": "", "description": "Enable auditing on the Kubernetes API Server and set the desired audit log path.", "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-path` parameter to a suitable path and file where you would like audit logs to be written, for example:\n\n \n```\n--audit-log-path=/var/log/apiserver/audit.log\n\n```", "rules": [], - "baseScore": 7 + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", - "name": "CIS-1.2.19 Ensure that the API Server --audit-log-maxage argument is set to 30 or as appropriate", - "attributes": { - "armoBuiltin": true - }, + "name": "CIS-1.2.17 Ensure that the --audit-log-maxage argument is set to 30 or as appropriate", "controlID": "C-0131", "creationTime": "", "description": "Retain the logs for at least 30 days or as appropriate.", "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxage` parameter to 30 or as an appropriate number of days:\n\n \n```\n--audit-log-maxage=30\n\n```", "rules": [], - "baseScore": 4 + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", - "name": "CIS-1.2.20 Ensure that the API Server --audit-log-maxbackup argument is set to 10 or as appropriate", - "attributes": { - "armoBuiltin": true - }, + "name": "CIS-1.2.18 Ensure that the 
--audit-log-maxbackup argument is set to 10 or as appropriate", "controlID": "C-0132", "creationTime": "", "description": "Retain 10 or an appropriate number of old log files.", "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxbackup` parameter to 10 or to an appropriate value.\n\n \n```\n--audit-log-maxbackup=10\n\n```", "rules": [], - "baseScore": 4 + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", - "name": "CIS-1.2.21 Ensure that the API Server --audit-log-maxsize argument is set to 100 or as appropriate", - "attributes": { - "armoBuiltin": true - }, + "name": "CIS-1.2.19 Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate", "controlID": "C-0133", "creationTime": "", "description": "Rotate log files on reaching 100 MB or as appropriate.", "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxsize` parameter to an appropriate size in MB. For example, to set it as 100 MB:\n\n \n```\n--audit-log-maxsize=100\n\n```", "rules": [], - "baseScore": 4 + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", - "name": "CIS-1.2.22 Ensure that the API Server --request-timeout argument is set as appropriate", - "attributes": { - "armoBuiltin": true - }, + "name": "CIS-1.2.20 Ensure that the --request-timeout argument is set as appropriate", "controlID": "C-0134", "creationTime": "", "description": "Set global request timeout for API server requests as appropriate.", "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` and set the below parameter as appropriate and if needed. 
For example,\n\n \n```\n--request-timeout=300s\n\n```", "rules": [], - "baseScore": 4 + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", - "name": "CIS-1.2.23 Ensure that the API Server --service-account-lookup argument is set to true", - "attributes": { - "armoBuiltin": true - }, + "name": "CIS-1.2.21 Ensure that the --service-account-lookup argument is set to true", "controlID": "C-0135", "creationTime": "", "description": "Validate service account before validating token.", "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--service-account-lookup=true\n\n```\n Alternatively, you can delete the `--service-account-lookup` parameter from this file so that the default takes effect.", "rules": [], - "baseScore": 6 + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", - "name": "CIS-1.2.24 Ensure that the API Server --service-account-key-file argument is set as appropriate", - "attributes": { - "armoBuiltin": true - }, + "name": "CIS-1.2.22 Ensure that the --service-account-key-file argument is set as appropriate", "controlID": "C-0136", "creationTime": "", "description": "Explicitly set a service account public key file for service accounts on the apiserver.", "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--service-account-key-file` parameter to the public key file for service accounts:\n\n \n```\n--service-account-key-file=\n\n```", "rules": [], - "baseScore": 5 + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", - "name": "CIS-1.2.25 Ensure that the API Server --etcd-certfile and --etcd-keyfile arguments are set as appropriate", - "attributes": { - "armoBuiltin": true - }, + "name": "CIS-1.2.23 Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate", "controlID": "C-0137", "creationTime": "", "description": "etcd should be configured to make use of TLS encryption for client connections.", "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the etcd certificate and key file parameters.\n\n \n```\n--etcd-certfile= \n--etcd-keyfile=\n\n```", "rules": [], - "baseScore": 8 + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", - "name": "CIS-1.2.26 Ensure that the API Server --tls-cert-file and --tls-private-key-file arguments are set as appropriate", - "attributes": { - "armoBuiltin": true - }, + "name": "CIS-1.2.24 Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate", "controlID": "C-0138", "creationTime": "", "description": "Setup TLS connection on the API server.", "remediation": "Follow the Kubernetes documentation and set up the TLS connection on the apiserver. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the TLS certificate and private key file parameters.\n\n \n```\n--tls-cert-file= \n--tls-private-key-file=\n\n```", "rules": [], - "baseScore": 8 + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", - "name": "CIS-1.2.27 Ensure that the API Server --client-ca-file argument is set as appropriate", - "attributes": { - "armoBuiltin": true - }, + "name": "CIS-1.2.25 Ensure that the --client-ca-file argument is set as appropriate", "controlID": "C-0139", "creationTime": "", "description": "Setup TLS connection on the API server.", "remediation": "Follow the Kubernetes documentation and set up the TLS connection on the apiserver. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the client certificate authority file.\n\n \n```\n--client-ca-file=\n\n```", "rules": [], - "baseScore": 8 + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", - "name": "CIS-1.2.28 Ensure that the API Server --etcd-cafile argument is set as appropriate", - "attributes": { - "armoBuiltin": true - }, + "name": "CIS-1.2.26 Ensure that the --etcd-cafile argument is set as appropriate", "controlID": "C-0140", "creationTime": "", "description": "etcd should be configured to make use of TLS encryption for client connections.", "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the etcd certificate authority file parameter.\n\n \n```\n--etcd-cafile=\n\n```", "rules": [], - "baseScore": 8 + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", - "name": "CIS-1.2.29 Ensure that the API Server --encryption-provider-config argument is set as appropriate", - "attributes": { - "armoBuiltin": true - }, + "name": "CIS-1.2.27 Ensure that the --encryption-provider-config argument is set as appropriate", "controlID": "C-0141", "creationTime": "", "description": "Encrypt etcd key-value store.", "remediation": "Follow the Kubernetes documentation and configure a `EncryptionConfig` file. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the `--encryption-provider-config` parameter to the path of that file:\n\n \n```\n--encryption-provider-config=\n\n```", "rules": [], - "baseScore": 7 + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", - "name": "CIS-1.2.30 Ensure that encryption providers are appropriately configured", - "attributes": { - "armoBuiltin": true - }, + "name": "CIS-1.2.28 Ensure that encryption providers are appropriately configured", "controlID": "C-0142", "creationTime": "", "description": "Where `etcd` encryption is used, appropriate providers should be configured.", "remediation": "Follow the Kubernetes documentation and configure a `EncryptionConfig` file. 
In this file, choose `aescbc`, `kms` or `secretbox` as the encryption provider.", "rules": [], - "baseScore": 7 + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", - "name": "CIS-1.2.31 Ensure that the API Server only makes use of Strong Cryptographic Ciphers", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0143", - "creationTime": "", - "description": "Ensure that the API server is configured to only use strong cryptographic ciphers.", - "remediation": "Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml on the Control Plane node and set the below parameter.\n\n \n```\n--tls-cipher-suites=TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_GCM_SHA384.\n\n```", - "rules": [], - "baseScore": 5 - }, - { - "guid": "", - "name": "CIS-1.3.1 Ensure that the Controller Manager --terminated-pod-gc-threshold argument is set as appropriate", - "attributes": { - "armoBuiltin": true - }, + "name": "CIS-1.3.1 Ensure that the --terminated-pod-gc-threshold argument is set as appropriate", "controlID": "C-0144", "creationTime": "", "description": "Activate garbage collector on pod termination, as appropriate.", "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--terminated-pod-gc-threshold` to an appropriate threshold, for example:\n\n \n```\n--terminated-pod-gc-threshold=10\n\n```", "rules": [], - "baseScore": 4 + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", - "name": "CIS-1.3.2 Ensure that the Controller Manager --profiling argument is set to false", - "attributes": { - "armoBuiltin": true - }, + "name": "CIS-1.3.2 Ensure that the --profiling argument is set to false", "controlID": "C-0145", "creationTime": "", "description": "Disable profiling, if not needed.", "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```", "rules": [], - "baseScore": 3 + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", - "name": "CIS-1.3.3 Ensure that the Controller Manager --use-service-account-credentials argument is set to true", - "attributes": { - "armoBuiltin": true - }, + "name": "CIS-1.3.3 Ensure that the --use-service-account-credentials argument is set to true", "controlID": "C-0146", "creationTime": "", "description": "Use individual service account credentials for each controller.", "remediation": "Edit the 
Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node to set the below parameter.\n\n \n```\n--use-service-account-credentials=true\n\n```", "rules": [], - "baseScore": 4 + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", - "name": "CIS-1.3.4 Ensure that the Controller Manager --service-account-private-key-file argument is set as appropriate", - "attributes": { - "armoBuiltin": true - }, + "name": "CIS-1.3.4 Ensure that the --service-account-private-key-file argument is set as appropriate", "controlID": "C-0147", "creationTime": "", "description": "Explicitly set a service account private key file for service accounts on the controller manager.", "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--service-account-private-key-file` parameter to the private key file for service accounts.\n\n \n```\n--service-account-private-key-file=\n\n```", "rules": [], - "baseScore": 6 + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", - "name": "CIS-1.3.5 Ensure that the Controller Manager --root-ca-file argument is set as appropriate", - "attributes": { - "armoBuiltin": true - }, + "name": "CIS-1.3.5 Ensure that the --root-ca-file argument is set as appropriate", "controlID": "C-0148", "creationTime": "", "description": "Allow pods to verify the API server's serving certificate before establishing connections.", "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--root-ca-file` parameter to the certificate bundle file`.\n\n \n```\n--root-ca-file=\n\n```", "rules": [], - "baseScore": 7 + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", - "name": "CIS-1.3.6 Ensure that the Controller Manager RotateKubeletServerCertificate argument is set to true", - "attributes": { - "armoBuiltin": true - }, + "name": "CIS-1.3.6 Ensure that the RotateKubeletServerCertificate argument is set to true", "controlID": "C-0149", "creationTime": "", "description": "Enable kubelet server certificate rotation on controller-manager.", "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--feature-gates` parameter to include `RotateKubeletServerCertificate=true`.\n\n \n```\n--feature-gates=RotateKubeletServerCertificate=true\n\n```", "rules": [], - "baseScore": 6 + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", - "name": "CIS-1.3.7 Ensure that the Controller Manager --bind-address argument is set to 127.0.0.1", - "attributes": { - "armoBuiltin": true - }, + "name": "CIS-1.3.7 Ensure that the --bind-address argument is set to 127.0.0.1", "controlID": "C-0150", "creationTime": "", "description": "Do not bind the Controller Manager service to non-loopback insecure addresses.", "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and 
ensure the correct value for the `--bind-address` parameter", "rules": [], - "baseScore": 5 + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", - "name": "CIS-1.4.1 Ensure that the Scheduler --profiling argument is set to false", - "attributes": { - "armoBuiltin": true - }, + "name": "CIS-1.4.1 Ensure that the --profiling argument is set to false", "controlID": "C-0151", "creationTime": "", "description": "Disable profiling, if not needed.", "remediation": "Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` file on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```", "rules": [], - "baseScore": 3 + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", - "name": "CIS-1.4.2 Ensure that the Scheduler --bind-address argument is set to 127.0.0.1", - "attributes": { - "armoBuiltin": true - }, + "name": "CIS-1.4.2 Ensure that the --bind-address argument is set to 127.0.0.1", "controlID": "C-0152", "creationTime": "", "description": "Do not bind the scheduler service to non-loopback insecure addresses.", "remediation": "Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` on the Control Plane node and ensure the correct value for the `--bind-address` parameter", "rules": [], - "baseScore": 5 + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", "name": "CIS-2.1 Ensure that the --cert-file and --key-file arguments are set as appropriate", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0153", "creationTime": "", "description": "Configure TLS encryption for the etcd service.", "remediation": "Follow the etcd service documentation and configure TLS encryption.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameters.\n\n \n```\n--cert-file=\n--key-file=\n\n```", "rules": [], - "baseScore": 8 + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", "name": "CIS-2.2 Ensure that the --client-cert-auth argument is set to true", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0154", "creationTime": "", "description": "Enable client authentication on etcd service.", "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--client-cert-auth=\"true\"\n\n```", "rules": [], - "baseScore": 8 + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", "name": "CIS-2.3 Ensure that the --auto-tls argument is not set to true", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0155", "creationTime": "", "description": "Do not use self-signed certificates for TLS.", "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and either remove the `--auto-tls` parameter or set it to `false`.\n\n \n```\n--auto-tls=false\n\n```", "rules": [], - "baseScore": 6 + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control 
plane", + "id": "Cat-1" + } }, { "guid": "", "name": "CIS-2.4 Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0156", "creationTime": "", "description": "etcd should be configured to make use of TLS encryption for peer connections.", "remediation": "Follow the etcd service documentation and configure peer TLS encryption as appropriate for your etcd cluster.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameters.\n\n \n```\n--peer-client-file=\n--peer-key-file=\n\n```", "rules": [], - "baseScore": 7 + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", "name": "CIS-2.5 Ensure that the --peer-client-cert-auth argument is set to true", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0157", "creationTime": "", "description": "etcd should be configured for peer authentication.", "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--peer-client-cert-auth=true\n\n```", "rules": [], - "baseScore": 7 + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", "name": "CIS-2.6 Ensure that the --peer-auto-tls argument is not set to true", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0158", "creationTime": "", "description": "Do not use automatically generated self-signed certificates for TLS connections between peers.", "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and either remove the `--peer-auto-tls` parameter or set it to `false`.\n\n \n```\n--peer-auto-tls=false\n\n```", "rules": [], - "baseScore": 6 + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", "name": "CIS-2.7 Ensure that a unique Certificate Authority is used for etcd", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0159", "creationTime": "", "description": "Use a different certificate authority for etcd from the one used for Kubernetes.", "remediation": "Follow the etcd documentation and create a dedicated certificate authority setup for the etcd service.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--trusted-ca-file=\n\n```", "rules": [], - "baseScore": 8 + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", "name": "CIS-3.2.1 Ensure that a minimal audit policy is created", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0160", "creationTime": "", "description": "Kubernetes can audit the details of requests made to the API server. 
The `--audit-policy-file` flag must be set for this logging to be enabled.", "remediation": "Create an audit policy file for your cluster.", "rules": [], - "baseScore": 5 + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", "name": "CIS-3.2.2 Ensure that the audit policy covers key security concerns", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0161", "creationTime": "", "description": "Ensure that the audit policy created for the cluster covers key security concerns.", "remediation": "Consider modification of the audit policy in use on the cluster to include these items, at a minimum.", "rules": [], - "baseScore": 5 + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", "name": "CIS-4.1.1 Ensure that the kubelet service file permissions are set to 600 or more restrictive", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0162", "creationTime": "", "description": "Ensure that the `kubelet` service file has permissions of `600` or more restrictive.", "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchmod 600 /etc/systemd/system/kubelet.service.d/kubeadm.conf\n\n```", "rules": [], - "baseScore": 6 + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", "name": "CIS-4.1.2 Ensure that the kubelet service file ownership is set to root:root", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0163", "creationTime": "", "description": "Ensure that the `kubelet` service file ownership is set to `root:root`.", "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchown root:root /etc/systemd/system/kubelet.service.d/kubeadm.conf\n\n```", "rules": [], - "baseScore": 6 + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", "name": "CIS-4.1.3 If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0164", "creationTime": "", "description": "If `kube-proxy` is running, and if it is using a file-based kubeconfig file, ensure that the proxy kubeconfig file has permissions of `600` or more restrictive.", "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchmod 600 \n\n```", "rules": [], - "baseScore": 6 + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", "name": "CIS-4.1.4 If proxy kubeconfig file exists ensure ownership is set to root:root", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0165", "creationTime": "", "description": "If `kube-proxy` is running, ensure that the file ownership of its kubeconfig file is set to `root:root`.", "remediation": "Run the below command (based on the file location on your system) on the each worker node. 
For example,\n\n \n```\nchown root:root \n\n```", "rules": [], - "baseScore": 6 + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", "name": "CIS-4.1.5 Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0166", "creationTime": "", "description": "Ensure that the `kubelet.conf` file has permissions of `600` or more restrictive.", "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/kubelet.conf\n\n```", "rules": [], - "baseScore": 6 + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", "name": "CIS-4.1.6 Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0167", "creationTime": "", "description": "Ensure that the `kubelet.conf` file ownership is set to `root:root`.", "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", "rules": [], - "baseScore": 6 + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", - "name": "CIS-4.1.7 Ensure that the certificate authorities file permissions are set to 600 or more restrictive", - "attributes": { - "armoBuiltin": true - }, + "name": "CIS-4.1.7 Ensure that the certificate authorities file permissions are set to 644 or more restrictive", "controlID": "C-0168", "creationTime": "", - "description": "Ensure that the certificate authorities file has permissions of `600` or more restrictive.", + "description": "Ensure that the certificate authorities file has permissions of `644` or more restrictive.", "remediation": "Run the following command to modify the file permissions of the `--client-ca-file`\n\n \n```\nchmod 600 \n\n```", "rules": [], - "baseScore": 7 + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", "name": "CIS-4.1.8 Ensure that the client certificate authorities file ownership is set to root:root", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0169", "creationTime": "", "description": "Ensure that the certificate authorities file ownership is set to `root:root`.", "remediation": "Run the following command to modify the ownership of the `--client-ca-file`.\n\n \n```\nchown root:root \n\n```", "rules": [], - "baseScore": 7 + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", "name": "CIS-4.1.9 If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0170", "creationTime": "", "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file has permissions of 600 or more restrictive.", "remediation": "Run the following command (using the config file location identied in the Audit step)\n\n \n```\nchmod 600 /var/lib/kubelet/config.yaml\n\n```", "rules": 
[], - "baseScore": 7 + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", "name": "CIS-4.1.10 If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0171", "creationTime": "", "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file is owned by root:root.", "remediation": "Run the following command (using the config file location identied in the Audit step)\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", "rules": [], - "baseScore": 7 + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", "name": "CIS-4.2.1 Ensure that the --anonymous-auth argument is set to false", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0172", "creationTime": "", "description": "Disable anonymous requests to the Kubelet server.", "remediation": "If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to `false`.\n\n If using executable arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--anonymous-auth=false\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", "rules": [], - "baseScore": 7 + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", "name": "CIS-4.2.2 Ensure that the --authorization-mode argument is not set to AlwaysAllow", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0173", "creationTime": "", "description": "Do not allow all requests. Enable explicit authorization.", "remediation": "If using a Kubelet config file, edit the file to set `authorization: mode` to `Webhook`.\n\n If using executable arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_AUTHZ_ARGS` variable.\n\n \n```\n--authorization-mode=Webhook\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", "rules": [], - "baseScore": 6 + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", "name": "CIS-4.2.3 Ensure that the --client-ca-file argument is set as appropriate", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0174", "creationTime": "", "description": "Enable Kubelet authentication using certificates.", "remediation": "If using a Kubelet config file, edit the file to set `authentication: x509: clientCAFile` to the location of the client CA file.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_AUTHZ_ARGS` variable.\n\n \n```\n--client-ca-file=\n\n```\n Based on your system, restart the `kubelet` service. 
For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", "rules": [], - "baseScore": 6 + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", "name": "CIS-4.2.4 Verify that the --read-only-port argument is set to 0", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0175", "creationTime": "", "description": "Disable the read-only port.", "remediation": "If using a Kubelet config file, edit the file to set `readOnlyPort` to `0`.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--read-only-port=0\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", "rules": [], - "baseScore": 4 + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", "name": "CIS-4.2.5 Ensure that the --streaming-connection-idle-timeout argument is not set to 0", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0176", "creationTime": "", "description": "Do not disable timeouts on streaming connections.", "remediation": "If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a value other than 0.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--streaming-connection-idle-timeout=5m\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", "rules": [], - "baseScore": 3 + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", - "name": "CIS-4.2.6 Ensure that the --protect-kernel-defaults argument is set to true", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0177", - "creationTime": "", - "description": "Protect tuned kernel parameters from overriding kubelet default kernel parameter values.", - "remediation": "If using a Kubelet config file, edit the file to set `protectKernelDefaults: true`.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--protect-kernel-defaults=true\n\n```\n Based on your system, restart the `kubelet` service. 
For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "rules": [], - "baseScore": 2 - }, - { - "guid": "", - "name": "CIS-4.2.7 Ensure that the --make-iptables-util-chains argument is set to true", - "attributes": { - "armoBuiltin": true - }, + "name": "CIS-4.2.6 Ensure that the --make-iptables-util-chains argument is set to true", "controlID": "C-0178", "creationTime": "", "description": "Allow Kubelet to manage iptables.", "remediation": "If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains: true`.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and remove the `--make-iptables-util-chains` argument from the `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", "rules": [], - "baseScore": 3 + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", - "name": "CIS-4.2.8 Ensure that the --hostname-override argument is not set", - "attributes": { - "armoBuiltin": true - }, + "name": "CIS-4.2.7 Ensure that the --hostname-override argument is not set", "controlID": "C-0179", "creationTime": "", "description": "Do not override node hostnames.", "remediation": "Edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and remove the `--hostname-override` argument from the `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", "rules": [], - "baseScore": 3 + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", - "name": "CIS-4.2.9 Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture", - "attributes": { - "armoBuiltin": true - }, + "name": "CIS-4.2.8 Ensure that the eventRecordQPS argument is set to a level which ensures appropriate event capture", "controlID": "C-0180", "creationTime": "", - "description": "Security relevant information should be captured. The `--event-qps` flag on the Kubelet can be used to limit the rate at which events are gathered. Setting this too low could result in relevant events not being logged, however the unlimited setting of `0` could result in a denial of service on the kubelet.", - "remediation": "If using a Kubelet config file, edit the file to set `eventRecordQPS:` to an appropriate level.\n\n If using command line arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "description": "Security relevant information should be captured. The eventRecordQPS on the Kubelet configuration can be used to limit the rate at which events are gathered and sets the maximum event creations per second. 
Setting this too low could result in relevant events not being logged, however the unlimited setting of `0` could result in a denial of service on the kubelet.", + "remediation": "If using a Kubelet config file, edit the file to set `eventRecordQPS:` to an appropriate level.\n\n If using command line arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and set the below parameter in `KUBELET_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", "rules": [], - "baseScore": 2 + "baseScore": 2, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", - "name": "CIS-4.2.10 Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate", - "attributes": { - "armoBuiltin": true - }, + "name": "CIS-4.2.9 Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate", "controlID": "C-0181", "creationTime": "", "description": "Setup TLS connection on the Kubelets.", "remediation": "If using a Kubelet config file, edit the file to set tlsCertFile to the location of the certificate file to use to identify this Kubelet, and tlsPrivateKeyFile to the location of the corresponding private key file.\n\n If using command line arguments, edit the kubelet service file /etc/kubernetes/kubelet.conf on each worker node and set the below parameters in KUBELET\\_CERTIFICATE\\_ARGS variable.\n\n --tls-cert-file= --tls-private-key-file=\nBased on your system, restart the kubelet service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", "rules": [], - "baseScore": 7 + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", - "name": "CIS-4.2.11 Ensure that the --rotate-certificates argument is not set to false", - "attributes": { - "armoBuiltin": true - }, + "name": "CIS-4.2.10 Ensure that the --rotate-certificates argument is not set to false", "controlID": "C-0182", "creationTime": "", "description": "Enable kubelet client certificate rotation.", - "remediation": "If using a Kubelet config file, edit the file to add the line `rotateCertificates: true` or remove it altogether to use the default value.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and remove `--rotate-certificates=false` argument from the `KUBELET_CERTIFICATE_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "remediation": "If using a Kubelet config file, edit the file to add the line `rotateCertificates: true` or remove it altogether to use the default value.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and remove `--rotate-certificates=false` argument from the `KUBELET_CERTIFICATE_ARGS` variable or set --rotate-certificates=true .\n\n Based on your system, restart the `kubelet` service. 
For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", "rules": [], - "baseScore": 6 + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", - "name": "CIS-4.2.12 Verify that the RotateKubeletServerCertificate argument is set to true", - "attributes": { - "armoBuiltin": true - }, + "name": "CIS-4.2.11 Verify that the RotateKubeletServerCertificate argument is set to true", "controlID": "C-0183", "creationTime": "", "description": "Enable kubelet server certificate rotation.", "remediation": "Edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_CERTIFICATE_ARGS` variable.\n\n \n```\n--feature-gates=RotateKubeletServerCertificate=true\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", "rules": [], - "baseScore": 6 + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", - "name": "CIS-4.2.13 Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers", - "attributes": { - "armoBuiltin": true - }, + "name": "CIS-4.2.12 Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers", "controlID": "C-0184", "creationTime": "", "description": "Ensure that the Kubelet is configured to only use strong cryptographic ciphers.", "remediation": "If using a Kubelet config file, edit the file to set `TLSCipherSuites:` to `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256` or to a subset of these values.\n\n If using executable arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the `--tls-cipher-suites` parameter as follows, or to a subset of these values.\n\n \n```\n --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", "rules": [], - "baseScore": 5 + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", "name": "CIS-5.1.1 Ensure that the cluster-admin role is only used where required", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0185", "creationTime": "", "description": "The RBAC role `cluster-admin` provides wide-ranging powers over the environment and should be used only where and when needed.", "remediation": "Identify all clusterrolebindings to the cluster-admin role. 
Check if they are used and if they need this role or if they could use a role with fewer privileges.\n\n Where possible, first bind users to a lower privileged role and then remove the clusterrolebinding to the cluster-admin role :\n\n \n```\nkubectl delete clusterrolebinding [name]\n\n```", "rules": [], - "baseScore": 8 + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } }, { "guid": "", "name": "CIS-5.1.2 Minimize access to secrets", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0186", "creationTime": "", "description": "The Kubernetes API stores secrets, which may be service account tokens for the Kubernetes API or credentials used by workloads in the cluster. Access to these secrets should be restricted to the smallest possible group of users to reduce the risk of privilege escalation.", "remediation": "Where possible, remove `get`, `list` and `watch` access to `secret` objects in the cluster.", "rules": [], - "baseScore": 6 + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } }, { "guid": "", "name": "CIS-5.1.3 Minimize wildcard use in Roles and ClusterRoles", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0187", "creationTime": "", "description": "Kubernetes Roles and ClusterRoles provide access to resources based on sets of objects and actions that can be taken on those objects. It is possible to set either of these to be the wildcard \"\\*\" which matches all items.\n\n Use of wildcards is not optimal from a security perspective as it may allow for inadvertent access to be granted when new resources are added to the Kubernetes API either as CRDs or in later versions of the product.", "remediation": "Where possible replace any use of wildcards in clusterroles and roles with specific objects or actions.", "rules": [], - "baseScore": 7 + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } }, { "guid": "", "name": "CIS-5.1.4 Minimize access to create pods", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0188", "creationTime": "", "description": "The ability to create pods in a namespace can provide a number of opportunities for privilege escalation, such as assigning privileged service accounts to these pods or mounting hostPaths with access to sensitive data (unless Pod Security Policies are implemented to restrict this access)\n\n As such, access to create new pods should be restricted to the smallest possible group of users.", "remediation": "Where possible, remove `create` access to `pod` objects in the cluster.", "rules": [], - "baseScore": 5 + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } }, { "guid": "", - "name": "CIS-5.1.5 Ensure that default service accounts are not actively used", - "attributes": { - "armoBuiltin": true - }, + "name": "CIS-5.1.5 Ensure that default service accounts are not actively used.", "controlID": "C-0189", "creationTime": "", "description": "The `default` service account should not be used to ensure that rights granted to applications can be more easily audited and reviewed.", "remediation": "Create explicit service accounts wherever a Kubernetes workload requires specific access to the Kubernetes API server.\n\n Modify the configuration 
of each default service account to include this value\n\n \n```\nautomountServiceAccountToken: false\n\n```", "rules": [], - "baseScore": 5 + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } }, { "guid": "", "name": "CIS-5.1.6 Ensure that Service Account Tokens are only mounted where necessary", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0190", "creationTime": "", "description": "Service accounts tokens should not be mounted in pods except where the workload running in the pod explicitly needs to communicate with the API server", "remediation": "Modify the definition of pods and service accounts which do not need to mount service account tokens to disable it.", "rules": [], - "baseScore": 5 + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } }, { "guid": "", "name": "CIS-5.1.8 Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0191", "creationTime": "", "description": "Cluster roles and roles with the impersonate, bind or escalate permissions should not be granted unless strictly required. Each of these permissions allow a particular subject to escalate their privileges beyond those explicitly granted by cluster administrators", "remediation": "Where possible, remove the impersonate, bind and escalate rights from subjects.", "rules": [], - "baseScore": 6 + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } }, { "guid": "", "name": "CIS-5.2.1 Ensure that the cluster has at least one active policy control mechanism in place", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0192", "creationTime": "", "description": "Every Kubernetes cluster should have at least one policy control mechanism in place to enforce the other requirements in this section. 
This could be the in-built Pod Security Admission controller, or a third party policy control system.", "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.", "rules": [], - "baseScore": 4 + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Network", + "id": "Cat-4" + } }, { "guid": "", "name": "CIS-5.2.2 Minimize the admission of privileged containers", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0193", "creationTime": "", "description": "Do not generally permit containers to be run with the `securityContext.privileged` flag set to `true`.", "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of privileged containers.", "rules": [], - "baseScore": 8 - }, - { - "guid": "", - "name": "CIS-5.2.3 Minimize the admission of containers wishing to share the host process ID namespace", - "attributes": { - "armoBuiltin": true + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] }, - "controlID": "C-0194", - "creationTime": "", - "description": "Do not generally permit containers to be run with the `hostPID` flag set to true.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostPID` containers.", - "rules": [], - "baseScore": 5 - }, - { - "guid": "", - "name": "CIS-5.2.4 Minimize the admission of containers wishing to share the host IPC namespace", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0195", - "creationTime": "", - "description": "Do not generally permit containers to be run with the `hostIPC` flag set to true.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostIPC` containers.", - "rules": [], - "baseScore": 5 - }, - { - "guid": "", - "name": "CIS-5.2.5 Minimize the admission of containers wishing to share the host network namespace", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0196", - "creationTime": "", - "description": "Do not generally permit containers to be run with the `hostNetwork` flag set to true.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostNetwork` containers.", - "rules": [], - "baseScore": 5 + "category": { + "name": "Workload", + "id": "Cat-5" + } }, { "guid": "", "name": "CIS-5.2.6 Minimize the admission of containers with allowPrivilegeEscalation", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0197", "creationTime": "", "description": "Do not generally permit containers to be run with the `allowPrivilegeEscalation` flag set to true. 
Allowing this right can lead to a process running a container getting more rights than it started with.\n\n It's important to note that these rights are still constrained by the overall container sandbox, and this setting does not relate to the use of privileged containers.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of conatiners with `.spec.allowPrivilegeEscalation`set to `true`.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of conatiners with `securityContext: allowPrivilegeEscalation: true`", "rules": [], - "baseScore": 6 + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } }, { "guid": "", "name": "CIS-5.2.7 Minimize the admission of root containers", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0198", "creationTime": "", "description": "Do not generally permit containers to be run as the root user.", "remediation": "Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0, is set.", "rules": [], - "baseScore": 6 + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } }, { "guid": "", - "name": "CIS-5.2.8 Minimize the admission of containers with the NET_RAW capability", - "attributes": { - "armoBuiltin": true - }, + "name": "CIS-5.2.8 Minimize the admission of containers with the NET\\_RAW capability", "controlID": "C-0199", "creationTime": "", "description": "Do not generally permit containers with the potentially dangerous NET\\_RAW capability.", "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers with the `NET_RAW` capability.", "rules": [], - "baseScore": 6 + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } }, { "guid": "", "name": "CIS-5.2.9 Minimize the admission of containers with added capabilities", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0200", "creationTime": "", "description": "Do not generally permit containers with capabilities assigned beyond the default set.", "remediation": "Ensure that `allowedCapabilities` is not present in policies for the cluster unless it is set to an empty array.", "rules": [], - "baseScore": 5 + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } }, { "guid": "", "name": "CIS-5.2.10 Minimize the admission of containers with capabilities assigned", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0201", "creationTime": "", "description": "Do not generally permit containers with capabilities", - "remediation": "Review the use of capabilites in applications runnning on your cluster. Where a namespace contains applicaions which do not require any Linux capabities to operate consider adding a policy which forbids the admission of containers which do not drop all capabilities.", + "remediation": "Review the use of capabilities in applications running on your cluster. 
Where a namespace contains applications which do not require any Linux capabilities to operate consider adding a policy which forbids the admission of containers which do not drop all capabilities.", "rules": [], - "baseScore": 5 + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } }, { "guid": "", "name": "CIS-5.2.11 Minimize the admission of Windows HostProcess Containers", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0202", "creationTime": "", "description": "Do not generally permit Windows containers to be run with the `hostProcess` flag set to true.", "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostProcess` containers.", "rules": [], - "baseScore": 7 + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } }, { "guid": "", "name": "CIS-5.2.12 Minimize the admission of HostPath volumes", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0203", "creationTime": "", "description": "Do not generally admit containers which make use of `hostPath` volumes.", "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers which use `hostPath` volumes.", "rules": [], - "baseScore": 6 + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } }, { "guid": "", "name": "CIS-5.2.13 Minimize the admission of containers which use HostPorts", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0204", "creationTime": "", "description": "Do not generally permit containers which require the use of HostPorts.", "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers which use `hostPort` sections.", "rules": [], - "baseScore": 4 + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } }, { "guid": "", "name": "CIS-5.3.1 Ensure that the CNI in use supports Network Policies", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0205", "creationTime": "", "description": "There are a variety of CNI plugins available for Kubernetes. 
If the CNI in use does not support Network Policies it may not be possible to effectively restrict traffic in the cluster.", "remediation": "If the CNI plugin in use does not support network policies, consideration should be given to making use of a different plugin, or finding an alternate mechanism for restricting traffic in the Kubernetes cluster.", "rules": [], - "baseScore": 4 + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Network", + "id": "Cat-4" + } }, { "guid": "", "name": "CIS-5.3.2 Ensure that all Namespaces have Network Policies defined", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0206", "creationTime": "", "description": "Use network policies to isolate traffic in your cluster network.", "remediation": "Follow the documentation and create `NetworkPolicy` objects as you need them.", "rules": [], - "baseScore": 4 + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Network", + "id": "Cat-4" + } }, { "guid": "", "name": "CIS-5.4.1 Prefer using secrets as files over secrets as environment variables", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0207", "creationTime": "", "description": "Kubernetes supports mounting secrets as data volumes or as environment variables. Minimize the use of environment variable secrets.", "remediation": "If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables.", "rules": [], - "baseScore": 4 + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Secrets", + "id": "Cat-3" + } + } }, { "guid": "", "name": "CIS-5.4.2 Consider external secret storage", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0208", "creationTime": "", "description": "Consider the use of an external secrets storage and management system, instead of using Kubernetes Secrets directly, if you have more complex secret management needs. Ensure the solution requires authentication to access secrets, has auditing of access to and use of secrets, and encrypts secrets. Some solutions also make it easier to rotate secrets.", "remediation": "Refer to the secrets management options offered by your cloud provider or a third-party secrets management solution.", "rules": [], - "baseScore": 5 + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", "name": "CIS-5.7.1 Create administrative boundaries between resources using namespaces", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0209", "creationTime": "", "description": "Use namespaces to isolate your Kubernetes objects.", "remediation": "Follow the documentation and create namespaces for objects in your deployment as you need them.", "rules": [], - "baseScore": 5 + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } }, { "guid": "", "name": "CIS-5.7.2 Ensure that the seccomp profile is set to docker/default in your pod definitions", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0210", "creationTime": "", "description": "Enable `docker/default` seccomp profile in your pod definitions.", "remediation": "Use security context to enable the `docker/default` seccomp profile in your pod definitions. 
An example is as below:\n\n \n```\n securityContext:\n seccompProfile:\n type: RuntimeDefault\n\n```", "rules": [], - "baseScore": 4 + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } }, { "guid": "", "name": "CIS-5.7.3 Apply Security Context to Your Pods and Containers", "attributes": { - "armoBuiltin": true + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" + ] + } + ] }, "controlID": "C-0211", "creationTime": "", "description": "Apply Security Context to Your Pods and Containers", "remediation": "Follow the Kubernetes documentation and apply security contexts to your pods. For a suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker Containers.", "rules": [], - "baseScore": 8 + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } }, { "guid": "", "name": "CIS-5.7.4 The default namespace should not be used", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0212", "creationTime": "", "description": "Kubernetes provides a default namespace, where objects are placed if no namespace is specified for them. Placing objects in this namespace makes application of RBAC and other controls more difficult.", "remediation": "Ensure that namespaces are created to allow for appropriate segregation of Kubernetes resources and that all new resources are created in a specific namespace.", "rules": [], - "baseScore": 4 + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CIS-5.1.7 Avoid use of system:masters group", + "attributes": { + "actionRequired": "manual review" + }, + "controlID": "C-0246", + "creationTime": "", + "description": "The special group `system:masters` should not be used to grant permissions to any user or service account, except where strictly necessary (e.g. 
bootstrapping access prior to RBAC being fully available)", + "remediation": "Remove the `system:masters` group from all users in the cluster.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster" + ] + } + }, + { + "guid": "", + "name": "CIS-5.2.3 Minimize the admission of containers wishing to share the host process ID namespace", + "controlID": "C-0275", + "creationTime": "", + "description": "Do not generally permit containers to be run with the `hostPID` flag set to true.", + "remediation": "Configure the Admission Controller to restrict the admission of `hostPID` containers.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CIS-5.2.4 Minimize the admission of containers wishing to share the host IPC namespace", + "controlID": "C-0276", + "creationTime": "", + "description": "Do not generally permit containers to be run with the `hostIPC` flag set to true.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostIPC` containers.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CIS-1.2.29 Ensure that the API Server only makes use of Strong Cryptographic Ciphers", + "controlID": "C-0277", + "creationTime": "", + "description": "Ensure that the API server is configured to only use strong cryptographic ciphers.", + "remediation": "Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml on the Control Plane node and set the below parameter.\n\n \n```\n--tls-cipher-suites=TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256.\n\n```", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-5.1.9 Minimize access to create persistent volumes", + "controlID": "C-0278", + "creationTime": "", + "description": "The ability to create persistent volumes in a cluster can provide an opportunity for privilege escalation, via the creation of `hostPath` volumes. 
As persistent volumes are not covered by Pod Security Admission, a user with access to create persistent volumes may be able to get access to sensitive files from the underlying host even where restrictive Pod Security Admission policies are in place.", + "remediation": "Where possible, remove `create` access to `PersistentVolume` objects in the cluster.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "CIS-5.1.10 Minimize access to the proxy sub-resource of nodes", + "controlID": "C-0279", + "creationTime": "", + "description": "Users with access to the `Proxy` sub-resource of `Node` objects automatically have permissions to use the Kubelet API, which may allow for privilege escalation or bypass cluster security controls such as audit logs.\n\n The Kubelet provides an API which includes rights to execute commands in any container running on the node. Access to this API is covered by permissions to the main Kubernetes API via the `node` object. The proxy sub-resource specifically allows wide ranging access to the Kubelet API.\n\n Direct access to the Kubelet API bypasses controls like audit logging (there is no audit log of Kubelet API access) and admission control.", + "remediation": "Where possible, remove access to the `proxy` sub-resource of `node` objects.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "CIS-5.1.11 Minimize access to the approval sub-resource of certificatesigningrequests objects", + "controlID": "C-0280", + "creationTime": "", + "description": "Users with access to the update the `approval` sub-resource of `certificatesigningrequests` objects can approve new client certificates for the Kubernetes API effectively allowing them to create new high-privileged user accounts.\n\n This can allow for privilege escalation to full cluster administrator, depending on users configured in the cluster", + "remediation": "Where possible, remove access to the `approval` sub-resource of `certificatesigningrequests` objects.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "CIS-5.1.12 Minimize access to webhook configuration objects", + "controlID": "C-0281", + "creationTime": "", + "description": "Users with rights to create/modify/delete `validatingwebhookconfigurations` or `mutatingwebhookconfigurations` can control webhooks that can read any object admitted to the cluster, and in the case of mutating webhooks, also mutate admitted objects. This could allow for privilege escalation or disruption of the operation of the cluster.", + "remediation": "Where possible, remove access to the `validatingwebhookconfigurations` or `mutatingwebhookconfigurations` objects", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "CIS-5.1.13 Minimize access to the service account token creation", + "controlID": "C-0282", + "creationTime": "", + "description": "Users with rights to create new service account tokens at a cluster level, can create long-lived privileged credentials in the cluster. 
This could allow for privilege escalation and persistent access to the cluster, even if the users account has been revoked.", + "remediation": "Where possible, remove access to the `token` sub-resource of `serviceaccount` objects.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "CIS-1.2.3 Ensure that the DenyServiceExternalIPs is set", + "controlID": "C-0283", + "creationTime": "", + "description": "This admission controller rejects all net-new usage of the Service field externalIPs.", + "remediation": "Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml on the master node and append the Kubernetes API server flag --enable-admission-plugins with the DenyServiceExternalIPs plugin. Note, the Kubernetes API server flag --enable-admission-plugins takes a comma-delimited list of admission control plugins to be enabled, even if they are in the list of plugins enabled by default.\n\n kube-apiserver --enable-admission-plugins=DenyServiceExternalIPs", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-4.2.13 Ensure that a limit is set on pod PIDs", + "controlID": "C-0284", + "creationTime": "", + "description": "Ensure that the Kubelet sets limits on the number of PIDs that can be created by pods running on the node.", + "remediation": "Decide on an appropriate level for this parameter and set it, either via the `--pod-max-pids` command line parameter or the `PodPidsLimit` configuration file setting.", + "rules": [], + "baseScore": 2, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-3.1.1 Client certificate authentication should not be used for users", + "attributes": { + "actionRequired": "manual review" + }, + "id": "CIS-3.1.1", + "controlID": "C-0286", + "creationTime": "", + "description": "Kubernetes provides the option to use client certificates for user authentication. However as there is no way to revoke these certificates when a user leaves an organization or loses their credential, they are not suitable for this purpose.\n\n It is not possible to fully disable client certificate use within a cluster as it is used for component to component authentication.", + "remediation": "Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented in place of client certificates.", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + } + }, + { + "guid": "", + "name": "CIS-3.1.2 Service account token authentication should not be used for users", + "attributes": { + "actionRequired": "manual review" + }, + "id": "CIS-3.1.2", + "controlID": "C-0287", + "creationTime": "", + "description": "Kubernetes provides service account tokens which are intended for use by workloads running in the Kubernetes cluster, for authentication to the API server.\n\n These tokens are not designed for use by end-users and do not provide for features such as revocation or expiry, making them insecure. 
A newer version of the feature (Bound service account token volumes) does introduce expiry but still does not allow for specific revocation.", + "remediation": "Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented in place of service account tokens.", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + } + }, + { + "guid": "", + "name": "CIS-3.1.3 Bootstrap token authentication should not be used for users", + "attributes": { + "actionRequired": "manual review" + }, + "id": "CIS-3.1.3", + "controlID": "C-0288", + "creationTime": "", + "description": "Kubernetes provides bootstrap tokens which are intended for use by new nodes joining the cluster\n\n These tokens are not designed for use by end-users they are specifically designed for the purpose of bootstrapping new nodes and not for general authentication", + "remediation": "Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented in place of bootstrap tokens.", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + } + }, + { + "guid": "", + "name": "CIS-5.5.1 Configure Image Provenance using ImagePolicyWebhook admission controller", + "attributes": { + "actionRequired": "manual review" + }, + "id": "CIS-5.5.1", + "controlID": "C-0289", + "creationTime": "", + "description": "Configure Image Provenance for your deployment.", + "remediation": "Follow the Kubernetes documentation and setup image provenance.", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + } + }, + { + "guid": "", + "name": "CIS-1.2.30 Ensure that the --service-account-extend-token-expiration parameter is set to false", + "controlID": "C-0290", + "creationTime": "", + "description": "By default Kubernetes extends service account token lifetimes to one year. 
This should be set to false for security.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n```\n--service-account-extend-token-expiration=false\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-4.3.1 Ensure that the kube-proxy metrics service is bound to localhost", + "controlID": "C-0291", + "creationTime": "", + "description": "Do not bind the kube-proxy metrics port to non-loopback addresses.", + "remediation": "If running kube-proxy with a configuration file, edit the kube-proxy configuration file and set the metricsBindAddress to `127.0.0.1:10249`.\n\nIf running kube-proxy with command line arguments, set `--metrics-bind-address=127.0.0.1:10249`.\n\nRestart kube-proxy for changes to take effect.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } } ], "controlsIDs": [ + "C-0041", "C-0092", "C-0093", "C-0094", @@ -2978,7 +9599,6 @@ "C-0112", "C-0113", "C-0114", - "C-0115", "C-0116", "C-0117", "C-0118", @@ -2987,11 +9607,9 @@ "C-0121", "C-0122", "C-0123", - "C-0124", "C-0125", "C-0126", "C-0127", - "C-0128", "C-0129", "C-0130", "C-0131", @@ -3006,7 +9624,6 @@ "C-0140", "C-0141", "C-0142", - "C-0143", "C-0144", "C-0145", "C-0146", @@ -3040,7 +9657,6 @@ "C-0174", "C-0175", "C-0176", - "C-0177", "C-0178", "C-0179", "C-0180", @@ -3057,9 +9673,6 @@ "C-0191", "C-0192", "C-0193", - "C-0194", - "C-0195", - "C-0196", "C-0197", "C-0198", "C-0199", @@ -3075,7 +9688,24 @@ "C-0209", "C-0210", "C-0211", - "C-0212" + "C-0212", + "C-0246", + "C-0275", + "C-0276", + "C-0277", + "C-0278", + "C-0279", + "C-0280", + "C-0281", + "C-0282", + "C-0283", + "C-0284", + "C-0286", + "C-0287", + "C-0288", + "C-0289", + "C-0290", + "C-0291" ], "subSections": { "1": { @@ -3083,6 +9713,4303 @@ "name": "Control Plane Components", "id": "1", "subSections": { + "1": { + "guid": "", + "name": "Control Plane Node Configuration Files", + "id": "1.1", + "controlsIDs": [ + "C-0092", + "C-0093", + "C-0094", + "C-0095", + "C-0096", + "C-0097", + "C-0098", + "C-0099", + "C-0100", + "C-0101", + "C-0102", + "C-0103", + "C-0104", + "C-0105", + "C-0106", + "C-0107", + "C-0108", + "C-0109", + "C-0110", + "C-0111", + "C-0112" + ] + }, + "2": { + "guid": "", + "name": "API Server", + "id": "1.2", + "controlsIDs": [ + "C-0113", + "C-0114", + "C-0116", + "C-0117", + "C-0118", + "C-0119", + "C-0120", + "C-0121", + "C-0122", + "C-0123", + "C-0125", + "C-0126", + "C-0127", + "C-0129", + "C-0130", + "C-0131", + "C-0132", + "C-0133", + "C-0134", + "C-0135", + "C-0136", + "C-0137", + "C-0138", + "C-0139", + "C-0140", + "C-0141", + "C-0142", + "C-0277", + "C-0283", + "C-0290" + ] + }, + "3": { + "guid": "", + "name": "Controller Manager", + "id": "1.3", + "controlsIDs": [ + "C-0144", + "C-0145", + "C-0146", + "C-0147", + "C-0148", + "C-0149", + "C-0150" + ] + }, + "4": { + "guid": "", + "name": "Scheduler", + "id": "1.4", + "controlsIDs": [ + "C-0151", + "C-0152" + ] + } + } + }, + "2": { + "guid": "", + "name": "etcd", + "id": "2", + "controlsIDs": [ + "C-0153", + "C-0154", + "C-0155", + "C-0156", + "C-0157", + "C-0158", + "C-0159" + ] + }, + "3": { + "guid": "", + "name": "Control Plane Configuration", + "id": "3", + "subSections": { + "1": { + "guid": "", + "name": 
"Authentication and Authorization", + "id": "3.1", + "controlsIDs": [ + "C-0286", + "C-0287", + "C-0288" + ] + }, + "2": { + "guid": "", + "name": "Logging", + "id": "3.2", + "controlsIDs": [ + "C-0160", + "C-0161" + ] + } + } + }, + "4": { + "guid": "", + "name": "Worker Nodes", + "id": "4", + "subSections": { + "1": { + "guid": "", + "name": "Worker Node Configuration Files", + "id": "4.1", + "controlsIDs": [ + "C-0162", + "C-0163", + "C-0164", + "C-0165", + "C-0166", + "C-0167", + "C-0168", + "C-0169", + "C-0170", + "C-0171" + ] + }, + "2": { + "guid": "", + "name": "Kubelet", + "id": "4.2", + "controlsIDs": [ + "C-0172", + "C-0173", + "C-0174", + "C-0175", + "C-0176", + "C-0178", + "C-0179", + "C-0180", + "C-0181", + "C-0182", + "C-0183", + "C-0184", + "C-0284" + ] + }, + "3": { + "guid": "", + "name": "kube-proxy", + "id": "4.3", + "controlsIDs": [ + "C-0291" + ] + } + } + }, + "5": { + "guid": "", + "name": "Policies", + "id": "5", + "subSections": { + "2": { + "guid": "", + "name": "Pod Security Standards", + "id": "5.2", + "controlsIDs": [ + "C-0041", + "C-0192", + "C-0193", + "C-0197", + "C-0198", + "C-0199", + "C-0200", + "C-0201", + "C-0202", + "C-0203", + "C-0204", + "C-0275", + "C-0276" + ] + }, + "3": { + "guid": "", + "name": "Network Policies and CNI", + "id": "5.3", + "controlsIDs": [ + "C-0205", + "C-0206" + ] + }, + "4": { + "guid": "", + "name": "Secrets Management", + "id": "5.4", + "controlsIDs": [ + "C-0207", + "C-0208" + ] + }, + "5": { + "guid": "", + "name": "Extensible Admission Control", + "id": "5.5", + "controlsIDs": [ + "C-0289" + ] + }, + "7": { + "guid": "", + "name": "General Policies", + "id": "5.7", + "controlsIDs": [ + "C-0209", + "C-0210", + "C-0211", + "C-0212" + ] + }, + "1": { + "guid": "", + "name": "RBAC and Service Accounts", + "id": "5.1", + "controlsIDs": [ + "C-0185", + "C-0186", + "C-0187", + "C-0188", + "C-0189", + "C-0190", + "C-0246", + "C-0191", + "C-0278", + "C-0279", + "C-0280", + "C-0281", + "C-0282" + ] + } + } + } + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } + }, + { + "guid": "", + "name": "ArmoBest", + "attributes": { + "builtin": true + }, + "creationTime": "", + "description": "", + "typeTags": [ + "compliance" + ], + "controls": [ + { + "guid": "", + "name": "Prevent containers from allowing command execution", + "attributes": { + "microsoftMitreColumns": [ + "Execution" + ], + "rbacQuery": "Show who can access into pods", + "controlTypeTags": [ + "compliance", + "security-impact" + ] + }, + "controlID": "C-0002", + "creationTime": "", + "description": "Attackers with relevant permissions can run malicious commands in the context of legitimate containers in the cluster using “kubectl exec” command. This control determines which subjects have permissions to use this command.", + "remediation": "It is recommended to prohibit “kubectl exec” command in production environments. 
It is also recommended not to use subjects with this permission for daily cluster operations.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "API server insecure port is enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0005", + "creationTime": "", + "description": "Kubernetes control plane API is running with non-secure port enabled which allows attackers to gain unprotected access to the cluster.", + "remediation": "Set the insecure-port flag of the API server to zero.", + "rules": [], + "baseScore": 9, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Applications credentials in configuration files", + "attributes": { + "actionRequired": "configuration", + "microsoftMitreColumns": [ + "Credential access", + "Lateral Movement" + ], + "controlTypeTags": [ + "security", + "compliance", + "security-impact" + ] + }, + "controlID": "C-0012", + "creationTime": "", + "description": "Attackers who have access to configuration files can steal the stored secrets and use them. This control checks if ConfigMaps or pod specifications have sensitive information in their configuration.", + "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Secrets", + "id": "Cat-3" + } + }, + { + "guid": "", + "name": "Non-root containers", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0013", + "creationTime": "", + "description": "Potential attackers may gain access to a container and leverage its existing privileges to conduct an attack. Therefore, it is not recommended to deploy containers with root privileges unless it is absolutely necessary. This control identifies all the pods running as root or can escalate to root.", + "remediation": "If your application does not need root privileges, make sure to define runAsNonRoot as true or explicitly set the runAsUser using ID 1000 or higher under the PodSecurityContext or container securityContext. 
In addition, set an explicit value for runAsGroup using ID 1000 or higher.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + } + } + }, + { + "guid": "", + "name": "Allow privilege escalation", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ] + }, + "controlID": "C-0016", + "creationTime": "", + "description": "Attackers may gain access to a container and uplift its privilege to enable excessive capabilities.", + "remediation": "If your application does not need it, make sure the allowPrivilegeEscalation field of the securityContext is set to false.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + } + } + }, + { + "guid": "", + "name": "Immutable container filesystem", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Persistence" + ] + } + ] + }, + "controlID": "C-0017", + "creationTime": "", + "description": "Mutable container filesystem can be abused to inject malicious code or data into containers. Use immutable (read-only) filesystem to limit potential attacks.", + "remediation": "Set the filesystem of the container to read-only when possible (pod securityContext, readOnlyRootFilesystem: true). If containers application needs to write into the filesystem, it is recommended to mount secondary filesystems for specific directories where application require write access.", + "rules": [], + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + } + } + }, + { + "guid": "", + "name": "Ingress and Egress blocked", + "attributes": { + "controlTypeTags": [ + "compliance" + ] + }, + "controlID": "C-0030", + "creationTime": "", + "description": "Disable Ingress and Egress traffic on all pods wherever possible. It is recommended to define restrictive network policy on all new pods, and then enable sources/destinations that this pod must communicate with.", + "remediation": "Define a network policy that restricts ingress and egress connections.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Network", + "id": "Cat-4" + } + }, + { + "guid": "", + "name": "Automatic mapping of service account", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ] + }, + "controlID": "C-0034", + "creationTime": "", + "description": "Potential attacker may gain access to a pod and steal its service account token. Therefore, it is recommended to disable automatic mapping of the service account tokens in service account configuration and enable it only for pods that need to use them.", + "remediation": "Disable automatic mounting of service account tokens to pods either at the service account level or at the individual pod level, by specifying the automountServiceAccountToken: false. 
Note that pod level takes precedence.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Secrets", + "id": "Cat-3" + } + }, + { + "guid": "", + "name": "Administrative Roles", + "attributes": { + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "rbacQuery": "Show cluster_admin", + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0035", + "creationTime": "", + "description": "Attackers who have cluster admin permissions (can perform any action on any resource), can take advantage of their privileges for malicious activities. This control determines which subjects have cluster admin permissions.", + "remediation": "You should apply least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use subjects with such high permissions for daily operations.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "Host PID/IPC privileges", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0038", + "creationTime": "", + "description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all pods using hostPID or hostIPC privileges.", + "remediation": "Remove hostPID and hostIPC from the yaml file(s) privileges unless they are absolutely necessary.", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + } + } + }, + { + "guid": "", + "name": "HostNetwork access", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Lateral Movement (Network)" + ] + } + ] + }, + "controlID": "C-0041", + "creationTime": "", + "description": "Potential attackers may gain access to a pod and inherit access to the entire host network. For example, in AWS case, they will have access to the entire VPC. This control identifies all the pods with host network access enabled.", + "remediation": "Only connect pods to host network when it is necessary. If not, set the hostNetwork field of the pod spec to false, or completely remove it (false is the default). Whitelist only those pods that must have access to host network by design.", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Network", + "id": "Cat-4" + } + } + }, + { + "guid": "", + "name": "Container hostPort", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "devops" + ] + }, + "controlID": "C-0044", + "creationTime": "", + "description": "Configuring hostPort requires a particular port number. If two objects specify the same HostPort, they could not be deployed to the same node. It may prevent the second object from starting, even if Kubernetes will try reschedule it on another node, provided there are available nodes with sufficient amount of resources. 
Also, if the number of replicas of such workload is higher than the number of nodes, the deployment will consistently fail.", + "remediation": "Avoid usage of hostPort unless it is absolutely necessary, in which case define appropriate exception. Use NodePort / ClusterIP instead.", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Network", + "id": "Cat-4" + } + }, + { + "guid": "", + "name": "Insecure capabilities", + "attributes": { + "actionRequired": "configuration", + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" + ] + } + ] + }, + "controlID": "C-0046", + "creationTime": "", + "description": "Giving insecure or excessive capabilities to a container can increase the impact of the container compromise. This control identifies all the pods with dangerous capabilities (see documentation pages for details).", + "remediation": "Remove all insecure capabilities which are not necessary for the container.", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + } + } + }, + { + "guid": "", + "name": "Network mapping", + "attributes": { + "microsoftMitreColumns": [ + "Discovery" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0049", + "creationTime": "", + "description": "If no network policy is defined, attackers who gain access to a single container may use it to probe the network. This control lists all namespaces in which no network policies are defined.", + "remediation": "Define network policies or use similar network protection mechanisms.", + "rules": [], + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Network", + "id": "Cat-4" + } + }, + { + "guid": "", + "name": "Cluster internal networking", + "attributes": { + "microsoftMitreColumns": [ + "Lateral movement" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0054", + "creationTime": "", + "description": "If no network policy is defined, attackers who gain access to a container may use it to move laterally in the cluster. This control lists namespaces in which no network policy is defined.", + "remediation": "Define Kubernetes network policies or use alternative products to protect cluster network.", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Network", + "id": "Cat-4" + } + }, + { + "guid": "", + "name": "Linux hardening", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0055", + "creationTime": "", + "description": "Containers may be given more privileges than they actually need. 
This can increase the potential impact of a container compromise.", + "remediation": "You can use AppArmor, Seccomp, SELinux and Linux Capabilities mechanisms to restrict containers' abilities to utilize unwanted privileges.", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + } + } + }, + { + "guid": "", + "name": "Privileged container", + "attributes": { + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "controlTypeTags": [ + "security", + "smartRemediation" + ] + }, + "controlID": "C-0057", + "creationTime": "", + "description": "Potential attackers may gain access to privileged containers and inherit access to the host resources. Therefore, it is not recommended to deploy privileged containers unless it is absolutely necessary. This control identifies all the privileged Pods.", + "remediation": "Remove privileged capabilities by setting the securityContext.privileged to false. If you must deploy a Pod as privileged, add other restrictions to it, such as network policy, Seccomp, etc., and still remove all unnecessary capabilities. Use the exception mechanism to remove unnecessary notifications.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + } + } + }, + { + "guid": "", + "name": "CVE-2021-25741 - Using symlink for arbitrary host file system access.", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0058", + "creationTime": "", + "description": "A user may be able to create a container with subPath or subPathExpr volume mounts to access files & directories anywhere on the host filesystem. The following Kubernetes versions are affected: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. This control checks the vulnerable versions and the actual usage of the subPath feature in all Pods in the cluster. If you want to learn more about the CVE, please refer to the CVE link: https://nvd.nist.gov/vuln/detail/CVE-2021-25741", + "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, or remove any existing Pods using the subPath or subPathExpr feature.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0059", + "creationTime": "", + "description": "Security issue in ingress-nginx where a user that can create or update ingress objects can use the custom snippets feature to obtain all secrets in the cluster (see more at https://github.com/kubernetes/ingress-nginx/issues/7837)", + "remediation": "To mitigate this vulnerability: 1. Upgrade to a version that allows mitigation (>= v0.49.1 or >= v1.0.1), 2. 
Set allow-snippet-annotations to false in your ingress-nginx ConfigMap based on how you deploy ingress-nginx", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "Pods in default namespace", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops" + ] + }, + "controlID": "C-0061", + "creationTime": "", + "description": "It is recommended to avoid running pods in cluster without explicit namespace assignment. This control identifies all the pods running in the default namespace.", + "remediation": "Create necessary namespaces and move all the pods from default namespace there.", + "rules": [], + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "Sudo in container entrypoint", + "attributes": { + "controlTypeTags": [ + "security" + ] + }, + "controlID": "C-0062", + "creationTime": "", + "description": "Adding sudo to a container entry point command may escalate process privileges and allow access to forbidden resources. This control checks all the entry point commands in all containers in the pod to find those that have sudo command.", + "remediation": "Remove sudo from the command line and use Kubernetes native root and capabilities controls to provide necessary privileges where they are required.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "Portforwarding privileges", + "attributes": { + "rbacQuery": "Port Forwarding", + "controlTypeTags": [ + "security-impact", + "compliance" + ] + }, + "controlID": "C-0063", + "creationTime": "", + "description": "Attackers with relevant RBAC permission can use “kubectl portforward” command to establish direct communication with pods from within the cluster or even remotely. Such communication will most likely bypass existing security measures in the cluster. This control determines which subjects have permissions to use this command.", + "remediation": "It is recommended to prohibit “kubectl portforward” command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "No impersonation", + "attributes": { + "rbacQuery": "Impersonation", + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0065", + "creationTime": "", + "description": "Impersonation is an explicit RBAC permission to use other roles rather than the one assigned to a user, group or service account. This is sometimes needed for testing purposes. However, it is highly recommended not to use this capability in the production environments for daily operations. This control identifies all subjects whose roles include impersonate verb.", + "remediation": "Either remove the impersonate verb from the role where it was found or make sure that this role is not bound to users, groups or service accounts used for ongoing cluster operations. 
If necessary, bind this role to a subject only for specific needs and for a limited time period.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "Secret/etcd encryption enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0066", + "creationTime": "", + "description": "All Kubernetes Secrets are stored primarily in etcd, therefore it is important to encrypt it.", + "remediation": "Turn on the etcd encryption in your cluster; for more details, see the vendor documentation.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Audit logs enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0067", + "creationTime": "", + "description": "Audit logging is an important security feature in Kubernetes; it enables the operator to track requests to the cluster. It is important to use it so the operator has a record of events that happened in Kubernetes", + "remediation": "Turn on audit logging for your cluster. Look at the vendor guidelines for more details", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "PSP enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0068", + "creationTime": "", + "description": "PSP enables fine-grained authorization of pod creation and it is important to enable it", + "remediation": "Turn Pod Security Policies on in your cluster. If you use other admission controllers to control the behavior that PSP controls, exclude this control from your scans", + "rules": [], + "baseScore": 1, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Disable anonymous access to Kubelet service", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0069", + "creationTime": "", + "description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", + "remediation": "Start the kubelet with the --anonymous-auth=false flag.", + "rules": [], + "baseScore": 10, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Enforce Kubelet client TLS authentication", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0070", + "creationTime": "", + "description": "Kubelets are the node-level orchestrators in the Kubernetes control plane. They publish service port 10250, where they accept commands from the API server. The operator must make sure that only the API server is allowed to submit commands to the Kubelet. 
This is done through client certificate verification; the Kubelet must be configured with a client CA file to use for this purpose.", + "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", + "rules": [], + "baseScore": 9, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Images from allowed registry", + "attributes": { + "actionRequired": "configuration", + "microsoftMitreColumns": [ + "Collection" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0078", + "creationTime": "", + "description": "This control is intended to ensure that all the used container images are taken from the authorized repositories. It allows the user to list all the approved repositories and will fail all the images taken from any repository outside of this list.", + "remediation": "You should enable all trusted repositories in the parameters of this control.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Supply chain", + "id": "Cat-6" + } + } + }, + { + "guid": "", + "name": "CVE-2022-0185-linux-kernel-container-escape", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0079", + "creationTime": "", + "description": "CVE-2022-0185 is a kernel vulnerability enabling privilege escalation, and it can allow attackers to escape containers and take control over nodes. This control alerts on vulnerable kernel versions of Kubernetes nodes", + "remediation": "Patch Linux kernel version to 5.16.2 or above", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CVE-2022-24348-argocddirtraversal", + "attributes": { + "controlTypeTags": [ + "security" + ] + }, + "controlID": "C-0081", + "creationTime": "", + "description": "CVE-2022-24348 is a major software supply chain 0-day vulnerability in the popular open source CD platform Argo CD which can lead to privilege escalation and information disclosure.", + "remediation": "Update your ArgoCD deployment to fixed versions (v2.1.9, v2.2.4 or v2.3.0)", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CVE-2022-23648-containerd-fs-escape", + "attributes": { + "controlTypeTags": [ + "security" + ] + }, + "controlID": "C-0087", + "creationTime": "", + "description": "CVE-2022-23648 is a vulnerability of containerd enabling an attacker to gain access to read-only copies of arbitrary files from the host using specially-crafted manifests", + "remediation": "Patch containerd to 1.6.1, 1.5.10, 1.4.12 or above", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CVE-2022-3172-aggregated-API-server-redirect", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [] + }, + "controlID": "C-0089", + "creationTime": "", + "description": "The API server allows an aggregated API to redirect client traffic to any URL. 
This could lead to the client performing unexpected actions as well as forwarding the client's API server credentials to third parties", + "remediation": "Upgrade the Kubernetes version to one of the following versions (or higher patches): `v1.25.1`, `v1.24.5`, `v1.23.11`, `v1.22.14`", + "rules": [], + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CVE-2022-47633-kyverno-signature-bypass", + "attributes": { + "controlTypeTags": [ + "security" + ] + }, + "controlID": "C-0091", + "creationTime": "", + "description": "CVE-2022-47633 is a high severity vulnerability in Kyverno; it enables attackers to bypass the image signature validation of policies using a malicious image repository or MITM proxy", + "remediation": "Update your Kyverno to 1.8.5 or above", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "Verify image signature", + "attributes": { + "actionRequired": "configuration" + }, + "controlID": "C-0236", + "creationTime": "", + "description": "Verifies the signature of each image with given public keys", + "remediation": "Replace the image with an image that is signed correctly", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Supply chain", + "id": "Cat-6" + } + } + }, + { + "guid": "", + "name": "Check if signature exists", + "controlID": "C-0237", + "creationTime": "", + "description": "Ensures that all images contain some signature", + "remediation": "Replace the image with a signed image", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Supply chain", + "id": "Cat-6" + } + } + }, + { + "guid": "", + "name": "Ensure CPU limits are set", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops", + "security" + ], + "attackTracks": [ + { + "attackTrack": "service-destruction", + "categories": [ + "Denial of service" + ] + } + ] + }, + "controlID": "C-0270", + "creationTime": "", + "description": "This control identifies all Pods for which the CPU limits are not set.", + "remediation": "Set the CPU limits or use the exception mechanism to avoid unnecessary notifications.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Resource management", + "id": "Cat-7" + } + } + }, + { + "guid": "", + "name": "Ensure memory limits are set", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops", + "security" + ], + "attackTracks": [ + { + "categories": [ + "Denial of service" + ], + "attackTrack": "service-destruction" + } + ] + }, + "controlID": "C-0271", + "creationTime": "", + "description": "This control identifies all Pods for which the memory limits are not set.", + "remediation": "Set the memory limits or use the exception mechanism to avoid unnecessary notifications.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Resource management", + "id": "Cat-7" + } + } + } + ], + 
"controlsIDs": [ + "C-0002", + "C-0005", + "C-0012", + "C-0013", + "C-0016", + "C-0017", + "C-0030", + "C-0034", + "C-0035", + "C-0038", + "C-0041", + "C-0044", + "C-0046", + "C-0049", + "C-0054", + "C-0055", + "C-0057", + "C-0058", + "C-0059", + "C-0061", + "C-0062", + "C-0063", + "C-0065", + "C-0066", + "C-0067", + "C-0068", + "C-0069", + "C-0070", + "C-0078", + "C-0079", + "C-0081", + "C-0087", + "C-0089", + "C-0091", + "C-0236", + "C-0237", + "C-0270", + "C-0271" + ], + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } + }, + { + "guid": "", + "name": "cis-v1.10.0", + "attributes": { + "version": "v1.10.0", + "builtin": true + }, + "creationTime": "", + "description": "Testing CIS for Kubernetes as suggested by CIS in https://workbench.cisecurity.org/benchmarks/17568", + "typeTags": [ + "compliance" + ], + "controls": [ + { + "guid": "", + "name": "CIS-5.2.5 Minimize the admission of containers wishing to share the host network namespace", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Lateral Movement (Network)" + ] + } + ] + }, + "controlID": "C-0041", + "creationTime": "", + "description": "Do not generally permit containers to be run with the `hostNetwork` flag set to true.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostNetwork` containers.", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Network", + "id": "Cat-4" + } + } + }, + { + "guid": "", + "name": "CIS-1.1.1 Ensure that the API server pod specification file permissions are set to 600 or more restrictive", + "controlID": "C-0092", + "creationTime": "", + "description": "Ensure that the API server pod specification file has permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-1.1.2 Ensure that the API server pod specification file ownership is set to root:root", + "controlID": "C-0093", + "creationTime": "", + "description": "Ensure that the API server pod specification file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-1.1.3 Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive", + "controlID": "C-0094", + "creationTime": "", + "description": "Ensure that the controller manager pod specification file has permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-1.1.4 Ensure that the controller manager pod specification file ownership is set to root:root", + "controlID": "C-0095", + "creationTime": "", + "description": "Ensure that the controller manager pod specification file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-1.1.5 Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive", + "controlID": "C-0096", + "creationTime": "", + "description": "Ensure that the scheduler pod specification file has permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-1.1.6 Ensure that the scheduler pod specification file ownership is set to root:root", + "controlID": "C-0097", + "creationTime": "", + "description": "Ensure that the scheduler pod specification file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-1.1.7 Ensure that the etcd pod specification file permissions are set to 600 or more restrictive", + "controlID": "C-0098", + "creationTime": "", + "description": "Ensure that the `/etc/kubernetes/manifests/etcd.yaml` file has permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/etcd.yaml\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-1.1.8 Ensure that the etcd pod specification file ownership is set to root:root", + "controlID": "C-0099", + "creationTime": "", + "description": "Ensure that the `/etc/kubernetes/manifests/etcd.yaml` file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/etcd.yaml\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-1.1.9 Ensure that the Container Network Interface file permissions are set to 600 or more restrictive", + "controlID": "C-0100", + "creationTime": "", + "description": "Ensure that the Container Network Interface files have permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 \n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Network", + "id": "Cat-4" + } + }, + { + "guid": "", + "name": "CIS-1.1.10 Ensure that the Container Network Interface file ownership is set to root:root", + "controlID": "C-0101", + "creationTime": "", + "description": "Ensure that the Container Network Interface files have ownership set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root \n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Network", + "id": "Cat-4" + } + }, + { + "guid": "", + "name": "CIS-1.1.11 Ensure that the etcd data directory permissions are set to 700 or more restrictive", + "controlID": "C-0102", + "creationTime": "", + "description": "Ensure that the etcd data directory has permissions of `700` or more restrictive.", + "remediation": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). For example,\n\n \n```\nchmod 700 /var/lib/etcd\n\n```", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-1.1.12 Ensure that the etcd data directory ownership is set to etcd:etcd", + "controlID": "C-0103", + "creationTime": "", + "description": "Ensure that the etcd data directory ownership is set to `etcd:etcd`.", + "remediation": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). For example,\n\n \n```\nchown etcd:etcd /var/lib/etcd\n\n```", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-1.1.13 Ensure that the default administrative credential file permissions are set to 600", + "controlID": "C-0104", + "creationTime": "", + "description": "Ensure that the `admin.conf` file (and `super-admin.conf` file, where it exists) have permissions of `600`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/admin.conf\n\n```\n On Kubernetes 1.29+ the `super-admin.conf` file should also be modified, if present. 
For example,\n\n \n```\nchmod 600 /etc/kubernetes/super-admin.conf\n\n```", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-1.1.14 Ensure that the default administrative credential file ownership is set to root:root", + "controlID": "C-0105", + "creationTime": "", + "description": "Ensure that the `admin.conf` (and `super-admin.conf` file, where it exists) file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/admin.conf\n\n```\n On Kubernetes 1.29+ the super-admin.conf file should also be modified, if present. For example,\n\n \n```\nchown root:root /etc/kubernetes/super-admin.conf\n\n```", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-1.1.15 Ensure that the scheduler.conf file permissions are set to 600 or more restrictive", + "controlID": "C-0106", + "creationTime": "", + "description": "Ensure that the `scheduler.conf` file has permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/scheduler.conf\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-1.1.16 Ensure that the scheduler.conf file ownership is set to root:root", + "controlID": "C-0107", + "creationTime": "", + "description": "Ensure that the `scheduler.conf` file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/scheduler.conf\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-1.1.17 Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive", + "controlID": "C-0108", + "creationTime": "", + "description": "Ensure that the `controller-manager.conf` file has permissions of 600 or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/controller-manager.conf\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-1.1.18 Ensure that the controller-manager.conf file ownership is set to root:root", + "controlID": "C-0109", + "creationTime": "", + "description": "Ensure that the `controller-manager.conf` file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchown root:root /etc/kubernetes/controller-manager.conf\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-1.1.19 Ensure that the Kubernetes PKI directory and file ownership is set to root:root", + "controlID": "C-0110", + "creationTime": "", + "description": "Ensure that the Kubernetes PKI directory and file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown -R root:root /etc/kubernetes/pki/\n\n```", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-1.1.20 Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive", + "controlID": "C-0111", + "creationTime": "", + "description": "Ensure that Kubernetes PKI certificate files have permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod -R 600 /etc/kubernetes/pki/*.crt\n\n```", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-1.1.21 Ensure that the Kubernetes PKI key file permissions are set to 600", + "controlID": "C-0112", + "creationTime": "", + "description": "Ensure that Kubernetes PKI key files have permissions of `600`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod -R 600 /etc/kubernetes/pki/*.key\n\n```", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-1.2.1 Ensure that the --anonymous-auth argument is set to false", + "controlID": "C-0113", + "creationTime": "", + "description": "Disable anonymous requests to the API server.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--anonymous-auth=false\n\n```", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-1.2.2 Ensure that the --token-auth-file parameter is not set", + "controlID": "C-0114", + "creationTime": "", + "description": "Do not use token based authentication.", + "remediation": "Follow the documentation and configure alternate mechanisms for authentication. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and remove the `--token-auth-file=` parameter.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-1.2.4 Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate", + "controlID": "C-0116", + "creationTime": "", + "description": "Enable certificate based kubelet authentication.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and kubelets. Then, edit API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the kubelet client certificate and key parameters as below.\n\n \n```\n--kubelet-client-certificate=\n--kubelet-client-key=\n\n```", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-1.2.5 Ensure that the --kubelet-certificate-authority argument is set as appropriate", + "controlID": "C-0117", + "creationTime": "", + "description": "Verify kubelet's certificate before establishing connection.", + "remediation": "Follow the Kubernetes documentation and setup the TLS connection between the apiserver and kubelets. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--kubelet-certificate-authority` parameter to the path to the cert file for the certificate authority.\n\n \n```\n--kubelet-certificate-authority=\n\n```", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-1.2.6 Ensure that the --authorization-mode argument is not set to AlwaysAllow", + "controlID": "C-0118", + "creationTime": "", + "description": "Do not always authorize all requests.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to values other than `AlwaysAllow`. 
One such example could be as below.\n\n \n```\n--authorization-mode=RBAC\n\n```", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-1.2.7 Ensure that the --authorization-mode argument includes Node", + "controlID": "C-0119", + "creationTime": "", + "description": "Restrict kubelet nodes to reading only objects associated with them.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to a value that includes `Node`.\n\n \n```\n--authorization-mode=Node,RBAC\n\n```", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-1.2.8 Ensure that the --authorization-mode argument includes RBAC", + "controlID": "C-0120", + "creationTime": "", + "description": "Turn on Role Based Access Control.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to a value that includes `RBAC`, for example:\n\n \n```\n--authorization-mode=Node,RBAC\n\n```", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-1.2.9 Ensure that the admission control plugin EventRateLimit is set", + "controlID": "C-0121", + "creationTime": "", + "description": "Limit the rate at which the API server accepts requests.", + "remediation": "Follow the Kubernetes documentation and set the desired limits in a configuration file.\n\n Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` and set the below parameters.\n\n \n```\n--enable-admission-plugins=...,EventRateLimit,...\n--admission-control-config-file=\n\n```", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-1.2.10 Ensure that the admission control plugin AlwaysAdmit is not set", + "controlID": "C-0122", + "creationTime": "", + "description": "Do not allow all requests.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and either remove the `--enable-admission-plugins` parameter, or set it to a value that does not include `AlwaysAdmit`.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-1.2.11 Ensure that the admission control plugin AlwaysPullImages is set", + "controlID": "C-0123", + "creationTime": "", + "description": "Always pull images.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--enable-admission-plugins` parameter to include `AlwaysPullImages`.\n\n \n```\n--enable-admission-plugins=...,AlwaysPullImages,...\n\n```", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": 
"CIS-1.2.12 Ensure that the admission control plugin ServiceAccount is set", + "controlID": "C-0125", + "creationTime": "", + "description": "Automate service accounts management.", + "remediation": "Follow the documentation and create `ServiceAccount` objects as per your environment. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and ensure that the `--disable-admission-plugins` parameter is set to a value that does not include `ServiceAccount`.", + "rules": [], + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-1.2.13 Ensure that the admission control plugin NamespaceLifecycle is set", + "controlID": "C-0126", + "creationTime": "", + "description": "Reject creating objects in a namespace that is undergoing termination.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--disable-admission-plugins` parameter to ensure it does not include `NamespaceLifecycle`.", + "rules": [], + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-1.2.14 Ensure that the admission control plugin NodeRestriction is set", + "controlID": "C-0127", + "creationTime": "", + "description": "Limit the `Node` and `Pod` objects that a kubelet could modify.", + "remediation": "Follow the Kubernetes documentation and configure `NodeRestriction` plug-in on kubelets. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the `--enable-admission-plugins` parameter to a value that includes `NodeRestriction`.\n\n \n```\n--enable-admission-plugins=...,NodeRestriction,...\n\n```", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-1.2.15 Ensure that the --profiling argument is set to false", + "controlID": "C-0129", + "creationTime": "", + "description": "Disable profiling, if not needed.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```", + "rules": [], + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-1.2.16 Ensure that the --audit-log-path argument is set", + "controlID": "C-0130", + "creationTime": "", + "description": "Enable auditing on the Kubernetes API Server and set the desired audit log path.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-path` parameter to a suitable path and file where you would like audit logs to be written, for example:\n\n \n```\n--audit-log-path=/var/log/apiserver/audit.log\n\n```", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-1.2.17 Ensure that the --audit-log-maxage argument is set to 30 or as appropriate", + "controlID": "C-0131", + "creationTime": 
"", + "description": "Retain the logs for at least 30 days or as appropriate.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxage` parameter to 30 or as an appropriate number of days:\n\n \n```\n--audit-log-maxage=30\n\n```", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-1.2.18 Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate", + "controlID": "C-0132", + "creationTime": "", + "description": "Retain 10 or an appropriate number of old log files.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxbackup` parameter to 10 or to an appropriate value.\n\n \n```\n--audit-log-maxbackup=10\n\n```", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-1.2.19 Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate", + "controlID": "C-0133", + "creationTime": "", + "description": "Rotate log files on reaching 100 MB or as appropriate.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxsize` parameter to an appropriate size in MB. For example, to set it as 100 MB:\n\n \n```\n--audit-log-maxsize=100\n\n```", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-1.2.20 Ensure that the --request-timeout argument is set as appropriate", + "controlID": "C-0134", + "creationTime": "", + "description": "Set global request timeout for API server requests as appropriate.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` and set the below parameter as appropriate and if needed. 
For example,\n\n \n```\n--request-timeout=300s\n\n```", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-1.2.21 Ensure that the --service-account-lookup argument is set to true", + "controlID": "C-0135", + "creationTime": "", + "description": "Validate service account before validating token.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--service-account-lookup=true\n\n```\n Alternatively, you can delete the `--service-account-lookup` parameter from this file so that the default takes effect.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-1.2.22 Ensure that the --service-account-key-file argument is set as appropriate", + "controlID": "C-0136", + "creationTime": "", + "description": "Explicitly set a service account public key file for service accounts on the apiserver.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--service-account-key-file` parameter to the public key file for service accounts:\n\n \n```\n--service-account-key-file=\n\n```", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-1.2.23 Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate", + "controlID": "C-0137", + "creationTime": "", + "description": "etcd should be configured to make use of TLS encryption for client connections.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the etcd certificate and key file parameters.\n\n \n```\n--etcd-certfile= \n--etcd-keyfile=\n\n```", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-1.2.24 Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate", + "controlID": "C-0138", + "creationTime": "", + "description": "Setup TLS connection on the API server.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection on the apiserver. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the TLS certificate and private key file parameters.\n\n \n```\n--tls-cert-file= \n--tls-private-key-file=\n\n```", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-1.2.25 Ensure that the --client-ca-file argument is set as appropriate", + "controlID": "C-0139", + "creationTime": "", + "description": "Setup TLS connection on the API server.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection on the apiserver. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the client certificate authority file.\n\n \n```\n--client-ca-file=\n\n```", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-1.2.26 Ensure that the --etcd-cafile argument is set as appropriate", + "controlID": "C-0140", + "creationTime": "", + "description": "etcd should be configured to make use of TLS encryption for client connections.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the etcd certificate authority file parameter.\n\n \n```\n--etcd-cafile=\n\n```", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-1.2.27 Ensure that the --encryption-provider-config argument is set as appropriate", + "controlID": "C-0141", + "creationTime": "", + "description": "Encrypt etcd key-value store.", + "remediation": "Follow the Kubernetes documentation and configure a `EncryptionConfig` file. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the `--encryption-provider-config` parameter to the path of that file:\n\n \n```\n--encryption-provider-config=\n\n```", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-1.2.28 Ensure that encryption providers are appropriately configured", + "controlID": "C-0142", + "creationTime": "", + "description": "Where `etcd` encryption is used, appropriate providers should be configured.", + "remediation": "Follow the Kubernetes documentation and configure a `EncryptionConfig` file. 
In this file, choose `aescbc`, `kms` or `secretbox` as the encryption provider.", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-1.3.1 Ensure that the --terminated-pod-gc-threshold argument is set as appropriate", + "controlID": "C-0144", + "creationTime": "", + "description": "Activate garbage collector on pod termination, as appropriate.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--terminated-pod-gc-threshold` to an appropriate threshold, for example:\n\n \n```\n--terminated-pod-gc-threshold=10\n\n```", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-1.3.2 Ensure that the --profiling argument is set to false", + "controlID": "C-0145", + "creationTime": "", + "description": "Disable profiling, if not needed.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```", + "rules": [], + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-1.3.3 Ensure that the --use-service-account-credentials argument is set to true", + "controlID": "C-0146", + "creationTime": "", + "description": "Use individual service account credentials for each controller.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node to set the below parameter.\n\n \n```\n--use-service-account-credentials=true\n\n```", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-1.3.4 Ensure that the --service-account-private-key-file argument is set as appropriate", + "controlID": "C-0147", + "creationTime": "", + "description": "Explicitly set a service account private key file for service accounts on the controller manager.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--service-account-private-key-file` parameter to the private key file for service accounts.\n\n \n```\n--service-account-private-key-file=\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-1.3.5 Ensure that the --root-ca-file argument is set as appropriate", + "controlID": "C-0148", + "creationTime": "", + "description": "Allow pods to verify the API server's serving certificate before establishing connections.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--root-ca-file` parameter to the certificate bundle file`.\n\n \n```\n--root-ca-file=\n\n```", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": 
"Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-1.3.6 Ensure that the RotateKubeletServerCertificate argument is set to true", + "controlID": "C-0149", + "creationTime": "", + "description": "Enable kubelet server certificate rotation on controller-manager.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--feature-gates` parameter to include `RotateKubeletServerCertificate=true`.\n\n \n```\n--feature-gates=RotateKubeletServerCertificate=true\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-1.3.7 Ensure that the --bind-address argument is set to 127.0.0.1", + "controlID": "C-0150", + "creationTime": "", + "description": "Do not bind the Controller Manager service to non-loopback insecure addresses.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and ensure the correct value for the `--bind-address` parameter", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-1.4.1 Ensure that the --profiling argument is set to false", + "controlID": "C-0151", + "creationTime": "", + "description": "Disable profiling, if not needed.", + "remediation": "Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` file on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```", + "rules": [], + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-1.4.2 Ensure that the --bind-address argument is set to 127.0.0.1", + "controlID": "C-0152", + "creationTime": "", + "description": "Do not bind the scheduler service to non-loopback insecure addresses.", + "remediation": "Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` on the Control Plane node and ensure the correct value for the `--bind-address` parameter", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-2.1 Ensure that the --cert-file and --key-file arguments are set as appropriate", + "controlID": "C-0153", + "creationTime": "", + "description": "Configure TLS encryption for the etcd service.", + "remediation": "Follow the etcd service documentation and configure TLS encryption.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameters.\n\n \n```\n--cert-file=\n--key-file=\n\n```", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-2.2 Ensure that the --client-cert-auth argument is set to true", + "controlID": "C-0154", + "creationTime": "", + "description": "Enable client authentication on etcd service.", + "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n 
\n```\n--client-cert-auth=\"true\"\n\n```", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-2.3 Ensure that the --auto-tls argument is not set to true", + "controlID": "C-0155", + "creationTime": "", + "description": "Do not use self-signed certificates for TLS.", + "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and either remove the `--auto-tls` parameter or set it to `false`.\n\n \n```\n--auto-tls=false\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-2.4 Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate", + "controlID": "C-0156", + "creationTime": "", + "description": "etcd should be configured to make use of TLS encryption for peer connections.", + "remediation": "Follow the etcd service documentation and configure peer TLS encryption as appropriate for your etcd cluster.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameters.\n\n \n```\n--peer-client-file=\n--peer-key-file=\n\n```", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-2.5 Ensure that the --peer-client-cert-auth argument is set to true", + "controlID": "C-0157", + "creationTime": "", + "description": "etcd should be configured for peer authentication.", + "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--peer-client-cert-auth=true\n\n```", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-2.6 Ensure that the --peer-auto-tls argument is not set to true", + "controlID": "C-0158", + "creationTime": "", + "description": "Do not use automatically generated self-signed certificates for TLS connections between peers.", + "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and either remove the `--peer-auto-tls` parameter or set it to `false`.\n\n \n```\n--peer-auto-tls=false\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-2.7 Ensure that a unique Certificate Authority is used for etcd", + "controlID": "C-0159", + "creationTime": "", + "description": "Use a different certificate authority for etcd from the one used for Kubernetes.", + "remediation": "Follow the etcd documentation and create a dedicated certificate authority setup for the etcd service.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--trusted-ca-file=\n\n```", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-3.2.1 Ensure that a minimal audit policy is created", + "controlID": "C-0160", 
+ "creationTime": "", + "description": "Kubernetes can audit the details of requests made to the API server. The `--audit-policy-file` flag must be set for this logging to be enabled.", + "remediation": "Create an audit policy file for your cluster.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-3.2.2 Ensure that the audit policy covers key security concerns", + "controlID": "C-0161", + "creationTime": "", + "description": "Ensure that the audit policy created for the cluster covers key security concerns.", + "remediation": "Consider modification of the audit policy in use on the cluster to include these items, at a minimum.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-4.1.1 Ensure that the kubelet service file permissions are set to 600 or more restrictive", + "controlID": "C-0162", + "creationTime": "", + "description": "Ensure that the `kubelet` service file has permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchmod 600 /etc/systemd/system/kubelet.service.d/kubeadm.conf\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-4.1.2 Ensure that the kubelet service file ownership is set to root:root", + "controlID": "C-0163", + "creationTime": "", + "description": "Ensure that the `kubelet` service file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchown root:root /etc/systemd/system/kubelet.service.d/kubeadm.conf\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-4.1.3 If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive", + "controlID": "C-0164", + "creationTime": "", + "description": "If `kube-proxy` is running, and if it is using a file-based kubeconfig file, ensure that the proxy kubeconfig file has permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchmod 600 \n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-4.1.4 If proxy kubeconfig file exists ensure ownership is set to root:root", + "controlID": "C-0165", + "creationTime": "", + "description": "If `kube-proxy` is running, ensure that the file ownership of its kubeconfig file is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. 
For example,\n\n \n```\nchown root:root \n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-4.1.5 Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive", + "controlID": "C-0166", + "creationTime": "", + "description": "Ensure that the `kubelet.conf` file has permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/kubelet.conf\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-4.1.6 Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root", + "controlID": "C-0167", + "creationTime": "", + "description": "Ensure that the `kubelet.conf` file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-4.1.7 Ensure that the certificate authorities file permissions are set to 600 or more restrictive", + "controlID": "C-0168", + "creationTime": "", + "description": "Ensure that the certificate authorities file has permissions of `600` or more restrictive.", + "remediation": "Run the following command to modify the file permissions of the `--client-ca-file`\n\n \n```\nchmod 600 \n\n```", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-4.1.8 Ensure that the client certificate authorities file ownership is set to root:root", + "controlID": "C-0169", + "creationTime": "", + "description": "Ensure that the certificate authorities file ownership is set to `root:root`.", + "remediation": "Run the following command to modify the ownership of the `--client-ca-file`.\n\n \n```\nchown root:root \n\n```", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-4.1.9 If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive", + "controlID": "C-0170", + "creationTime": "", + "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file has permissions of 600 or more restrictive.", + "remediation": "Run the following command (using the config file location identied in the Audit step)\n\n \n```\nchmod 600 /var/lib/kubelet/config.yaml\n\n```", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-4.1.10 If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root", + "controlID": "C-0171", + "creationTime": "", + "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, 
that file is owned by root:root.", + "remediation": "Run the following command (using the config file location identied in the Audit step)\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-4.2.1 Ensure that the --anonymous-auth argument is set to false", + "controlID": "C-0172", + "creationTime": "", + "description": "Disable anonymous requests to the Kubelet server.", + "remediation": "If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to `false`.\n\n If using executable arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--anonymous-auth=false\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-4.2.2 Ensure that the --authorization-mode argument is not set to AlwaysAllow", + "controlID": "C-0173", + "creationTime": "", + "description": "Do not allow all requests. Enable explicit authorization.", + "remediation": "If using a Kubelet config file, edit the file to set `authorization: mode` to `Webhook`.\n\n If using executable arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_AUTHZ_ARGS` variable.\n\n \n```\n--authorization-mode=Webhook\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-4.2.3 Ensure that the --client-ca-file argument is set as appropriate", + "controlID": "C-0174", + "creationTime": "", + "description": "Enable Kubelet authentication using certificates.", + "remediation": "If using a Kubelet config file, edit the file to set `authentication: x509: clientCAFile` to the location of the client CA file.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_AUTHZ_ARGS` variable.\n\n \n```\n--client-ca-file=\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-4.2.4 Verify that the --read-only-port argument is set to 0", + "controlID": "C-0175", + "creationTime": "", + "description": "Disable the read-only port.", + "remediation": "If using a Kubelet config file, edit the file to set `readOnlyPort` to `0`.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--read-only-port=0\n\n```\n Based on your system, restart the `kubelet` service. 
For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-4.2.5 Ensure that the --streaming-connection-idle-timeout argument is not set to 0", + "controlID": "C-0176", + "creationTime": "", + "description": "Do not disable timeouts on streaming connections.", + "remediation": "If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a value other than 0.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--streaming-connection-idle-timeout=5m\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "rules": [], + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-4.2.6 Ensure that the --make-iptables-util-chains argument is set to true", + "controlID": "C-0178", + "creationTime": "", + "description": "Allow Kubelet to manage iptables.", + "remediation": "If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains: true`.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and remove the `--make-iptables-util-chains` argument from the `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "rules": [], + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-4.2.7 Ensure that the --hostname-override argument is not set", + "controlID": "C-0179", + "creationTime": "", + "description": "Do not override node hostnames.", + "remediation": "Edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and remove the `--hostname-override` argument from the `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "rules": [], + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-4.2.8 Ensure that the eventRecordQPS argument is set to a level which ensures appropriate event capture", + "controlID": "C-0180", + "creationTime": "", + "description": "Security relevant information should be captured. The eventRecordQPS on the Kubelet configuration can be used to limit the rate at which events are gathered and sets the maximum event creations per second. 
Setting this too low could result in relevant events not being logged, however the unlimited setting of `0` could result in a denial of service on the kubelet.", + "remediation": "If using a Kubelet config file, edit the file to set `eventRecordQPS:` to an appropriate level.\n\n If using command line arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and set the below parameter in `KUBELET_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "rules": [], + "baseScore": 2, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-4.2.9 Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate", + "controlID": "C-0181", + "creationTime": "", + "description": "Setup TLS connection on the Kubelets.", + "remediation": "If using a Kubelet config file, edit the file to set tlsCertFile to the location of the certificate file to use to identify this Kubelet, and tlsPrivateKeyFile to the location of the corresponding private key file.\n\n If using command line arguments, edit the kubelet service file /etc/kubernetes/kubelet.conf on each worker node and set the below parameters in KUBELET\\_CERTIFICATE\\_ARGS variable.\n\n --tls-cert-file= --tls-private-key-file=\nBased on your system, restart the kubelet service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-4.2.10 Ensure that the --rotate-certificates argument is not set to false", + "controlID": "C-0182", + "creationTime": "", + "description": "Enable kubelet client certificate rotation.", + "remediation": "If using a Kubelet config file, edit the file to add the line `rotateCertificates: true` or remove it altogether to use the default value.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and remove `--rotate-certificates=false` argument from the `KUBELET_CERTIFICATE_ARGS` variable or set --rotate-certificates=true .\n\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-4.2.11 Verify that the RotateKubeletServerCertificate argument is set to true", + "controlID": "C-0183", + "creationTime": "", + "description": "Enable kubelet server certificate rotation.", + "remediation": "Edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_CERTIFICATE_ARGS` variable.\n\n \n```\n--feature-gates=RotateKubeletServerCertificate=true\n\n```\n Based on your system, restart the `kubelet` service. 
For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-4.2.12 Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers", + "controlID": "C-0184", + "creationTime": "", + "description": "Ensure that the Kubelet is configured to only use strong cryptographic ciphers.", + "remediation": "If using a Kubelet config file, edit the file to set `TLSCipherSuites:` to `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256` or to a subset of these values.\n\n If using executable arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the `--tls-cipher-suites` parameter as follows, or to a subset of these values.\n\n \n```\n --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-5.1.1 Ensure that the cluster-admin role is only used where required", + "controlID": "C-0185", + "creationTime": "", + "description": "The RBAC role `cluster-admin` provides wide-ranging powers over the environment and should be used only where and when needed.", + "remediation": "Identify all clusterrolebindings to the cluster-admin role. Check if they are used and if they need this role or if they could use a role with fewer privileges.\n\n Where possible, first bind users to a lower privileged role and then remove the clusterrolebinding to the cluster-admin role :\n\n \n```\nkubectl delete clusterrolebinding [name]\n\n```", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "CIS-5.1.2 Minimize access to secrets", + "controlID": "C-0186", + "creationTime": "", + "description": "The Kubernetes API stores secrets, which may be service account tokens for the Kubernetes API or credentials used by workloads in the cluster. 
Access to these secrets should be restricted to the smallest possible group of users to reduce the risk of privilege escalation.", + "remediation": "Where possible, remove `get`, `list` and `watch` access to `secret` objects in the cluster.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "CIS-5.1.3 Minimize wildcard use in Roles and ClusterRoles", + "controlID": "C-0187", + "creationTime": "", + "description": "Kubernetes Roles and ClusterRoles provide access to resources based on sets of objects and actions that can be taken on those objects. It is possible to set either of these to be the wildcard \"\\*\" which matches all items.\n\n Use of wildcards is not optimal from a security perspective as it may allow for inadvertent access to be granted when new resources are added to the Kubernetes API either as CRDs or in later versions of the product.", + "remediation": "Where possible replace any use of wildcards in clusterroles and roles with specific objects or actions.", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "CIS-5.1.4 Minimize access to create pods", + "controlID": "C-0188", + "creationTime": "", + "description": "The ability to create pods in a namespace can provide a number of opportunities for privilege escalation, such as assigning privileged service accounts to these pods or mounting hostPaths with access to sensitive data (unless Pod Security Policies are implemented to restrict this access)\n\n As such, access to create new pods should be restricted to the smallest possible group of users.", + "remediation": "Where possible, remove `create` access to `pod` objects in the cluster.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "CIS-5.1.5 Ensure that default service accounts are not actively used.", + "controlID": "C-0189", + "creationTime": "", + "description": "The `default` service account should not be used to ensure that rights granted to applications can be more easily audited and reviewed.", + "remediation": "Create explicit service accounts wherever a Kubernetes workload requires specific access to the Kubernetes API server.\n\n Modify the configuration of each default service account to include this value\n\n \n```\nautomountServiceAccountToken: false\n\n```", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CIS-5.1.6 Ensure that Service Account Tokens are only mounted where necessary", + "controlID": "C-0190", + "creationTime": "", + "description": "Service accounts tokens should not be mounted in pods except where the workload running in the pod explicitly needs to communicate with the API server", + "remediation": "Modify the definition of pods and service accounts which do not need to mount service account tokens to disable it.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CIS-5.1.8 Limit use of the Bind, Impersonate and Escalate 
permissions in the Kubernetes cluster", + "controlID": "C-0191", + "creationTime": "", + "description": "Cluster roles and roles with the impersonate, bind or escalate permissions should not be granted unless strictly required. Each of these permissions allow a particular subject to escalate their privileges beyond those explicitly granted by cluster administrators", + "remediation": "Where possible, remove the impersonate, bind and escalate rights from subjects.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "CIS-5.2.1 Ensure that the cluster has at least one active policy control mechanism in place", + "controlID": "C-0192", + "creationTime": "", + "description": "Every Kubernetes cluster should have at least one policy control mechanism in place to enforce the other requirements in this section. This could be the in-built Pod Security Admission controller, or a third party policy control system.", + "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Network", + "id": "Cat-4" + } + }, + { + "guid": "", + "name": "CIS-5.2.2 Minimize the admission of privileged containers", + "controlID": "C-0193", + "creationTime": "", + "description": "Do not generally permit containers to be run with the `securityContext.privileged` flag set to `true`.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of privileged containers.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CIS-5.2.6 Minimize the admission of containers with allowPrivilegeEscalation", + "controlID": "C-0197", + "creationTime": "", + "description": "Do not generally permit containers to be run with the `allowPrivilegeEscalation` flag set to true. 
Allowing this right can lead to a process running in a container getting more rights than it started with.\n\n It's important to note that these rights are still constrained by the overall container sandbox, and this setting does not relate to the use of privileged containers.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers with `securityContext: allowPrivilegeEscalation: true`", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CIS-5.2.7 Minimize the admission of root containers", + "controlID": "C-0198", + "creationTime": "", + "description": "Do not generally permit containers to be run as the root user.", + "remediation": "Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0, is set.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CIS-5.2.8 Minimize the admission of containers with the NET\\_RAW capability", + "controlID": "C-0199", + "creationTime": "", + "description": "Do not generally permit containers with the potentially dangerous NET\\_RAW capability.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers with the `NET_RAW` capability.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CIS-5.2.9 Minimize the admission of containers with added capabilities", + "controlID": "C-0200", + "creationTime": "", + "description": "Do not generally permit containers with capabilities assigned beyond the default set.", + "remediation": "Ensure that `allowedCapabilities` is not present in policies for the cluster unless it is set to an empty array.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CIS-5.2.10 Minimize the admission of containers with capabilities assigned", + "controlID": "C-0201", + "creationTime": "", + "description": "Do not generally permit containers with capabilities", + "remediation": "Review the use of capabilities in applications running on your cluster. 
Where a namespace contains applications which do not require any Linux capabilities to operate consider adding a policy which forbids the admission of containers which do not drop all capabilities.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CIS-5.2.11 Minimize the admission of Windows HostProcess Containers", + "controlID": "C-0202", + "creationTime": "", + "description": "Do not generally permit Windows containers to be run with the `hostProcess` flag set to true.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostProcess` containers.", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CIS-5.2.12 Minimize the admission of HostPath volumes", + "controlID": "C-0203", + "creationTime": "", + "description": "Do not generally admit containers which make use of `hostPath` volumes.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers which use `hostPath` volumes.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CIS-5.2.13 Minimize the admission of containers which use HostPorts", + "controlID": "C-0204", + "creationTime": "", + "description": "Do not generally permit containers which require the use of HostPorts.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers which use `hostPort` sections.", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CIS-5.3.1 Ensure that the CNI in use supports Network Policies", + "controlID": "C-0205", + "creationTime": "", + "description": "There are a variety of CNI plugins available for Kubernetes. If the CNI in use does not support Network Policies it may not be possible to effectively restrict traffic in the cluster.", + "remediation": "If the CNI plugin in use does not support network policies, consideration should be given to making use of a different plugin, or finding an alternate mechanism for restricting traffic in the Kubernetes cluster.", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Network", + "id": "Cat-4" + } + }, + { + "guid": "", + "name": "CIS-5.3.2 Ensure that all Namespaces have Network Policies defined", + "controlID": "C-0206", + "creationTime": "", + "description": "Use network policies to isolate traffic in your cluster network.", + "remediation": "Follow the documentation and create `NetworkPolicy` objects as you need them.", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Network", + "id": "Cat-4" + } + }, + { + "guid": "", + "name": "CIS-5.4.1 Prefer using secrets as files over secrets as environment variables", + "controlID": "C-0207", + "creationTime": "", + "description": "Kubernetes supports mounting secrets as data volumes or as environment variables. 
Minimize the use of environment variable secrets.", + "remediation": "If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables.", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Secrets", + "id": "Cat-3" + } + } + }, + { + "guid": "", + "name": "CIS-5.4.2 Consider external secret storage", + "controlID": "C-0208", + "creationTime": "", + "description": "Consider the use of an external secrets storage and management system, instead of using Kubernetes Secrets directly, if you have more complex secret management needs. Ensure the solution requires authentication to access secrets, has auditing of access to and use of secrets, and encrypts secrets. Some solutions also make it easier to rotate secrets.", + "remediation": "Refer to the secrets management options offered by your cloud provider or a third-party secrets management solution.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-5.7.1 Create administrative boundaries between resources using namespaces", + "controlID": "C-0209", + "creationTime": "", + "description": "Use namespaces to isolate your Kubernetes objects.", + "remediation": "Follow the documentation and create namespaces for objects in your deployment as you need them.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CIS-5.7.2 Ensure that the seccomp profile is set to docker/default in your pod definitions", + "controlID": "C-0210", + "creationTime": "", + "description": "Enable `docker/default` seccomp profile in your pod definitions.", + "remediation": "Use security context to enable the `docker/default` seccomp profile in your pod definitions. An example is as below:\n\n \n```\n securityContext:\n seccompProfile:\n type: RuntimeDefault\n\n```", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CIS-5.7.3 Apply Security Context to Your Pods and Containers", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" + ] + } + ] + }, + "controlID": "C-0211", + "creationTime": "", + "description": "Apply Security Context to Your Pods and Containers", + "remediation": "Follow the Kubernetes documentation and apply security contexts to your pods. For a suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker Containers.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CIS-5.7.4 The default namespace should not be used", + "controlID": "C-0212", + "creationTime": "", + "description": "Kubernetes provides a default namespace, where objects are placed if no namespace is specified for them. 
Placing objects in this namespace makes application of RBAC and other controls more difficult.", + "remediation": "Ensure that namespaces are created to allow for appropriate segregation of Kubernetes resources and that all new resources are created in a specific namespace.", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CIS-5.1.7 Avoid use of system:masters group", + "attributes": { + "actionRequired": "manual review" + }, + "controlID": "C-0246", + "creationTime": "", + "description": "The special group `system:masters` should not be used to grant permissions to any user or service account, except where strictly necessary (e.g. bootstrapping access prior to RBAC being fully available)", + "remediation": "Remove the `system:masters` group from all users in the cluster.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster" + ] + } + }, + { + "guid": "", + "name": "CIS-5.2.3 Minimize the admission of containers wishing to share the host process ID namespace", + "controlID": "C-0275", + "creationTime": "", + "description": "Do not generally permit containers to be run with the `hostPID` flag set to true.", + "remediation": "Configure the Admission Controller to restrict the admission of `hostPID` containers.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CIS-5.2.4 Minimize the admission of containers wishing to share the host IPC namespace", + "controlID": "C-0276", + "creationTime": "", + "description": "Do not generally permit containers to be run with the `hostIPC` flag set to true.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostIPC` containers.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CIS-1.2.29 Ensure that the API Server only makes use of Strong Cryptographic Ciphers", + "controlID": "C-0277", + "creationTime": "", + "description": "Ensure that the API server is configured to only use strong cryptographic ciphers.", + "remediation": "Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml on the Control Plane node and set the below parameter.\n\n \n```\n--tls-cipher-suites=TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256.\n\n```", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-5.1.9 Minimize access to create persistent volumes", + "controlID": "C-0278", + "creationTime": "", + "description": "The ability to create persistent volumes in a cluster can provide an opportunity for 
privilege escalation, via the creation of `hostPath` volumes. As persistent volumes are not covered by Pod Security Admission, a user with access to create persistent volumes may be able to get access to sensitive files from the underlying host even where restrictive Pod Security Admission policies are in place.", + "remediation": "Where possible, remove `create` access to `PersistentVolume` objects in the cluster.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "CIS-5.1.10 Minimize access to the proxy sub-resource of nodes", + "controlID": "C-0279", + "creationTime": "", + "description": "Users with access to the `Proxy` sub-resource of `Node` objects automatically have permissions to use the Kubelet API, which may allow for privilege escalation or bypass cluster security controls such as audit logs.\n\n The Kubelet provides an API which includes rights to execute commands in any container running on the node. Access to this API is covered by permissions to the main Kubernetes API via the `node` object. The proxy sub-resource specifically allows wide ranging access to the Kubelet API.\n\n Direct access to the Kubelet API bypasses controls like audit logging (there is no audit log of Kubelet API access) and admission control.", + "remediation": "Where possible, remove access to the `proxy` sub-resource of `node` objects.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "CIS-5.1.11 Minimize access to the approval sub-resource of certificatesigningrequests objects", + "controlID": "C-0280", + "creationTime": "", + "description": "Users with access to update the `approval` sub-resource of `certificatesigningrequests` objects can approve new client certificates for the Kubernetes API effectively allowing them to create new high-privileged user accounts.\n\n This can allow for privilege escalation to full cluster administrator, depending on users configured in the cluster", + "remediation": "Where possible, remove access to the `approval` sub-resource of `certificatesigningrequests` objects.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "CIS-5.1.12 Minimize access to webhook configuration objects", + "controlID": "C-0281", + "creationTime": "", + "description": "Users with rights to create/modify/delete `validatingwebhookconfigurations` or `mutatingwebhookconfigurations` can control webhooks that can read any object admitted to the cluster, and in the case of mutating webhooks, also mutate admitted objects. 
This could allow for privilege escalation or disruption of the operation of the cluster.", + "remediation": "Where possible, remove access to the `validatingwebhookconfigurations` or `mutatingwebhookconfigurations` objects", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "CIS-5.1.13 Minimize access to the service account token creation", + "controlID": "C-0282", + "creationTime": "", + "description": "Users with rights to create new service account tokens at a cluster level, can create long-lived privileged credentials in the cluster. This could allow for privilege escalation and persistent access to the cluster, even if the users account has been revoked.", + "remediation": "Where possible, remove access to the `token` sub-resource of `serviceaccount` objects.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "CIS-1.2.3 Ensure that the DenyServiceExternalIPs is set", + "controlID": "C-0283", + "creationTime": "", + "description": "This admission controller rejects all net-new usage of the Service field externalIPs.", + "remediation": "Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml on the master node and append the Kubernetes API server flag --enable-admission-plugins with the DenyServiceExternalIPs plugin. Note, the Kubernetes API server flag --enable-admission-plugins takes a comma-delimited list of admission control plugins to be enabled, even if they are in the list of plugins enabled by default.\n\n kube-apiserver --enable-admission-plugins=DenyServiceExternalIPs", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-4.2.13 Ensure that a limit is set on pod PIDs", + "controlID": "C-0284", + "creationTime": "", + "description": "Ensure that the Kubelet sets limits on the number of PIDs that can be created by pods running on the node.", + "remediation": "Decide on an appropriate level for this parameter and set it, either via the `--pod-max-pids` command line parameter or the `PodPidsLimit` configuration file setting.", + "rules": [], + "baseScore": 2, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-3.1.1 Client certificate authentication should not be used for users", + "attributes": { + "actionRequired": "manual review" + }, + "id": "CIS-3.1.1", + "controlID": "C-0286", + "creationTime": "", + "description": "Kubernetes provides the option to use client certificates for user authentication. 
However as there is no way to revoke these certificates when a user leaves an organization or loses their credential, they are not suitable for this purpose.\n\n It is not possible to fully disable client certificate use within a cluster as it is used for component to component authentication.", + "remediation": "Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented in place of client certificates.", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + } + }, + { + "guid": "", + "name": "CIS-3.1.2 Service account token authentication should not be used for users", + "attributes": { + "actionRequired": "manual review" + }, + "id": "CIS-3.1.2", + "controlID": "C-0287", + "creationTime": "", + "description": "Kubernetes provides service account tokens which are intended for use by workloads running in the Kubernetes cluster, for authentication to the API server.\n\n These tokens are not designed for use by end-users and do not provide for features such as revocation or expiry, making them insecure. A newer version of the feature (Bound service account token volumes) does introduce expiry but still does not allow for specific revocation.", + "remediation": "Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented in place of service account tokens.", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + } + }, + { + "guid": "", + "name": "CIS-3.1.3 Bootstrap token authentication should not be used for users", + "attributes": { + "actionRequired": "manual review" + }, + "id": "CIS-3.1.3", + "controlID": "C-0288", + "creationTime": "", + "description": "Kubernetes provides bootstrap tokens which are intended for use by new nodes joining the cluster\n\n These tokens are not designed for use by end-users they are specifically designed for the purpose of bootstrapping new nodes and not for general authentication", + "remediation": "Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented in place of bootstrap tokens.", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + } + }, + { + "guid": "", + "name": "CIS-5.5.1 Configure Image Provenance using ImagePolicyWebhook admission controller", + "attributes": { + "actionRequired": "manual review" + }, + "id": "CIS-5.5.1", + "controlID": "C-0289", + "creationTime": "", + "description": "Configure Image Provenance for your deployment.", + "remediation": "Follow the Kubernetes documentation and setup image provenance.", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + } + } + ], + "controlsIDs": [ + "C-0041", + "C-0092", + "C-0093", + "C-0094", + "C-0095", + "C-0096", + "C-0097", + "C-0098", + "C-0099", + "C-0100", + "C-0101", + "C-0102", + "C-0103", + "C-0104", + "C-0105", + "C-0106", + "C-0107", + "C-0108", + "C-0109", + "C-0110", + "C-0111", + "C-0112", + "C-0113", + "C-0114", + "C-0116", + "C-0117", + "C-0118", + "C-0119", + "C-0120", + "C-0121", + "C-0122", + "C-0123", + "C-0125", + "C-0126", + "C-0127", + "C-0129", + "C-0130", + "C-0131", + "C-0132", + "C-0133", + "C-0134", + "C-0135", + "C-0136", + "C-0137", + "C-0138", + "C-0139", + "C-0140", + "C-0141", + "C-0142", + "C-0144", + "C-0145", + "C-0146", + "C-0147", + "C-0148", + "C-0149", + "C-0150", + "C-0151", + "C-0152", + "C-0153", + "C-0154", + "C-0155", + "C-0156", + "C-0157", + "C-0158", + "C-0159", + "C-0160", + "C-0161", + "C-0162", + "C-0163", + 
"C-0164", + "C-0165", + "C-0166", + "C-0167", + "C-0168", + "C-0169", + "C-0170", + "C-0171", + "C-0172", + "C-0173", + "C-0174", + "C-0175", + "C-0176", + "C-0178", + "C-0179", + "C-0180", + "C-0181", + "C-0182", + "C-0183", + "C-0184", + "C-0185", + "C-0186", + "C-0187", + "C-0188", + "C-0189", + "C-0190", + "C-0191", + "C-0192", + "C-0193", + "C-0197", + "C-0198", + "C-0199", + "C-0200", + "C-0201", + "C-0202", + "C-0203", + "C-0204", + "C-0205", + "C-0206", + "C-0207", + "C-0208", + "C-0209", + "C-0210", + "C-0211", + "C-0212", + "C-0246", + "C-0275", + "C-0276", + "C-0277", + "C-0278", + "C-0279", + "C-0280", + "C-0281", + "C-0282", + "C-0283", + "C-0284", + "C-0286", + "C-0287", + "C-0288", + "C-0289" + ], + "subSections": { + "2": { + "guid": "", + "name": "etcd", + "id": "2", + "controlsIDs": [ + "C-0153", + "C-0154", + "C-0155", + "C-0156", + "C-0157", + "C-0158", + "C-0159" + ] + }, + "3": { + "guid": "", + "name": "Control Plane Configuration", + "id": "3", + "subSections": { + "2": { + "guid": "", + "name": "Logging", + "id": "3.2", + "controlsIDs": [ + "C-0160", + "C-0161" + ] + }, + "1": { + "guid": "", + "name": "Authentication and Authorization", + "id": "3.1", + "controlsIDs": [ + "C-0286", + "C-0287", + "C-0288" + ] + } + } + }, + "4": { + "guid": "", + "name": "Worker Nodes", + "id": "4", + "subSections": { + "1": { + "guid": "", + "name": "Worker Node Configuration Files", + "id": "4.1", + "controlsIDs": [ + "C-0162", + "C-0163", + "C-0164", + "C-0165", + "C-0166", + "C-0167", + "C-0168", + "C-0169", + "C-0170", + "C-0171" + ] + }, + "2": { + "guid": "", + "name": "Kubelet", + "id": "4.2", + "controlsIDs": [ + "C-0172", + "C-0173", + "C-0174", + "C-0175", + "C-0176", + "C-0178", + "C-0179", + "C-0180", + "C-0181", + "C-0182", + "C-0183", + "C-0184", + "C-0284" + ] + } + } + }, + "5": { + "guid": "", + "name": "Policies", + "id": "5", + "subSections": { + "1": { + "guid": "", + "name": "RBAC and Service Accounts", + "id": "5.1", + "controlsIDs": [ + "C-0185", + "C-0186", + "C-0187", + "C-0188", + "C-0189", + "C-0190", + "C-0191", + "C-0246", + "C-0278", + "C-0279", + "C-0280", + "C-0281", + "C-0282" + ] + }, + "2": { + "guid": "", + "name": "Pod Security Standards", + "id": "5.2", + "controlsIDs": [ + "C-0041", + "C-0192", + "C-0193", + "C-0197", + "C-0198", + "C-0199", + "C-0200", + "C-0201", + "C-0202", + "C-0203", + "C-0204", + "C-0275", + "C-0276" + ] + }, + "3": { + "guid": "", + "name": "Network Policies and CNI", + "id": "5.3", + "controlsIDs": [ + "C-0205", + "C-0206" + ] + }, + "4": { + "guid": "", + "name": "Secrets Management", + "id": "5.4", + "controlsIDs": [ + "C-0207", + "C-0208" + ] + }, + "5": { + "guid": "", + "name": "Extensible Admission Control", + "id": "5.5", + "controlsIDs": [ + "C-0289" + ] + }, + "7": { + "guid": "", + "name": "General Policies", + "id": "5.7", + "controlsIDs": [ + "C-0209", + "C-0210", + "C-0211", + "C-0212" + ] + } + } + }, + "1": { + "guid": "", + "name": "Control Plane Components", + "id": "1", + "subSections": { + "2": { + "guid": "", + "name": "API Server", + "id": "1.2", + "controlsIDs": [ + "C-0113", + "C-0114", + "C-0116", + "C-0117", + "C-0118", + "C-0119", + "C-0120", + "C-0121", + "C-0122", + "C-0123", + "C-0125", + "C-0126", + "C-0127", + "C-0129", + "C-0130", + "C-0131", + "C-0132", + "C-0133", + "C-0134", + "C-0135", + "C-0136", + "C-0137", + "C-0138", + "C-0139", + "C-0140", + "C-0141", + "C-0142", + "C-0277", + "C-0283" + ] + }, "3": { "guid": "", "name": "Controller Manager", @@ -3133,2058 +14060,33 @@ 
"C-0111", "C-0112" ] - }, - "2": { - "guid": "", - "name": "API Server", - "id": "1.2", - "controlsIDs": [ - "C-0113", - "C-0114", - "C-0115", - "C-0116", - "C-0117", - "C-0118", - "C-0119", - "C-0120", - "C-0121", - "C-0122", - "C-0123", - "C-0124", - "C-0125", - "C-0126", - "C-0127", - "C-0128", - "C-0129", - "C-0130", - "C-0131", - "C-0132", - "C-0133", - "C-0134", - "C-0135", - "C-0136", - "C-0137", - "C-0138", - "C-0139", - "C-0140", - "C-0141", - "C-0142", - "C-0143" - ] - } - } - }, - "2": { - "guid": "", - "name": "etcd", - "id": "2", - "controlsIDs": [ - "C-0153", - "C-0154", - "C-0155", - "C-0156", - "C-0157", - "C-0158", - "C-0159" - ] - }, - "3": { - "guid": "", - "name": "Control Plane Configuration", - "id": "3", - "subSections": { - "2": { - "guid": "", - "name": "Logging", - "id": "3.2", - "controlsIDs": [ - "C-0160", - "C-0161" - ] - } - } - }, - "4": { - "guid": "", - "name": "Worker Nodes", - "id": "4", - "subSections": { - "2": { - "guid": "", - "name": "Kubelet", - "id": "4.2", - "controlsIDs": [ - "C-0172", - "C-0173", - "C-0174", - "C-0175", - "C-0176", - "C-0177", - "C-0178", - "C-0179", - "C-0180", - "C-0181", - "C-0182", - "C-0183", - "C-0184" - ] - }, - "1": { - "guid": "", - "name": "Worker Node Configuration Files", - "id": "4.1", - "controlsIDs": [ - "C-0162", - "C-0163", - "C-0164", - "C-0165", - "C-0166", - "C-0167", - "C-0168", - "C-0169", - "C-0170", - "C-0171" - ] - } - } - }, - "5": { - "guid": "", - "name": "Policies", - "id": "5", - "subSections": { - "3": { - "guid": "", - "name": "Network Policies and CNI", - "id": "5.3", - "controlsIDs": [ - "C-0205", - "C-0206" - ] - }, - "4": { - "guid": "", - "name": "Secrets Management", - "id": "5.4", - "controlsIDs": [ - "C-0207", - "C-0208" - ] - }, - "7": { - "guid": "", - "name": "General Policies", - "id": "5.7", - "controlsIDs": [ - "C-0209", - "C-0210", - "C-0211", - "C-0212" - ] - }, - "1": { - "guid": "", - "name": "RBAC and Service Accounts", - "id": "5.1", - "controlsIDs": [ - "C-0185", - "C-0186", - "C-0187", - "C-0188", - "C-0189", - "C-0190", - "C-0191" - ] - }, - "2": { - "guid": "", - "name": "Pod Security Standards", - "id": "5.2", - "controlsIDs": [ - "C-0192", - "C-0193", - "C-0194", - "C-0195", - "C-0196", - "C-0197", - "C-0198", - "C-0199", - "C-0200", - "C-0201", - "C-0202", - "C-0203", - "C-0204" - ] } } } + }, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] } }, - { - "guid": "", - "name": "DevOpsBest", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "description": "", - "controls": [ - { - "guid": "", - "name": "Resources memory limit and request", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "compliance", - "devops" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Impact - service destruction" - ] - } - ] - }, - "controlID": "C-0004", - "creationTime": "", - "description": "This control identifies all Pods for which the memory limit is not set.", - "remediation": "Set the memory limit or use exception mechanism to avoid unnecessary notifications.", - "rules": [], - "baseScore": 8 - }, - { - "guid": "", - "name": "Configured readiness probe", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "devops" - ] - }, - "controlID": "C-0018", - "creationTime": "", - "description": "Readiness probe is intended to ensure that workload is ready to process network traffic. It is highly recommended to define readiness probe for every worker container. 
This control finds all the PODs where the readiness probe is not configured.", - "remediation": "Ensure Readiness probes are configured wherever possible.", - "rules": [], - "baseScore": 3 - }, - { - "guid": "", - "name": "Container hostPort", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance", - "devops" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access" - ] - } - ] - }, - "controlID": "C-0044", - "creationTime": "", - "description": "Configuring hostPort requires a particular port number. If two objects specify the same HostPort, they cannot be deployed to the same node. It may prevent the second object from starting, even if Kubernetes will try to reschedule it on another node, provided there are available nodes with a sufficient amount of resources. Also, if the number of replicas of such workload is higher than the number of nodes, the deployment will consistently fail.", - "remediation": "Avoid usage of hostPort unless it is absolutely necessary, in which case define appropriate exception. Use NodePort / ClusterIP instead.", - "rules": [], - "baseScore": 4 - }, - { - "guid": "", - "name": "Resources CPU limit and request", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "compliance", - "devops" - ] - }, - "controlID": "C-0050", - "creationTime": "", - "description": "This control identifies all Pods for which the CPU limit is not set.", - "remediation": "Set the CPU limit or use exception mechanism to avoid unnecessary notifications.", - "rules": [], - "baseScore": 8 - }, - { - "guid": "", - "name": "Configured liveness probe", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "devops" - ] - }, - "controlID": "C-0056", - "creationTime": "", - "description": "Liveness probe is intended to ensure that workload remains healthy during its entire execution lifecycle, or otherwise restart the container. It is highly recommended to define liveness probe for every worker container. This control finds all the PODs where the Liveness probe is not configured.", - "remediation": "Ensure Liveness probes are configured wherever possible.", - "rules": [], - "baseScore": 4 - }, - { - "guid": "", - "name": "Pods in default namespace", - "attributes": { - "controlTypeTags": [ - "compliance", - "devops" - ], - "armoBuiltin": true - }, - "controlID": "C-0061", - "creationTime": "", - "description": "It is recommended to avoid running PODs in the cluster without explicit namespace assignment. This control identifies all the PODs running in the default namespace.", - "remediation": "Create necessary namespaces and move all the PODs from default namespace there.", - "rules": [], - "baseScore": 3 - }, - { - "guid": "", - "name": "Naked PODs", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "devops" - ] - }, - "controlID": "C-0073", - "creationTime": "", - "description": "It is not recommended to create PODs without parental Deployment, ReplicaSet, StatefulSet, etc. Manual creation of PODs may lead to configuration drifts and other untracked changes in the system. Such PODs won't be automatically rescheduled by Kubernetes in case of a crash or infrastructure failure. 
This control identifies every POD that does not have a corresponding parental object.", - "remediation": "Create the necessary Deployment object for every POD, making any POD a first class citizen in your IaC architecture.", - "rules": [], - "baseScore": 3 - }, - { - "guid": "", - "name": "Containers mounting Docker socket", - "attributes": { - "controlTypeTags": [ - "devops" - ], - "armoBuiltin": true - }, - "controlID": "C-0074", - "creationTime": "", - "description": "Mounting Docker socket (Unix socket) enables the container to access Docker internals, retrieve sensitive information and execute Docker commands, if Docker runtime is available. This control identifies PODs that attempt to mount Docker socket for accessing Docker runtime.", - "remediation": "Remove docker socket mount request or define an exception.", - "rules": [], - "baseScore": 5 - }, - { - "guid": "", - "name": "Image pull policy on latest tag", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "devops" - ] - }, - "controlID": "C-0075", - "creationTime": "", - "description": "While usage of the latest tag is not generally recommended, in some cases this is necessary. If it is, the ImagePullPolicy must be set to Always, otherwise Kubernetes may run an older image with the same name that happens to be present in the node cache. Note that using Always will not cause additional image downloads because Kubernetes will check the image hash of the local image against the registry and only pull the image if this hash has changed, which is exactly what users want when using the latest tag. This control will identify all PODs with latest tag that have ImagePullPolicy not set to Always.", - "remediation": "Set ImagePullPolicy to Always in all PODs found by this control.", - "rules": [], - "baseScore": 2 - }, - { - "guid": "", - "name": "Label usage for resources", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "devops" - ] - }, - "controlID": "C-0076", - "creationTime": "", - "description": "It is recommended to set labels that identify semantic attributes of your application or deployment. For example, { app: myapp, tier: frontend, phase: test, deployment: v3 }. These labels can be used to assign policies to logical groups of the deployments as well as for presentation and tracking purposes. This control helps you find deployments without any of the expected labels.", - "remediation": "Define labels that are most suitable to your needs or use the exceptions to prevent further notifications.", - "rules": [], - "baseScore": 2 - }, - { - "guid": "", - "name": "K8s common labels usage", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "devops" - ] - }, - "controlID": "C-0077", - "creationTime": "", - "description": "Kubernetes common labels help manage and monitor Kubernetes cluster using different tools such as kubectl, dashboard and others in an interoperable way. Refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ for more information. 
This control helps you find objects that don't have any of these labels defined.", - "remediation": "Define applicable labels or use the exception mechanism to prevent further notifications.", - "rules": [], - "baseScore": 2 - } - ], - "controlsIDs": [ - "C-0004", - "C-0018", - "C-0044", - "C-0050", - "C-0056", - "C-0061", - "C-0073", - "C-0074", - "C-0075", - "C-0076", - "C-0077" - ] - }, - { - "guid": "", - "name": "AllControls", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "description": "Contains all the controls from all the frameworks", - "controls": [ - { - "guid": "", - "name": "Forbidden Container Registries", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Initial Access" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "categories": [ - "Initial access" - ], - "attackTrack": "container" - } - ] - }, - "controlID": "C-0001", - "creationTime": "", - "description": "In cases where the Kubernetes cluster is provided by a CSP (e.g., AKS in Azure, GKE in GCP, or EKS in AWS), compromised cloud credential can lead to the cluster takeover. Attackers may abuse cloud account credentials or IAM mechanism to the cluster’s management layer.", - "remediation": "Limit the registries from which you pull container images from", - "rules": [], - "baseScore": 7 - }, - { - "guid": "", - "name": "Exec into container", - "attributes": { - "controlTypeTags": [ - "compliance", - "security-impact" - ], - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Execution" - ], - "rbacQuery": "Show who can access into pods" - }, - "controlID": "C-0002", - "creationTime": "", - "description": "Attackers with relevant permissions can run malicious commands in the context of legitimate containers in the cluster using “kubectl exec” command. This control determines which subjects have permissions to use this command.", - "remediation": "It is recommended to prohibit “kubectl exec” command in production environments. 
It is also recommended not to use subjects with this permission for daily cluster operations.", - "rules": [], - "baseScore": 5 - }, - { - "guid": "", - "name": "Resources memory limit and request", - "attributes": { - "controlTypeTags": [ - "compliance", - "devops" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Impact - service destruction" - ] - } - ], - "armoBuiltin": true - }, - "controlID": "C-0004", - "creationTime": "", - "description": "This control identifies all Pods for which the memory limit is not set.", - "remediation": "Set the memory limit or use exception mechanism to avoid unnecessary notifications.", - "rules": [], - "baseScore": 8 - }, - { - "guid": "", - "name": "API server insecure port is enabled", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Initial access" - ] - } - ] - }, - "controlID": "C-0005", - "creationTime": "", - "description": "Kubernetes control plane API is running with non-secure port enabled which allows attackers to gain unprotected access to the cluster.", - "remediation": "Set the insecure-port flag of the API server to zero.", - "rules": [], - "baseScore": 9 - }, - { - "guid": "", - "name": "Data Destruction", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Impact" - ], - "rbacQuery": "Data destruction", - "controlTypeTags": [ - "compliance" - ] - }, - "controlID": "C-0007", - "creationTime": "", - "description": "Attackers may attempt to destroy data and resources in the cluster. This includes deleting deployments, configurations, storage, and compute resources. This control identifies all subjects that can delete resources.", - "remediation": "You should follow the least privilege principle and minimize the number of subjects that can delete resources.", - "rules": [], - "baseScore": 5 - }, - { - "guid": "", - "name": "Resource limits", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Impact - service destruction" - ] - } - ] - }, - "controlID": "C-0009", - "creationTime": "", - "description": "CPU and memory resources should have a limit set for every container or a namespace to prevent resource exhaustion. This control identifies all the Pods without resource limit definitions by checking their yaml definition file as well as their namespace LimitRange objects. It is also recommended to use ResourceQuota object to restrict overall namespace resources, but this is not verified by this control.", - "remediation": "Define LimitRange and Resource Limits in the namespace or in the deployment/POD yamls.", - "rules": [], - "baseScore": 7 - }, - { - "guid": "", - "name": "Applications credentials in configuration files", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Credential access", - "Lateral Movement" - ], - "controlTypeTags": [ - "security", - "compliance", - "security-impact" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Credential access" - ] - }, - { - "attackTrack": "container", - "categories": [ - "Credential access" - ] - } - ] - }, - "controlID": "C-0012", - "creationTime": "", - "description": "Attackers who have access to configuration files can steal the stored secrets and use them. 
This control checks if ConfigMaps or pod specifications have sensitive information in their configuration.", - "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", - "rules": [], - "baseScore": 8 - }, - { - "guid": "", - "name": "Non-root containers", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "controlID": "C-0013", - "creationTime": "", - "description": "Potential attackers may gain access to a container and leverage its existing privileges to conduct an attack. Therefore, it is not recommended to deploy containers with root privileges unless it is absolutely necessary. This control identifies all the Pods running as root or can escalate to root.", - "remediation": "If your application does not need root privileges, make sure to define the runAsUser or runAsGroup under the PodSecurityContext and use user ID 1000 or higher. Do not turn on allowPrivlegeEscalation bit and make sure runAsNonRoot is true.", - "rules": [], - "baseScore": 6 - }, - { - "guid": "", - "name": "Access Kubernetes dashboard", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Discovery", - "Lateral Movement" - ], - "rbacQuery": "Access k8s Dashboard", - "controlTypeTags": [ - "compliance" - ] - }, - "controlID": "C-0014", - "creationTime": "", - "description": "Attackers who gain access to the dashboard service account or have its RBAC permissions can use its network access to retrieve information about resources in the cluster or change them. This control checks if a subject that is not dashboard service account is bound to dashboard role/clusterrole, or - if anyone that is not the dashboard pod is associated with dashboard service account.", - "remediation": "Make sure that the “Kubernetes Dashboard” service account is only bound to the Kubernetes dashboard following the least privilege principle.", - "rules": [], - "baseScore": 2 - }, - { - "guid": "", - "name": "List Kubernetes secrets", - "attributes": { - "rbacQuery": "Show who can access secrets", - "controlTypeTags": [ - "security-impact", - "compliance" - ], - "attackTracks": [ - { - "categories": [ - "Credential access" - ], - "attackTrack": "kubeapi" - } - ], - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Credential access" - ] - }, - "controlID": "C-0015", - "creationTime": "", - "description": "Attackers who have permissions to access secrets can access sensitive information that might include credentials to various services. This control determines which user, group or service account can list/get secrets.", - "remediation": "Monitor and approve list of users, groups and service accounts that can access secrets. 
Use exception mechanism to prevent repetitive notifications.", - "rules": [], - "baseScore": 7 - }, - { - "guid": "", - "name": "Allow privilege escalation", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "controlID": "C-0016", - "creationTime": "", - "description": "Attackers may gain access to a container and uplift its privilege to enable excessive capabilities.", - "remediation": "If your application does not need it, make sure the allowPrivilegeEscalation field of the securityContext is set to false.", - "rules": [], - "baseScore": 6 - }, - { - "guid": "", - "name": "Immutable container filesystem", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Execution", - "Persistence" - ] - } - ] - }, - "controlID": "C-0017", - "creationTime": "", - "description": "Mutable container filesystem can be abused to inject malicious code or data into containers. Use immutable (read-only) filesystem to limit potential attacks.", - "remediation": "Set the filesystem of the container to read-only when possible (POD securityContext, readOnlyRootFilesystem: true). If the container's application needs to write into the filesystem, it is recommended to mount secondary filesystems for specific directories where the application requires write access.", - "rules": [], - "baseScore": 3 - }, - { - "guid": "", - "name": "Configured readiness probe", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "devops" - ] - }, - "controlID": "C-0018", - "creationTime": "", - "description": "Readiness probe is intended to ensure that workload is ready to process network traffic. It is highly recommended to define readiness probe for every worker container. This control finds all the PODs where the readiness probe is not configured.", - "remediation": "Ensure Readiness probes are configured wherever possible.", - "rules": [], - "baseScore": 3 - }, - { - "guid": "", - "name": "Mount service principal", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Credential Access" - ], - "controlTypeTags": [ - "compliance" - ] - }, - "controlID": "C-0020", - "creationTime": "", - "description": "When a cluster is deployed in the cloud, in some cases attackers can leverage their access to a container in the cluster to gain cloud credentials. This control determines if any workload contains a volume with potential access to cloud credential.", - "remediation": "Refrain from using path mount to known cloud credentials folders or files.", - "rules": [], - "baseScore": 4 - }, - { - "guid": "", - "name": "Exposed sensitive interfaces", - "attributes": { - "controlTypeTags": [ - "compliance" - ], - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Initial access" - ] - }, - "controlID": "C-0021", - "creationTime": "", - "description": "Exposing a sensitive interface to the internet poses a security risk. It might enable attackers to run malicious code or deploy containers in the cluster. This control checks if known components (e.g. Kubeflow, Argo Workflows, etc.) 
are deployed and exposed services externally.", - "remediation": "Consider blocking external interfaces or protect them with appropriate security tools.", - "rules": [], - "baseScore": 6 - }, - { - "guid": "", - "name": "Kubernetes CronJob", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Persistence" - ], - "controlTypeTags": [ - "compliance" - ] - }, - "controlID": "C-0026", - "creationTime": "", - "description": "Attackers may use Kubernetes CronJob for scheduling execution of malicious code that would run as a POD in the cluster. This control lists all the CronJobs that exist in the cluster for the user to approve.", - "remediation": "Watch Kubernetes CronJobs and make sure they are legitimate.", - "rules": [], - "baseScore": 1 - }, - { - "guid": "", - "name": "Ingress and Egress blocked", - "attributes": { - "controlTypeTags": [ - "compliance" - ], - "armoBuiltin": true - }, - "controlID": "C-0030", - "creationTime": "", - "description": "Disable Ingress and Egress traffic on all pods wherever possible. It is recommended to define restrictive network policy on all new PODs, and then enable sources/destinations that this POD must communicate with.", - "remediation": "Define a network policy that restricts ingress and egress connections.", - "rules": [], - "baseScore": 6 - }, - { - "guid": "", - "name": "Delete Kubernetes events", - "attributes": { - "microsoftMitreColumns": [ - "Defense evasion" - ], - "rbacQuery": "Show who can delete k8s events", - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Defense evasion" - ] - } - ], - "armoBuiltin": true - }, - "controlID": "C-0031", - "creationTime": "", - "description": "Attackers may delete Kubernetes events to avoid detection of their activity in the cluster. This control identifies all the subjects that can delete Kubernetes events.", - "remediation": "You should follow the least privilege principle. Minimize the number of subjects who can delete Kubernetes events. Avoid using these subjects in the daily operations.", - "rules": [], - "baseScore": 4 - }, - { - "guid": "", - "name": "Automatic mapping of service account", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Credential access", - "Impact - K8s API access" - ] - } - ] - }, - "controlID": "C-0034", - "creationTime": "", - "description": "Potential attacker may gain access to a POD and steal its service account token. Therefore, it is recommended to disable automatic mapping of the service account tokens in service account configuration and enable it only for PODs that need to use them.", - "remediation": "Disable automatic mounting of service account tokens to PODs either at the service account level or at the individual POD level, by specifying the automountServiceAccountToken: false. 
Note that POD level takes precedence.", - "rules": [], - "baseScore": 6 - }, - { - "guid": "", - "name": "Cluster-admin binding", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Impact - data destruction", - "Impact - service injection" - ] - } - ], - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "rbacQuery": "Show cluster_admin" - }, - "controlID": "C-0035", - "creationTime": "", - "description": "Attackers who have cluster admin permissions (can perform any action on any resource), can take advantage of their privileges for malicious activities. This control determines which subjects have cluster admin permissions.", - "remediation": "You should apply least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use subjects with such high permissions for daily operations.", - "rules": [], - "baseScore": 6 - }, - { - "guid": "", - "name": "Malicious admission controller (validating)", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Credential access" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Impact - data destruction", - "Impact - service injection" - ] - } - ] - }, - "controlID": "C-0036", - "creationTime": "", - "description": "Attackers can use validating webhooks to intercept and discover all the resources in the cluster. This control lists all the validating webhook configurations that must be verified.", - "remediation": "Ensure all the webhooks are necessary. Use exception mechanism to prevent repititive notifications.", - "rules": [], - "baseScore": 3 - }, - { - "guid": "", - "name": "Host PID/IPC privileges", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "controlID": "C-0038", - "creationTime": "", - "description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all PODs using hostPID or hostIPC privileges.", - "remediation": "Remove hostPID and hostIPC from the yaml file(s) privileges unless they are absolutely necessary.", - "rules": [], - "baseScore": 7 - }, - { - "guid": "", - "name": "Malicious admission controller (mutating)", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Impact - service injection" - ] - } - ], - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Persistence" - ] - }, - "controlID": "C-0039", - "creationTime": "", - "description": "Attackers may use mutating webhooks to intercept and modify all the resources in the cluster. This control lists all mutating webhook configurations that must be verified.", - "remediation": "Ensure all the webhooks are necessary. 
Use exception mechanism to prevent repititive notifications.", - "rules": [], - "baseScore": 4 - }, - { - "guid": "", - "name": "HostNetwork access", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Discovery", - "Lateral movement", - "Impact - service access" - ] - } - ] - }, - "controlID": "C-0041", - "creationTime": "", - "description": "Potential attackers may gain access to a POD and inherit access to the entire host network. For example, in AWS case, they will have access to the entire VPC. This control identifies all the PODs with host network access enabled.", - "remediation": "Only connect PODs to host network when it is necessary. If not, set the hostNetwork field of the pod spec to false, or completely remove it (false is the default). Whitelist only those PODs that must have access to host network by design.", - "rules": [], - "baseScore": 7 - }, - { - "guid": "", - "name": "SSH server running inside container", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Execution" - ], - "controlTypeTags": [ - "compliance" - ] - }, - "controlID": "C-0042", - "creationTime": "", - "description": "An SSH server that is running inside a container may be used by attackers to get remote access to the container. This control checks if pods have an open SSH port (22/2222).", - "remediation": "Remove SSH from the container image or limit the access to the SSH server using network policies.", - "rules": [], - "baseScore": 3 - }, - { - "guid": "", - "name": "Container hostPort", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance", - "devops" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access" - ] - } - ] - }, - "controlID": "C-0044", - "creationTime": "", - "description": "Configuring hostPort requires a particular port number. If two objects specify the same HostPort, they could not be deployed to the same node. It may prevent the second object from starting, even if Kubernetes will try reschedule it on another node, provided there are available nodes with sufficient amount of resources. Also, if the number of replicas of such workload is higher than the number of nodes, the deployment will consistently fail.", - "remediation": "Avoid usage of hostPort unless it is absolutely necessary, in which case define appropriate exception. 
Use NodePort / ClusterIP instead.", - "rules": [], - "baseScore": 4 - }, - { - "guid": "", - "name": "Writable hostPath mount", - "attributes": { - "controlTypeTags": [ - "security", - "compliance", - "devops", - "security-impact" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Persistence", - "Impact - Data access in container" - ] - } - ], - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Persistence", - "Lateral Movement" - ] - }, - "controlID": "C-0045", - "creationTime": "", - "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host and gain persistence.", - "remediation": "Refrain from using the hostPath mount or use the exception mechanism to remove unnecessary notifications.", - "rules": [], - "baseScore": 8 - }, - { - "guid": "", - "name": "Insecure capabilities", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ], - "armoBuiltin": true - }, - "controlID": "C-0046", - "creationTime": "", - "description": "Giving insecure or excessive capabilities to a container can increase the impact of the container compromise. This control identifies all the PODs with dangerous capabilities (see documentation pages for details).", - "remediation": "Remove all insecure capabilities which are not necessary for the container.", - "rules": [], - "baseScore": 7 - }, - { - "guid": "", - "name": "HostPath mount", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Impact - Data access in container" - ] - } - ] - }, - "controlID": "C-0048", - "creationTime": "", - "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host. This control identifies all the PODs using hostPath mount.", - "remediation": "Remove hostPath mounts unless they are absolutely necessary and use exception mechanism to remove notifications.", - "rules": [], - "baseScore": 7 - }, - { - "guid": "", - "name": "Network mapping", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Discovery" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Discovery" - ] - } - ] - }, - "controlID": "C-0049", - "creationTime": "", - "description": "If no network policy is defined, attackers who gain access to a single container may use it to probe the network. 
This control lists all namespaces in which no network policies are defined.", - "remediation": "Define network policies or use similar network protection mechanisms.", - "rules": [], - "baseScore": 3 - }, - { - "guid": "", - "name": "Resources CPU limit and request", - "attributes": { - "controlTypeTags": [ - "compliance", - "devops" - ], - "armoBuiltin": true - }, - "controlID": "C-0050", - "creationTime": "", - "description": "This control identifies all Pods for which the CPU limit is not set.", - "remediation": "Set the CPU limit or use exception mechanism to avoid unnecessary notifications.", - "rules": [], - "baseScore": 8 - }, - { - "guid": "", - "name": "Instance Metadata API", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Discovery" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Credential access", - "Discovery", - "Impact - service access" - ] - } - ] - }, - "controlID": "C-0052", - "creationTime": "", - "description": "Attackers who gain access to a container, may query the metadata API service for getting information about the underlying node. This control checks if there is access from the nodes to cloud providers instance metadata services.", - "remediation": "Disable metadata services for pods in cloud provider settings.", - "rules": [], - "baseScore": 7 - }, - { - "guid": "", - "name": "Access container service account", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Credential access" - ], - "rbacQuery": "Container service account mapping", - "controlTypeTags": [ - "compliance", - "security-impact" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Credential access", - "Impact - K8s API access" - ] - } - ] - }, - "controlID": "C-0053", - "creationTime": "", - "description": "Attackers who obtain access to a pod can use its SA token to communicate with KubeAPI server. All PODs with SA token mounted (if such token has a Role or a ClusterRole binding) are considerred potentially dangerous.", - "remediation": "Verify that RBAC is enabled. Follow the least privilege principle and ensure that only necessary PODs have SA token mounted into them.", - "rules": [], - "baseScore": 6 - }, - { - "guid": "", - "name": "Cluster internal networking", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Discovery", - "Lateral movement" - ] - } - ], - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Lateral movement" - ] - }, - "controlID": "C-0054", - "creationTime": "", - "description": "If no network policy is defined, attackers who gain access to a container may use it to move laterally in the cluster. This control lists namespaces in which no network policy is defined.", - "remediation": "Define Kubernetes network policies or use alternative products to protect cluster network.", - "rules": [], - "baseScore": 4 - }, - { - "guid": "", - "name": "Linux hardening", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ], - "armoBuiltin": true - }, - "controlID": "C-0055", - "creationTime": "", - "description": "Containers may be given more privileges than they actually need. 
This can increase the potential impact of a container compromise.", - "remediation": "You can use AppArmor, Seccomp, SELinux and Linux Capabilities mechanisms to restrict containers' abilities to utilize unwanted privileges.", - "rules": [], - "baseScore": 4 - }, - { - "guid": "", - "name": "Configured liveness probe", - "attributes": { - "controlTypeTags": [ - "devops" - ], - "armoBuiltin": true - }, - "controlID": "C-0056", - "creationTime": "", - "description": "Liveness probe is intended to ensure that workload remains healthy during its entire execution lifecycle, or otherwise restart the container. It is highly recommended to define liveness probe for every worker container. This control finds all the PODs where the Liveness probe is not configured.", - "remediation": "Ensure Liveness probes are configured wherever possible.", - "rules": [], - "baseScore": 4 - }, - { - "guid": "", - "name": "Privileged container", - "attributes": { - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ], - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Privilege escalation" - ] - }, - "controlID": "C-0057", - "creationTime": "", - "description": "Potential attackers may gain access to privileged containers and inherit access to the host resources. Therefore, it is not recommended to deploy privileged containers unless it is absolutely necessary. This control identifies all the privileged Pods.", - "remediation": "Remove privileged capabilities by setting the securityContext.privileged to false. If you must deploy a Pod as privileged, add other restrictions to it, such as network policy, Seccomp, etc., and still remove all unnecessary capabilities. Use the exception mechanism to remove unnecessary notifications.", - "rules": [], - "baseScore": 8 - }, - { - "guid": "", - "name": "CVE-2021-25741 - Using symlink for arbitrary host file system access.", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Persistence", - "Impact - Data access in container" - ] - } - ] - }, - "controlID": "C-0058", - "creationTime": "", - "description": "A user may be able to create a container with subPath or subPathExpr volume mounts to access files & directories anywhere on the host filesystem. The following Kubernetes versions are affected: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. This control checks the vulnerable versions and the actual usage of the subPath feature in all Pods in the cluster. 
If you want to learn more about the CVE, please refer to the CVE link: https://nvd.nist.gov/vuln/detail/CVE-2021-25741", - "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, or remove any existing Pods using subPath or subPathExpr feature.", - "rules": [], - "baseScore": 6 - }, - { - "guid": "", - "name": "CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access", - "Execution" - ] - } - ] - }, - "controlID": "C-0059", - "creationTime": "", - "description": "Security issue in ingress-nginx where a user that can create or update ingress objects can use the custom snippets feature to obtain all secrets in the cluster (see more at https://github.com/kubernetes/ingress-nginx/issues/7837)", - "remediation": "To mitigate this vulnerability: 1. Upgrade to a version that allows mitigation (>= v0.49.1 or >= v1.0.1), 2. Set allow-snippet-annotations to false in your ingress-nginx ConfigMap based on how you deploy ingress-nginx", - "rules": [], - "baseScore": 8 - }, - { - "guid": "", - "name": "Pods in default namespace", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "compliance", - "devops" - ] - }, - "controlID": "C-0061", - "creationTime": "", - "description": "It is recommended to avoid running PODs in cluster without explicit namespace assignment. This control identifies all the PODs running in the default namespace.", - "remediation": "Create necessary namespaces and move all the PODs from default namespace there.", - "rules": [], - "baseScore": 3 - }, - { - "guid": "", - "name": "Sudo in container entrypoint", - "attributes": { - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ], - "armoBuiltin": true - }, - "controlID": "C-0062", - "creationTime": "", - "description": "Adding sudo to a container entry point command may escalate process privileges and allow access to forbidden resources. This control checks all the entry point commands in all containers in the POD to find those that have sudo command.", - "remediation": "Remove sudo from the command line and use Kubernetes native root and capabilities controls to provide necessary privileges where they are required.", - "rules": [], - "baseScore": 5 - }, - { - "guid": "", - "name": "Portforwarding privileges", - "attributes": { - "armoBuiltin": true, - "rbacQuery": "Port Forwarding", - "controlTypeTags": [ - "security-impact", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Impact - data destruction", - "Discovery", - "Lateral movement" - ] - } - ] - }, - "controlID": "C-0063", - "creationTime": "", - "description": "Attackers with relevant RBAC permission can use “kubectl portforward” command to establish direct communication with PODs from within the cluster or even remotely. Such communication will most likely bypass existing security measures in the cluster. This control determines which subjects have permissions to use this command.", - "remediation": "It is recommended to prohibit “kubectl portforward” command in production environments. 
It is also recommended not to use subjects with this permission for daily cluster operations.", - "rules": [], - "baseScore": 5 - }, - { - "guid": "", - "name": "No impersonation", - "attributes": { - "armoBuiltin": true, - "rbacQuery": "Impersonation", - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "controlID": "C-0065", - "creationTime": "", - "description": "Impersonation is an explicit RBAC permission to use other roles rather than the one assigned to a user, group or service account. This is sometimes needed for testing purposes. However, it is highly recommended not to use this capability in the production environments for daily operations. This control identifies all subjects whose roles include impersonate verb.", - "remediation": "Either remove the impersonate verb from the role where it was found or make sure that this role is not bound to users, groups or service accounts used for ongoing cluster operations. If necessary, bind this role to a subject only for specific needs for limited time period.", - "rules": [], - "baseScore": 6 - }, - { - "guid": "", - "name": "Secret/ETCD encryption enabled", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "node", - "categories": [ - "Impact" - ] - } - ], - "armoBuiltin": true - }, - "controlID": "C-0066", - "creationTime": "", - "description": "All Kubernetes Secrets are stored primarily in etcd therefore it is important to encrypt it.", - "remediation": "Turn on the etcd encryption in your cluster, for more see the vendor documentation.", - "rules": [], - "baseScore": 6 - }, - { - "guid": "", - "name": "Audit logs enabled", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Defense evasion - KubeAPI" - ] - } - ] - }, - "controlID": "C-0067", - "creationTime": "", - "description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. It is important to use it so the operator has a record of events happened in Kubernetes", - "remediation": "Turn on audit logging for your cluster. 
Look at the vendor guidelines for more details.", - "rules": [], - "baseScore": 5 - }, - { - "guid": "", - "name": "PSP enabled", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Impact - service injection" - ] - } - ] - }, - "controlID": "C-0068", - "creationTime": "", - "description": "PSPs enable fine-grained authorization of pod creation, and it is important to enable them", - "remediation": "Turn Pod Security Policies on in your cluster. If you use other admission controllers to control the behavior that PSP controls, exclude this control from your scans", - "rules": [], - "baseScore": 1 - }, - { - "guid": "", - "name": "Disable anonymous access to Kubelet service", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Initial access" - ] - } - ] - }, - "controlID": "C-0069", - "creationTime": "", - "description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", - "remediation": "Start the kubelet with the --anonymous-auth=false flag.", - "rules": [], - "baseScore": 10 - }, - { - "guid": "", - "name": "Enforce Kubelet client TLS authentication", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "node", - "categories": [ - "Initial access" - ] - } - ] - }, - "controlID": "C-0070", - "creationTime": "", - "description": "Kubelets are the node-level orchestrators in the Kubernetes control plane. They publish service port 10250, where they accept commands from the API server. The operator must make sure that only the API server is allowed to submit commands to the Kubelet. This is done through client certificate verification; the Kubelet must be configured with a client CA file to use for this purpose.", - "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", - "rules": [], - "baseScore": 9 - }, - { - "guid": "", - "name": "Naked PODs", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "devops" - ] - }, - "controlID": "C-0073", - "creationTime": "", - "description": "It is not recommended to create PODs without parental Deployment, ReplicaSet, StatefulSet, etc. Manual creation of PODs may lead to configuration drifts and other untracked changes in the system. Such PODs won't be automatically rescheduled by Kubernetes in case of a crash or infrastructure failure. This control identifies every POD that does not have a corresponding parental object.", - "remediation": "Create the necessary Deployment object for every POD, making any POD a first class citizen in your IaC architecture.", - "rules": [], - "baseScore": 3 - }, - { - "guid": "", - "name": "Containers mounting Docker socket", - "attributes": { - "controlTypeTags": [ - "devops" - ], - "armoBuiltin": true - }, - "controlID": "C-0074", - "creationTime": "", - "description": "Mounting Docker socket (Unix socket) enables the container to access Docker internals, retrieve sensitive information and execute Docker commands, if Docker runtime is available. 
This control identifies PODs that attempt to mount Docker socket for accessing Docker runtime.", - "remediation": "Remove docker socket mount request or define an exception.", - "rules": [], - "baseScore": 5 - }, - { - "guid": "", - "name": "Image pull policy on latest tag", - "attributes": { - "controlTypeTags": [ - "devops" - ], - "armoBuiltin": true - }, - "controlID": "C-0075", - "creationTime": "", - "description": "While usage of the latest tag is not generally recommended, in some cases this is necessary. If it is, the ImagePullPolicy must be set to Always, otherwise Kubernetes may run an older image with the same name that happens to be present in the node cache. Note that using Always will not cause additional image downloads because Kubernetes will check the image hash of the local image against the registry and only pull the image if this hash has changed, which is exactly what users want when using the latest tag. This control will identify all PODs with latest tag that have ImagePullPolicy not set to Always.", - "remediation": "Set ImagePullPolicy to Always in all PODs found by this control.", - "rules": [], - "baseScore": 2 - }, - { - "guid": "", - "name": "Label usage for resources", - "attributes": { - "controlTypeTags": [ - "devops" - ], - "armoBuiltin": true - }, - "controlID": "C-0076", - "creationTime": "", - "description": "It is recommended to set labels that identify semantic attributes of your application or deployment. For example, { app: myapp, tier: frontend, phase: test, deployment: v3 }. These labels can be used to assign policies to logical groups of the deployments as well as for presentation and tracking purposes. This control helps you find deployments without any of the expected labels.", - "remediation": "Define labels that are most suitable to your needs or use the exceptions to prevent further notifications.", - "rules": [], - "baseScore": 2 - }, - { - "guid": "", - "name": "K8s common labels usage", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "devops" - ] - }, - "controlID": "C-0077", - "creationTime": "", - "description": "Kubernetes common labels help manage and monitor Kubernetes cluster using different tools such as kubectl, dashboard and others in an interoperable way. Refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ for more information. This control helps you find objects that don't have any of these labels defined.", - "remediation": "Define applicable labels or use the exception mechanism to prevent further notifications.", - "rules": [], - "baseScore": 2 - }, - { - "guid": "", - "name": "Images from allowed registry", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Collection" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access", - "Execution" - ] - } - ] - }, - "controlID": "C-0078", - "creationTime": "", - "description": "This control is intended to ensure that all the used container images are taken from the authorized repositories. 
It allows the user to list all the approved repositories and will fail all the images taken from any repository outside of this list.", - "remediation": "You should enable all trusted repositories in the parameters of this control.", - "rules": [], - "baseScore": 5 - }, - { - "guid": "", - "name": "CVE-2022-0185-linux-kernel-container-escape", - "attributes": { - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ], - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "controlID": "C-0079", - "creationTime": "", - "description": "CVE-2022-0185 is a kernel vulnerability enabling privilege escalation and it can lead attackers to escape containers and take control over nodes. This control alerts on vulnerable kernel versions of Kubernetes nodes", - "remediation": "Patch Linux kernel version to 5.16.2 or above", - "rules": [], - "baseScore": 4 - }, - { - "guid": "", - "name": "CVE-2022-24348-argocddirtraversal", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "controlID": "C-0081", - "creationTime": "", - "description": "CVE-2022-24348 is a major software supply chain 0-day vulnerability in the popular open source CD platform Argo CD which can lead to privilege escalation and information disclosure.", - "remediation": "Update your ArgoCD deployment to fixed versions (v2.1.9,v2.2.4 or v2.3.0)", - "rules": [], - "baseScore": 4 - }, - { - "guid": "", - "name": "Workloads with Critical vulnerabilities exposed to external traffic", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access", - "Execution" - ] - } - ] - }, - "controlID": "C-0083", - "creationTime": "", - "description": "Container images with known critical vulnerabilities pose elevated risk if they are exposed to the external traffic. This control lists all images with such vulnerabilities if either LoadBalancer or NodePort service is assigned to them.", - "remediation": "Either update the container image to fix the vulnerabilities (if such fix is available) or reassess if this workload must be exposed to the outside traffic. If no fix is available, consider periodic restart of the POD to minimize the risk of persistent intrusion. Use exception mechanism if you don't want to see this report again.", - "rules": [], - "baseScore": 8 - }, - { - "guid": "", - "name": "Workloads with RCE vulnerabilities exposed to external traffic", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access", - "Execution" - ] - } - ] - }, - "controlID": "C-0084", - "creationTime": "", - "description": "Container images with known Remote Code Execution (RCE) vulnerabilities pose significantly higher risk if they are exposed to the external traffic. This control lists all images with such vulnerabilities if their POD has either LoadBalancer or NodePort service.", - "remediation": "Either update the container image to fix the vulnerabilities (if such fix is available) or reassess if this workload must be exposed to the outside traffic. If no fix is available, consider periodic restart of the POD to minimize the risk of persistent intrusion. 
Use exception mechanism if you don't want to see this report again.", - "rules": [], - "baseScore": 8 - }, - { - "guid": "", - "name": "Workloads with excessive amount of vulnerabilities", - "attributes": { - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access", - "Execution" - ] - } - ], - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "controlID": "C-0085", - "creationTime": "", - "description": "Container images with multiple Critical and High severity vulnerabilities increase the risk of potential exploit. This control lists all such images according to the threshold provided by the customer.", - "remediation": "Update your workload images as soon as possible when fixes become available.", - "rules": [], - "baseScore": 6 - }, - { - "guid": "", - "name": "CVE-2022-0492-cgroups-container-escape", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "controlID": "C-0086", - "creationTime": "", - "description": "Linux Kernel vulnerability CVE-2022-0492 may allow malicious code running inside a container to escape container isolation and gain root privileges on the entire node. When fixed Kernel version numbers become available, this control will be modified to verify them and avoid false positive detections. This control identifies all the resources that deploy neither AppArmor nor SELinux, run as root or allow privilege escalation or have corresponding dangerous capabilities.", - "remediation": "Activate AppArmor or SELinux. Follow the least privilege principle and remove root privileges or privilege escalation option and CAP_DAC_OVERRIDE capability. 
Make sure you don't allow container images from potentially dangerous sources and that containers that must have high privileges are taken from protected repositories.", - "rules": [], - "baseScore": 4 - }, - { - "guid": "", - "name": "CVE-2022-23648-containerd-fs-escape", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation", - "Impact - Data access in container" - ] - } - ] - }, - "controlID": "C-0087", - "creationTime": "", - "description": "CVE-2022-23648 is a vulnerability of containerd enabling attacker to gain access to read-only copies of arbitrary files from the host using aspecially-crafted POD configuration yamls", - "remediation": "Patch containerd to 1.6.1, 1.5.10, 1.4.12 or above", - "rules": [], - "baseScore": 7 - }, - { - "guid": "", - "name": "RBAC enabled", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "categories": [ - "Initial access", - "Privilege escalation" - ], - "attackTrack": "kubeapi" - } - ] - }, - "controlID": "C-0088", - "creationTime": "", - "description": "RBAC is the most advanced and well accepted mode of authorizing users of the Kubernetes API", - "remediation": "Enable RBAC either in the API server configuration or with the Kubernetes provider API", - "rules": [], - "baseScore": 7 - }, - { - "guid": "", - "name": "CVE-2022-39328-grafana-auth-bypass", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access", - "Execution" - ] - } - ] - }, - "controlID": "C-0090", - "creationTime": "", - "description": "CVE-2022-39328 is a critical vulnerability in Grafana, it might enable attacker to access unauthorized endpoints under heavy load.", - "remediation": "Update your Grafana to 9.2.4 or above", - "rules": [], - "baseScore": 9 - }, - { - "guid": "", - "name": "CVE-2022-47633-kyverno-signature-bypass", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "categories": [ - "Initial access", - "Execution" - ], - "attackTrack": "container" - } - ] - }, - "controlID": "C-0091", - "creationTime": "", - "description": "CVE-2022-47633 is a high severity vulnerability in Kyverno, it enables attackers to bypass the image signature validation of policies using a malicious image repository or MITM proxy", - "remediation": "Update your Grafana to 9.2.4 or above", - "rules": [], - "baseScore": 8 - } - ], - "controlsIDs": [ - "C-0001", - "C-0002", - "C-0004", - "C-0005", - "C-0007", - "C-0009", - "C-0012", - "C-0013", - "C-0014", - "C-0015", - "C-0016", - "C-0017", - "C-0018", - "C-0020", - "C-0021", - "C-0026", - "C-0030", - "C-0031", - "C-0034", - "C-0035", - "C-0036", - "C-0038", - "C-0039", - "C-0041", - "C-0042", - "C-0044", - "C-0045", - "C-0046", - "C-0048", - "C-0049", - "C-0050", - "C-0052", - "C-0053", - "C-0054", - "C-0055", - "C-0056", - "C-0057", - "C-0058", - "C-0059", - "C-0061", - "C-0062", - "C-0063", - "C-0065", - "C-0066", - "C-0067", - "C-0068", - "C-0069", - "C-0070", - "C-0073", - "C-0074", - "C-0075", - "C-0076", - "C-0077", - "C-0078", - "C-0079", - "C-0081", - "C-0083", - "C-0084", - "C-0085", - "C-0086", - "C-0087", - "C-0088", - "C-0090", - "C-0091" - ] - }, { "guid": "", "name": "MITRE", "attributes": { - "armoBuiltin": true + "builtin": true }, "creationTime": "", "description": 
"Testing MITRE for Kubernetes as suggested by microsoft in https://www.microsoft.com/security/blog/wp-content/uploads/2020/04/k8s-matrix.png", + "typeTags": [ + "compliance" + ], "controls": [ { "guid": "", - "name": "Exec into container", + "name": "Prevent containers from allowing command execution", "attributes": { - "armoBuiltin": true, "microsoftMitreColumns": [ "Execution" ], @@ -5199,13 +14101,22 @@ "description": "Attackers with relevant permissions can run malicious commands in the context of legitimate containers in the cluster using “kubectl exec” command. This control determines which subjects have permissions to use this command.", "remediation": "It is recommended to prohibit “kubectl exec” command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", "rules": [], - "baseScore": 5 + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } }, { "guid": "", - "name": "Data Destruction", + "name": "Roles with delete capabilities", "attributes": { - "armoBuiltin": true, "microsoftMitreColumns": [ "Impact" ], @@ -5219,27 +14130,23 @@ "description": "Attackers may attempt to destroy data and resources in the cluster. This includes deleting deployments, configurations, storage, and compute resources. This control identifies all subjects that can delete resources.", "remediation": "You should follow the least privilege principle and minimize the number of subjects that can delete resources.", "rules": [], - "baseScore": 5 + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } }, { "guid": "", "name": "Applications credentials in configuration files", "attributes": { - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Credential access" - ] - }, - { - "categories": [ - "Credential access" - ], - "attackTrack": "container" - } - ], - "armoBuiltin": true, + "actionRequired": "configuration", "microsoftMitreColumns": [ "Credential access", "Lateral Movement" @@ -5255,13 +14162,22 @@ "description": "Attackers who have access to configuration files can steal the stored secrets and use them. This control checks if ConfigMaps or pod specifications have sensitive information in their configuration.", "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", "rules": [], - "baseScore": 8 + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Secrets", + "id": "Cat-3" + } }, { "guid": "", "name": "Access Kubernetes dashboard", "attributes": { - "armoBuiltin": true, "microsoftMitreColumns": [ "Discovery", "Lateral Movement" @@ -5276,21 +14192,22 @@ "description": "Attackers who gain access to the dashboard service account or have its RBAC permissions can use its network access to retrieve information about resources in the cluster or change them. 
This control checks if a subject that is not dashboard service account is bound to dashboard role/clusterrole, or - if anyone that is not the dashboard pod is associated with dashboard service account.", "remediation": "Make sure that the “Kubernetes Dashboard” service account is only bound to the Kubernetes dashboard following the least privilege principle.", "rules": [], - "baseScore": 2 + "baseScore": 2, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } }, { "guid": "", "name": "List Kubernetes secrets", "attributes": { - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Credential access" - ] - } - ], - "armoBuiltin": true, "microsoftMitreColumns": [ "Credential access" ], @@ -5305,7 +14222,17 @@ "description": "Attackers who have permissions to access secrets can access sensitive information that might include credentials to various services. This control determines which user, group or service account can list/get secrets.", "remediation": "Monitor and approve list of users, groups and service accounts that can access secrets. Use exception mechanism to prevent repetitive the notifications.", "rules": [], - "baseScore": 7 + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } }, { "guid": "", @@ -5316,21 +14243,30 @@ ], "controlTypeTags": [ "compliance" - ], - "armoBuiltin": true + ] }, "controlID": "C-0020", "creationTime": "", "description": "When a cluster is deployed in the cloud, in some cases attackers can leverage their access to a container in the cluster to gain cloud credentials. This control determines if any workload contains a volume with potential access to cloud credential.", "remediation": "Refrain from using path mount to known cloud credentials folders or files .", "rules": [], - "baseScore": 4 + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } }, { "guid": "", "name": "Exposed sensitive interfaces", "attributes": { - "armoBuiltin": true, + "actionRequired": "configuration", "microsoftMitreColumns": [ "Initial access" ], @@ -5343,13 +14279,21 @@ "description": "Exposing a sensitive interface to the internet poses a security risk. It might enable attackers to run malicious code or deploy containers in the cluster. This control checks if known components (e.g. Kubeflow, Argo Workflows, etc.) are deployed and exposed services externally.", "remediation": "Consider blocking external interfaces or protect them with appropriate security tools.", "rules": [], - "baseScore": 6 + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } }, { "guid": "", "name": "Kubernetes CronJob", "attributes": { - "armoBuiltin": true, "microsoftMitreColumns": [ "Persistence" ], @@ -5359,31 +14303,31 @@ }, "controlID": "C-0026", "creationTime": "", - "description": "Attackers may use Kubernetes CronJob for scheduling execution of malicious code that would run as a POD in the cluster. This control lists all the CronJobs that exist in the cluster for the user to approve.", + "description": "Attackers may use Kubernetes CronJob for scheduling execution of malicious code that would run as a pod in the cluster. 
This control lists all the CronJobs that exist in the cluster for the user to approve.", "remediation": "Watch Kubernetes CronJobs and make sure they are legitimate.", "rules": [], - "baseScore": 1 + "baseScore": 1, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } }, { "guid": "", "name": "Delete Kubernetes events", "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Defense evasion" - ], "rbacQuery": "Show who can delete k8s events", "controlTypeTags": [ "security", "compliance" ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Defense evasion" - ] - } + "microsoftMitreColumns": [ + "Defense evasion" ] }, "controlID": "C-0031", @@ -5391,11 +14335,21 @@ "description": "Attackers may delete Kubernetes events to avoid detection of their activity in the cluster. This control identifies all the subjects that can delete Kubernetes events.", "remediation": "You should follow the least privilege principle. Minimize the number of subjects who can delete Kubernetes events. Avoid using these subjects in the daily operations.", "rules": [], - "baseScore": 4 + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } }, { "guid": "", - "name": "Cluster-admin binding", + "name": "Administrative Roles", "attributes": { "microsoftMitreColumns": [ "Privilege escalation" @@ -5404,72 +14358,62 @@ "controlTypeTags": [ "security", "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Impact - data destruction", - "Impact - service injection" - ] - } - ], - "armoBuiltin": true + ] }, "controlID": "C-0035", "creationTime": "", "description": "Attackers who have cluster admin permissions (can perform any action on any resource), can take advantage of their privileges for malicious activities. This control determines which subjects have cluster admin permissions.", "remediation": "You should apply least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use subjects with such high permissions for daily operations.", "rules": [], - "baseScore": 6 + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } }, { "guid": "", - "name": "Malicious admission controller (validating)", + "name": "Validate admission controller (validating)", "attributes": { - "microsoftMitreColumns": [ - "Credential access" - ], "controlTypeTags": [ "security", "compliance" ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Impact - data destruction", - "Impact - service injection" - ] - } - ], - "armoBuiltin": true + "microsoftMitreColumns": [ + "Credential access" + ] }, "controlID": "C-0036", "creationTime": "", "description": "Attackers can use validating webhooks to intercept and discover all the resources in the cluster. This control lists all the validating webhook configurations that must be verified.", "remediation": "Ensure all the webhooks are necessary. 
Use exception mechanism to prevent repititive notifications.", "rules": [], - "baseScore": 3 + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } }, { "guid": "", "name": "CoreDNS poisoning", "attributes": { - "armoBuiltin": true, "microsoftMitreColumns": [ "Lateral Movement" ], "controlTypeTags": [ "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Impact - service injection" - ] - } ] }, "controlID": "C-0037", @@ -5477,27 +14421,28 @@ "description": "If attackers have permissions to modify the coredns ConfigMap they can change the behavior of the cluster’s DNS, poison it, and override the network identity of other services. This control identifies all subjects allowed to update the 'coredns' configmap.", "remediation": "You should follow the least privilege principle. Monitor and approve all the subjects allowed to modify the 'coredns' configmap. It is also recommended to remove this permission from the users/service accounts used in the daily operations.", "rules": [], - "baseScore": 4 + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } }, { "guid": "", - "name": "Malicious admission controller (mutating)", + "name": "Validate admission controller (mutating)", "attributes": { - "armoBuiltin": true, "microsoftMitreColumns": [ "Persistence" ], "controlTypeTags": [ "security", "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Impact - service injection" - ] - } ] }, "controlID": "C-0039", @@ -5505,13 +14450,21 @@ "description": "Attackers may use mutating webhooks to intercept and modify all the resources in the cluster. This control lists all mutating webhook configurations that must be verified.", "remediation": "Ensure all the webhooks are necessary. Use exception mechanism to prevent repititive notifications.", "rules": [], - "baseScore": 4 + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } }, { "guid": "", "name": "SSH server running inside container", "attributes": { - "armoBuiltin": true, "microsoftMitreColumns": [ "Execution" ], @@ -5524,13 +14477,30 @@ "description": "An SSH server that is running inside a container may be used by attackers to get remote access to the container. 
This control checks if pods have an open SSH port (22/2222).", "remediation": "Remove SSH from the container image or limit the access to the SSH server using network policies.", "rules": [], - "baseScore": 3 + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } }, { "guid": "", "name": "Writable hostPath mount", "attributes": { - "armoBuiltin": true, + "attackTracks": [ + { + "categories": [ + "Privilege Escalation (Node)" + ], + "attackTrack": "workload-external-track" + } + ], "microsoftMitreColumns": [ "Persistence", "Lateral Movement" @@ -5539,16 +14509,8 @@ "security", "compliance", "devops", - "security-impact" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Persistence", - "Impact - Data access in container" - ] - } + "security-impact", + "smartRemediation" ] }, "controlID": "C-0045", @@ -5556,57 +14518,74 @@ "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host and gain persistence.", "remediation": "Refrain from using the hostPath mount or use the exception mechanism to remove unnecessary notifications.", "rules": [], - "baseScore": 8 + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Storage", + "id": "Cat-8" + } + } }, { "guid": "", "name": "HostPath mount", "attributes": { - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Impact - Data access in container" - ] - } - ], - "armoBuiltin": true, "microsoftMitreColumns": [ "Privilege escalation" ], "controlTypeTags": [ "security", - "compliance" + "compliance", + "smartRemediation" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" + ] + } ] }, "controlID": "C-0048", "creationTime": "", - "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host. This control identifies all the PODs using hostPath mount.", + "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host. This control identifies all the pods using hostPath mount.", "remediation": "Remove hostPath mounts unless they are absolutely necessary and use exception mechanism to remove notifications.", "rules": [], - "baseScore": 7 + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Storage", + "id": "Cat-8" + } + } }, { "guid": "", "name": "Instance Metadata API", "attributes": { - "armoBuiltin": true, "microsoftMitreColumns": [ "Discovery" ], "controlTypeTags": [ "security", "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Credential access", - "Discovery", - "Impact - service access" - ] - } ] }, "controlID": "C-0052", @@ -5614,13 +14593,21 @@ "description": "Attackers who gain access to a container, may query the metadata API service for getting information about the underlying node. 
This control checks if there is access from the nodes to cloud providers instance metadata services.", "remediation": "Disable metadata services for pods in cloud provider settings.", "rules": [], - "baseScore": 7 + "baseScore": 7, + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", "name": "Access container service account", "attributes": { - "armoBuiltin": true, "microsoftMitreColumns": [ "Credential access" ], @@ -5628,44 +14615,35 @@ "controlTypeTags": [ "compliance", "security-impact" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Credential access", - "Impact - K8s API access" - ] - } ] }, "controlID": "C-0053", "creationTime": "", - "description": "Attackers who obtain access to a pod can use its SA token to communicate with KubeAPI server. All PODs with SA token mounted (if such token has a Role or a ClusterRole binding) are considerred potentially dangerous.", - "remediation": "Verify that RBAC is enabled. Follow the least privilege principle and ensure that only necessary PODs have SA token mounted into them.", + "description": "Attackers who obtain access to a pod can use its SA token to communicate with KubeAPI server. All pods with SA token mounted (if such token has a Role or a ClusterRole binding) are considerred potentially dangerous.", + "remediation": "Verify that RBAC is enabled. Follow the least privilege principle and ensure that only necessary pods have SA token mounted into them.", "rules": [], - "baseScore": 6 + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } }, { "guid": "", "name": "Cluster internal networking", "attributes": { - "armoBuiltin": true, "microsoftMitreColumns": [ "Lateral movement" ], "controlTypeTags": [ "security", "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Discovery", - "Lateral movement" - ] - } ] }, "controlID": "C-0054", @@ -5673,26 +14651,28 @@ "description": "If no network policy is defined, attackers who gain access to a container may use it to move laterally in the cluster. This control lists namespaces in which no network policy is defined.", "remediation": "Define Kubernetes network policies or use alternative products to protect cluster network.", "rules": [], - "baseScore": 4 + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Network", + "id": "Cat-4" + } }, { "guid": "", "name": "Privileged container", "attributes": { - "armoBuiltin": true, "microsoftMitreColumns": [ "Privilege escalation" ], "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } + "security", + "smartRemediation" ] }, "controlID": "C-0057", @@ -5700,25 +14680,29 @@ "description": "Potential attackers may gain access to privileged containers and inherit access to the host resources. Therefore, it is not recommended to deploy privileged containers unless it is absolutely necessary. This control identifies all the privileged Pods.", "remediation": "Remove privileged capabilities by setting the securityContext.privileged to false. If you must deploy a Pod as privileged, add other restriction to it, such as network policy, Seccomp etc and still remove all unnecessary capabilities. 
Use the exception mechanism to remove unnecessary notifications.", "rules": [], - "baseScore": 8 + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + } + } }, { "guid": "", "name": "CVE-2021-25741 - Using symlink for arbitrary host file system access.", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security", "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Persistence", - "Impact - Data access in container" - ] - } ] }, "controlID": "C-0058", @@ -5726,25 +14710,24 @@ "description": "A user may be able to create a container with subPath or subPathExpr volume mounts to access files & directories anywhere on the host filesystem. Following Kubernetes versions are affected: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. This control checks the vulnerable versions and the actual usage of the subPath feature in all Pods in the cluster. If you want to learn more about the CVE, please refer to the CVE link: https://nvd.nist.gov/vuln/detail/CVE-2021-25741", "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, or remove any existing Pods using subPath or subPathExpr feature.", "rules": [], - "baseScore": 6 + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } }, { "guid": "", "name": "CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security", "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access", - "Execution" - ] - } ] }, "controlID": "C-0059", @@ -5752,21 +14735,22 @@ "description": "Security issue in ingress-nginx where a user that can create or update ingress objects can use the custom snippets feature to obtain all secrets in the cluster (see more at https://github.com/kubernetes/ingress-nginx/issues/7837)", "remediation": "To mitigate this vulnerability: 1. Upgrade to a version that allows mitigation (>= v0.49.1 or >= v1.0.1), 2. 
Set allow-snippet-annotations to false in your ingress-nginx ConfigMap based on how you deploy ingress-nginx", "rules": [], - "baseScore": 8 + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } }, { "guid": "", - "name": "Secret/ETCD encryption enabled", + "name": "Secret/etcd encryption enabled", "attributes": { - "attackTracks": [ - { - "attackTrack": "node", - "categories": [ - "Impact" - ] - } - ], - "armoBuiltin": true, "controlTypeTags": [ "security", "compliance" @@ -5777,24 +14761,24 @@ "description": "All Kubernetes Secrets are stored primarily in etcd therefore it is important to encrypt it.", "remediation": "Turn on the etcd encryption in your cluster, for more see the vendor documentation.", "rules": [], - "baseScore": 6 + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", "name": "Audit logs enabled", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security", "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Defense evasion - KubeAPI" - ] - } ] }, "controlID": "C-0067", @@ -5802,24 +14786,24 @@ "description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. It is important to use it so the operator has a record of events happened in Kubernetes", "remediation": "Turn on audit logging for your cluster. Look at the vendor guidelines for more details", "rules": [], - "baseScore": 5 + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", "name": "PSP enabled", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security", "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Impact - service injection" - ] - } ] }, "controlID": "C-0068", @@ -5827,21 +14811,22 @@ "description": "PSP enable fine-grained authorization of pod creation and it is important to enable it", "remediation": "Turn Pod Security Policies on in your cluster, if you use other admission controllers to control the behavior that PSP controls, exclude this control from your scans", "rules": [], - "baseScore": 1 + "baseScore": 1, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", "name": "Disable anonymous access to Kubelet service", "attributes": { - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Initial access" - ] - } - ], - "armoBuiltin": true, "controlTypeTags": [ "security", "compliance" @@ -5852,24 +14837,24 @@ "description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", "remediation": "Start the kubelet with the --anonymous-auth=false flag.", "rules": [], - "baseScore": 10 + "baseScore": 10, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", "name": "Enforce Kubelet client TLS authentication", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security", "compliance" - ], - "attackTracks": [ - { - "attackTrack": "node", - "categories": [ - "Initial access" 
- ] - } ] }, "controlID": "C-0070", @@ -5877,7 +14862,16 @@ "description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", "rules": [], - "baseScore": 9 + "baseScore": 9, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } } ], "controlsIDs": [ @@ -5908,92 +14902,1538 @@ "C-0068", "C-0069", "C-0070" - ] + ], + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } }, { "guid": "", - "name": "NSA", + "name": "SOC2", "attributes": { - "armoBuiltin": true + "builtin": true }, "creationTime": "", - "description": "Implement NSA security advices for K8s ", + "description": "SOC2 compliance related controls", + "typeTags": [ + "compliance" + ], "controls": [ { "guid": "", - "name": "Exec into container", + "name": "Firewall (CC6.1,CC6.6,CC7.2)", "attributes": { - "controlTypeTags": [ - "compliance", - "security-impact" - ], - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Execution" - ], - "rbacQuery": "Show who can access into pods" - }, - "controlID": "C-0002", - "creationTime": "", - "description": "Attackers with relevant permissions can run malicious commands in the context of legitimate containers in the cluster using “kubectl exec” command. This control determines which subjects have permissions to use this command.", - "remediation": "It is recommended to prohibit “kubectl exec” command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", - "rules": [], - "baseScore": 5 - }, - { - "guid": "", - "name": "API server insecure port is enabled", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Initial access" - ] - } - ] - }, - "controlID": "C-0005", - "creationTime": "", - "description": "Kubernetes control plane API is running with non-secure port enabled which allows attackers to gain unprotected access to the cluster.", - "remediation": "Set the insecure-port flag of the API server to zero.", - "rules": [], - "baseScore": 9 - }, - { - "guid": "", - "name": "Resource limits", - "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security" ], "attackTracks": [ { - "attackTrack": "container", + "attackTrack": "workload-external-track", "categories": [ - "Impact - service destruction" + "Lateral Movement (Network)" + ] + } + ], + "isFixedByNetworkPolicy": true + }, + "controlID": "C-0260", + "creationTime": "", + "description": "Network is monitored and protected by the following. System firewalls are configured to limit unnecessary ports, protocols and services. 
Firewall rules are reviewed at least annually by IT management.", + "remediation": "Define network policies for all workloads to protect unwanted access", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Network", + "id": "Cat-4" + } + }, + { + "guid": "", + "name": "Cryptographic key management - misplaced secrets (CC6.1,CC6.6,CC6.7)", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "security-impact" + ], + "actionRequired": "configuration", + "microsoftMitreColumns": [ + "Credential access", + "Lateral Movement" + ] + }, + "controlID": "C-0012", + "creationTime": "", + "description": "Encryption keys used to protect data at rest and in transit are stored and managed in accordance with the organization's cryptography policy. Access to encryption keys are restricted to authorized personnel.", + "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Secrets", + "id": "Cat-3" + } + }, + { + "guid": "", + "name": "Cryptographic key management - minimize access to secrets (CC6.1,CC6.6,CC6.7)", + "controlID": "C-0186", + "creationTime": "", + "description": "Encryption keys used to protect data at rest and in transit are stored and managed in accordance with the organization's cryptography policy. Access to encryption keys are restricted to authorized personnel.", + "remediation": "Where possible, remove `get`, `list` and `watch` access to `secret` objects in the cluster.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "Access restriction to infrastructure - admin access (CC6.1 ,CC6.2, CC6.7, CC6.8)", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ], + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "rbacQuery": "Show cluster_admin" + }, + "controlID": "C-0035", + "creationTime": "", + "description": "Administrative access on the in-scope production infrastructure (cloud platform, servers, database) are restricted to authorized users based on job responsibilities.", + "remediation": "You should apply least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use subjects with such high permissions for daily operations.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "Event logging (CC6.8,CC7.1,CC7.2)", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0067", + "creationTime": "", + "description": "Logging is enabled to monitor the following events at the application and/or infrastructure layers.", + "remediation": "Turn on audit logging for your cluster. 
Look at the vendor guidelines for more details", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Data in motion encryption - Ingress is TLS encrypted (CC6.1,CC6.6,CC6.7)", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0263", + "creationTime": "", + "description": "Transport Layer Security (TLS) is used to protect the transmission of data sent over the internet to and from the organization's application server.", + "remediation": "The user needs to implement TLS for the Ingress resource in order to encrypt the incoming traffic", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } + }, + { + "guid": "", + "name": "Data in rest encryption - Persistent Volumes are encrypted (CC1.1,CC6.7)", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0264", + "creationTime": "", + "description": "Transport Layer Security (TLS) is used to protect the transmission of data sent over the internet to and from the organization's application server.", + "remediation": "Enable encryption on the PersistentVolume using the configuration in StorageClass", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster" + ] + } + } + ], + "controlsIDs": [ + "C-0260", + "C-0012", + "C-0186", + "C-0035", + "C-0067", + "C-0263", + "C-0264" + ], + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } + }, + { + "guid": "", + "name": "cis-eks-t1.7.0", + "attributes": { + "version": "v1.7.0", + "builtin": true + }, + "creationTime": "", + "description": "Testing CIS for Amazon Elastic Kubernetes Service (EKS) as suggested by CIS benchmark: https://workbench.cisecurity.org/benchmarks/20537", + "typeTags": [ + "compliance" + ], + "controls": [ + { + "guid": "", + "name": "CIS-5.3.1 Ensure Kubernetes Secrets are encrypted using Customer Master Keys (CMKs) managed in AWS KMS", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0066", + "creationTime": "", + "description": "Encrypt Kubernetes secrets, stored in etcd, using secrets encryption feature during Amazon EKS cluster creation.", + "remediation": "This process can only be performed during Cluster Creation.\n\n Enable 'Secrets Encryption' during Amazon EKS cluster creation as described in the links within the 'References' section.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-2.1.1 Enable audit Logs", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0067", + "creationTime": "", + "description": "Control plane logs provide visibility into operation of the EKS Control plane component systems. The API server audit logs record all accepted and rejected requests in the cluster. When enabled via EKS configuration the control plane logs for a cluster are exported to a CloudWatch Log Group for persistence.", + "remediation": "**From Console:**\n\n 1. For each EKS Cluster in each region;\n2. Go to 'Amazon EKS' > 'Clusters' > '' > 'Configuration' > 'Logging'.\n3. Click 'Manage logging'.\n4. 
Ensure that all options are toggled to 'Enabled'.\n\n \n```\nAPI server: Enabled\nAudit: Enabled\t\nAuthenticator: Enabled\nController manager: Enabled\nScheduler: Enabled\n\n```\n 5. Click 'Save Changes'.\n\n **From CLI:**\n\n \n```\n# For each EKS Cluster in each region;\naws eks update-cluster-config \\\n --region '${REGION_CODE}' \\\n --name '${CLUSTER_NAME}' \\\n --logging '{\"clusterLogging\":[{\"types\":[\"api\",\"audit\",\"authenticator\",\"controllerManager\",\"scheduler\"],\"enabled\":true}]}'\n\n```", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-5.1.4 Minimize Container Registries to only those approved", + "attributes": { + "actionRequired": "configuration", + "microsoftMitreColumns": [ + "Collection" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0078", + "creationTime": "", + "description": "Use approved container registries.", + "remediation": "To minimize AWS ECR container registries to only those approved, you can follow these steps:\n\n 1. Define your approval criteria: Determine the criteria that containers must meet to be considered approved. This can include factors such as security, compliance, compatibility, and other requirements.\n2. Identify all existing ECR registries: Identify all ECR registries that are currently being used in your organization.\n3. Evaluate ECR registries against approval criteria: Evaluate each ECR registry against your approval criteria to determine whether it should be approved or not. This can be done by reviewing the registry settings and configuration, as well as conducting security assessments and vulnerability scans.\n4. Establish policies and procedures: Establish policies and procedures that outline how ECR registries will be approved, maintained, and monitored. This should include guidelines for developers to follow when selecting a registry for their container images.\n5. Implement access controls: Implement access controls to ensure that only approved ECR registries are used to store and distribute container images. This can be done by setting up IAM policies and roles that restrict access to unapproved registries or create a whitelist of approved registries.\n6. Monitor and review: Continuously monitor and review the use of ECR registries to ensure that they continue to meet your approval criteria. This can include regularly reviewing access logs, scanning for vulnerabilities, and conducting periodic audits.\n\n By following these steps, you can minimize AWS ECR container registries to only those approved, which can help to improve security, reduce complexity, and streamline container management in your organization. 
Additionally, AWS provides several tools and services that can help you manage your ECR registries, such as AWS Config, AWS CloudFormation, and AWS Identity and Access Management (IAM).", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Supply chain", + "id": "Cat-6" + } + } + }, + { + "guid": "", + "name": "CIS-3.1.2 Ensure that the kubelet kubeconfig file ownership is set to root:root", + "controlID": "C-0167", + "creationTime": "", + "description": "If `kubelet` is running, ensure that the file ownership of its kubeconfig file is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on each worker node.\n\n For example,\n\n \n```\nchown root:root \n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-3.1.4 Ensure that the kubelet configuration file ownership is set to root:root", + "controlID": "C-0171", + "creationTime": "", + "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file is owned by root:root.", + "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchown root:root /etc/kubernetes/kubelet/config.json\n\n```", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-3.2.1 Ensure that the Anonymous Auth is Not Enabled", + "controlID": "C-0172", + "creationTime": "", + "description": "Disable anonymous requests to the Kubelet server.", + "remediation": "**Remediation Method 1:**\n\n If configuring via the Kubelet config file, you first need to locate the file.\n\n To do this, SSH to each node and execute the following command to find the kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command provides details of the active kubelet process, from which we can see the location of the configuration file provided to the kubelet service with the `--config` argument. The file can be viewed with a command such as `more` or `less`, like so:\n\n \n```\nsudo less /path/to/kubelet-config.json\n\n```\n Disable Anonymous Authentication by setting the following parameter:\n\n \n```\n\"authentication\": { \"anonymous\": { \"enabled\": false } }\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file on each worker node and ensure the below parameters are part of the `KUBELET_ARGS` variable string.\n\n For systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, then this file can be found at `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf`. Otherwise, you may need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n \n```\n--anonymous-auth=false\n\n```\n **For Both Remediation Steps:**\n\n Based on your system, restart the `kubelet` service and check the service status.\n\n The following example is for operating systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the `systemctl` command. 
If `systemctl` is not available then you will need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-3.2.2 Ensure that the --authorization-mode argument is not set to AlwaysAllow", + "controlID": "C-0173", + "creationTime": "", + "description": "Do not allow all requests. Enable explicit authorization.", + "remediation": "**Remediation Method 1:**\n\n If configuring via the Kubelet config file, you first need to locate the file.\n\n To do this, SSH to each node and execute the following command to find the kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command provides details of the active kubelet process, from which we can see the location of the configuration file provided to the kubelet service with the `--config` argument. The file can be viewed with a command such as `more` or `less`, like so:\n\n \n```\nsudo less /path/to/kubelet-config.json\n\n```\n Enable Webhook Authentication by setting the following parameter:\n\n \n```\n\"authentication\": { \"webhook\": { \"enabled\": true } }\n\n```\n Next, set the Authorization Mode to `Webhook` by setting the following parameter:\n\n \n```\n\"authorization\": { \"mode\": \"Webhook }\n\n```\n Finer detail of the `authentication` and `authorization` fields can be found in the [Kubelet Configuration documentation](https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/).\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file on each worker node and ensure the below parameters are part of the `KUBELET_ARGS` variable string.\n\n For systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, then this file can be found at `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf`. Otherwise, you may need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n \n```\n--authentication-token-webhook\n--authorization-mode=Webhook\n\n```\n **For Both Remediation Steps:**\n\n Based on your system, restart the `kubelet` service and check the service status.\n\n The following example is for operating systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the `systemctl` command. 
If `systemctl` is not available then you will need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-3.2.3 Ensure that a Client CA File is Configured", + "controlID": "C-0174", + "creationTime": "", + "description": "Enable Kubelet authentication using certificates.", + "remediation": "**Remediation Method 1:**\n\n If configuring via the Kubelet config file, you first need to locate the file.\n\n To do this, SSH to each node and execute the following command to find the kubelet process:\n\n \n```\nps -ef | grep kubelet\n\n```\n The output of the above command provides details of the active kubelet process, from which we can see the location of the configuration file provided to the kubelet service with the `--config` argument. The file can be viewed with a command such as `more` or `less`, like so:\n\n \n```\nsudo less /path/to/kubelet-config.json\n\n```\n Configure the client certificate authority file by setting the following parameter appropriately:\n\n \n```\n\"authentication\": { \"x509\": {\"clientCAFile\": } }\"\n\n```\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file on each worker node and ensure the below parameters are part of the `KUBELET_ARGS` variable string.\n\n For systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, then this file can be found at `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf`. Otherwise, you may need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n \n```\n--client-ca-file=\n\n```\n **For Both Remediation Steps:**\n\n Based on your system, restart the `kubelet` service and check the service status.\n\n The following example is for operating systems using `systemd`, such as the Amazon EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the `systemctl` command. 
If `systemctl` is not available then you will need to look up documentation for your chosen operating system to determine which service manager is configured:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-3.2.4 Ensure that the --read-only-port is disabled", + "controlID": "C-0175", + "creationTime": "", + "description": "Disable the read-only port.", + "remediation": "If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to 0\n\n \n```\n\"readOnlyPort\": 0\n\n```\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--read-only-port=0\n\n```\n For each remediation:\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-3.2.5 Ensure that the --streaming-connection-idle-timeout argument is not set to 0", + "controlID": "C-0176", + "creationTime": "", + "description": "Do not disable timeouts on streaming connections.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to a non-zero value in the format of #h#m#s\n\n \n```\n\"streamingConnectionIdleTimeout\": \"4h0m0s\"\n\n```\n You should ensure that the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not specify a `--streaming-connection-idle-timeout` argument because it would override the Kubelet config file.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--streaming-connection-idle-timeout=4h0m0s\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"streamingConnectionIdleTimeout\":` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "rules": [], + "baseScore": 3, + 
"scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-3.2.6 Ensure that the --make-iptables-util-chains argument is set to true", + "controlID": "C-0178", + "creationTime": "", + "description": "Allow Kubelet to manage iptables.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"makeIPTablesUtilChains\": true\n\n```\n Ensure that `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not set the `--make-iptables-util-chains` argument because that would override your Kubelet config file.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--make-iptables-util-chains:true\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"makeIPTablesUtilChains.: true` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediations:**\nBased on your system, restart the `kubelet` service and check status\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "rules": [], + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-3.2.7 Ensure that the --eventRecordQPS argument is set to 0 or a level which ensures appropriate event capture", + "controlID": "C-0180", + "creationTime": "", + "description": "Security relevant information should be captured. The eventRecordQPS on the Kubelet configuration can be used to limit the rate at which events are gathered and sets the maximum event creations per second. Setting this too low could result in relevant events not being logged, however the unlimited setting of `0` could result in a denial of service on the kubelet.", + "remediation": "If using a Kubelet config file, edit the file to set `eventRecordQPS:` to an appropriate level.\n\n If using command line arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. 
For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "rules": [], + "baseScore": 2, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-3.2.8 Ensure that the --rotate-certificates argument is not present or is set to true", + "controlID": "C-0181", + "creationTime": "", + "description": "Enable kubelet client certificate rotation.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"RotateCertificate\":true\n\n```\n Additionally, ensure that the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf does not set the --RotateCertificate executable argument to false because this would override the Kubelet config file.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--RotateCertificate=true\n\n```", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-3.2.9 Ensure that the RotateKubeletServerCertificate argument is set to true", + "controlID": "C-0183", + "creationTime": "", + "description": "Enable kubelet server certificate rotation.", + "remediation": "**Remediation Method 1:**\n\n If modifying the Kubelet config file, edit the kubelet-config.json file `/etc/kubernetes/kubelet/kubelet-config.json` and set the below parameter to true\n\n \n```\n\"featureGates\": {\n \"RotateKubeletServerCertificate\":true\n},\n\n```\n Additionally, ensure that the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` does not set the `--rotate-kubelet-server-certificate` executable argument to false because this would override the Kubelet config file.\n\n **Remediation Method 2:**\n\n If using executable arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubelet-args.conf` on each worker node and add the below parameter at the end of the `KUBELET_ARGS` variable string.\n\n \n```\n--rotate-kubelet-server-certificate=true\n\n```\n **Remediation Method 3:**\n\n If using the api configz endpoint consider searching for the status of `\"RotateKubeletServerCertificate\":` by extracting the live configuration from the nodes running kubelet.\n\n \\*\\*See detailed step-by-step configmap procedures in [Reconfigure a Node's Kubelet in a Live Cluster](https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), and then rerun the curl statement from audit process to check for kubelet configuration changes\n\n \n```\nkubectl proxy --port=8001 &\n\nexport HOSTNAME_PORT=localhost:8001 (example host and port number)\nexport NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from \"kubectl get nodes\")\n\ncurl -sSL \"http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz\"\n\n```\n **For all three remediation methods:**\nRestart the `kubelet` service and check status. 
The example below is for when using systemctl to manage services:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\nsystemctl status kubelet -l\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CIS-4.1.1 Ensure that the cluster-admin role is only used where required", + "controlID": "C-0185", + "creationTime": "", + "description": "The RBAC role `cluster-admin` provides wide-ranging powers over the environment and should be used only where and when needed.", + "remediation": "Identify all ClusterRoleBindings to the cluster-admin role. Check if they are used and if they need this role or if they could use a role with fewer privileges.\n\n Where possible, first bind users to a lower privileged role and then remove the ClusterRoleBinding to the cluster-admin role :\n\n \n```\nkubectl delete clusterrolebinding [name]\n\n```", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "CIS-4.1.2 Minimize access to secrets", + "controlID": "C-0186", + "creationTime": "", + "description": "The Kubernetes API stores secrets, which may be service account tokens for the Kubernetes API or credentials used by workloads in the cluster. Access to these secrets should be restricted to the smallest possible group of users to reduce the risk of privilege escalation.", + "remediation": "Where possible, remove `get`, `list` and `watch` access to `secret` objects in the cluster.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "CIS-4.1.3 Minimize wildcard use in Roles and ClusterRoles", + "controlID": "C-0187", + "creationTime": "", + "description": "Kubernetes Roles and ClusterRoles provide access to resources based on sets of objects and actions that can be taken on those objects. 
It is possible to set either of these to be the wildcard \"\\*\" which matches all items.\n\n Use of wildcards is not optimal from a security perspective as it may allow for inadvertent access to be granted when new resources are added to the Kubernetes API either as CRDs or in later versions of the product.", + "remediation": "Where possible replace any use of wildcards in clusterroles and roles with specific objects or actions.", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "CIS-4.1.4 Minimize access to create pods", + "controlID": "C-0188", + "creationTime": "", + "description": "The ability to create pods in a namespace can provide a number of opportunities for privilege escalation, such as assigning privileged service accounts to these pods or mounting hostPaths with access to sensitive data (unless Pod Security Policies are implemented to restrict this access)\n\n As such, access to create new pods should be restricted to the smallest possible group of users.", + "remediation": "Where possible, remove `create` access to `pod` objects in the cluster.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "CIS-4.1.5 Ensure that default service accounts are not actively used.", + "controlID": "C-0189", + "creationTime": "", + "description": "The `default` service account should not be used to ensure that rights granted to applications can be more easily audited and reviewed.", + "remediation": "Create explicit service accounts wherever a Kubernetes workload requires specific access to the Kubernetes API server.\n\n Modify the configuration of each default service account to include this value\n\n \n```\nautomountServiceAccountToken: false\n\n```\n Automatic remediation for the default account:\n\n `kubectl patch serviceaccount default -p $'automountServiceAccountToken: false'`", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CIS-4.1.6 Ensure that Service Account Tokens are only mounted where necessary", + "controlID": "C-0190", + "creationTime": "", + "description": "Service accounts tokens should not be mounted in pods except where the workload running in the pod explicitly needs to communicate with the API server", + "remediation": "Regularly review pod and service account objects in the cluster to ensure that the `automountServiceAccountToken` setting is `false` for pods and accounts that do not explicitly require API server access.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CIS-4.1.8 Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster", + "controlID": "C-0191", + "creationTime": "", + "description": "Cluster roles and roles with the impersonate, bind or escalate permissions should not be granted unless strictly required. 
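As an illustration of the RBAC controls above (CIS-4.1.2 through 4.1.8), the sketch below shows a narrowly scoped Role that lists explicit resources and verbs instead of the "\*" wildcard and grants none of the create, delete, impersonate, bind or escalate verbs. The namespace and names are placeholders, not part of the benchmark text.

```
# Hypothetical example of a narrowly scoped Role (no wildcards,
# no secret access, no escalation-prone verbs).
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: app-reader
  namespace: my-app
rules:
  - apiGroups: [""]                         # core API group only, never "*"
    resources: ["configmaps", "services"]   # explicit resources, no "secrets"
    verbs: ["get", "list", "watch"]         # no create/delete/impersonate/bind/escalate
```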
Each of these permissions allow a particular subject to escalate their privileges beyond those explicitly granted by cluster administrators", + "remediation": "Where possible, remove the impersonate, bind and escalate rights from subjects.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "CIS-4.2.1 Minimize the admission of privileged containers", + "controlID": "C-0193", + "creationTime": "", + "description": "Do not generally permit containers to be run with the `securityContext.privileged` flag set to `true`.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of privileged containers.\n\n To enable PSA for a namespace in your cluster, set the pod-security.kubernetes.io/enforce label with the policy value you want to enforce.\n\n `kubectl label --overwrite ns NAMESPACE pod-security.kubernetes.io/enforce=restricted`\n\n The above command enforces the restricted policy for the NAMESPACE namespace.\n\n You can also enable Pod Security Admission for all your namespaces. For example:\n\n \n```\nkubectl label --overwrite ns --all pod-security.kubernetes.io/warn=baseline\n\n```", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CIS-4.2.2 Minimize the admission of containers wishing to share the host process ID namespace", + "controlID": "C-0194", + "creationTime": "", + "description": "Do not generally permit containers to be run with the `hostPID` flag set to true.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostPID` containers.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CIS-4.2.3 Minimize the admission of containers wishing to share the host IPC namespace", + "controlID": "C-0195", + "creationTime": "", + "description": "Do not generally permit containers to be run with the `hostIPC` flag set to true.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostIPC` containers.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CIS-4.2.4 Minimize the admission of containers wishing to share the host network namespace", + "controlID": "C-0196", + "creationTime": "", + "description": "Do not generally permit containers to be run with the `hostNetwork` flag set to true.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostNetwork` containers.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CIS-4.2.5 Minimize the admission of containers with allowPrivilegeEscalation", + "controlID": "C-0197", + "creationTime": "", + "description": "Do not generally permit containers to be run with the `allowPrivilegeEscalation` flag set to `true`. 
Allowing this right can lead to a process running a container getting more rights than it started with.\n\n It's important to note that these rights are still constrained by the overall container sandbox, and this setting does not relate to the use of privileged containers.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers with `.spec.allowPrivilegeEscalation` set to `true`.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CIS-4.3.1 Ensure CNI plugin supports network policies.", + "controlID": "C-0205", + "creationTime": "", + "description": "There are a variety of CNI plugins available for Kubernetes. If the CNI in use does not support Network Policies it may not be possible to effectively restrict traffic in the cluster.", + "remediation": "As with RBAC policies, network policies should adhere to the policy of least privileged access. Start by creating a deny all policy that restricts all inbound and outbound traffic from a namespace or create a global policy using Calico.", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Network", + "id": "Cat-4" + } + }, + { + "guid": "", + "name": "CIS-4.3.2 Ensure that all Namespaces have Network Policies defined", + "controlID": "C-0206", + "creationTime": "", + "description": "Use network policies to isolate traffic in your cluster network.", + "remediation": "Follow the documentation and create `NetworkPolicy` objects as you need them.", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Network", + "id": "Cat-4" + } + }, + { + "guid": "", + "name": "CIS-4.4.1 Prefer using secrets as files over secrets as environment variables", + "controlID": "C-0207", + "creationTime": "", + "description": "Kubernetes supports mounting secrets as data volumes or as environment variables. Minimize the use of environment variable secrets.", + "remediation": "If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables.", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Secrets", + "id": "Cat-3" + } + } + }, + { + "guid": "", + "name": "CIS-4.5.1 Create administrative boundaries between resources using namespaces", + "controlID": "C-0209", + "creationTime": "", + "description": "Use namespaces to isolate your Kubernetes objects.", + "remediation": "Follow the documentation and create namespaces for objects in your deployment as you need them.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CIS-4.5.2 The default namespace should not be used", + "controlID": "C-0212", + "creationTime": "", + "description": "Kubernetes provides a default namespace, where objects are placed if no namespace is specified for them. 
Placing objects in this namespace makes application of RBAC and other controls more difficult.", + "remediation": "Ensure that namespaces are created to allow for appropriate segregation of Kubernetes resources and that all new resources are created in a specific namespace.", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CIS-5.1.1 Ensure Image Vulnerability Scanning using Amazon ECR image scanning or a third party provider", + "controlID": "C-0221", + "creationTime": "", + "description": "Scan images being deployed to Amazon EKS for vulnerabilities.", + "remediation": "To utilize AWS ECR for Image scanning please follow the steps below:\n\n To create a repository configured for scan on push (AWS CLI)\n\n \n```\naws ecr create-repository --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE\n\n```\n To edit the settings of an existing repository (AWS CLI)\n\n \n```\naws ecr put-image-scanning-configuration --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE\n\n```\n Use the following steps to start a manual image scan using the AWS Management Console.\n\n 1. Open the Amazon ECR console at .\n2. From the navigation bar, choose the Region to create your repository in.\n3. In the navigation pane, choose Repositories.\n4. On the Repositories page, choose the repository that contains the image to scan.\n5. On the Images page, select the image to scan and then choose Scan.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } + }, + { + "guid": "", + "name": "CIS-5.1.2 Minimize user access to Amazon ECR", + "controlID": "C-0222", + "creationTime": "", + "description": "Restrict user access to Amazon ECR, limiting interaction with build images to only authorized personnel and service accounts.", + "remediation": "Before you use IAM to manage access to Amazon ECR, you should understand what IAM features are available to use with Amazon ECR. To get a high-level view of how Amazon ECR and other AWS services work with IAM, see AWS Services That Work with IAM in the IAM User Guide.\n\n **Topics**\n\n * Amazon ECR Identity-Based Policies\n* Amazon ECR Resource-Based Policies\n* Authorization Based on Amazon ECR Tags\n* Amazon ECR IAM Roles\n\n **Amazon ECR Identity-Based Policies**\n\n With IAM identity-based policies, you can specify allowed or denied actions and resources as well as the conditions under which actions are allowed or denied. Amazon ECR supports specific actions, resources, and condition keys. To learn about all of the elements that you use in a JSON policy, see IAM JSON Policy Elements Reference in the IAM User Guide.\n\n **Actions**\nThe Action element of an IAM identity-based policy describes the specific action or actions that will be allowed or denied by the policy. Policy actions usually have the same name as the associated AWS API operation. The action is used in a policy to grant permissions to perform the associated operation.\n\n Policy actions in Amazon ECR use the following prefix before the action: ecr:. For example, to grant someone permission to create an Amazon ECR repository with the Amazon ECR CreateRepository API operation, you include the ecr:CreateRepository action in their policy. Policy statements must include either an Action or NotAction element. 
Amazon ECR defines its own set of actions that describe tasks that you can perform with this service.\n\n To specify multiple actions in a single statement, separate them with commas as follows:\n\n `\"Action\": [ \"ecr:action1\", \"ecr:action2\"`\n\n You can specify multiple actions using wildcards (\\*). For example, to specify all actions that begin with the word Describe, include the following action:\n\n `\"Action\": \"ecr:Describe*\"`\n\n To see a list of Amazon ECR actions, see Actions, Resources, and Condition Keys for Amazon Elastic Container Registry in the IAM User Guide.\n\n **Resources**\nThe Resource element specifies the object or objects to which the action applies. Statements must include either a `Resource` or a `NotResource` element. You specify a resource using an ARN or using the wildcard (\\*) to indicate that the statement applies to all resources.\n\n An Amazon ECR repository resource has the following ARN:\n\n `arn:${Partition}:ecr:${Region}:${Account}:repository/${Repository-name}`\n\n For more information about the format of ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces.\n\n For example, to specify the my-repo repository in the us-east-1 Region in your statement, use the following ARN:\n\n `\"Resource\": \"arn:aws:ecr:us-east-1:123456789012:repository/my-repo\"`\n\n To specify all repositories that belong to a specific account, use the wildcard (\\*):\n\n `\"Resource\": \"arn:aws:ecr:us-east-1:123456789012:repository/*\"`\n\n To specify multiple resources in a single statement, separate the ARNs with commas.\n\n `\"Resource\": [ \"resource1\", \"resource2\"`\n\n To see a list of Amazon ECR resource types and their ARNs, see Resources Defined by Amazon Elastic Container Registry in the IAM User Guide. To learn with which actions you can specify the ARN of each resource, see Actions Defined by Amazon Elastic Container Registry.\n\n **Condition Keys**\nThe Condition element (or Condition block) lets you specify conditions in which a statement is in effect. The Condition element is optional. You can build conditional expressions that use condition operators, such as equals or less than, to match the condition in the policy with values in the request.\n\n If you specify multiple Condition elements in a statement, or multiple keys in a single Condition element, AWS evaluates them using a logical AND operation. If you specify multiple values for a single condition key, AWS evaluates the condition using a logical OR operation. All of the conditions must be met before the statement's permissions are granted.\n\n You can also use placeholder variables when you specify conditions. For example, you can grant an IAM user permission to access a resource only if it is tagged with their IAM user name. For more information, see IAM Policy Elements: Variables and Tags in the IAM User Guide.\n\n Amazon ECR defines its own set of condition keys and also supports using some global condition keys. To see all AWS global condition keys, see AWS Global Condition Context Keys in the IAM User Guide.\n\n Most Amazon ECR actions support the aws:ResourceTag and ecr:ResourceTag condition keys. For more information, see Using Tag-Based Access Control.\n\n To see a list of Amazon ECR condition keys, see Condition Keys Defined by Amazon Elastic Container Registry in the IAM User Guide. 
To learn with which actions and resources you can use a condition key, see Actions Defined by Amazon Elastic Container Registry.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "EKS" + ] + } + }, + { + "guid": "", + "name": "CIS-5.1.3 Minimize cluster access to read-only for Amazon ECR", + "controlID": "C-0223", + "creationTime": "", + "description": "Configure the Cluster Service Account with Storage Object Viewer Role to only allow read-only access to Amazon ECR.", + "remediation": "You can use your Amazon ECR images with Amazon EKS, but you need to satisfy the following prerequisites.\n\n The Amazon EKS worker node IAM role (NodeInstanceRole) that you use with your worker nodes must possess the following IAM policy permissions for Amazon ECR.\n\n \n```\n{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"ecr:BatchCheckLayerAvailability\",\n \"ecr:BatchGetImage\",\n \"ecr:GetDownloadUrlForLayer\",\n \"ecr:GetAuthorizationToken\"\n ],\n \"Resource\": \"*\"\n }\n ]\n}\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cloud" + ] + } + }, + { + "guid": "", + "name": "CIS-5.2.1 Prefer using dedicated EKS Service Accounts", + "controlID": "C-0225", + "creationTime": "", + "description": "Kubernetes workloads should not use cluster node service accounts to authenticate to Amazon EKS APIs. Each Kubernetes workload that needs to authenticate to other AWS services using AWS IAM should be provisioned with a dedicated Service account.", + "remediation": "With IAM roles for service accounts on Amazon EKS clusters, you can associate an IAM role with a Kubernetes service account. This service account can then provide AWS permissions to the containers in any pod that uses that service account. With this feature, you no longer need to provide extended permissions to the worker node IAM role so that pods on that node can call AWS APIs.\n\n Applications must sign their AWS API requests with AWS credentials. This feature provides a strategy for managing credentials for your applications, similar to the way that Amazon EC2 instance profiles provide credentials to Amazon EC2 instances. Instead of creating and distributing your AWS credentials to the containers or using the Amazon EC2 instance’s role, you can associate an IAM role with a Kubernetes service account. The applications in the pod’s containers can then use an AWS SDK or the AWS CLI to make API requests to authorized AWS services.\n\n The IAM roles for service accounts feature provides the following benefits:\n\n * Least privilege — By using the IAM roles for service accounts feature, you no longer need to provide extended permissions to the worker node IAM role so that pods on that node can call AWS APIs. You can scope IAM permissions to a service account, and only pods that use that service account have access to those permissions. This feature also eliminates the need for third-party solutions such as kiam or kube2iam.\n* Credential isolation — A container can only retrieve credentials for the IAM role that is associated with the service account to which it belongs. 
A container never has access to credentials that are intended for another container that belongs to another pod.\n* Audit-ability — Access and event logging is available through CloudTrail to help ensure retrospective auditing.\n\n To get started, see Enabling IAM roles for service accounts on your cluster.\n\n For an end-to-end walkthrough using eksctl, see Walkthrough: Updating a DaemonSet to use IAM for service accounts.", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "EKS" + ] + } + }, + { + "guid": "", + "name": "CIS-5.4.1 Restrict Access to the Control Plane Endpoint", + "controlID": "C-0227", + "creationTime": "", + "description": "Enable Endpoint Private Access to restrict access to the cluster's control plane to only an allowlist of authorized IPs.", + "remediation": "By enabling private endpoint access to the Kubernetes API server, all communication between your nodes and the API server stays within your VPC. You can also limit the IP addresses that can access your API server from the internet, or completely disable internet access to the API server.\n\n With this in mind, you can update your cluster accordingly using the AWS CLI to ensure that Private Endpoint Access is enabled.\n\n If you choose to also enable Public Endpoint Access then you should also configure a list of allowable CIDR blocks, resulting in restricted access from the internet. If you specify no CIDR blocks, then the public API server endpoint is able to receive and process requests from all IP addresses by defaulting to ['0.0.0.0/0'].\n\n For example, the following command would enable private access to the Kubernetes API as well as limited public access over the internet from a single IP address (noting the /32 CIDR suffix):\n\n `aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true, endpointPublicAccess=true, publicAccessCidrs=\"203.0.113.5/32\"`\n\n Note:\n\n The CIDR blocks specified cannot include reserved addresses.\nThere is a maximum number of CIDR blocks that you can specify. 
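Returning to CIS-5.2.1 (dedicated EKS service accounts) above, IAM roles for service accounts are typically wired up by annotating the ServiceAccount with the role it should assume. A sketch under the assumption that the IAM role already exists; all names and the ARN are placeholders:

```
# Hypothetical ServiceAccount annotated for IAM Roles for Service
# Accounts (IRSA); pods using it receive only this role's credentials.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: payments-api
  namespace: my-app
  annotations:
    eks.amazonaws.com/role-arn: arn:aws:iam::123456789012:role/payments-api-role
```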
For more information, see the EKS Service Quotas link in the references section.\nFor more detailed information, see the EKS Cluster Endpoint documentation link in the references section.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cloud" + ] + } + }, + { + "guid": "", + "name": "CIS-5.4.2 Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled", + "controlID": "C-0228", + "creationTime": "", + "description": "Disable access to the Kubernetes API from outside the node network if it is not required.", + "remediation": "By enabling private endpoint access to the Kubernetes API server, all communication between your nodes and the API server stays within your VPC.\n\n With this in mind, you can update your cluster accordingly using the AWS CLI to ensure that Private Endpoint Access is enabled.\n\n For example, the following command would enable private access to the Kubernetes API and ensure that no public access is permitted:\n\n `aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true,endpointPublicAccess=false`\n\n Note: For more detailed information, see the EKS Cluster Endpoint documentation link in the references section.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "EKS" + ] + } + }, + { + "guid": "", + "name": "CIS-5.4.3 Ensure clusters are created with Private Nodes", + "controlID": "C-0229", + "creationTime": "", + "description": "Disable public IP addresses for cluster nodes, so that they only have private IP addresses. Private Nodes are nodes with no public IP addresses.", + "remediation": "To disable public IP addresses for EKS nodegroup nodes using the AWS CLI, you must ensure the following when running create-nodegroup:\n\n * Use private subnets (that don't auto-assign public IPs).\n* Set associatePublicIpAddress to false.\n\n \n```\n\"NetworkInterfaces\": [{\n \"AssociatePublicIpAddress\": false\n}]\n\n```\n You can restrict access to the control plane endpoint using:\n\n \n```\naws eks update-cluster-config \\\n --name \\\n --region \\\n --resources-vpc-config endpointPublicAccess=false, endpointPrivateAccess=true\n\n```\n This makes the API server private, but does not affect node IPs.\n\n To ensure nodes use only private IPs:\n\n * Use aws eks create-nodegroup with only private subnets, or\n* Use a launch template with AssociatePublicIpAddress=false.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "EKS" + ] + } + }, + { + "guid": "", + "name": "CIS-5.4.4 Ensure Network Policy is Enabled and set as appropriate", + "controlID": "C-0230", + "creationTime": "", + "description": "Amazon EKS provides two ways to implement network policy. You choose a network policy option when you create an EKS cluster. The policy option can't be changed after the cluster is created:\nCalico Network Policies, an open-source network and network security solution founded by Tigera.\nBoth implementations use Linux iptables to enforce the specified policies. Policies are translated into sets of allowed and disallowed IP pairs. 
These pairs are then programmed as IPTable filter rules.", + "remediation": "Utilize Calico or other network policy engine to segment and isolate your traffic.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "EKS" + ] + } + }, + { + "guid": "", + "name": "CIS-5.4.5 Encrypt traffic to HTTPS load balancers with TLS certificates", + "controlID": "C-0231", + "creationTime": "", + "description": "Encrypt traffic to HTTPS load balancers using TLS certificates.", + "remediation": "Your load balancer vendor can provide details on configuring HTTPS with TLS.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "EKS" + ] + } + }, + { + "guid": "", + "name": "CIS-5.5.1 Manage Kubernetes RBAC users with AWS IAM Authenticator for Kubernetes or Upgrade to AWS CLI v1.16.156 or greater", + "controlID": "C-0232", + "creationTime": "", + "description": "Amazon EKS uses IAM to provide authentication to your Kubernetes cluster through the AWS IAM Authenticator for Kubernetes. You can configure the stock kubectl client to work with Amazon EKS by installing the AWS IAM Authenticator for Kubernetes and modifying your kubectl configuration file to use it for authentication.", + "remediation": "Refer to the '[Managing users or IAM roles for your cluster](https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html)' in Amazon EKS documentation.\n\n Note: If using AWS CLI version 1.16.156 or later there is no need to install the AWS IAM Authenticator anymore.\n\n The relevant AWS CLI commands, depending on the use case, are:\n\n \n```\naws eks update-kubeconfig\naws eks get-token\n\n```", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "EKS" + ] + } + }, + { + "guid": "", + "name": "CIS-4.4.2 Consider external secret storage", + "controlID": "C-0234", + "creationTime": "", + "description": "Consider the use of an external secrets storage and management system, instead of using Kubernetes Secrets directly, if you have more complex secret management needs. Ensure the solution requires authentication to access secrets, has auditing of access to and use of secrets, and encrypts secrets. Some solutions also make it easier to rotate secrets.", + "remediation": "Refer to the secrets management options offered by your cloud provider or a third-party secrets management solution.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + } + }, + { + "guid": "", + "name": "CIS-3.1.3 Ensure that the kubelet configuration file has permissions set to 644 or more restrictive", + "controlID": "C-0235", + "creationTime": "", + "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file has permissions of 644 or more restrictive.", + "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchmod 644 /etc/kubernetes/kubelet/config.json\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "EKS" + ] + } + }, + { + "guid": "", + "name": "CIS-3.1.1 Ensure that the kubeconfig file permissions are set to 644 or more restrictive", + "controlID": "C-0238", + "creationTime": "", + "description": "If kubelet is running, and if it is configured by a kubeconfig file, ensure that the proxy kubeconfig file has permissions of 644 or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the each worker\nnode. 
For example,\n\n \n```\nchmod 644 \n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "EKS" + ] + } + }, + { + "guid": "", + "name": "CIS-4.1.7 Cluster Access Manager API to streamline and enhance the management of access controls within EKS clusters", + "attributes": { + "armoBuiltin": true + }, + "id": "CIS-4.1.7", + "controlID": "C-0285", + "creationTime": "", + "description": "Amazon EKS has introduced the Cluster Access Manager API to streamline and enhance the management of access controls within EKS clusters. This new approach is now the recommended method over the traditional `aws-auth` ConfigMap for managing Role-Based Access Control (RBAC) and Service Accounts.\n\n Key Advantages of Using the Cluster Access Manager API:\n\n 1. **Simplified Access Management:** The Cluster Access Manager API allows administrators to manage access directly through the Amazon EKS API, eliminating the need to modify the aws-auth ConfigMap manually. This reduces operational overhead and minimizes the risk of misconfigurations.\n2. **Enhanced Security Controls:** With this API, administrators can assign predefined AWS-managed Kubernetes permissions, known as \"access policies,\" to IAM principals. This provides a more secure and auditable way to manage permissions compared to manual ConfigMap edits.\n3. **Improved Visibility and Auditing:** The API offers better visibility into cluster access configurations, facilitating easier auditing and compliance checks. Administrators can list and describe access entries and policies directly through the EKS API.", + "remediation": "Log in to the AWS Management Console.\n\n Navigate to Amazon EKS and select your EKS cluster.\n\n Go to the Access tab and click on \"Manage Access\" in the \"Access Configuration section\".\n\n Under Cluster Authentication Mode for Cluster Access settings.\n\n * Click `EKS API` to change `cluster will source authenticated IAM principals only from EKS access entry APIs`.\n* Click `ConfigMap` to change `cluster will source authenticated IAM principals only from the aws-auth ConfigMap`.\n* Note: `EKS API and ConfigMap` must be selected during Cluster creation and cannot be changed once the Cluster is provisioned.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + } + ], + "controlsIDs": [ + "C-0066", + "C-0067", + "C-0078", + "C-0167", + "C-0171", + "C-0172", + "C-0173", + "C-0174", + "C-0175", + "C-0176", + "C-0178", + "C-0180", + "C-0181", + "C-0183", + "C-0185", + "C-0186", + "C-0187", + "C-0188", + "C-0189", + "C-0190", + "C-0191", + "C-0193", + "C-0194", + "C-0195", + "C-0196", + "C-0197", + "C-0205", + "C-0206", + "C-0207", + "C-0209", + "C-0212", + "C-0221", + "C-0222", + "C-0223", + "C-0225", + "C-0227", + "C-0228", + "C-0229", + "C-0230", + "C-0231", + "C-0232", + "C-0234", + "C-0235", + "C-0238", + "C-0285" + ], + "subSections": { + "2": { + "guid": "", + "name": "Control Plane Configuration", + "id": "2", + "subSections": { + "1": { + "guid": "", + "name": "Logging", + "id": "2.1", + "controlsIDs": [ + "C-0067" + ] + } + } + }, + "3": { + "guid": "", + "name": "Worker Nodes", + "id": "3", + "subSections": { + "1": { + "guid": "", + "name": "Worker Node Configuration Files", + "id": "3.1", + "controlsIDs": [ + "C-0167", + "C-0171", + "C-0235", + "C-0238" + ] + }, + "2": { + "guid": "", + "name": "Kubelet", + "id": "3.2", + "controlsIDs": [ + "C-0172", + "C-0173", + "C-0174", + "C-0175", + 
"C-0176", + "C-0178", + "C-0180", + "C-0181", + "C-0183" + ] + } + } + }, + "4": { + "guid": "", + "name": "Policies", + "id": "4", + "subSections": { + "5": { + "guid": "", + "name": "General Policies", + "id": "4.5", + "controlsIDs": [ + "C-0209", + "C-0212" + ] + }, + "1": { + "guid": "", + "name": "RBAC and Service Accounts", + "id": "4.1", + "controlsIDs": [ + "C-0185", + "C-0186", + "C-0187", + "C-0188", + "C-0189", + "C-0190", + "C-0191", + "C-0285" + ] + }, + "2": { + "guid": "", + "name": "Pod Security Standards", + "id": "4.2", + "controlsIDs": [ + "C-0193", + "C-0194", + "C-0195", + "C-0196", + "C-0197" + ] + }, + "3": { + "guid": "", + "name": "CNI Plugin", + "id": "4.3", + "controlsIDs": [ + "C-0205", + "C-0206" + ] + }, + "4": { + "guid": "", + "name": "Secrets Management", + "id": "4.4", + "controlsIDs": [ + "C-0207", + "C-0234" + ] + } + } + }, + "5": { + "guid": "", + "name": "Managed services", + "id": "5", + "subSections": { + "5": { + "guid": "", + "name": "Authentication and Authorization", + "id": "5.5", + "controlsIDs": [ + "C-0232" + ] + }, + "1": { + "guid": "", + "name": "Image Registry and Image Scanning", + "id": "5.1", + "controlsIDs": [ + "C-0078", + "C-0221", + "C-0222", + "C-0223" + ] + }, + "2": { + "guid": "", + "name": "Identity and Access Management (IAM)", + "id": "5.2", + "controlsIDs": [ + "C-0225" + ] + }, + "3": { + "guid": "", + "name": "AWS EKS Key Management Service", + "id": "5.3", + "controlsIDs": [ + "C-0066" + ] + }, + "4": { + "guid": "", + "name": "Cluster Networking", + "id": "5.4", + "controlsIDs": [ + "C-0227", + "C-0228", + "C-0229", + "C-0230", + "C-0231" + ] + } + } + } + }, + "scanningScope": { + "matches": [ + "EKS" + ] + } + }, + { + "guid": "", + "name": "WorkloadScan", + "attributes": { + "builtin": true + }, + "creationTime": "", + "description": "Framework for scanning a workload", + "typeTags": [ + "security" + ], + "controls": [ + { + "guid": "", + "name": "Images from allowed registry", + "attributes": { + "microsoftMitreColumns": [ + "Collection" + ], + "controlTypeTags": [ + "security", + "compliance" + ], + "actionRequired": "configuration" + }, + "controlID": "C-0078", + "creationTime": "", + "description": "This control is intended to ensure that all the used container images are taken from the authorized repositories. 
It allows user to list all the approved repositories and will fail all the images taken from any repository outside of this list.", + "remediation": "You should enable all trusted repositories in the parameters of this control.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Supply chain", + "id": "Cat-6" + } + } + }, + { + "guid": "", + "name": "Writable hostPath mount", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "devops", + "security-impact", + "smartRemediation" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" + ] + } + ], + "microsoftMitreColumns": [ + "Persistence", + "Lateral Movement" + ] + }, + "controlID": "C-0045", + "creationTime": "", + "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host and gain persistence.", + "remediation": "Refrain from using the hostPath mount or use the exception mechanism to remove unnecessary notifications.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Storage", + "id": "Cat-8" + } + } + }, + { + "guid": "", + "name": "HostPath mount", + "attributes": { + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" ] } ] }, - "controlID": "C-0009", + "controlID": "C-0048", "creationTime": "", - "description": "CPU and memory resources should have a limit set for every container or a namespace to prevent resource exhaustion. This control identifies all the Pods without resource limit definitions by checking their yaml definition file as well as their namespace LimitRange objects. It is also recommended to use ResourceQuota object to restrict overall namespace resources, but this is not verified by this control.", - "remediation": "Define LimitRange and Resource Limits in the namespace or in the deployment/POD yamls.", + "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host. This control identifies all the pods using hostPath mount.", + "remediation": "Remove hostPath mounts unless they are absolutely necessary and use exception mechanism to remove notifications.", "rules": [], - "baseScore": 7 + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Storage", + "id": "Cat-8" + } + } + }, + { + "guid": "", + "name": "Workload with PVC access", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Data Collection" + ] + } + ] + }, + "controlID": "C-0257", + "creationTime": "", + "description": "This control detects workloads that have mounted PVC. Workloads with PVC access can potentially expose sensitive information and elevate the risk of unauthorized access to critical resources.", + "remediation": "Review the workloads identified by this control and assess whether it's necessary to mount these PVCs. 
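Where a hostPath volume genuinely cannot be avoided, the exposure described in the hostPath controls above (C-0045, C-0048) can at least be narrowed by mounting a specific host directory read-only. A hedged sketch; the image, names and paths are placeholders:

```
# Hypothetical pod fragment: the host directory is scoped and mounted
# read-only, so the writable hostPath control (C-0045) no longer applies.
apiVersion: v1
kind: Pod
metadata:
  name: log-reader
spec:
  containers:
    - name: reader
      image: registry.example.com/log-reader:1.0
      volumeMounts:
        - name: host-logs
          mountPath: /host-logs
          readOnly: true
  volumes:
    - name: host-logs
      hostPath:
        path: /var/log
        type: Directory
```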
Remove PVC access from workloads that don't require it or ensure appropriate access controls are in place to protect sensitive information.", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Storage", + "id": "Cat-8" + } + } + }, + { + "guid": "", + "name": "Prefer using secrets as files over secrets as environment variables", + "controlID": "C-0207", + "creationTime": "", + "description": "Kubernetes supports mounting secrets as data volumes or as environment variables. Minimize the use of environment variable secrets.", + "remediation": "If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables.", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Secrets", + "id": "Cat-3" + } + } + }, + { + "guid": "", + "name": "Automatic mapping of service account", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ] + }, + "controlID": "C-0034", + "creationTime": "", + "description": "Potential attacker may gain access to a pod and steal its service account token. Therefore, it is recommended to disable automatic mapping of the service account tokens in service account configuration and enable it only for pods that need to use them.", + "remediation": "Disable automatic mounting of service account tokens to pods either at the service account level or at the individual pod level, by specifying the automountServiceAccountToken: false. Note that pod level takes precedence.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Secrets", + "id": "Cat-3" + } }, { "guid": "", "name": "Applications credentials in configuration files", "attributes": { - "armoBuiltin": true, + "actionRequired": "configuration", "microsoftMitreColumns": [ "Credential access", "Lateral Movement" @@ -6002,20 +16442,6 @@ "security", "compliance", "security-impact" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Credential access" - ] - }, - { - "attackTrack": "container", - "categories": [ - "Credential access" - ] - } ] }, "controlID": "C-0012", @@ -6023,49 +16449,225 @@ "description": "Attackers who have access to configuration files can steal the stored secrets and use them. This control checks if ConfigMaps or pod specifications have sensitive information in their configuration.", "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", "rules": [], - "baseScore": 8 + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Secrets", + "id": "Cat-3" + } + }, + { + "guid": "", + "name": "HostNetwork access", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Lateral Movement (Network)" + ] + } + ] + }, + "controlID": "C-0041", + "creationTime": "", + "description": "Potential attackers may gain access to a pod and inherit access to the entire host network. For example, in AWS case, they will have access to the entire VPC. 
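Two of the controls above, secrets as files (C-0207) and automatic service account token mapping (C-0034), come together in the pod spec. A minimal sketch with placeholder names:

```
# Hypothetical pod fragment: the secret is consumed as a mounted file
# rather than an environment variable, and the service account token
# is not auto-mounted.
apiVersion: v1
kind: Pod
metadata:
  name: billing
spec:
  automountServiceAccountToken: false   # C-0034
  containers:
    - name: app
      image: registry.example.com/billing:1.2
      volumeMounts:
        - name: db-credentials
          mountPath: /etc/billing/secrets
          readOnly: true
  volumes:
    - name: db-credentials
      secret:
        secretName: billing-db           # C-0207: secret as a file, not an env var
```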
This control identifies all the pods with host network access enabled.", + "remediation": "Only connect pods to host network when it is necessary. If not, set the hostNetwork field of the pod spec to false, or completely remove it (false is the default). Whitelist only those pods that must have access to host network by design.", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Network", + "id": "Cat-4" + } + } + }, + { + "guid": "", + "name": "Missing network policy", + "attributes": { + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Lateral Movement (Network)" + ] + } + ], + "isFixedByNetworkPolicy": true, + "controlTypeTags": [ + "security" + ] + }, + "controlID": "C-0260", + "creationTime": "", + "description": "This control detects workloads that have no NetworkPolicy configured to match their labels. If a network policy is not configured, it means that your applications might not have the necessary control over the traffic to and from the pods, possibly leading to a security vulnerability.", + "remediation": "Review the workloads identified by this control and assess whether it's necessary to configure a network policy for them.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Network", + "id": "Cat-4" + } + }, + { + "guid": "", + "name": "Container hostPort", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "devops" + ] + }, + "controlID": "C-0044", + "creationTime": "", + "description": "Configuring hostPort requires a particular port number. If two objects specify the same HostPort, they cannot be deployed to the same node. This may prevent the second object from starting, even if Kubernetes tries to reschedule it on another node, provided there are available nodes with a sufficient amount of resources. Also, if the number of replicas of such a workload is higher than the number of nodes, the deployment will consistently fail.", + "remediation": "Avoid usage of hostPort unless it is absolutely necessary, in which case define an appropriate exception. Use NodePort / ClusterIP instead.", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Network", + "id": "Cat-4" + } + }, + { + "guid": "", + "name": "Host PID/IPC privileges", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0038", + "creationTime": "", + "description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in the deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. 
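The host namespace and hostPort controls above (C-0041, C-0044, C-0038) all resolve to the same few pod-spec fields. A hedged sketch of the safe defaults, with placeholder names:

```
# Hypothetical pod fragment: host namespaces stay disabled and the
# container is exposed through a Service rather than a hostPort.
apiVersion: v1
kind: Pod
metadata:
  name: web
spec:
  hostNetwork: false   # C-0041
  hostPID: false       # C-0038
  hostIPC: false       # C-0038
  containers:
    - name: web
      image: registry.example.com/web:1.0
      ports:
        - containerPort: 8080   # no hostPort (C-0044); expose via a ClusterIP/NodePort Service
```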
This control identifies all pods using hostPID or hostIPC privileges.", + "remediation": "Remove hostPID and hostIPC from the yaml file(s) privileges unless they are absolutely necessary.", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + } + } + }, + { + "guid": "", + "name": "Insecure capabilities", + "attributes": { + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" + ] + } + ], + "actionRequired": "configuration", + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ] + }, + "controlID": "C-0046", + "creationTime": "", + "description": "Giving insecure or excessive capabilities to a container can increase the impact of the container compromise. This control identifies all the pods with dangerous capabilities (see documentation pages for details).", + "remediation": "Remove all insecure capabilities which are not necessary for the container.", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + } + } }, { "guid": "", "name": "Non-root containers", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security", "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } ] }, "controlID": "C-0013", "creationTime": "", - "description": "Potential attackers may gain access to a container and leverage its existing privileges to conduct an attack. Therefore, it is not recommended to deploy containers with root privileges unless it is absolutely necessary. This control identifies all the Pods running as root or can escalate to root.", - "remediation": "If your application does not need root privileges, make sure to define the runAsUser or runAsGroup under the PodSecurityContext and use user ID 1000 or higher. Do not turn on allowPrivlegeEscalation bit and make sure runAsNonRoot is true.", + "description": "Potential attackers may gain access to a container and leverage its existing privileges to conduct an attack. Therefore, it is not recommended to deploy containers with root privileges unless it is absolutely necessary. This control identifies all the pods running as root or can escalate to root.", + "remediation": "If your application does not need root privileges, make sure to define runAsNonRoot as true or explicitly set the runAsUser using ID 1000 or higher under the PodSecurityContext or container securityContext. 
In addition, set an explicit value for runAsGroup using ID 1000 or higher.", "rules": [], - "baseScore": 6 + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + } + } }, { "guid": "", "name": "Allow privilege escalation", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } + "compliance", + "smartRemediation" ] }, "controlID": "C-0016", @@ -6073,22 +16675,1093 @@ "description": "Attackers may gain access to a container and uplift its privilege to enable excessive capabilities.", "remediation": "If your application does not need it, make sure the allowPrivilegeEscalation field of the securityContext is set to false.", "rules": [], - "baseScore": 6 + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + } + } }, { "guid": "", "name": "Immutable container filesystem", "attributes": { - "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ], + "attackTracks": [ + { + "categories": [ + "Persistence" + ], + "attackTrack": "workload-external-track" + } + ] + }, + "controlID": "C-0017", + "creationTime": "", + "description": "Mutable container filesystem can be abused to inject malicious code or data into containers. Use immutable (read-only) filesystem to limit potential attacks.", + "remediation": "Set the filesystem of the container to read-only when possible (pod securityContext, readOnlyRootFilesystem: true). If containers application needs to write into the filesystem, it is recommended to mount secondary filesystems for specific directories where application require write access.", + "rules": [], + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + } + } + }, + { + "guid": "", + "name": "Linux hardening", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0055", + "creationTime": "", + "description": "Containers may be given more privileges than they actually need. This can increase the potential impact of a container compromise.", + "remediation": "You can use AppArmor, Seccomp, SELinux and Linux Capabilities mechanisms to restrict containers abilities to utilize unwanted privileges.", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + } + } + }, + { + "guid": "", + "name": "Privileged container", + "attributes": { + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "controlTypeTags": [ + "security", + "smartRemediation" + ] + }, + "controlID": "C-0057", + "creationTime": "", + "description": "Potential attackers may gain access to privileged containers and inherit access to the host resources. Therefore, it is not recommended to deploy privileged containers unless it is absolutely necessary. This control identifies all the privileged Pods.", + "remediation": "Remove privileged capabilities by setting the securityContext.privileged to false. 
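Several of the workload controls above (non-root containers, allowPrivilegeEscalation, immutable filesystem, insecure capabilities, Linux hardening, privileged containers) converge on the securityContext. A hedged sketch of a hardened container spec; the image and IDs are placeholders:

```
# Hypothetical hardened pod fragment covering C-0013, C-0016, C-0017,
# C-0046, C-0055 and C-0057.
apiVersion: v1
kind: Pod
metadata:
  name: hardened-app
spec:
  securityContext:
    runAsNonRoot: true          # C-0013
    runAsUser: 1000
    runAsGroup: 1000
    seccompProfile:
      type: RuntimeDefault      # C-0055: Linux hardening
  containers:
    - name: app
      image: registry.example.com/app:1.0
      securityContext:
        privileged: false                  # C-0057
        allowPrivilegeEscalation: false    # C-0016
        readOnlyRootFilesystem: true       # C-0017
        capabilities:
          drop: ["ALL"]                    # C-0046
```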
If you must deploy a Pod as privileged, add other restriction to it, such as network policy, Seccomp etc and still remove all unnecessary capabilities. Use the exception mechanism to remove unnecessary notifications.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + } + } + }, + { + "guid": "", + "name": "Ensure CPU limits are set", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops", + "security" + ], + "attackTracks": [ + { + "attackTrack": "service-destruction", + "categories": [ + "Denial of service" + ] + } + ] + }, + "controlID": "C-0270", + "creationTime": "", + "description": "This control identifies all Pods for which the CPU limits are not set.", + "remediation": "Set the CPU limits or use exception mechanism to avoid unnecessary notifications.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Resource management", + "id": "Cat-7" + } + } + }, + { + "guid": "", + "name": "Ensure memory limits are set", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops", + "security" + ], + "attackTracks": [ + { + "attackTrack": "service-destruction", + "categories": [ + "Denial of service" + ] + } + ] + }, + "controlID": "C-0271", + "creationTime": "", + "description": "This control identifies all Pods for which the memory limits are not set.", + "remediation": "Set the memory limits or use exception mechanism to avoid unnecessary notifications.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Resource management", + "id": "Cat-7" + } + } + } + ], + "controlsIDs": [ + "C-0078", + "C-0045", + "C-0048", + "C-0257", + "C-0207", + "C-0034", + "C-0012", + "C-0041", + "C-0260", + "C-0044", + "C-0038", + "C-0046", + "C-0013", + "C-0016", + "C-0017", + "C-0055", + "C-0057", + "C-0270", + "C-0271" + ] + }, + { + "guid": "", + "name": "ClusterScan", + "attributes": { + "builtin": true + }, + "creationTime": "", + "description": "Framework for scanning a cluster", + "typeTags": [ + "security" + ], + "controls": [ + { + "guid": "", + "name": "Secret/etcd encryption enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0066", + "creationTime": "", + "description": "All Kubernetes Secrets are stored primarily in etcd therefore it is important to encrypt it.", + "remediation": "Turn on the etcd encryption in your cluster, for more see the vendor documentation.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "RBAC enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0088", + "creationTime": "", + "description": "RBAC is the most advanced and well accepted mode of authorizing users of the Kubernetes API", + "remediation": "Enable RBAC either in the API server configuration or with the Kubernetes provider API", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Audit 
logs enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0067", + "creationTime": "", + "description": "Audit logging is an important security feature in Kubernetes; it enables the operator to track requests to the cluster. It is important to use it so the operator has a record of events that happened in Kubernetes", + "remediation": "Turn on audit logging for your cluster. Look at the vendor guidelines for more details", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "API server insecure port is enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0005", + "creationTime": "", + "description": "The Kubernetes control plane API is running with the non-secure port enabled, which allows attackers to gain unprotected access to the cluster.", + "remediation": "Set the insecure-port flag of the API server to zero.", + "rules": [], + "baseScore": 9, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Anonymous access enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0262", + "creationTime": "", + "description": "Granting permissions to the system:unauthenticated or system:anonymous user is generally not recommended and can introduce security risks. Allowing unauthenticated access to your Kubernetes cluster can lead to unauthorized access, potential data breaches, and abuse of cluster resources.", + "remediation": "Review and modify your cluster's RBAC configuration to ensure that only authenticated and authorized users have appropriate permissions based on their roles and responsibilities within your system.", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1", + "subCategory": { + "name": "Supply chain", + "id": "Cat-6" + } + } + }, + { + "guid": "", + "name": "Authenticated user has sensitive permissions", + "controlID": "C-0265", + "creationTime": "", + "description": "Granting permissions to the system:authenticated group is generally not recommended and can introduce security risks. This control ensures that system:authenticated users do not have cluster-risking permissions.", + "remediation": "Review and modify your cluster's RBAC configuration to ensure that system:authenticated will have minimal permissions.", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1", + "subCategory": { + "name": "Supply chain", + "id": "Cat-6" + } + } + }, + { + "guid": "", + "name": "List Kubernetes secrets", + "attributes": { + "microsoftMitreColumns": [ + "Credential access" + ], + "rbacQuery": "Show who can access secrets", + "controlTypeTags": [ + "security-impact", + "compliance" + ] + }, + "controlID": "C-0015", + "creationTime": "", + "description": "Attackers who have permissions to access secrets can access sensitive information that might include credentials to various services. This control determines which user, group or service account can list/get secrets.", + "remediation": "Monitor and approve the list of users, groups and service accounts that can access secrets. 
Use exception mechanism to prevent repetitive the notifications.", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "Prevent containers from allowing command execution", + "attributes": { + "rbacQuery": "Show who can access into pods", + "controlTypeTags": [ + "compliance", + "security-impact" + ], + "microsoftMitreColumns": [ + "Execution" + ] + }, + "controlID": "C-0002", + "creationTime": "", + "description": "Attackers with relevant permissions can run malicious commands in the context of legitimate containers in the cluster using “kubectl exec” command. This control determines which subjects have permissions to use this command.", + "remediation": "It is recommended to prohibit “kubectl exec” command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "Roles with delete capabilities", + "attributes": { + "microsoftMitreColumns": [ + "Impact" + ], + "rbacQuery": "Data destruction", + "controlTypeTags": [ + "compliance" + ] + }, + "controlID": "C-0007", + "creationTime": "", + "description": "Attackers may attempt to destroy data and resources in the cluster. This includes deleting deployments, configurations, storage, and compute resources. This control identifies all subjects that can delete resources.", + "remediation": "You should follow the least privilege principle and minimize the number of subjects that can delete resources.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "Portforwarding privileges", + "attributes": { + "rbacQuery": "Port Forwarding", + "controlTypeTags": [ + "security-impact", + "compliance" + ] + }, + "controlID": "C-0063", + "creationTime": "", + "description": "Attackers with relevant RBAC permission can use “kubectl portforward” command to establish direct communication with pods from within the cluster or even remotely. Such communication will most likely bypass existing security measures in the cluster. This control determines which subjects have permissions to use this command.", + "remediation": "It is recommended to prohibit “kubectl portforward” command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "Validate admission controller (validating)", + "attributes": { + "microsoftMitreColumns": [ + "Credential access" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0036", + "creationTime": "", + "description": "Attackers can use validating webhooks to intercept and discover all the resources in the cluster. This control lists all the validating webhook configurations that must be verified.", + "remediation": "Ensure all the webhooks are necessary. 
Use exception mechanism to prevent repititive notifications.", + "rules": [], + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "Validate admission controller (mutating)", + "attributes": { + "microsoftMitreColumns": [ + "Persistence" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0039", + "creationTime": "", + "description": "Attackers may use mutating webhooks to intercept and modify all the resources in the cluster. This control lists all mutating webhook configurations that must be verified.", + "remediation": "Ensure all the webhooks are necessary. Use exception mechanism to prevent repititive notifications.", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "Administrative Roles", + "attributes": { + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "rbacQuery": "Show cluster_admin", + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0035", + "creationTime": "", + "description": "Attackers who have cluster admin permissions (can perform any action on any resource), can take advantage of their privileges for malicious activities. This control determines which subjects have cluster admin permissions.", + "remediation": "You should apply least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use subjects with such high permissions for daily operations.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "Minimize access to create pods", + "controlID": "C-0188", + "creationTime": "", + "description": "The ability to create pods in a namespace can provide a number of opportunities for privilege escalation, such as assigning privileged service accounts to these pods or mounting hostPaths with access to sensitive data (unless Pod Security Policies are implemented to restrict this access)\n\n As such, access to create new pods should be restricted to the smallest possible group of users.", + "remediation": "Where possible, remove `create` access to `pod` objects in the cluster.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "Minimize wildcard use in Roles and ClusterRoles", + "controlID": "C-0187", + "creationTime": "", + "description": "Kubernetes Roles and ClusterRoles provide access to resources based on sets of objects and actions that can be taken on those objects. 
It is possible to set either of these to be the wildcard \"\\*\" which matches all items.\n\n Use of wildcards is not optimal from a security perspective as it may allow for inadvertent access to be granted when new resources are added to the Kubernetes API either as CRDs or in later versions of the product.", + "remediation": "Where possible replace any use of wildcards in clusterroles and roles with specific objects or actions.", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "Applications credentials in configuration files", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "security-impact" + ], + "actionRequired": "configuration", + "microsoftMitreColumns": [ + "Credential access", + "Lateral Movement" + ] + }, + "controlID": "C-0012", + "creationTime": "", + "description": "Attackers who have access to configuration files can steal the stored secrets and use them. This control checks if ConfigMaps or pod specifications have sensitive information in their configuration.", + "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Secrets", + "id": "Cat-3" + } + }, + { + "guid": "", + "name": "Missing network policy", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Lateral Movement (Network)" + ] + } + ], + "isFixedByNetworkPolicy": true + }, + "controlID": "C-0260", + "creationTime": "", + "description": "This control detects workloads that has no NetworkPolicy configured in labels. If a network policy is not configured, it means that your applications might not have necessary control over the traffic to and from the pods, possibly leading to a security vulnerability.", + "remediation": "Review the workloads identified by this control and assess whether it's necessary to configure a network policy for them.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Network", + "id": "Cat-4" + } + }, + { + "guid": "", + "name": "External facing", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Initial Access" + ] + }, + { + "attackTrack": "service-destruction", + "categories": [ + "Initial Access" + ] + }, + { + "attackTrack": "external-workload-with-cluster-takeover-roles", + "categories": [ + "Initial Access" + ] + }, + { + "attackTrack": "external-database-without-authentication", + "categories": [ + "Initial Access" + ] + }, + { + "attackTrack": "workload-unauthenticated-service", + "categories": [ + "Initial Access" + ] + } + ] + }, + "controlID": "C-0256", + "creationTime": "", + "description": "This control detect workloads that are exposed on Internet through a Service (NodePort or LoadBalancer) or Ingress. 
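Control C-0260 above ("Missing network policy") asks for a NetworkPolicy to be attached to each workload. As an illustrative aside that is not part of this testdata, a minimal default-deny policy of the kind such a remediation usually starts from could look like the sketch below; the namespace name is a placeholder.

```
{
  "apiVersion": "networking.k8s.io/v1",
  "kind": "NetworkPolicy",
  "metadata": {
    "name": "default-deny-all",
    "namespace": "example-namespace"
  },
  "spec": {
    "podSelector": {},
    "policyTypes": [
      "Ingress",
      "Egress"
    ]
  }
}
```

The empty podSelector makes the policy apply to every pod in the namespace; per-workload allow rules would then be layered on top of this baseline.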
It fails in case it find workloads connected with these resources.", + "remediation": "The user can evaluate its exposed resources and apply relevant changes wherever needed.", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + } + }, + { + "guid": "", + "name": "Host PID/IPC privileges", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0038", + "creationTime": "", + "description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all pods using hostPID or hostIPC privileges.", + "remediation": "Remove hostPID and hostIPC from the yaml file(s) privileges unless they are absolutely necessary.", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + } + } + }, + { + "guid": "", + "name": "HostNetwork access", + "attributes": { "controlTypeTags": [ "security", "compliance" ], "attackTracks": [ { - "attackTrack": "container", + "attackTrack": "workload-external-track", + "categories": [ + "Lateral Movement (Network)" + ] + } + ] + }, + "controlID": "C-0041", + "creationTime": "", + "description": "Potential attackers may gain access to a pod and inherit access to the entire host network. For example, in AWS case, they will have access to the entire VPC. This control identifies all the pods with host network access enabled.", + "remediation": "Only connect pods to host network when it is necessary. If not, set the hostNetwork field of the pod spec to false, or completely remove it (false is the default). Whitelist only those pods that must have access to host network by design.", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Network", + "id": "Cat-4" + } + } + }, + { + "guid": "", + "name": "HostPath mount", + "attributes": { + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" + ] + } + ], + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ] + }, + "controlID": "C-0048", + "creationTime": "", + "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host. This control identifies all the pods using hostPath mount.", + "remediation": "Remove hostPath mounts unless they are absolutely necessary and use exception mechanism to remove notifications.", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Storage", + "id": "Cat-8" + } + } + }, + { + "guid": "", + "name": "Privileged container", + "attributes": { + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "controlTypeTags": [ + "security", + "smartRemediation" + ] + }, + "controlID": "C-0057", + "creationTime": "", + "description": "Potential attackers may gain access to privileged containers and inherit access to the host resources. 
Therefore, it is not recommended to deploy privileged containers unless it is absolutely necessary. This control identifies all the privileged Pods.", + "remediation": "Remove privileged capabilities by setting the securityContext.privileged to false. If you must deploy a Pod as privileged, add other restriction to it, such as network policy, Seccomp etc and still remove all unnecessary capabilities. Use the exception mechanism to remove unnecessary notifications.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + } + } + }, + { + "guid": "", + "name": "Non-root containers", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0013", + "creationTime": "", + "description": "Potential attackers may gain access to a container and leverage its existing privileges to conduct an attack. Therefore, it is not recommended to deploy containers with root privileges unless it is absolutely necessary. This control identifies all the pods running as root or can escalate to root.", + "remediation": "If your application does not need root privileges, make sure to define runAsNonRoot as true or explicitly set the runAsUser using ID 1000 or higher under the PodSecurityContext or container securityContext. In addition, set an explicit value for runAsGroup using ID 1000 or higher.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + } + } + } + ], + "controlsIDs": [ + "C-0066", + "C-0088", + "C-0067", + "C-0005", + "C-0262", + "C-0265", + "C-0015", + "C-0002", + "C-0007", + "C-0063", + "C-0036", + "C-0039", + "C-0035", + "C-0188", + "C-0187", + "C-0012", + "C-0260", + "C-0256", + "C-0038", + "C-0041", + "C-0048", + "C-0057", + "C-0013" + ] + }, + { + "guid": "", + "name": "security", + "attributes": { + "builtin": true + }, + "creationTime": "", + "description": "Controls that are used to assess security threats.", + "typeTags": [ + "security" + ], + "controls": [ + { + "guid": "", + "name": "API server insecure port is enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0005", + "creationTime": "", + "description": "Kubernetes control plane API is running with non-secure port enabled which allows attackers to gain unprotected access to the cluster.", + "remediation": "Set the insecure-port flag of the API server to zero.", + "rules": [], + "baseScore": 9, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Applications credentials in configuration files", + "attributes": { + "actionRequired": "configuration", + "microsoftMitreColumns": [ + "Credential access", + "Lateral Movement" + ], + "controlTypeTags": [ + "security", + "compliance", + "security-impact" + ] + }, + "controlID": "C-0012", + "creationTime": "", + "description": "Attackers who have access to configuration files can steal the stored secrets and use them. 
This control checks if ConfigMaps or pod specifications have sensitive information in their configuration.", + "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Secrets", + "id": "Cat-3" + } + }, + { + "guid": "", + "name": "Non-root containers", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0013", + "creationTime": "", + "description": "Potential attackers may gain access to a container and leverage its existing privileges to conduct an attack. Therefore, it is not recommended to deploy containers with root privileges unless it is absolutely necessary. This control identifies all the pods running as root or can escalate to root.", + "remediation": "If your application does not need root privileges, make sure to define runAsNonRoot as true or explicitly set the runAsUser using ID 1000 or higher under the PodSecurityContext or container securityContext. In addition, set an explicit value for runAsGroup using ID 1000 or higher.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + } + } + }, + { + "guid": "", + "name": "Allow privilege escalation", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ] + }, + "controlID": "C-0016", + "creationTime": "", + "description": "Attackers may gain access to a container and uplift its privilege to enable excessive capabilities.", + "remediation": "If your application does not need it, make sure the allowPrivilegeEscalation field of the securityContext is set to false.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + } + } + }, + { + "guid": "", + "name": "Immutable container filesystem", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", "categories": [ - "Execution", "Persistence" ] } @@ -6097,25 +17770,23 @@ "controlID": "C-0017", "creationTime": "", "description": "Mutable container filesystem can be abused to inject malicious code or data into containers. Use immutable (read-only) filesystem to limit potential attacks.", - "remediation": "Set the filesystem of the container to read-only when possible (POD securityContext, readOnlyRootFilesystem: true). If containers application needs to write into the filesystem, it is recommended to mount secondary filesystems for specific directories where application require write access.", + "remediation": "Set the filesystem of the container to read-only when possible (pod securityContext, readOnlyRootFilesystem: true). 
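As a sketch of the readOnlyRootFilesystem remediation just described for C-0017 (illustrative only, not part of the policy data; the pod name and image are placeholders), the root filesystem is made read-only and a dedicated emptyDir volume is mounted for the one directory the application still needs to write to:

```
{
  "apiVersion": "v1",
  "kind": "Pod",
  "metadata": {
    "name": "readonly-rootfs-example"
  },
  "spec": {
    "containers": [
      {
        "name": "app",
        "image": "busybox:1.36",
        "command": ["sh", "-c", "sleep 3600"],
        "securityContext": {
          "readOnlyRootFilesystem": true
        },
        "volumeMounts": [
          {
            "name": "tmp",
            "mountPath": "/tmp"
          }
        ]
      }
    ],
    "volumes": [
      {
        "name": "tmp",
        "emptyDir": {}
      }
    ]
  }
}
```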
If containers application needs to write into the filesystem, it is recommended to mount secondary filesystems for specific directories where application require write access.", "rules": [], - "baseScore": 3 - }, - { - "guid": "", - "name": "Ingress and Egress blocked", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "compliance" + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster", + "file" ] }, - "controlID": "C-0030", - "creationTime": "", - "description": "Disable Ingress and Egress traffic on all pods wherever possible. It is recommended to define restrictive network policy on all new PODs, and then enable sources/destinations that this POD must communicate with.", - "remediation": "Define a network policy that restricts ingress and egress connections.", - "rules": [], - "baseScore": 6 + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + } + } }, { "guid": "", @@ -6123,47 +17794,38 @@ "attributes": { "controlTypeTags": [ "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Credential access", - "Impact - K8s API access" - ] - } - ], - "armoBuiltin": true + "compliance", + "smartRemediation" + ] }, "controlID": "C-0034", "creationTime": "", - "description": "Potential attacker may gain access to a POD and steal its service account token. Therefore, it is recommended to disable automatic mapping of the service account tokens in service account configuration and enable it only for PODs that need to use them.", - "remediation": "Disable automatic mounting of service account tokens to PODs either at the service account level or at the individual POD level, by specifying the automountServiceAccountToken: false. Note that POD level takes precedence.", + "description": "Potential attacker may gain access to a pod and steal its service account token. Therefore, it is recommended to disable automatic mapping of the service account tokens in service account configuration and enable it only for pods that need to use them.", + "remediation": "Disable automatic mounting of service account tokens to pods either at the service account level or at the individual pod level, by specifying the automountServiceAccountToken: false. Note that pod level takes precedence.", "rules": [], - "baseScore": 6 + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Secrets", + "id": "Cat-3" + } }, { "guid": "", - "name": "Cluster-admin binding", + "name": "Administrative Roles", "attributes": { + "microsoftMitreColumns": [ + "Privilege escalation" + ], "rbacQuery": "Show cluster_admin", "controlTypeTags": [ "security", "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Impact - data destruction", - "Impact - service injection" - ] - } - ], - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Privilege escalation" ] }, "controlID": "C-0035", @@ -6171,77 +17833,94 @@ "description": "Attackers who have cluster admin permissions (can perform any action on any resource), can take advantage of their privileges for malicious activities. This control determines which subjects have cluster admin permissions.", "remediation": "You should apply least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. 
Don't use subjects with such high permissions for daily operations.", "rules": [], - "baseScore": 6 + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } }, { "guid": "", "name": "Host PID/IPC privileges", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security", "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } ] }, "controlID": "C-0038", "creationTime": "", - "description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all PODs using hostPID or hostIPC privileges.", + "description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all pods using hostPID or hostIPC privileges.", "remediation": "Remove hostPID and hostIPC from the yaml file(s) privileges unless they are absolutely necessary.", "rules": [], - "baseScore": 7 + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + } + } }, { "guid": "", "name": "HostNetwork access", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security", "compliance" ], "attackTracks": [ { - "attackTrack": "container", + "attackTrack": "workload-external-track", "categories": [ - "Discovery", - "Lateral movement", - "Impact - service access" + "Lateral Movement (Network)" ] } ] }, "controlID": "C-0041", "creationTime": "", - "description": "Potential attackers may gain access to a POD and inherit access to the entire host network. For example, in AWS case, they will have access to the entire VPC. This control identifies all the PODs with host network access enabled.", - "remediation": "Only connect PODs to host network when it is necessary. If not, set the hostNetwork field of the pod spec to false, or completely remove it (false is the default). Whitelist only those PODs that must have access to host network by design.", + "description": "Potential attackers may gain access to a pod and inherit access to the entire host network. For example, in AWS case, they will have access to the entire VPC. This control identifies all the pods with host network access enabled.", + "remediation": "Only connect pods to host network when it is necessary. If not, set the hostNetwork field of the pod spec to false, or completely remove it (false is the default). 
Whitelist only those pods that must have access to host network by design.", "rules": [], - "baseScore": 7 + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Network", + "id": "Cat-4" + } + } }, { "guid": "", "name": "Container hostPort", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security", "compliance", "devops" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access" - ] - } ] }, "controlID": "C-0044", @@ -6249,105 +17928,155 @@ "description": "Configuring hostPort requires a particular port number. If two objects specify the same HostPort, they could not be deployed to the same node. It may prevent the second object from starting, even if Kubernetes will try reschedule it on another node, provided there are available nodes with sufficient amount of resources. Also, if the number of replicas of such workload is higher than the number of nodes, the deployment will consistently fail.", "remediation": "Avoid usage of hostPort unless it is absolutely necessary, in which case define appropriate exception. Use NodePort / ClusterIP instead.", "rules": [], - "baseScore": 4 + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Network", + "id": "Cat-4" + } + }, + { + "guid": "", + "name": "Writable hostPath mount", + "attributes": { + "microsoftMitreColumns": [ + "Persistence", + "Lateral Movement" + ], + "controlTypeTags": [ + "security", + "compliance", + "devops", + "security-impact", + "smartRemediation" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" + ] + } + ] + }, + "controlID": "C-0045", + "creationTime": "", + "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host and gain persistence.", + "remediation": "Refrain from using the hostPath mount or use the exception mechanism to remove unnecessary notifications.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Storage", + "id": "Cat-8" + } + } }, { "guid": "", "name": "Insecure capabilities", "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], "attackTracks": [ { - "attackTrack": "container", + "attackTrack": "workload-external-track", "categories": [ - "Privilege escalation" + "Privilege Escalation (Node)" ] } + ], + "actionRequired": "configuration", + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" ] }, "controlID": "C-0046", "creationTime": "", - "description": "Giving insecure or excessive capabilities to a container can increase the impact of the container compromise. This control identifies all the PODs with dangerous capabilities (see documentation pages for details).", + "description": "Giving insecure or excessive capabilities to a container can increase the impact of the container compromise. 
This control identifies all the pods with dangerous capabilities (see documentation pages for details).", "remediation": "Remove all insecure capabilities which are not necessary for the container.", "rules": [], - "baseScore": 7 + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + } + } }, { "guid": "", - "name": "Cluster internal networking", + "name": "HostPath mount", "attributes": { - "armoBuiltin": true, + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" + ] + } + ], "microsoftMitreColumns": [ - "Lateral movement" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Discovery", - "Lateral movement" - ] - } + "Privilege escalation" ] }, - "controlID": "C-0054", + "controlID": "C-0048", "creationTime": "", - "description": "If no network policy is defined, attackers who gain access to a container may use it to move laterally in the cluster. This control lists namespaces in which no network policy is defined.", - "remediation": "Define Kubernetes network policies or use alternative products to protect cluster network.", + "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host. This control identifies all the pods using hostPath mount.", + "remediation": "Remove hostPath mounts unless they are absolutely necessary and use exception mechanism to remove notifications.", "rules": [], - "baseScore": 4 - }, - { - "guid": "", - "name": "Linux hardening", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster", + "file" ] }, - "controlID": "C-0055", - "creationTime": "", - "description": "Containers may be given more privileges than they actually need. This can increase the potential impact of a container compromise.", - "remediation": "You can use AppArmor, Seccomp, SELinux and Linux Capabilities mechanisms to restrict containers abilities to utilize unwanted privileges.", - "rules": [], - "baseScore": 4 + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Storage", + "id": "Cat-8" + } + } }, { "guid": "", "name": "Privileged container", "attributes": { - "armoBuiltin": true, "microsoftMitreColumns": [ "Privilege escalation" ], "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } + "security", + "smartRemediation" ] }, "controlID": "C-0057", @@ -6355,76 +18084,29 @@ "description": "Potential attackers may gain access to privileged containers and inherit access to the host resources. Therefore, it is not recommended to deploy privileged containers unless it is absolutely necessary. This control identifies all the privileged Pods.", "remediation": "Remove privileged capabilities by setting the securityContext.privileged to false. If you must deploy a Pod as privileged, add other restriction to it, such as network policy, Seccomp etc and still remove all unnecessary capabilities. 
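The remediations for C-0046 and C-0057 above both come down to tightening the container securityContext. A minimal sketch of such a context (illustrative only, not taken from the regolibrary; the numeric IDs are arbitrary placeholders) drops all capabilities, disables privileged mode and privilege escalation, and enables the runtime's default seccomp profile:

```
{
  "securityContext": {
    "privileged": false,
    "allowPrivilegeEscalation": false,
    "runAsNonRoot": true,
    "runAsUser": 10001,
    "runAsGroup": 10001,
    "capabilities": {
      "drop": [
        "ALL"
      ]
    },
    "seccompProfile": {
      "type": "RuntimeDefault"
    }
  }
}
```

This fragment belongs under each entry of spec.containers[] in the workload manifest; it also covers the runAsNonRoot and allowPrivilegeEscalation checks (C-0013, C-0016) described earlier in this hunk.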
Use the exception mechanism to remove unnecessary notifications.", "rules": [], - "baseScore": 8 - }, - { - "guid": "", - "name": "CVE-2021-25741 - Using symlink for arbitrary host file system access.", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Persistence", - "Impact - Data access in container" - ] - } + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" ] }, - "controlID": "C-0058", - "creationTime": "", - "description": "A user may be able to create a container with subPath or subPathExpr volume mounts to access files & directories anywhere on the host filesystem. Following Kubernetes versions are affected: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. This control checks the vulnerable versions and the actual usage of the subPath feature in all Pods in the cluster. If you want to learn more about the CVE, please refer to the CVE link: https://nvd.nist.gov/vuln/detail/CVE-2021-25741", - "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, or remove any existing Pods using subPath or subPathExpr feature.", - "rules": [], - "baseScore": 6 + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + } + } }, { "guid": "", - "name": "CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability", + "name": "Secret/etcd encryption enabled", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security", "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access", - "Execution" - ] - } - ] - }, - "controlID": "C-0059", - "creationTime": "", - "description": "Security issue in ingress-nginx where a user that can create or update ingress objects can use the custom snippets feature to obtain all secrets in the cluster (see more at https://github.com/kubernetes/ingress-nginx/issues/7837)", - "remediation": "To mitigate this vulnerability: 1. Upgrade to a version that allows mitigation (>= v0.49.1 or >= v1.0.1), 2. Set allow-snippet-annotations to false in your ingress-nginx ConfigMap based on how you deploy ingress-nginx", - "rules": [], - "baseScore": 8 - }, - { - "guid": "", - "name": "Secret/ETCD encryption enabled", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "node", - "categories": [ - "Impact" - ] - } ] }, "controlID": "C-0066", @@ -6432,74 +18114,24 @@ "description": "All Kubernetes Secrets are stored primarily in etcd therefore it is important to encrypt it.", "remediation": "Turn on the etcd encryption in your cluster, for more see the vendor documentation.", "rules": [], - "baseScore": 6 - }, - { - "guid": "", - "name": "Audit logs enabled", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Defense evasion - KubeAPI" - ] - } + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" ] }, - "controlID": "C-0067", - "creationTime": "", - "description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. 
It is important to use it so the operator has a record of events happened in Kubernetes", - "remediation": "Turn on audit logging for your cluster. Look at the vendor guidelines for more details", - "rules": [], - "baseScore": 5 - }, - { - "guid": "", - "name": "PSP enabled", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "categories": [ - "Impact - service injection" - ], - "attackTrack": "kubeapi" - } - ] - }, - "controlID": "C-0068", - "creationTime": "", - "description": "PSP enable fine-grained authorization of pod creation and it is important to enable it", - "remediation": "Turn Pod Security Policies on in your cluster, if you use other admission controllers to control the behavior that PSP controls, exclude this control from your scans", - "rules": [], - "baseScore": 1 + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", "name": "Disable anonymous access to Kubelet service", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security", "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Initial access" - ] - } ] }, "controlID": "C-0069", @@ -6507,24 +18139,24 @@ "description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", "remediation": "Start the kubelet with the --anonymous-auth=false flag.", "rules": [], - "baseScore": 10 + "baseScore": 10, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", "name": "Enforce Kubelet client TLS authentication", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security", "compliance" - ], - "attackTracks": [ - { - "attackTrack": "node", - "categories": [ - "Initial access" - ] - } ] }, "controlID": "C-0070", @@ -6532,329 +18164,2162 @@ "description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", "rules": [], - "baseScore": 9 + "baseScore": 9, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Container runtime socket mounted", + "attributes": { + "controlTypeTags": [ + "devops", + "smartRemediation" + ] + }, + "controlID": "C-0074", + "creationTime": "", + "description": "Mounting Container runtime socket (Unix socket) enables container to access Container runtime, retrieve sensitive information and execute commands, if Container runtime is available. 
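The two kubelet controls above (C-0069, C-0070) are remediated with the --anonymous-auth=false and --client-ca-file flags; the same settings can also be expressed in a KubeletConfiguration file. A minimal sketch of that alternative (not part of this testdata; the CA path is a kubeadm-style placeholder):

```
{
  "apiVersion": "kubelet.config.k8s.io/v1beta1",
  "kind": "KubeletConfiguration",
  "authentication": {
    "anonymous": {
      "enabled": false
    },
    "x509": {
      "clientCAFile": "/etc/kubernetes/pki/ca.crt"
    }
  },
  "authorization": {
    "mode": "Webhook"
  }
}
```

The kubelet would then be started with --config pointing at this file; the flag-based remediation quoted in the controls remains equally valid.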
This control identifies pods that attempt to mount Container runtime socket for accessing Container runtime.", + "remediation": "Remove container runtime socket mount request or define an exception.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } + }, + { + "guid": "", + "name": "Apply Security Context to Your Pods and Containers", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" + ] + } + ] + }, + "controlID": "C-0211", + "creationTime": "", + "description": "Apply Security Context to Your Pods and Containers", + "remediation": "Follow the Kubernetes documentation and apply security contexts to your pods. For a suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker Containers.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "Workload with secret access", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Secret Access" + ] + } + ] + }, + "controlID": "C-0255", + "creationTime": "", + "description": "This control identifies workloads that have mounted secrets. Workloads with secret access can potentially expose sensitive information and increase the risk of unauthorized access to critical resources.", + "remediation": "Review the workloads identified by this control and assess whether it's necessary to mount these secrets. Remove secret access from workloads that don't require it or ensure appropriate access controls are in place to protect sensitive information.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } + }, + { + "guid": "", + "name": "External facing", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Initial Access" + ] + }, + { + "attackTrack": "service-destruction", + "categories": [ + "Initial Access" + ] + }, + { + "attackTrack": "external-workload-with-cluster-takeover-roles", + "categories": [ + "Initial Access" + ] + }, + { + "categories": [ + "Initial Access" + ], + "attackTrack": "external-database-without-authentication" + }, + { + "attackTrack": "workload-unauthenticated-service", + "categories": [ + "Initial Access" + ] + } + ] + }, + "controlID": "C-0256", + "creationTime": "", + "description": "This control detect workloads that are exposed on Internet through a Service (NodePort or LoadBalancer) or Ingress. It fails in case it find workloads connected with these resources.", + "remediation": "The user can evaluate its exposed resources and apply relevant changes wherever needed.", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + } + }, + { + "guid": "", + "name": "Workload with PVC access", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Data Collection" + ] + } + ] + }, + "controlID": "C-0257", + "creationTime": "", + "description": "This control detects workloads that have mounted PVC. 
Workloads with PVC access can potentially expose sensitive information and elevate the risk of unauthorized access to critical resources.", + "remediation": "Review the workloads identified by this control and assess whether it's necessary to mount these PVCs. Remove PVC access from workloads that don't require it or ensure appropriate access controls are in place to protect sensitive information.", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Storage", + "id": "Cat-8" + } + } + }, + { + "guid": "", + "name": "Workload with configMap access", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Data Collection" + ] + } + ] + }, + "controlID": "C-0258", + "creationTime": "", + "description": "This control detects workloads that have mounted ConfigMaps. Workloads with ConfigMap access can potentially expose sensitive information and elevate the risk of unauthorized access to critical resources.", + "remediation": "Review the workloads identified by this control and assess whether it's necessary to mount these configMaps. Remove configMaps access from workloads that don't require it or ensure appropriate access controls are in place to protect sensitive information.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } + }, + { + "guid": "", + "name": "Workload with credential access", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Credential access" + ] + } + ] + }, + "controlID": "C-0259", + "creationTime": "", + "description": "This control checks if workloads specifications have sensitive information in their environment variables.", + "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } + }, + { + "guid": "", + "name": "Missing network policy", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "categories": [ + "Lateral Movement (Network)" + ], + "attackTrack": "workload-external-track" + } + ], + "isFixedByNetworkPolicy": true + }, + "controlID": "C-0260", + "creationTime": "", + "description": "This control detects workloads that has no NetworkPolicy configured in labels. If a network policy is not configured, it means that your applications might not have necessary control over the traffic to and from the pods, possibly leading to a security vulnerability.", + "remediation": "Review the workloads identified by this control and assess whether it's necessary to configure a network policy for them.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Network", + "id": "Cat-4" + } + }, + { + "guid": "", + "name": "ServiceAccount token mounted", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Credential access" + ] + } + ] + }, + "controlID": "C-0261", + "creationTime": "", + "description": "Potential attacker may gain access to a workload and steal its ServiceAccount token. 
Therefore, it is recommended to disable automatic mapping of the ServiceAccount tokens in ServiceAccount configuration. Enable it only for workloads that need to use them and ensure that this ServiceAccount is not bound to an unnecessary ClusterRoleBinding or RoleBinding.", + "remediation": "Disable automatic mounting of service account tokens to pods at the workload level, by specifying automountServiceAccountToken: false. Enable it only for workloads that need to use them and ensure that this ServiceAccount doesn't have unnecessary permissions", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } + }, + { + "guid": "", + "name": "Anonymous access enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0262", + "creationTime": "", + "description": "Granting permissions to the system:unauthenticated or system:anonymous user is generally not recommended and can introduce security risks. Allowing unauthenticated access to your Kubernetes cluster can lead to unauthorized access, potential data breaches, and abuse of cluster resources.", + "remediation": "Review and modify your cluster's RBAC configuration to ensure that only authenticated and authorized users have appropriate permissions based on their roles and responsibilities within your system.", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1", + "subCategory": { + "name": "Supply chain", + "id": "Cat-6" + } + } + }, + { + "guid": "", + "name": "PersistentVolume without encyption", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0264", + "creationTime": "", + "description": "This control detects PersistentVolumes without encyption", + "remediation": "Enable encryption on the PersistentVolume using the configuration in StorageClass", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster" + ] + } + }, + { + "guid": "", + "name": "Authenticated user has sensitive permissions", + "controlID": "C-0265", + "creationTime": "", + "description": "Granting permissions to the system:authenticated group is generally not recommended and can introduce security risks. This control ensures that system:authenticated users do not have cluster risking permissions.", + "remediation": "Review and modify your cluster's RBAC configuration to ensure that system:authenticated will have minimal permissions.", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1", + "subCategory": { + "name": "Supply chain", + "id": "Cat-6" + } + } + }, + { + "guid": "", + "name": "Workload with cluster takeover roles", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "displayRelatedResources": true, + "clickableResourceKind": "ServiceAccount", + "attackTrack": "external-workload-with-cluster-takeover-roles", + "categories": [ + "Cluster Access" + ] + } + ] + }, + "controlID": "C-0267", + "creationTime": "", + "description": "Cluster takeover roles include workload creation or update and secret access. They can easily lead to super privileges in the cluster. If an attacker can exploit this workload then the attacker can take over the cluster using the RBAC privileges this workload is assigned to.", + "remediation": "You should apply least privilege principle. 
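For the C-0261 remediation just quoted, a minimal sketch of the workload-level setting (illustrative only; pod name and image are placeholders):

```
{
  "apiVersion": "v1",
  "kind": "Pod",
  "metadata": {
    "name": "no-token-example"
  },
  "spec": {
    "automountServiceAccountToken": false,
    "containers": [
      {
        "name": "app",
        "image": "busybox:1.36",
        "command": [
          "sh",
          "-c",
          "sleep 3600"
        ]
      }
    ]
  }
}
```

The same field can instead be set on the ServiceAccount object, with the pod-level value taking precedence, as control C-0034 earlier in this hunk notes.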
Make sure each service account has only the permissions that are absolutely necessary.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "Ensure CPU limits are set", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops", + "security" + ], + "attackTracks": [ + { + "attackTrack": "service-destruction", + "categories": [ + "Denial of service" + ] + } + ] + }, + "controlID": "C-0270", + "creationTime": "", + "description": "This control identifies all Pods for which the CPU limits are not set.", + "remediation": "Set the CPU limits or use exception mechanism to avoid unnecessary notifications.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Resource management", + "id": "Cat-7" + } + } + }, + { + "guid": "", + "name": "Ensure memory limits are set", + "attributes": { + "attackTracks": [ + { + "attackTrack": "service-destruction", + "categories": [ + "Denial of service" + ] + } + ], + "controlTypeTags": [ + "compliance", + "devops", + "security" + ] + }, + "controlID": "C-0271", + "creationTime": "", + "description": "This control identifies all Pods for which the memory limits are not set.", + "remediation": "Set the memory limits or use exception mechanism to avoid unnecessary notifications.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Resource management", + "id": "Cat-7" + } + } + }, + { + "guid": "", + "name": "Workload with administrative roles", + "controlID": "C-0272", + "creationTime": "", + "description": "This control identifies workloads where the associated service accounts have roles that grant administrative-level access across the cluster. Granting a workload such expansive permissions equates to providing it cluster admin roles. This level of access can pose a significant security risk, as it allows the workload to perform any action on any resource, potentially leading to unauthorized data access or cluster modifications.", + "remediation": "You should apply least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use service accounts with such high permissions for daily operations.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "Outdated Kubernetes version", + "controlID": "C-0273", + "creationTime": "", + "description": "Identifies Kubernetes clusters running on outdated versions. Using old versions can expose clusters to known vulnerabilities, compatibility issues, and miss out on improved features and security patches. Keeping Kubernetes up-to-date is crucial for maintaining security and operational efficiency.", + "remediation": "Regularly update Kubernetes clusters to the latest stable version to mitigate known vulnerabilities and enhance functionality. Plan and execute upgrades considering workload compatibility, testing in a staging environment before applying changes to production. 
Follow Kubernetes' best practices for version management and upgrades to ensure a smooth transition and minimal downtime.", + "rules": [], + "baseScore": 2, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Exposure to internet via Gateway API", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "categories": [ + "Initial Access" + ], + "attackTrack": "workload-external-track" + }, + { + "attackTrack": "service-destruction", + "categories": [ + "Initial Access" + ] + }, + { + "attackTrack": "external-workload-with-cluster-takeover-roles", + "categories": [ + "Initial Access" + ] + }, + { + "attackTrack": "workload-unauthenticated-service", + "categories": [ + "Initial Access" + ] + } + ] + }, + "controlID": "C-0266", + "creationTime": "", + "description": "This control detect workloads that are exposed on Internet through a Gateway API (HTTPRoute,TCPRoute, UDPRoute) or Istio Gateway. It fails in case it find workloads connected with these resources.", + "remediation": "The user can evaluate its exposed resources and apply relevant changes wherever needed.", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + } + }, + { + "guid": "", + "name": "Verify Authenticated Service", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-unauthenticated-service", + "categories": [ + "Execution" + ] + } + ] + }, + "controlID": "C-0274", + "creationTime": "", + "description": "Verifies if the service is authenticated", + "remediation": "Configure the service to require authentication.", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Network", + "id": "Cat-4" + } } ], "controlsIDs": [ - "C-0002", "C-0005", - "C-0009", "C-0012", "C-0013", "C-0016", "C-0017", - "C-0030", "C-0034", "C-0035", "C-0038", "C-0041", "C-0044", + "C-0045", "C-0046", - "C-0054", - "C-0055", + "C-0048", "C-0057", - "C-0058", - "C-0059", "C-0066", - "C-0067", - "C-0068", "C-0069", - "C-0070" - ] + "C-0070", + "C-0074", + "C-0211", + "C-0255", + "C-0256", + "C-0257", + "C-0258", + "C-0259", + "C-0260", + "C-0261", + "C-0262", + "C-0264", + "C-0265", + "C-0267", + "C-0270", + "C-0271", + "C-0272", + "C-0273", + "C-0266", + "C-0274" + ], + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } } ], "Controls": [ { "guid": "", - "name": "Ensure that the admission control plugin NamespaceLifecycle is set", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0126", + "name": "Ensure that the API Server --request-timeout argument is set as appropriate", + "controlID": "C-0134", "creationTime": "", - "description": "Reject creating objects in a namespace that is undergoing termination.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--disable-admission-plugins` parameter to ensure it does not include `NamespaceLifecycle`.", + "description": "Set global request timeout for API server requests as appropriate.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` and set the below parameter as appropriate and if needed. 
For example,\n\n \n```\n--request-timeout=300s\n\n```", "rules": [], - "baseScore": 3 + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", - "name": "Ensure that the --client-cert-auth argument is set to true", + "name": "Delete Kubernetes events", "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0154", - "creationTime": "", - "description": "Enable client authentication on etcd service.", - "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--client-cert-auth=\"true\"\n\n```", - "rules": [], - "baseScore": 8 - }, - { - "guid": "", - "name": "Ensure that the API Server --client-ca-file argument is set as appropriate", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0139", - "creationTime": "", - "description": "Setup TLS connection on the API server.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection on the apiserver. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the client certificate authority file.\n\n \n```\n--client-ca-file=\n\n```", - "rules": [], - "baseScore": 8 - }, - { - "guid": "", - "name": "Ensure that the --cert-file and --key-file arguments are set as appropriate", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0153", - "creationTime": "", - "description": "Configure TLS encryption for the etcd service.", - "remediation": "Follow the etcd service documentation and configure TLS encryption.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameters.\n\n \n```\n--cert-file=\n--key-file=\n\n```", - "rules": [], - "baseScore": 8 - }, - { - "guid": "", - "name": "Ensure that the etcd pod specification file permissions are set to 600 or more restrictive", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0098", - "creationTime": "", - "description": "Ensure that the `/etc/kubernetes/manifests/etcd.yaml` file has permissions of `600` or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/etcd.yaml\n\n```", - "rules": [], - "baseScore": 6 - }, - { - "guid": "", - "name": "Ensure that the API Server --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0116", - "creationTime": "", - "description": "Enable certificate based kubelet authentication.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and kubelets. 
Then, edit API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the kubelet client certificate and key parameters as below.\n\n \n```\n--kubelet-client-certificate=\n--kubelet-client-key=\n\n```", - "rules": [], - "baseScore": 7 - }, - { - "guid": "", - "name": "Workloads with RCE vulnerabilities exposed to external traffic", - "attributes": { - "armoBuiltin": true, + "microsoftMitreColumns": [ + "Defense evasion" + ], + "rbacQuery": "Show who can delete k8s events", "controlTypeTags": [ "security", "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access", - "Execution" - ] - } ] }, - "controlID": "C-0084", + "controlID": "C-0031", "creationTime": "", - "description": "Container images with known Remote Code Execution (RCE) vulnerabilities pose significantly higher risk if they are exposed to the external traffic. This control lists all images with such vulnerabilities if their POD has either LoadBalancer or NodePort service.", - "remediation": "Either update the container image to fix the vulnerabilities (if such fix is available) or reassess if this workload must be exposed to the outseide traffic. If no fix is available, consider periodic restart of the POD to minimize the risk of persistant intrusion. Use exception mechanism if you don't want to see this report again.", + "description": "Attackers may delete Kubernetes events to avoid detection of their activity in the cluster. This control identifies all the subjects that can delete Kubernetes events.", + "remediation": "You should follow the least privilege principle. Minimize the number of subjects who can delete Kubernetes events. Avoid using these subjects in the daily operations.", "rules": [], - "baseScore": 8 + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } }, { "guid": "", - "name": "Resource limits", + "name": "Prefer using secrets as files over secrets as environment variables", + "controlID": "C-0207", + "creationTime": "", + "description": "Kubernetes supports mounting secrets as data volumes or as environment variables. 
Minimize the use of environment variable secrets.", + "remediation": "If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables.", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Secrets", + "id": "Cat-3" + } + } + }, + { + "guid": "", + "name": "Minimize cluster access to read-only for Azure Container Registry (ACR)", + "controlID": "C-0250", + "creationTime": "", + "description": "Configure the Cluster Service Account with Storage Object Viewer Role to only allow read-only access to Azure Container Registry (ACR)", + "remediation": "", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "AKS" + ] + } + }, + { + "guid": "", + "name": "Prefer using a container-optimized OS when possible", + "controlID": "C-0226", + "creationTime": "", + "description": "A container-optimized OS is an operating system image that is designed for secure managed hosting of containers on compute instances.\n\n Use cases for container-optimized OSes might include:\n\n * Docker container or Kubernetes support with minimal setup.\n* A small-secure container footprint.\n* An OS that is tested, hardened and verified for running Kubernetes nodes in your compute instances.", + "remediation": "", + "rules": [], + "baseScore": 3, + "scanningScope": { + "matches": [ + "EKS" + ] + } + }, + { + "guid": "", + "name": "Verify Authenticated Service", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security" ], "attackTracks": [ { - "attackTrack": "container", + "attackTrack": "workload-unauthenticated-service", "categories": [ - "Impact - service destruction" + "Execution" ] } ] }, - "controlID": "C-0009", + "controlID": "C-0274", "creationTime": "", - "description": "CPU and memory resources should have a limit set for every container or a namespace to prevent resource exhaustion. This control identifies all the Pods without resource limit definitions by checking their yaml definition file as well as their namespace LimitRange objects. It is also recommended to use ResourceQuota object to restrict overall namespace resources, but this is not verified by this control.", - "remediation": "Define LimitRange and Resource Limits in the namespace or in the deployment/POD yamls.", + "description": "Verifies if the service is authenticated", + "remediation": "Configure the service to require authentication.", "rules": [], - "baseScore": 7 + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Network", + "id": "Cat-4" + } }, { "guid": "", - "name": "Ensure that the controller-manager.conf file ownership is set to root:root", + "name": "Access Kubernetes dashboard", "attributes": { - "armoBuiltin": true + "microsoftMitreColumns": [ + "Discovery", + "Lateral Movement" + ], + "rbacQuery": "Access k8s Dashboard", + "controlTypeTags": [ + "compliance" + ] }, - "controlID": "C-0109", + "controlID": "C-0014", "creationTime": "", - "description": "Ensure that the `controller-manager.conf` file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchown root:root /etc/kubernetes/controller-manager.conf\n\n```", + "description": "Attackers who gain access to the dashboard service account or have its RBAC permissions can use its network access to retrieve information about resources in the cluster or change them. This control checks if a subject that is not dashboard service account is bound to dashboard role/clusterrole, or - if anyone that is not the dashboard pod is associated with dashboard service account.", + "remediation": "Make sure that the “Kubernetes Dashboard” service account is only bound to the Kubernetes dashboard following the least privilege principle.", "rules": [], - "baseScore": 6 + "baseScore": 2, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "Ensure Network Policy is Enabled and set as appropriate", + "controlID": "C-0240", + "creationTime": "", + "description": "When you run modern, microservices-based applications in Kubernetes, you often want to control which components can communicate with each other. The principle of least privilege should be applied to how traffic can flow between pods in an Azure Kubernetes Service (AKS) cluster. Let's say you likely want to block traffic directly to back-end applications. The Network Policy feature in Kubernetes lets you define rules for ingress and egress traffic between pods in a cluster.", + "remediation": "", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "AKS" + ] + } + }, + { + "guid": "", + "name": "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate", + "controlID": "C-0156", + "creationTime": "", + "description": "etcd should be configured to make use of TLS encryption for peer connections.", + "remediation": "Follow the etcd service documentation and configure peer TLS encryption as appropriate for your etcd cluster.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameters.\n\n \n```\n--peer-client-file=\n--peer-key-file=\n\n```", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Workload with PVC access", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Data Collection" + ] + } + ] + }, + "controlID": "C-0257", + "creationTime": "", + "description": "This control detects workloads that have mounted PVC. Workloads with PVC access can potentially expose sensitive information and elevate the risk of unauthorized access to critical resources.", + "remediation": "Review the workloads identified by this control and assess whether it's necessary to mount these PVCs. 
Remove PVC access from workloads that don't require it or ensure appropriate access controls are in place to protect sensitive information.", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Storage", + "id": "Cat-8" + } + } + }, + { + "guid": "", + "name": "Pods in default namespace", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops" + ] + }, + "controlID": "C-0061", + "creationTime": "", + "description": "It is recommended to avoid running pods in cluster without explicit namespace assignment. This control identifies all the pods running in the default namespace.", + "remediation": "Create necessary namespaces and move all the pods from default namespace there.", + "rules": [], + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "Verify that the RotateKubeletServerCertificate argument is set to true", + "controlID": "C-0183", + "creationTime": "", + "description": "Enable kubelet server certificate rotation.", + "remediation": "Edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_CERTIFICATE_ARGS` variable.\n\n \n```\n--feature-gates=RotateKubeletServerCertificate=true\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Ensure that the admin.conf file permissions are set to 600", + "controlID": "C-0104", + "creationTime": "", + "description": "Ensure that the `admin.conf` file has permissions of `600`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchmod 600 /etc/kubernetes/admin.conf\n\n```", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive", + "controlID": "C-0170", + "creationTime": "", + "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file has permissions of 600 or more restrictive.", + "remediation": "Run the following command (using the config file location identied in the Audit step)\n\n \n```\nchmod 600 /var/lib/kubelet/config.yaml\n\n```", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Ensure that the Controller Manager --service-account-private-key-file argument is set as appropriate", + "controlID": "C-0147", + "creationTime": "", + "description": "Explicitly set a service account private key file for service accounts on the controller manager.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--service-account-private-key-file` parameter to the private key file for service accounts.\n\n \n```\n--service-account-private-key-file=\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Ensure that the scheduler.conf file ownership is set to root:root", + "controlID": "C-0107", + "creationTime": "", + "description": "Ensure that the `scheduler.conf` file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/scheduler.conf\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Ensure CPU limits are set", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops", + "security" + ], + "attackTracks": [ + { + "attackTrack": "service-destruction", + "categories": [ + "Denial of service" + ] + } + ] + }, + "controlID": "C-0270", + "creationTime": "", + "description": "This control identifies all Pods for which the CPU limits are not set.", + "remediation": "Set the CPU limits or use exception mechanism to avoid unnecessary notifications.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Resource management", + "id": "Cat-7" + } + } + }, + { + "guid": "", + "name": "Minimize wildcard use in Roles and ClusterRoles", + "controlID": "C-0187", + "creationTime": "", + "description": "Kubernetes Roles and ClusterRoles provide access to resources based on sets of objects and actions that can be taken on those objects. 
It is possible to set either of these to be the wildcard \"\\*\" which matches all items.\n\n Use of wildcards is not optimal from a security perspective as it may allow for inadvertent access to be granted when new resources are added to the Kubernetes API either as CRDs or in later versions of the product.", + "remediation": "Where possible replace any use of wildcards in clusterroles and roles with specific objects or actions.", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "Workloads with Critical vulnerabilities exposed to external traffic", + "attributes": { + "controlTypeTags": [ + "security" + ] + }, + "controlID": "C-0083", + "creationTime": "", + "description": "Container images with known critical vulnerabilities pose elevated risk if they are exposed to the external traffic. This control lists all images with such vulnerabilities if either LoadBalancer or NodePort service is assigned to them.", + "remediation": "Either update the container image to fix the vulnerabilities (if such fix is available) or reassess if this workload must be exposed to the outseide traffic. If no fix is available, consider periodic restart of the pod to minimize the risk of persistant intrusion. Use exception mechanism if you don't want to see this report again.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CVE-2022-23648-containerd-fs-escape", + "attributes": { + "controlTypeTags": [ + "security" + ] + }, + "controlID": "C-0087", + "creationTime": "", + "description": "CVE-2022-23648 is a vulnerability of containerd enabling attacker to gain access to read-only copies of arbitrary files from the host using specially-crafted manifests", + "remediation": "Patch containerd to 1.6.1, 1.5.10, 1.4.12 or above", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "Ensure that the admission control plugin AlwaysAdmit is not set", + "controlID": "C-0122", + "creationTime": "", + "description": "Do not allow all requests.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and either remove the `--enable-admission-plugins` parameter, or set it to a value that does not include `AlwaysAdmit`.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Portforwarding privileges", + "attributes": { + "rbacQuery": "Port Forwarding", + "controlTypeTags": [ + "security-impact", + "compliance" + ] + }, + "controlID": "C-0063", + "creationTime": "", + "description": "Attackers with relevant RBAC permission can use “kubectl portforward” command to establish direct communication with pods from within the cluster or even remotely. Such communication will most likely bypass existing security measures in the cluster. This control determines which subjects have permissions to use this command.", + "remediation": "It is recommended to prohibit “kubectl portforward” command in production environments. 
It is also recommended not to use subjects with this permission for daily cluster operations.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "Ensure that a minimal audit policy is created", + "controlID": "C-0160", + "creationTime": "", + "description": "Kubernetes can audit the details of requests made to the API server. The `--audit-policy-file` flag must be set for this logging to be enabled.", + "remediation": "Create an audit policy file for your cluster.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Use Azure RBAC for Kubernetes Authorization.", + "controlID": "C-0241", + "creationTime": "", + "description": "The ability to manage RBAC for Kubernetes resources from Azure gives you the choice to manage RBAC for the cluster resources either using Azure or native Kubernetes mechanisms.", + "remediation": "Set Azure RBAC as access system.", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "AKS" + ] + } + }, + { + "guid": "", + "name": "Anonymous user has RoleBinding", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0262", + "creationTime": "", + "description": "Granting permissions to the system:unauthenticated or system:anonymous user is generally not recommended and can introduce security risks. Allowing unauthenticated access to your Kubernetes cluster can lead to unauthorized access, potential data breaches, and abuse of cluster resources.", + "remediation": "Review and modify your cluster's RBAC configuration to ensure that only authenticated and authorized users have appropriate permissions based on their roles and responsibilities within your system.", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1", + "subCategory": { + "name": "Supply chain", + "id": "Cat-6" + } + } + }, + { + "guid": "", + "name": "Prefer using dedicated AKS Service Accounts", + "controlID": "C-0239", + "creationTime": "", + "description": "Kubernetes workloads should not use cluster node service accounts to authenticate to Azure AKS APIs. Each Kubernetes workload that needs to authenticate to other Azure Web Services using IAM should be provisioned with a dedicated Service account.", + "remediation": "Azure Active Directory integration\nThe security of AKS clusters can be enhanced with the integration of Azure Active Directory (AD). Built on decades of enterprise identity management, Azure AD is a multi-tenant, cloud-based directory, and identity management service that combines core directory services, application access management, and identity protection. With Azure AD, you can integrate on-premises identities into AKS clusters to provide a single source for account management and security.\n\n Azure Active Directory integration with AKS clusters\n\n With Azure AD-integrated AKS clusters, you can grant users or groups access to Kubernetes resources within a namespace or across the cluster. To obtain a kubectl configuration context, a user can run the az aks get-credentials command. When a user then interacts with the AKS cluster with kubectl, they're prompted to sign in with their Azure AD credentials. 
This approach provides a single source for user account management and password credentials. The user can only access the resources as defined by the cluster administrator.\n\n Azure AD authentication is provided to AKS clusters with OpenID Connect. OpenID Connect is an identity layer built on top of the OAuth 2.0 protocol. For more information on OpenID Connect, see the Open ID connect documentation. From inside of the Kubernetes cluster, Webhook Token Authentication is used to verify authentication tokens. Webhook token authentication is configured and managed as part of the AKS cluster.", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "AKS" + ] + } + }, + { + "guid": "", + "name": "No impersonation", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ], + "rbacQuery": "Impersonation" + }, + "controlID": "C-0065", + "creationTime": "", + "description": "Impersonation is an explicit RBAC permission to use other roles rather than the one assigned to a user, group or service account. This is sometimes needed for testing purposes. However, it is highly recommended not to use this capability in the production environments for daily operations. This control identifies all subjects whose roles include impersonate verb.", + "remediation": "Either remove the impersonate verb from the role where it was found or make sure that this role is not bound to users, groups or service accounts used for ongoing cluster operations. If necessary, bind this role to a subject only for specific needs for limited time period.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "Ensure that the etcd data directory ownership is set to etcd:etcd", + "controlID": "C-0103", + "creationTime": "", + "description": "Ensure that the etcd data directory ownership is set to `etcd:etcd`.", + "remediation": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). 
For example,\n\n \n```\nchown etcd:etcd /var/lib/etcd\n\n```", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Ensure that default service accounts are not actively used", + "controlID": "C-0189", + "creationTime": "", + "description": "The `default` service account should not be used to ensure that rights granted to applications can be more easily audited and reviewed.", + "remediation": "Create explicit service accounts wherever a Kubernetes workload requires specific access to the Kubernetes API server.\n\n Modify the configuration of each default service account to include this value\n\n \n```\nautomountServiceAccountToken: false\n\n```", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "Disable anonymous access to Kubelet service", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0069", + "creationTime": "", + "description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", + "remediation": "Start the kubelet with the --anonymous-auth=false flag.", + "rules": [], + "baseScore": 10, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Ensure that the --service-account-extend-token-expiration parameter is set to false", + "controlID": "C-0290", + "creationTime": "", + "description": "By default Kubernetes extends service account token lifetimes to one year. This should be set to false for security.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n```\n--service-account-extend-token-expiration=false\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Minimize the admission of containers with allowPrivilegeEscalation", + "controlID": "C-0197", + "creationTime": "", + "description": "Do not generally permit containers to be run with the `allowPrivilegeEscalation` flag set to true. 
Allowing this right can lead to a process running a container getting more rights than it started with.\n\n It's important to note that these rights are still constrained by the overall container sandbox, and this setting does not relate to the use of privileged containers.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers with `.spec.allowPrivilegeEscalation` set to `true`.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "Naked pods", + "attributes": { + "controlTypeTags": [ + "devops" + ] + }, + "controlID": "C-0073", + "creationTime": "", + "description": "It is not recommended to create pods without parental Deployment, ReplicaSet, StatefulSet etc. Manual creation of pods may lead to configuration drifts and other untracked changes in the system. Such pods won't be automatically rescheduled by Kubernetes in case of a crash or infrastructure failure. This control identifies every pod that does not have a corresponding parental object.", + "remediation": "Create necessary Deployment object for every pod, making any pod a first class citizen in your IaC architecture.", + "rules": [], + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "Minimize access to the service account token creation", + "controlID": "C-0282", + "creationTime": "", + "description": "Users with rights to create new service account tokens at a cluster level can create long-lived privileged credentials in the cluster. This could allow for privilege escalation and persistent access to the cluster, even if the user's account has been revoked.", + "remediation": "Where possible, remove access to the token sub-resource of serviceaccount objects.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "Avoid use of system:masters group", + "attributes": { + "actionRequired": "manual review" + }, + "controlID": "C-0246", + "creationTime": "", + "description": "The special group `system:masters` should not be used to grant permissions to any user or service account, except where strictly necessary (e.g. bootstrapping access prior to RBAC being fully available)", + "remediation": "Remove the `system:masters` group from all users in the cluster.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster" + ] + } + }, + { + "guid": "", + "name": "Minimize the admission of containers with capabilities assigned", + "controlID": "C-0201", + "creationTime": "", + "description": "Do not generally permit containers with capabilities", + "remediation": "Review the use of capabilities in applications running on your cluster.
Where a namespace contains applications which do not require any Linux capabilities to operate, consider adding a policy which forbids the admission of containers which do not drop all capabilities.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "Ensure that the kube-proxy metrics service is bound to localhost", + "controlID": "C-0291", + "creationTime": "", + "description": "Do not bind the kube-proxy metrics port to non-loopback addresses.", + "remediation": "If running kube-proxy with a configuration file, edit the kube-proxy configuration file and set the metricsBindAddress to `127.0.0.1:10249`.\n\nIf running kube-proxy with command line arguments, set `--metrics-bind-address=127.0.0.1:10249`.\n\nRestart kube-proxy for changes to take effect.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Minimize the admission of containers wishing to share the host network namespace", + "controlID": "C-0196", + "creationTime": "", + "description": "Do not generally permit containers to be run with the `hostNetwork` flag set to true.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostNetwork` containers.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root", + "controlID": "C-0167", + "creationTime": "", + "description": "Ensure that the `kubelet.conf` file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Audit logs enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0067", + "creationTime": "", + "description": "Audit logging is an important security feature in Kubernetes; it enables the operator to track requests to the cluster. It is important to use it so the operator has a record of events that happened in Kubernetes", + "remediation": "Turn on audit logging for your cluster. Look at the vendor guidelines for more details", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Ensure that the Kubernetes PKI directory and file ownership is set to root:root", + "controlID": "C-0110", + "creationTime": "", + "description": "Ensure that the Kubernetes PKI directory and file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node.
For example,\n\n \n```\nchown -R root:root /etc/kubernetes/pki/\n\n```", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Missing network policy", + "attributes": { + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Lateral Movement (Network)" + ] + } + ], + "isFixedByNetworkPolicy": true, + "controlTypeTags": [ + "security" + ] + }, + "controlID": "C-0260", + "creationTime": "", + "description": "This control detects workloads that has no NetworkPolicy configured in labels. If a network policy is not configured, it means that your applications might not have necessary control over the traffic to and from the pods, possibly leading to a security vulnerability.", + "remediation": "Review the workloads identified by this control and assess whether it's necessary to configure a network policy for them.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Network", + "id": "Cat-4" + } + }, + { + "guid": "", + "name": "Automatic mapping of service account", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ] + }, + "controlID": "C-0034", + "creationTime": "", + "description": "Potential attacker may gain access to a pod and steal its service account token. Therefore, it is recommended to disable automatic mapping of the service account tokens in service account configuration and enable it only for pods that need to use them.", + "remediation": "Disable automatic mounting of service account tokens to pods either at the service account level or at the individual pod level, by specifying the automountServiceAccountToken: false. Note that pod level takes precedence.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Secrets", + "id": "Cat-3" + } + }, + { + "guid": "", + "name": "Ensure clusters are created with Private Nodes", + "controlID": "C-0229", + "creationTime": "", + "description": "Disable public IP addresses for cluster nodes, so that they only have private IP addresses. Private Nodes are nodes with no public IP addresses.", + "remediation": "\n```\naws eks update-cluster-config \\\n --region region-code \\\n --name my-cluster \\\n --resources-vpc-config endpointPublicAccess=true,publicAccessCidrs=\"203.0.113.5/32\",endpointPrivateAccess=true\n\n```", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "EKS" + ] + } + }, + { + "guid": "", + "name": "Minimize user access to Azure Container Registry (ACR)", + "controlID": "C-0251", + "creationTime": "", + "description": "Restrict user access to Azure Container Registry (ACR), limiting interaction with build images to only authorized personnel and service accounts.", + "remediation": "Azure Container Registry\nIf you use Azure Container Registry (ACR) as your container image store, you need to grant permissions to the service principal for your AKS cluster to read and pull images. Currently, the recommended configuration is to use the az aks create or az aks update command to integrate with a registry and assign the appropriate role for the service principal. 
For detailed steps, see Authenticate with Azure Container Registry from Azure Kubernetes Service.\n\n To avoid needing an Owner or Azure account administrator role, you can configure a service principal manually or use an existing service principal to authenticate ACR from AKS. For more information, see ACR authentication with service principals or Authenticate from Kubernetes with a pull secret.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + } + }, + { + "guid": "", + "name": "Apply Security Context to Your Pods and Containers", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" + ] + } + ] + }, + "controlID": "C-0211", + "creationTime": "", + "description": "Apply Security Context to Your Pods and Containers", + "remediation": "Follow the Kubernetes documentation and apply security contexts to your pods. For a suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker Containers.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "Ensure that the admission control plugin NodeRestriction is set", + "controlID": "C-0127", + "creationTime": "", + "description": "Limit the `Node` and `Pod` objects that a kubelet could modify.", + "remediation": "Follow the Kubernetes documentation and configure `NodeRestriction` plug-in on kubelets. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the `--enable-admission-plugins` parameter to a value that includes `NodeRestriction`.\n\n \n```\n--enable-admission-plugins=...,NodeRestriction,...\n\n```", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Workloads with RCE vulnerabilities exposed to external traffic", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0084", + "creationTime": "", + "description": "Container images with known Remote Code Execution (RCE) vulnerabilities pose significantly higher risk if they are exposed to the external traffic. This control lists all images with such vulnerabilities if their pod has either LoadBalancer or NodePort service.", + "remediation": "Either update the container image to fix the vulnerabilities (if such fix is available) or reassess if this workload must be exposed to the outseide traffic. If no fix is available, consider periodic restart of the pod to minimize the risk of persistant intrusion. Use exception mechanism if you don't want to see this report again.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "Linux hardening", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0055", + "creationTime": "", + "description": "Containers may be given more privileges than they actually need. 
This can increase the potential impact of a container compromise.", + "remediation": "You can use AppArmor, Seccomp, SELinux and Linux Capabilities mechanisms to restrict containers abilities to utilize unwanted privileges.", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + } + } + }, + { + "guid": "", + "name": "Instance Metadata API", + "attributes": { + "microsoftMitreColumns": [ + "Discovery" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0052", + "creationTime": "", + "description": "Attackers who gain access to a container, may query the metadata API service for getting information about the underlying node. This control checks if there is access from the nodes to cloud providers instance metadata services.", + "remediation": "Disable metadata services for pods in cloud provider settings.", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cloud" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Container hostPort", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "devops" + ] + }, + "controlID": "C-0044", + "creationTime": "", + "description": "Configuring hostPort requires a particular port number. If two objects specify the same HostPort, they could not be deployed to the same node. It may prevent the second object from starting, even if Kubernetes will try reschedule it on another node, provided there are available nodes with sufficient amount of resources. Also, if the number of replicas of such workload is higher than the number of nodes, the deployment will consistently fail.", + "remediation": "Avoid usage of hostPort unless it is absolutely necessary, in which case define appropriate exception. Use NodePort / ClusterIP instead.", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Network", + "id": "Cat-4" + } }, { "guid": "", "name": "Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive", - "attributes": { - "armoBuiltin": true - }, "controlID": "C-0096", "creationTime": "", "description": "Ensure that the scheduler pod specification file has permissions of `600` or more restrictive.", "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```", "rules": [], - "baseScore": 6 + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", - "name": "Cluster internal networking", + "name": "Access container service account", "attributes": { - "armoBuiltin": true, "microsoftMitreColumns": [ - "Lateral movement" + "Credential access" ], + "rbacQuery": "Container service account mapping", "controlTypeTags": [ - "security", - "compliance" - ], + "compliance", + "security-impact" + ] + }, + "controlID": "C-0053", + "creationTime": "", + "description": "Attackers who obtain access to a pod can use its SA token to communicate with KubeAPI server. All pods with SA token mounted (if such token has a Role or a ClusterRole binding) are considerred potentially dangerous.", + "remediation": "Verify that RBAC is enabled. 
Follow the least privilege principle and ensure that only necessary pods have SA token mounted into them.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "Ensure that the API Server --audit-log-maxsize argument is set to 100 or as appropriate", + "controlID": "C-0133", + "creationTime": "", + "description": "Rotate log files on reaching 100 MB or as appropriate.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxsize` parameter to an appropriate size in MB. For example, to set it as 100 MB:\n\n \n```\n--audit-log-maxsize=100\n\n```", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Workload with ConfigMap access", + "attributes": { "attackTracks": [ { + "attackTrack": "workload-external-track", "categories": [ - "Discovery", - "Lateral movement" - ], - "attackTrack": "container" + "Data Collection" + ] } - ] - }, - "controlID": "C-0054", - "creationTime": "", - "description": "If no network policy is defined, attackers who gain access to a container may use it to move laterally in the cluster. This control lists namespaces in which no network policy is defined.", - "remediation": "Define Kubernetes network policies or use alternative products to protect cluster network.", - "rules": [], - "baseScore": 4 - }, - { - "guid": "", - "name": "Ingress and Egress blocked", - "attributes": { - "controlTypeTags": [ - "compliance" ], - "armoBuiltin": true - }, - "controlID": "C-0030", - "creationTime": "", - "description": "Disable Ingress and Egress traffic on all pods wherever possible. It is recommended to define restrictive network policy on all new PODs, and then enable sources/destinations that this POD must communicate with.", - "remediation": "Define a network policy that restricts ingress and egress connections.", - "rules": [], - "baseScore": 6 - }, - { - "guid": "", - "name": "Containers mounting Docker socket", - "attributes": { - "armoBuiltin": true, "controlTypeTags": [ - "devops" + "security" ] }, - "controlID": "C-0074", + "controlID": "C-0258", "creationTime": "", - "description": "Mounting Docker socket (Unix socket) enables container to access Docker internals, retrieve sensitive information and execute Docker commands, if Docker runtime is available. This control identifies PODs that attempt to mount Docker socket for accessing Docker runtime.", - "remediation": "Remove docker socket mount request or define an exception.", + "description": "This control detects workloads that have mounted ConfigMaps. Workloads with ConfigMap access can potentially expose sensitive information and elevate the risk of unauthorized access to critical resources.", + "remediation": "Review the workloads identified by this control and assess whether it's necessary to mount these configMaps. 
Remove configMaps access from workloads that don't require it or ensure appropriate access controls are in place to protect sensitive information.", "rules": [], - "baseScore": 5 + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } }, { "guid": "", - "name": "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster", - "attributes": { - "armoBuiltin": true + "name": "Ensure that the etcd data directory permissions are set to 700 or more restrictive", + "controlID": "C-0102", + "creationTime": "", + "description": "Ensure that the etcd data directory has permissions of `700` or more restrictive.", + "remediation": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). For example,\n\n \n```\nchmod 700 /var/lib/etcd\n\n```", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] }, - "controlID": "C-0191", - "creationTime": "", - "description": "Cluster roles and roles with the impersonate, bind or escalate permissions should not be granted unless strictly required. Each of these permissions allow a particular subject to escalate their privileges beyond those explicitly granted by cluster administrators", - "remediation": "Where possible, remove the impersonate, bind and escalate rights from subjects.", - "rules": [], - "baseScore": 6 + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", - "name": "Data Destruction", + "name": "Ensure that the Controller Manager --terminated-pod-gc-threshold argument is set as appropriate", + "controlID": "C-0144", + "creationTime": "", + "description": "Activate garbage collector on pod termination, as appropriate.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--terminated-pod-gc-threshold` to an appropriate threshold, for example:\n\n \n```\n--terminated-pod-gc-threshold=10\n\n```", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Ensure that the API Server --service-account-lookup argument is set to true", + "controlID": "C-0135", + "creationTime": "", + "description": "Validate service account before validating token.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--service-account-lookup=true\n\n```\n Alternatively, you can delete the `--service-account-lookup` parameter from this file so that the default takes effect.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Minimize the admission of HostPath volumes", + "controlID": "C-0203", + "creationTime": "", + "description": "Do not generally admit containers which make use of `hostPath` volumes.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers which use `hostPath` volumes.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": 
"Cat-5" + } + }, + { + "guid": "", + "name": "List Kubernetes secrets", "attributes": { - "armoBuiltin": true, "microsoftMitreColumns": [ - "Impact" + "Credential access" ], - "rbacQuery": "Data destruction", + "rbacQuery": "Show who can access secrets", "controlTypeTags": [ + "security-impact", "compliance" ] }, - "controlID": "C-0007", + "controlID": "C-0015", "creationTime": "", - "description": "Attackers may attempt to destroy data and resources in the cluster. This includes deleting deployments, configurations, storage, and compute resources. This control identifies all subjects that can delete resources.", - "remediation": "You should follow the least privilege principle and minimize the number of subjects that can delete resources.", + "description": "Attackers who have permissions to access secrets can access sensitive information that might include credentials to various services. This control determines which user, group or service account can list/get secrets.", + "remediation": "Monitor and approve list of users, groups and service accounts that can access secrets. Use exception mechanism to prevent repetitive the notifications.", "rules": [], - "baseScore": 5 + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } }, { "guid": "", - "name": "Ensure that the Container Network Interface file permissions are set to 600 or more restrictive", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0100", + "name": "Ensure that the API Server --client-ca-file argument is set as appropriate", + "controlID": "C-0139", "creationTime": "", - "description": "Ensure that the Container Network Interface files have permissions of `600` or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 \n\n```", + "description": "Setup TLS connection on the API server.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection on the apiserver. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the client certificate authority file.\n\n \n```\n--client-ca-file=\n\n```", "rules": [], - "baseScore": 6 + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", - "name": "Ensure that the --auto-tls argument is not set to true", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0155", + "name": "Ensure that the cluster-admin role is only used where required", + "controlID": "C-0185", "creationTime": "", - "description": "Do not use self-signed certificates for TLS.", - "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and either remove the `--auto-tls` parameter or set it to `false`.\n\n \n```\n--auto-tls=false\n\n```", + "description": "The RBAC role `cluster-admin` provides wide-ranging powers over the environment and should be used only where and when needed.", + "remediation": "Identify all clusterrolebindings to the cluster-admin role. 
Check if they are used and if they need this role or if they could use a role with fewer privileges.\n\n Where possible, first bind users to a lower privileged role and then remove the clusterrolebinding to the cluster-admin role :\n\n \n```\nkubectl delete clusterrolebinding [name]\n\n```", "rules": [], - "baseScore": 6 + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "Resources memory limit and request", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops" + ], + "actionRequired": "configuration" + }, + "controlID": "C-0004", + "creationTime": "", + "description": "This control identifies all Pods for which the memory limit is not set.", + "remediation": "Set the memory limit or use exception mechanism to avoid unnecessary notifications.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Resource management", + "id": "Cat-7" + } + } + }, + { + "guid": "", + "name": "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used", + "controlID": "C-0124", + "creationTime": "", + "description": "The SecurityContextDeny admission controller can be used to deny pods which make use of some SecurityContext fields which could allow for privilege escalation in the cluster. This should be used where PodSecurityPolicy is not in place within the cluster.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--enable-admission-plugins` parameter to include `SecurityContextDeny`, unless `PodSecurityPolicy` is already in place.\n\n \n```\n--enable-admission-plugins=...,SecurityContextDeny,...\n\n```", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Ensure that the --cert-file and --key-file arguments are set as appropriate", + "controlID": "C-0153", + "creationTime": "", + "description": "Configure TLS encryption for the etcd service.", + "remediation": "Follow the etcd service documentation and configure TLS encryption.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameters.\n\n \n```\n--cert-file=\n--key-file=\n\n```", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Ensure that the Controller Manager --root-ca-file argument is set as appropriate", + "controlID": "C-0148", + "creationTime": "", + "description": "Allow pods to verify the API server's serving certificate before establishing connections.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--root-ca-file` parameter to the certificate bundle file`.\n\n \n```\n--root-ca-file=\n\n```", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", "name": "CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability", "attributes": { - "armoBuiltin": true, 
"controlTypeTags": [ "security", "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access", - "Execution" - ] - } ] }, "controlID": "C-0059", @@ -6862,26 +20327,1128 @@ "description": "Security issue in ingress-nginx where a user that can create or update ingress objects can use the custom snippets feature to obtain all secrets in the cluster (see more at https://github.com/kubernetes/ingress-nginx/issues/7837)", "remediation": "To mitigate this vulnerability: 1. Upgrade to a version that allows mitigation (>= v0.49.1 or >= v1.0.1), 2. Set allow-snippet-annotations to false in your ingress-nginx ConfigMap based on how you deploy ingress-nginx", "rules": [], - "baseScore": 8 + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "Ensure that the --peer-auto-tls argument is not set to true", + "controlID": "C-0158", + "creationTime": "", + "description": "Do not use automatically generated self-signed certificates for TLS connections between peers.", + "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and either remove the `--peer-auto-tls` parameter or set it to `false`.\n\n \n```\n--peer-auto-tls=false\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Enable audit Logs", + "attributes": { + "actionRequired": "manual review" + }, + "controlID": "C-0254", + "creationTime": "", + "description": "With Azure Kubernetes Service (AKS), the control plane components such as the kube-apiserver and kube-controller-manager are provided as a managed service. You create and manage the nodes that run the kubelet and container runtime, and deploy your applications through the managed Kubernetes API server. To help troubleshoot your application and services, you may need to view the logs generated by these control plane components.\n\n To help collect and review data from multiple sources, Azure Monitor logs provides a query language and analytics engine that provides insights to your environment. A workspace is used to collate and analyze the data, and can integrate with other Azure services such as Application Insights and Security Center.", + "remediation": "Azure audit logs are enabled and managed in the Azure portal. To enable log collection for the Kubernetes master components in your AKS cluster, open the Azure portal in a web browser and complete the following steps:\n\n 1. Select the resource group for your AKS cluster, such as myResourceGroup. Don't select the resource group that contains your individual AKS cluster resources, such as MC\\_myResourceGroup\\_myAKSCluster\\_eastus.\n2. On the left-hand side, choose Diagnostic settings.\n3. Select your AKS cluster, such as myAKSCluster, then choose to Add diagnostic setting.\n4. Enter a name, such as myAKSClusterLogs, then select the option to Send to Log Analytics.\n5. Select an existing workspace or create a new one. If you create a workspace, provide a workspace name, a resource group, and a location.\n6. In the list of available logs, select the logs you wish to enable. For this example, enable the kube-audit and kube-audit-admin logs. Common logs include the kube-apiserver, kube-controller-manager, and kube-scheduler. 
You can return and change the collected logs once Log Analytics workspaces are enabled.\n7. When ready, select Save to enable collection of the selected logs.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "AKS" + ] + } + }, + { + "guid": "", + "name": "Ensure that the --anonymous-auth argument is set to false", + "controlID": "C-0172", + "creationTime": "", + "description": "Disable anonymous requests to the Kubelet server.", + "remediation": "If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to `false`.\n\n If using executable arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--anonymous-auth=false\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", "name": "Minimize the admission of privileged containers", + "controlID": "C-0213", + "creationTime": "", + "description": "Do not generally permit containers to be run with the `securityContext.privileged` flag set to `true`.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.privileged` field is set to `false`.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "EKS" + ] + } + }, + { + "guid": "", + "name": "Secret/etcd encryption enabled", "attributes": { - "armoBuiltin": true + "controlTypeTags": [ + "security", + "compliance" + ] }, + "controlID": "C-0066", + "creationTime": "", + "description": "All Kubernetes Secrets are stored primarily in etcd therefore it is important to encrypt it.", + "remediation": "Turn on the etcd encryption in your cluster, for more see the vendor documentation.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Minimize the admission of containers with allowPrivilegeEscalation", + "controlID": "C-0217", + "creationTime": "", + "description": "Do not generally permit containers to be run with the `allowPrivilegeEscalation` flag set to true.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.allowPrivilegeEscalation` field is omitted or set to false.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } + }, + { + "guid": "", + "name": "Ensure that the seccomp profile is set to docker/default in your pod definitions", + "controlID": "C-0210", + "creationTime": "", + "description": "Enable `docker/default` seccomp profile in your pod definitions.", + "remediation": "Use security context to enable the `docker/default` seccomp profile in your pod definitions. 
An example is as below:\n\n \n```\n securityContext:\n seccompProfile:\n type: RuntimeDefault\n\n```", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CVE-2022-24348-argocddirtraversal", + "attributes": { + "controlTypeTags": [ + "security" + ] + }, + "controlID": "C-0081", + "creationTime": "", + "description": "CVE-2022-24348 is a major software supply chain 0-day vulnerability in the popular open source CD platform Argo CD which can lead to privilege escalation and information disclosure.", + "remediation": "Update your ArgoCD deployment to fixed versions (v2.1.9,v2.2.4 or v2.3.0)", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "CoreDNS poisoning", + "attributes": { + "microsoftMitreColumns": [ + "Lateral Movement" + ], + "controlTypeTags": [ + "compliance" + ] + }, + "controlID": "C-0037", + "creationTime": "", + "description": "If attackers have permissions to modify the coredns ConfigMap they can change the behavior of the cluster’s DNS, poison it, and override the network identity of other services. This control identifies all subjects allowed to update the 'coredns' configmap.", + "remediation": "You should follow the least privilege principle. Monitor and approve all the subjects allowed to modify the 'coredns' configmap. It is also recommended to remove this permission from the users/service accounts used in the daily operations.", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "Minimize the admission of containers wishing to share the host process ID namespace", + "controlID": "C-0194", + "creationTime": "", + "description": "Do not generally permit containers to be run with the `hostPID` flag set to true.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostPID` containers.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "Minimize the admission of containers wishing to share the host IPC namespace", + "controlID": "C-0195", + "creationTime": "", + "description": "Do not generally permit containers to be run with the `hostIPC` flag set to true.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostIPC` containers.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive", + "controlID": "C-0108", + "creationTime": "", + "description": "Ensure that the `controller-manager.conf` file has permissions of 600 or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchmod 600 /etc/kubernetes/controller-manager.conf\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Prefer using dedicated EKS Service Accounts", + "controlID": "C-0225", + "creationTime": "", + "description": "Kubernetes workloads should not use cluster node service accounts to authenticate to Amazon EKS APIs. Each Kubernetes workload that needs to authenticate to other AWS services using AWS IAM should be provisioned with a dedicated Service account.", + "remediation": "With IAM roles for service accounts on Amazon EKS clusters, you can associate an IAM role with a Kubernetes service account. This service account can then provide AWS permissions to the containers in any pod that uses that service account. With this feature, you no longer need to provide extended permissions to the worker node IAM role so that pods on that node can call AWS APIs.\n\n Applications must sign their AWS API requests with AWS credentials. This feature provides a strategy for managing credentials for your applications, similar to the way that Amazon EC2 instance profiles provide credentials to Amazon EC2 instances. Instead of creating and distributing your AWS credentials to the containers or using the Amazon EC2 instance’s role, you can associate an IAM role with a Kubernetes service account. The applications in the pod’s containers can then use an AWS SDK or the AWS CLI to make API requests to authorized AWS services.\n\n The IAM roles for service accounts feature provides the following benefits:\n\n * Least privilege — By using the IAM roles for service accounts feature, you no longer need to provide extended permissions to the worker node IAM role so that pods on that node can call AWS APIs. You can scope IAM permissions to a service account, and only pods that use that service account have access to those permissions. This feature also eliminates the need for third-party solutions such as kiam or kube2iam.\n* Credential isolation — A container can only retrieve credentials for the IAM role that is associated with the service account to which it belongs. 
A container never has access to credentials that are intended for another container that belongs to another pod.\n* Audit-ability — Access and event logging is available through CloudTrail to help ensure retrospective auditing.\n\n To get started, see list text hereEnabling IAM roles for service accounts on your cluster.\n\n For an end-to-end walkthrough using eksctl, see Walkthrough: Updating a DaemonSet to use IAM for service accounts.", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "EKS" + ] + } + }, + { + "guid": "", + "name": "Ensure that the API Server --DenyServiceExternalIPs is set", + "controlID": "C-0283", + "creationTime": "", + "description": "This admission controller rejects all net-new usage of the Service field externalIPs.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and add the `--enable-admission-plugins=DenyServiceExternalIPs` parameter\n\n or\n\n The Kubernetes API server flag disable-admission-plugins takes a comma-delimited list of admission control plugins to be disabled, even if they are in the list of plugins enabled by default.\n\n `kube-apiserver --disable-admission-plugins=DenyServiceExternalIPs,AlwaysDeny ...`", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Ensure that the admission control plugin AlwaysPullImages is set", + "controlID": "C-0123", + "creationTime": "", + "description": "Always pull images.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--enable-admission-plugins` parameter to include `AlwaysPullImages`.\n\n \n```\n--enable-admission-plugins=...,AlwaysPullImages,...\n\n```", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Minimize the admission of containers wishing to share the host network namespace", + "controlID": "C-0216", + "creationTime": "", + "description": "Do not generally permit containers to be run with the `hostNetwork` flag set to true.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostNetwork` field is omitted or set to false.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } + }, + { + "guid": "", + "name": "Workload with secret access", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Secret Access" + ] + } + ] + }, + "controlID": "C-0255", + "creationTime": "", + "description": "This control identifies workloads that have mounted secrets. Workloads with secret access can potentially expose sensitive information and increase the risk of unauthorized access to critical resources.", + "remediation": "Review the workloads identified by this control and assess whether it's necessary to mount these secrets. 
Remove secret access from workloads that don't require it or ensure appropriate access controls are in place to protect sensitive information.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } + }, + { + "guid": "", + "name": "Insecure capabilities", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" + ] + } + ], + "actionRequired": "configuration" + }, + "controlID": "C-0046", + "creationTime": "", + "description": "Giving insecure or excessive capabilities to a container can increase the impact of the container compromise. This control identifies all the pods with dangerous capabilities (see documentation pages for details).", + "remediation": "Remove all insecure capabilities which are not necessary for the container.", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + } + } + }, + { + "guid": "", + "name": "Non-root containers", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0013", + "creationTime": "", + "description": "Potential attackers may gain access to a container and leverage its existing privileges to conduct an attack. Therefore, it is not recommended to deploy containers with root privileges unless it is absolutely necessary. This control identifies all the pods running as root or can escalate to root.", + "remediation": "If your application does not need root privileges, make sure to define runAsNonRoot as true or explicitly set the runAsUser using ID 1000 or higher under the PodSecurityContext or container securityContext. 
In addition, set an explicit value for runAsGroup using ID 1000 or higher.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + } + } + }, + { + "guid": "", + "name": "API server insecure port is enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0005", + "creationTime": "", + "description": "Kubernetes control plane API is running with non-secure port enabled which allows attackers to gain unprotected access to the cluster.", + "remediation": "Set the insecure-port flag of the API server to zero.", + "rules": [], + "baseScore": 9, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Resources CPU limit and request", + "attributes": { + "actionRequired": "configuration", + "controlTypeTags": [ + "compliance", + "devops" + ] + }, + "controlID": "C-0050", + "creationTime": "", + "description": "This control identifies all Pods for which the CPU limit is not set.", + "remediation": "Set the CPU limit or use exception mechanism to avoid unnecessary notifications.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Resource management", + "id": "Cat-7" + } + } + }, + { + "guid": "", + "name": "Ensure memory requests are set", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops" + ] + }, + "controlID": "C-0269", + "creationTime": "", + "description": "This control identifies all Pods for which the memory requests are not set.", + "remediation": "Set the memory requests or use exception mechanism to avoid unnecessary notifications.", + "rules": [], + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Resource management", + "id": "Cat-7" + } + } + }, + { + "guid": "", + "name": "HostNetwork access", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Lateral Movement (Network)" + ] + } + ] + }, + "controlID": "C-0041", + "creationTime": "", + "description": "Potential attackers may gain access to a pod and inherit access to the entire host network. For example, in AWS case, they will have access to the entire VPC. This control identifies all the pods with host network access enabled.", + "remediation": "Only connect pods to host network when it is necessary. If not, set the hostNetwork field of the pod spec to false, or completely remove it (false is the default). 
Whitelist only those pods that must have access to host network by design.", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Network", + "id": "Cat-4" + } + } + }, + { + "guid": "", + "name": "Workload with credential access", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Credential access" + ] + } + ] + }, + "controlID": "C-0259", + "creationTime": "", + "description": "This control checks if workloads specifications have sensitive information in their environment variables.", + "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } + }, + { + "guid": "", + "name": "Ensure that the API Server --authorization-mode argument is not set to AlwaysAllow", + "controlID": "C-0118", + "creationTime": "", + "description": "Do not always authorize all requests.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to values other than `AlwaysAllow`. One such example could be as below.\n\n \n```\n--authorization-mode=RBAC\n\n```", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Minimize the admission of containers wishing to share the host process ID namespace", + "controlID": "C-0275", + "creationTime": "", + "description": "Do not generally permit containers to be run with the hostPID flag set to true.", + "remediation": "Configure the Admission Controller to restrict the admission of `hostPID` containers.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "Ensure that the --make-iptables-util-chains argument is set to true", + "controlID": "C-0178", + "creationTime": "", + "description": "Allow Kubelet to manage iptables.", + "remediation": "If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains: true`.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and remove the `--make-iptables-util-chains` argument from the `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. 
For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "rules": [], + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Restrict Access to the Control Plane Endpoint", + "controlID": "C-0247", + "creationTime": "", + "description": "Enable Endpoint Private Access to restrict access to the cluster's control plane to only an allowlist of authorized IPs.", + "remediation": "", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "AKS" + ] + } + }, + { + "guid": "", + "name": "Ensure that the API Server --audit-log-path argument is set", + "controlID": "C-0130", + "creationTime": "", + "description": "Enable auditing on the Kubernetes API Server and set the desired audit log path.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-path` parameter to a suitable path and file where you would like audit logs to be written, for example:\n\n \n```\n--audit-log-path=/var/log/apiserver/audit.log\n\n```", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Ensure Kubernetes Secrets are encrypted", + "controlID": "C-0244", + "creationTime": "", + "description": "Encryption at Rest is a common security requirement. In Azure, organizations can encrypt data at rest without the risk or cost of a custom key management solution. Organizations have the option of letting Azure completely manage Encryption at Rest. Additionally, organizations have various options to closely manage encryption or encryption keys.", + "remediation": "", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "AKS" + ] + } + }, + { + "guid": "", + "name": "Privileged container", + "attributes": { + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "controlTypeTags": [ + "security", + "smartRemediation" + ] + }, + "controlID": "C-0057", + "creationTime": "", + "description": "Potential attackers may gain access to privileged containers and inherit access to the host resources. Therefore, it is not recommended to deploy privileged containers unless it is absolutely necessary. This control identifies all the privileged Pods.", + "remediation": "Remove privileged capabilities by setting the securityContext.privileged to false. If you must deploy a Pod as privileged, add other restriction to it, such as network policy, Seccomp etc and still remove all unnecessary capabilities. 
Use the exception mechanism to remove unnecessary notifications.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + } + } + }, + { + "guid": "", + "name": "Ensure that the kubelet configuration file has permissions set to 644 or more restrictive", + "controlID": "C-0235", + "creationTime": "", + "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file has permissions of 644 or more restrictive.", + "remediation": "Run the following command (using the config file location identified in the Audit step)\n\n \n```\nchmod 644 /etc/kubernetes/kubelet/kubelet-config.json\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "EKS" + ] + } + }, + { + "guid": "", + "name": "Ensure that the admission control plugin ServiceAccount is set", + "controlID": "C-0125", + "creationTime": "", + "description": "Automate service accounts management.", + "remediation": "Follow the documentation and create `ServiceAccount` objects as per your environment. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and ensure that the `--disable-admission-plugins` parameter is set to a value that does not include `ServiceAccount`.", + "rules": [], + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Bootstrap token authentication should not be used for users", + "attributes": { + "actionRequired": "manual review" + }, + "id": "CIS-3.1.3", + "controlID": "C-0288", + "creationTime": "", + "description": "Kubernetes provides bootstrap tokens which are intended for use by new nodes joining the cluster\n\n These tokens are not designed for use by end-users they are specifically designed for the purpose of bootstrapping new nodes and not for general authentication", + "remediation": "Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented in place of bootstrap tokens.", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + } + }, + { + "guid": "", + "name": "Restrict Access to the Control Plane Endpoint", + "controlID": "C-0227", + "creationTime": "", + "description": "Enable Endpoint Private Access to restrict access to the cluster's control plane to only an allowlist of authorized IPs.", + "remediation": "By enabling private endpoint access to the Kubernetes API server, all communication between your nodes and the API server stays within your VPC. You can also limit the IP addresses that can access your API server from the internet, or completely disable internet access to the API server.\n\n With this in mind, you can update your cluster accordingly using the AWS CLI to ensure that Private Endpoint Access is enabled.\n\n If you choose to also enable Public Endpoint Access then you should also configure a list of allowable CIDR blocks, resulting in restricted access from the internet. 
If you specify no CIDR blocks, then the public API server endpoint is able to receive and process requests from all IP addresses by defaulting to ['0.0.0.0/0'].\n\n For example, the following command would enable private access to the Kubernetes API as well as limited public access over the internet from a single IP address (noting the /32 CIDR suffix):\n\n `aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true,endpointPublicAccess=true,publicAccessCidrs=\"203.0.113.5/32\"`\n\n Note:\n\n The CIDR blocks specified cannot include reserved addresses.\nThere is a maximum number of CIDR blocks that you can specify. For more information, see the EKS Service Quotas link in the references section.\nFor more detailed information, see the EKS Cluster Endpoint documentation link in the references section.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cloud" + ] + } + }, + { + "guid": "", + "name": "Minimize the admission of privileged containers", "controlID": "C-0193", "creationTime": "", "description": "Do not generally permit containers to be run with the `securityContext.privileged` flag set to `true`.", "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of privileged containers.", "rules": [], - "baseScore": 8 + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "Kubernetes CronJob", + "attributes": { + "microsoftMitreColumns": [ + "Persistence" + ], + "controlTypeTags": [ + "compliance" + ] + }, + "controlID": "C-0026", + "creationTime": "", + "description": "Attackers may use Kubernetes CronJob for scheduling execution of malicious code that would run as a pod in the cluster. 
This control lists all the CronJobs that exist in the cluster for the user to approve.", + "remediation": "Watch Kubernetes CronJobs and make sure they are legitimate.", + "rules": [], + "baseScore": 1, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "Ensure that the --auto-tls argument is not set to true", + "controlID": "C-0155", + "creationTime": "", + "description": "Do not use self-signed certificates for TLS.", + "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and either remove the `--auto-tls` parameter or set it to `false`.\n\n \n```\n--auto-tls=false\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Ensure that the Controller Manager --use-service-account-credentials argument is set to true", + "controlID": "C-0146", + "creationTime": "", + "description": "Use individual service account credentials for each controller.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node to set the below parameter.\n\n \n```\n--use-service-account-credentials=true\n\n```", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Minimize access to webhook configuration objects", + "controlID": "C-0281", + "creationTime": "", + "description": "Users with rights to create/modify/delete validatingwebhookconfigurations or mutatingwebhookconfigurations can control webhooks that can read any object admitted to the cluster, and in the case of mutating webhooks, also mutate admitted objects. This could allow for privilege escalation or disruption of the operation of the cluster.", + "remediation": "Where possible, remove access to the validatingwebhookconfigurations or mutatingwebhookconfigurations objects", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "Minimize access to secrets", + "controlID": "C-0186", + "creationTime": "", + "description": "The Kubernetes API stores secrets, which may be service account tokens for the Kubernetes API or credentials used by workloads in the cluster. 
Access to these secrets should be restricted to the smallest possible group of users to reduce the risk of privilege escalation.", + "remediation": "Where possible, remove `get`, `list` and `watch` access to `secret` objects in the cluster.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "Minimize the admission of containers with added capabilities", + "controlID": "C-0200", + "creationTime": "", + "description": "Do not generally permit containers with capabilities assigned beyond the default set.", + "remediation": "Ensure that `allowedCapabilities` is not present in policies for the cluster unless it is set to an empty array.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "Ensure that the etcd pod specification file ownership is set to root:root", + "controlID": "C-0099", + "creationTime": "", + "description": "Ensure that the `/etc/kubernetes/manifests/etcd.yaml` file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/etcd.yaml\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Consider external secret storage", + "controlID": "C-0208", + "creationTime": "", + "description": "Consider the use of an external secrets storage and management system, instead of using Kubernetes Secrets directly, if you have more complex secret management needs. Ensure the solution requires authentication to access secrets, has auditing of access to and use of secrets, and encrypts secrets. Some solutions also make it easier to rotate secrets.", + "remediation": "Refer to the secrets management options offered by your cloud provider or a third-party secrets management solution.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Ensure that the kubelet service file ownership is set to root:root", + "controlID": "C-0163", + "creationTime": "", + "description": "Ensure that the `kubelet` service file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchown root:root /etc/systemd/system/kubelet.service.d/kubeadm.conf\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Restrict untrusted workloads", + "attributes": { + "actionRequired": "manual review" + }, + "controlID": "C-0249", + "creationTime": "", + "description": "Restricting unstrusted workloads can be achieved by using ACI along with AKS.\n\n What is ACI?\nACI lets you quickly deploy container instances without additional infrastructure overhead. When you connect with AKS, ACI becomes a secured, logical extension of your AKS cluster. 
The virtual nodes component, which is based on Virtual Kubelet, is installed in your AKS cluster that presents ACI as a virtual Kubernetes node. Kubernetes can then schedule pods that run as ACI instances through virtual nodes, not as pods on VM nodes directly in your AKS cluster.\n\n Your application requires no modification to use virtual nodes. Deployments can scale across AKS and ACI and with no delay as cluster autoscaler deploys new nodes in your AKS cluster.\n\n Virtual nodes are deployed to an additional subnet in the same virtual network as your AKS cluster. This virtual network configuration allows the traffic between ACI and AKS to be secured. Like an AKS cluster, an ACI instance is a secure, logical compute resource that is isolated from other users.", + "remediation": "", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "AKS" + ] + } + }, + { + "guid": "", + "name": "Ensure that the --authorization-mode argument is not set to AlwaysAllow", + "controlID": "C-0173", + "creationTime": "", + "description": "Do not allow all requests. Enable explicit authorization.", + "remediation": "If using a Kubelet config file, edit the file to set `authorization: mode` to `Webhook`.\n\n If using executable arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_AUTHZ_ARGS` variable.\n\n \n```\n--authorization-mode=Webhook\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Workload with cluster takeover roles", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "external-workload-with-cluster-takeover-roles", + "categories": [ + "Cluster Access" + ], + "displayRelatedResources": true, + "clickableResourceKind": "ServiceAccount" + } + ] + }, + "controlID": "C-0267", + "creationTime": "", + "description": "Cluster takeover roles include workload creation or update and secret access. They can easily lead to super privileges in the cluster. If an attacker can exploit this workload then the attacker can take over the cluster using the RBAC privileges this workload is assigned to.", + "remediation": "You should apply least privilege principle. Make sure each service account has only the permissions that are absolutely necessary.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "Immutable container filesystem", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Persistence" + ] + } + ] + }, + "controlID": "C-0017", + "creationTime": "", + "description": "Mutable container filesystem can be abused to inject malicious code or data into containers. Use immutable (read-only) filesystem to limit potential attacks.", + "remediation": "Set the filesystem of the container to read-only when possible (pod securityContext, readOnlyRootFilesystem: true). 
If containers application needs to write into the filesystem, it is recommended to mount secondary filesystems for specific directories where application require write access.", + "rules": [], + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + } + } + }, + { + "guid": "", + "name": "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers", + "controlID": "C-0184", + "creationTime": "", + "description": "Ensure that the Kubelet is configured to only use strong cryptographic ciphers.", + "remediation": "If using a Kubelet config file, edit the file to set `TLSCipherSuites:` to `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256` or to a subset of these values.\n\n If using executable arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the `--tls-cipher-suites` parameter as follows, or to a subset of these values.\n\n \n```\n --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", "name": "Exposed sensitive interfaces", "attributes": { - "armoBuiltin": true, + "actionRequired": "configuration", "microsoftMitreColumns": [ "Initial access" ], @@ -6894,92 +21461,2428 @@ "description": "Exposing a sensitive interface to the internet poses a security risk. It might enable attackers to run malicious code or deploy containers in the cluster. This control checks if known components (e.g. Kubeflow, Argo Workflows, etc.) 
are deployed and exposed services externally.", "remediation": "Consider blocking external interfaces or protect them with appropriate security tools.", "rules": [], - "baseScore": 6 - }, - { - "guid": "", - "name": "Secret/ETCD encryption enabled", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "node", - "categories": [ - "Impact" - ] - } + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" ] }, - "controlID": "C-0066", - "creationTime": "", - "description": "All Kubernetes Secrets are stored primarily in etcd therefore it is important to encrypt it.", - "remediation": "Turn on the etcd encryption in your cluster, for more see the vendor documentation.", - "rules": [], - "baseScore": 6 + "category": { + "name": "Workload", + "id": "Cat-5" + } }, { "guid": "", - "name": "List Kubernetes secrets", + "name": "ServiceAccount token mounted", "attributes": { - "rbacQuery": "Show who can access secrets", "controlTypeTags": [ - "security-impact", - "compliance" + "security" ], "attackTracks": [ { - "attackTrack": "kubeapi", + "attackTrack": "workload-external-track", "categories": [ "Credential access" ] } - ], - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Credential access" ] }, - "controlID": "C-0015", + "controlID": "C-0261", "creationTime": "", - "description": "Attackers who have permissions to access secrets can access sensitive information that might include credentials to various services. This control determines which user, group or service account can list/get secrets.", - "remediation": "Monitor and approve list of users, groups and service accounts that can access secrets. Use exception mechanism to prevent repetitive the notifications.", + "description": "Potential attacker may gain access to a workload and steal its ServiceAccount token. Therefore, it is recommended to disable automatic mapping of the ServiceAccount tokens in ServiceAccount configuration. Enable it only for workloads that need to use them and ensure that this ServiceAccount is not bound to an unnecessary ClusterRoleBinding or RoleBinding.", + "remediation": "Disable automatic mounting of service account tokens to pods at the workload level, by specifying automountServiceAccountToken: false. Enable it only for workloads that need to use them and ensure that this ServiceAccount doesn't have unnecessary permissions", "rules": [], - "baseScore": 7 + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } }, { "guid": "", - "name": "Ensure that Service Account Tokens are only mounted where necessary", + "name": "Container runtime socket mounted", + "attributes": { + "controlTypeTags": [ + "devops", + "smartRemediation" + ] + }, + "controlID": "C-0074", + "creationTime": "", + "description": "Mounting Container runtime socket (Unix socket) enables container to access Container runtime, retrieve sensitive information and execute commands, if Container runtime is available. 
This control identifies pods that attempt to mount Container runtime socket for accessing Container runtime.", + "remediation": "Remove container runtime socket mount request or define an exception.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } + }, + { + "guid": "", + "name": "Create administrative boundaries between resources using namespaces", + "controlID": "C-0209", + "creationTime": "", + "description": "Use namespaces to isolate your Kubernetes objects.", + "remediation": "Follow the documentation and create namespaces for objects in your deployment as you need them.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "Service account token authentication should not be used for users", + "attributes": { + "actionRequired": "manual review" + }, + "id": "CIS-3.1.2", + "controlID": "C-0287", + "creationTime": "", + "description": "Kubernetes provides service account tokens which are intended for use by workloads running in the Kubernetes cluster, for authentication to the API server.\n\n These tokens are not designed for use by end-users and do not provide for features such as revocation or expiry, making them insecure. A newer version of the feature (Bound service account token volumes) does introduce expiry but still does not allow for specific revocation.", + "remediation": "Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented in place of service account tokens.", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + } + }, + { + "guid": "", + "name": "PersistentVolume without encyption", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0264", + "creationTime": "", + "description": "This control detects PersistentVolumes without encyption", + "remediation": "Enable encryption on the PersistentVolume using the configuration in StorageClass", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster" + ] + } + }, + { + "guid": "", + "name": "Ensure that the certificate authorities file permissions are set to 600 or more restrictive", + "controlID": "C-0168", + "creationTime": "", + "description": "Ensure that the certificate authorities file has permissions of `600` or more restrictive.", + "remediation": "Run the following command to modify the file permissions of the `--client-ca-file`\n\n \n```\nchmod 600 \n\n```", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster", + "controlID": "C-0191", + "creationTime": "", + "description": "Cluster roles and roles with the impersonate, bind or escalate permissions should not be granted unless strictly required. 
Each of these permissions allow a particular subject to escalate their privileges beyond those explicitly granted by cluster administrators", + "remediation": "Where possible, remove the impersonate, bind and escalate rights from subjects.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "CVE-2022-47633-kyverno-signature-bypass", + "attributes": { + "controlTypeTags": [ + "security" + ] + }, + "controlID": "C-0091", + "creationTime": "", + "description": "CVE-2022-47633 is a high severity vulnerability in Kyverno, it enables attackers to bypass the image signature validation of policies using a malicious image repository or MITM proxy", + "remediation": "Update your Grafana to 9.2.4 or above", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "Administrative Roles", + "attributes": { + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "rbacQuery": "Show cluster_admin", + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0035", + "creationTime": "", + "description": "Attackers who have cluster admin permissions (can perform any action on any resource), can take advantage of their privileges for malicious activities. This control determines which subjects have cluster admin permissions.", + "remediation": "You should apply least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use subjects with such high permissions for daily operations.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "Ensure that the kubelet service file permissions are set to 600 or more restrictive", + "controlID": "C-0162", + "creationTime": "", + "description": "Ensure that the `kubelet` service file has permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchmod 600 /etc/systemd/system/kubelet.service.d/kubeadm.conf\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Ensure that the cluster has at least one active policy control mechanism in place", + "controlID": "C-0192", + "creationTime": "", + "description": "Every Kubernetes cluster should have at least one policy control mechanism in place to enforce the other requirements in this section. 
This could be the in-built Pod Security Admission controller, or a third party policy control system.", + "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Network", + "id": "Cat-4" + } + }, + { + "guid": "", + "name": "Roles with delete capabilities", + "attributes": { + "rbacQuery": "Data destruction", + "controlTypeTags": [ + "compliance" + ], + "microsoftMitreColumns": [ + "Impact" + ] + }, + "controlID": "C-0007", + "creationTime": "", + "description": "Attackers may attempt to destroy data and resources in the cluster. This includes deleting deployments, configurations, storage, and compute resources. This control identifies all subjects that can delete resources.", + "remediation": "You should follow the least privilege principle and minimize the number of subjects that can delete resources.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "Ensure that the API Server --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate", + "controlID": "C-0116", + "creationTime": "", + "description": "Enable certificate based kubelet authentication.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and kubelets. Then, edit API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the kubelet client certificate and key parameters as below.\n\n \n```\n--kubelet-client-certificate=\n--kubelet-client-key=\n\n```", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CVE-2022-0185-linux-kernel-container-escape", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0079", + "creationTime": "", + "description": "CVE-2022-0185 is a kernel vulnerability enabling privilege escalation and it can lead attackers to escape containers and take control over nodes. 
This control alerts on vulnerable kernel versions of Kubernetes nodes", + "remediation": "Patch Linux kernel version to 5.16.2 or above", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "Writable hostPath mount", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "devops", + "security-impact", + "smartRemediation" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" + ] + } + ], + "microsoftMitreColumns": [ + "Persistence", + "Lateral Movement" + ] + }, + "controlID": "C-0045", + "creationTime": "", + "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host and gain persistence.", + "remediation": "Refrain from using the hostPath mount or use the exception mechanism to remove unnecessary notifications.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Storage", + "id": "Cat-8" + } + } + }, + { + "guid": "", + "name": "Minimize the admission of containers wishing to share the host IPC namespace", + "controlID": "C-0276", + "creationTime": "", + "description": "Do not generally permit containers to be run with the hostIPC flag set to true.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostIPC` containers.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "Client certificate authentication should not be used for users", + "attributes": { + "actionRequired": "manual review" + }, + "id": "CIS-3.1.1", + "controlID": "C-0286", + "creationTime": "", + "description": "Kubernetes provides the option to use client certificates for user authentication. However as there is no way to revoke these certificates when a user leaves an organization or loses their credential, they are not suitable for this purpose.\n\n It is not possible to fully disable client certificate use within a cluster as it is used for component to component authentication.", + "remediation": "Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented in place of client certificates.", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + } + }, + { + "guid": "", + "name": "Ensure that the controller manager pod specification file ownership is set to root:root", + "controlID": "C-0095", + "creationTime": "", + "description": "Ensure that the controller manager pod specification file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Label usage for resources", + "attributes": { + "actionRequired": "configuration", + "controlTypeTags": [ + "devops" + ] + }, + "controlID": "C-0076", + "creationTime": "", + "description": "It is recommended to set labels that identify semantic attributes of your application or deployment. For example, { app: myapp, tier: frontend, phase: test, deployment: v3 }. These labels can used to assign policies to logical groups of the deployments as well as for presentation and tracking purposes. This control helps you find deployments without any of the expected labels.", + "remediation": "Define labels that are most suitable to your needs of use the exceptions to prevent further notifications.", + "rules": [], + "baseScore": 2, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "RBAC enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0088", + "creationTime": "", + "description": "RBAC is the most advanced and well accepted mode of authorizing users of the Kubernetes API", + "remediation": "Enable RBAC either in the API server configuration or with the Kubernetes provider API", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "The default namespace should not be used", + "controlID": "C-0212", + "creationTime": "", + "description": "Kubernetes provides a default namespace, where objects are placed if no namespace is specified for them. Placing objects in this namespace makes application of RBAC and other controls more difficult.", + "remediation": "Ensure that namespaces are created to allow for appropriate segregation of Kubernetes resources and that all new resources are created in a specific namespace.", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "Configured liveness probe", + "attributes": { + "controlTypeTags": [ + "devops" + ] + }, + "controlID": "C-0056", + "creationTime": "", + "description": "Liveness probe is intended to ensure that workload remains healthy during its entire execution lifecycle, or otherwise restrat the container. It is highly recommended to define liveness probe for every worker container. 
This control finds all the pods where the Liveness probe is not configured.", + "remediation": "Ensure Liveness probes are configured wherever possible.", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled", + "controlID": "C-0228", + "creationTime": "", + "description": "Disable access to the Kubernetes API from outside the node network if it is not required.", + "remediation": "By enabling private endpoint access to the Kubernetes API server, all communication between your nodes and the API server stays within your VPC.\n\n With this in mind, you can update your cluster accordingly using the AWS CLI to ensure that Private Endpoint Access is enabled.\n\n For example, the following command would enable private access to the Kubernetes API and ensure that no public access is permitted:\n\n `aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true, endpointPublicAccess=false`\n\n Note: For more detailed information, see the EKS Cluster Endpoint documentation link in the references section.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "EKS" + ] + } + }, + { + "guid": "", + "name": "Ensure clusters are created with Private Nodes", + "controlID": "C-0248", + "creationTime": "", + "description": "Disable public IP addresses for cluster nodes, so that they only have private IP addresses. Private Nodes are nodes with no public IP addresses.", + "remediation": "\n```\naz aks create \\\n--resource-group \\\n--name \\\n--load-balancer-sku standard \\\n--enable-private-cluster \\\n--network-plugin azure \\\n--vnet-subnet-id \\\n--docker-bridge-address \\\n--dns-service-ip \\\n--service-cidr \n\n```\n Where `--enable-private-cluster` is a mandatory flag for a private cluster.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "AKS" + ] + } + }, + { + "guid": "", + "name": "Configured readiness probe", + "attributes": { + "controlTypeTags": [ + "devops" + ] + }, + "controlID": "C-0018", + "creationTime": "", + "description": "Readiness probe is intended to ensure that workload is ready to process network traffic. It is highly recommended to define readiness probe for every worker container. This control finds all the pods where the readiness probe is not configured.", + "remediation": "Ensure Readiness probes are configured wherever possible.", + "rules": [], + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "Ensure that the API Server --encryption-provider-config argument is set as appropriate", + "controlID": "C-0141", + "creationTime": "", + "description": "Encrypt etcd key-value store.", + "remediation": "Follow the Kubernetes documentation and configure a `EncryptionConfig` file. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the `--encryption-provider-config` parameter to the path of that file:\n\n \n```\n--encryption-provider-config=\n\n```", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Verify image signature", + "attributes": { + "actionRequired": "configuration" + }, + "controlID": "C-0236", + "creationTime": "", + "description": "Verifies the signature of each image with given public keys", + "remediation": "Replace the image with an image that is signed correctly", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Supply chain", + "id": "Cat-6" + } + } + }, + { + "guid": "", + "name": "Minimize the admission of containers wishing to share the host process ID namespace", + "controlID": "C-0214", + "creationTime": "", + "description": "Do not generally permit containers to be run with the `hostPID` flag set to true.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostPID` field is omitted or set to false.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } + }, + { + "guid": "", + "name": "Network mapping", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ], + "microsoftMitreColumns": [ + "Discovery" + ] + }, + "controlID": "C-0049", + "creationTime": "", + "description": "If no network policy is defined, attackers who gain access to a single container may use it to probe the network. This control lists all namespaces in which no network policies are defined.", + "remediation": "Define network policies or use similar network protection mechanisms.", + "rules": [], + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Network", + "id": "Cat-4" + } + }, + { + "guid": "", + "name": "Ensure that the API Server --authorization-mode argument includes Node", + "controlID": "C-0119", + "creationTime": "", + "description": "Restrict kubelet nodes to reading only objects associated with them.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to a value that includes `Node`.\n\n \n```\n--authorization-mode=Node,RBAC\n\n```", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Consider external secret storage", + "controlID": "C-0234", + "creationTime": "", + "description": "Consider the use of an external secrets storage and management system, instead of using Kubernetes Secrets directly, if you have more complex secret management needs. Ensure the solution requires authentication to access secrets, has auditing of access to and use of secrets, and encrypts secrets. 
Some solutions also make it easier to rotate secrets.",
+        "remediation": "Refer to the secrets management options offered by your cloud provider or a third-party secrets management solution.",
+        "rules": [],
+        "baseScore": 6,
+        "scanningScope": {
+          "matches": [
+            "cluster"
+          ]
+        }
+      },
+      {
+        "guid": "",
+        "name": "Ensure that the controller-manager.conf file ownership is set to root:root",
+        "controlID": "C-0109",
+        "creationTime": "",
+        "description": "Ensure that the `controller-manager.conf` file ownership is set to `root:root`.",
+        "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/controller-manager.conf\n\n```",
+        "rules": [],
+        "baseScore": 6,
+        "scanningScope": {
+          "matches": [
+            "cluster"
+          ]
+        },
+        "category": {
+          "name": "Control plane",
+          "id": "Cat-1"
+        }
+      },
+      {
+        "guid": "",
+        "name": "Ensure Network Policy is Enabled and set as appropriate",
+        "controlID": "C-0230",
+        "creationTime": "",
+        "description": "Amazon EKS provides two ways to implement network policy. You choose a network policy option when you create an EKS cluster. The policy option can't be changed after the cluster is created:\nCalico Network Policies, an open-source network and network security solution founded by Tigera.\nBoth implementations use Linux IPTables to enforce the specified policies. Policies are translated into sets of allowed and disallowed IP pairs. These pairs are then programmed as IPTable filter rules.",
+        "remediation": "",
+        "rules": [],
+        "baseScore": 6,
+        "scanningScope": {
+          "matches": [
+            "EKS"
+          ]
+        }
+      },
+      {
+        "guid": "",
+        "name": "Image pull policy on latest tag",
+        "attributes": {
+          "controlTypeTags": [
+            "devops"
+          ]
+        },
+        "controlID": "C-0075",
+        "creationTime": "",
+        "description": "While usage of the latest tag is not generally recommended, in some cases this is necessary. If it is, the ImagePullPolicy must be set to Always, otherwise Kubernetes may run an older image with the same name that happens to be present in the node cache. Note that using Always will not cause additional image downloads because Kubernetes will check the image hash of the local image against the registry and only pull the image if this hash has changed, which is exactly what users want when using the latest tag. This control will identify all pods with the latest tag that have ImagePullPolicy not set to Always.",
+        "remediation": "Set ImagePullPolicy to Always in all pods found by this control.",
+        "rules": [],
+        "baseScore": 2,
+        "scanningScope": {
+          "matches": [
+            "cluster",
+            "file"
+          ]
+        },
+        "category": {
+          "name": "Workload",
+          "id": "Cat-5"
+        }
+      },
+      {
+        "guid": "",
+        "name": "Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture",
+        "controlID": "C-0180",
+        "creationTime": "",
+        "description": "Security relevant information should be captured. The `--event-qps` flag on the Kubelet can be used to limit the rate at which events are gathered. 
Setting this too low could result in relevant events not being logged, however the unlimited setting of `0` could result in a denial of service on the kubelet.", + "remediation": "If using a Kubelet config file, edit the file to set `eventRecordQPS:` to an appropriate level.\n\n If using command line arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "rules": [], + "baseScore": 2, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Ensure that the admission control plugin NamespaceLifecycle is set", + "controlID": "C-0126", + "creationTime": "", + "description": "Reject creating objects in a namespace that is undergoing termination.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--disable-admission-plugins` parameter to ensure it does not include `NamespaceLifecycle`.", + "rules": [], + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Ensure that the --client-ca-file argument is set as appropriate", + "controlID": "C-0174", + "creationTime": "", + "description": "Enable Kubelet authentication using certificates.", + "remediation": "If using a Kubelet config file, edit the file to set `authentication: x509: clientCAFile` to the location of the client CA file.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_AUTHZ_ARGS` variable.\n\n \n```\n--client-ca-file=\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Check if signature exists", + "controlID": "C-0237", + "creationTime": "", + "description": "Ensures that all images contain some signature", + "remediation": "Replace the image with a signed image", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Supply chain", + "id": "Cat-6" + } + } + }, + { + "guid": "", + "name": "Ensure that the API Server --kubelet-certificate-authority argument is set as appropriate", + "controlID": "C-0117", + "creationTime": "", + "description": "Verify kubelet's certificate before establishing connection.", + "remediation": "Follow the Kubernetes documentation and setup the TLS connection between the apiserver and kubelets. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--kubelet-certificate-authority` parameter to the path to the cert file for the certificate authority.\n\n \n```\n--kubelet-certificate-authority=\n\n```",
+        "rules": [],
+        "baseScore": 8,
+        "scanningScope": {
+          "matches": [
+            "cluster"
+          ]
+        },
+        "category": {
+          "name": "Control plane",
+          "id": "Cat-1"
+        }
+      },
+      {
+        "guid": "",
+        "name": "Minimize access to the approval sub-resource of certificatesigningrequests objects",
+        "controlID": "C-0280",
+        "creationTime": "",
+        "description": "Users with access to update the approval sub-resource of certificatesigningrequests objects can approve new client certificates for the Kubernetes API, effectively allowing them to create new high-privileged user accounts.",
+        "remediation": "Where possible, remove access to the approval sub-resource of certificatesigningrequests objects.",
+        "rules": [],
+        "baseScore": 5,
+        "scanningScope": {
+          "matches": [
+            "cluster",
+            "file"
+          ]
+        },
+        "category": {
+          "name": "Access control",
+          "id": "Cat-2"
+        }
+      },
+      {
+        "guid": "",
+        "name": "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive",
+        "controlID": "C-0164",
+        "creationTime": "",
+        "description": "If `kube-proxy` is running, and if it is using a file-based kubeconfig file, ensure that the proxy kubeconfig file has permissions of `600` or more restrictive.",
+        "remediation": "Run the below command (based on the file location on your system) on each worker node. For example,\n\n \n```\nchmod 600 \n\n```",
+        "rules": [],
+        "baseScore": 6,
+        "scanningScope": {
+          "matches": [
+            "cluster"
+          ]
+        },
+        "category": {
+          "name": "Control plane",
+          "id": "Cat-1"
+        }
+      },
+      {
+        "guid": "",
+        "name": "Exposure to internet via Gateway API or Istio Ingress",
+        "attributes": {
+          "controlTypeTags": [
+            "security"
+          ],
+          "attackTracks": [
+            {
+              "attackTrack": "workload-external-track",
+              "categories": [
+                "Initial Access"
+              ]
+            },
+            {
+              "attackTrack": "service-destruction",
+              "categories": [
+                "Initial Access"
+              ]
+            },
+            {
+              "attackTrack": "external-workload-with-cluster-takeover-roles",
+              "categories": [
+                "Initial Access"
+              ]
+            },
+            {
+              "attackTrack": "workload-unauthenticated-service",
+              "categories": [
+                "Initial Access"
+              ]
+            }
+          ]
+        },
+        "controlID": "C-0266",
+        "creationTime": "",
+        "description": "This control detects workloads that are exposed to the Internet through a Gateway API (HTTPRoute, TCPRoute, UDPRoute) or Istio Gateway. 
It fails in case it find workloads connected with these resources.", + "remediation": "The user can evaluate its exposed resources and apply relevant changes wherever needed.", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + } + }, + { + "guid": "", + "name": "Ensure that the Scheduler --bind-address argument is set to 127.0.0.1", + "controlID": "C-0152", + "creationTime": "", + "description": "Do not bind the scheduler service to non-loopback insecure addresses.", + "remediation": "Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` on the Control Plane node and ensure the correct value for the `--bind-address` parameter", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Ensure that the API Server --audit-log-maxage argument is set to 30 or as appropriate", + "controlID": "C-0131", + "creationTime": "", + "description": "Retain the logs for at least 30 days or as appropriate.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxage` parameter to 30 or as an appropriate number of days:\n\n \n```\n--audit-log-maxage=30\n\n```", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Ensure that the admission control plugin EventRateLimit is set", + "controlID": "C-0121", + "creationTime": "", + "description": "Limit the rate at which the API server accepts requests.", + "remediation": "Follow the Kubernetes documentation and set the desired limits in a configuration file.\n\n Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` and set the below parameters.\n\n \n```\n--enable-admission-plugins=...,EventRateLimit,...\n--admission-control-config-file=\n\n```", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Ensure that the API Server --secure-port argument is not set to 0", + "controlID": "C-0128", + "creationTime": "", + "description": "Do not disable the secure port.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and either remove the `--secure-port` parameter or set it to a different (non-zero) desired port.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Cluster Access Manager API to streamline and enhance the management of access controls within EKS clusters", "attributes": { "armoBuiltin": true }, - "controlID": "C-0190", + "id": "CIS-4.1.7", + "controlID": "C-0285", "creationTime": "", - "description": "Service accounts tokens should not be mounted in pods except where the workload running in the pod explicitly needs to communicate with the API server", - "remediation": "Modify the definition of pods and service accounts which do not need to mount service account tokens to disable it.", + "description": "Amazon EKS has introduced the Cluster Access Manager API to streamline and enhance the management of access controls within EKS 
clusters. This new approach is now the recommended method over the traditional `aws-auth` ConfigMap for managing Role-Based Access Control (RBAC) and Service Accounts.\n\n Key Advantages of Using the Cluster Access Manager API:\n\n 1. **Simplified Access Management:** The Cluster Access Manager API allows administrators to manage access directly through the Amazon EKS API, eliminating the need to modify the aws-auth ConfigMap manually. This reduces operational overhead and minimizes the risk of misconfigurations.\n2. **Enhanced Security Controls:** With this API, administrators can assign predefined AWS-managed Kubernetes permissions, known as \"access policies,\" to IAM principals. This provides a more secure and auditable way to manage permissions compared to manual ConfigMap edits.\n3. **Improved Visibility and Auditing:** The API offers better visibility into cluster access configurations, facilitating easier auditing and compliance checks. Administrators can list and describe access entries and policies directly through the EKS API.", + "remediation": "Log in to the AWS Management Console.\n\n Navigate to Amazon EKS and select your EKS cluster.\n\n Go to the Access tab and click on \"Manage Access\" in the \"Access Configuration section\".\n\n Under Cluster Authentication Mode for Cluster Access settings.\n\n * Click `EKS API` to change `cluster will source authenticated IAM principals only from EKS access entry APIs`.\n* Click `ConfigMap` to change `cluster will source authenticated IAM principals only from the aws-auth ConfigMap`.\n* Note: `EKS API and ConfigMap` must be selected during Cluster creation and cannot be changed once the Cluster is provisioned.", "rules": [], - "baseScore": 5 + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Outdated Kubernetes version", + "controlID": "C-0273", + "creationTime": "", + "description": "Identifies Kubernetes clusters running on outdated versions. Using old versions can expose clusters to known vulnerabilities, compatibility issues, and miss out on improved features and security patches. Keeping Kubernetes up-to-date is crucial for maintaining security and operational efficiency.", + "remediation": "Regularly update Kubernetes clusters to the latest stable version to mitigate known vulnerabilities and enhance functionality. Plan and execute upgrades considering workload compatibility, testing in a staging environment before applying changes to production. 
Follow Kubernetes' best practices for version management and upgrades to ensure a smooth transition and minimal downtime.", + "rules": [], + "baseScore": 2, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Minimize the admission of root containers", + "controlID": "C-0198", + "creationTime": "", + "description": "Do not generally permit containers to be run as the root user.", + "remediation": "Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0, is set.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "Encrypt traffic to HTTPS load balancers with TLS certificates", + "controlID": "C-0245", + "creationTime": "", + "description": "Encrypt traffic to HTTPS load balancers using TLS certificates.", + "remediation": "", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "AKS" + ] + } + }, + { + "guid": "", + "name": "Resource limits", + "attributes": { + "controlTypeTags": [ + "security" + ] + }, + "controlID": "C-0009", + "creationTime": "", + "description": "CPU and memory resources should have a limit set for every container or a namespace to prevent resource exhaustion. This control identifies all the pods without resource limit definitions by checking their yaml definition file as well as their namespace LimitRange objects. It is also recommended to use ResourceQuota object to restrict overall namespace resources, but this is not verified by this control.", + "remediation": "Define LimitRange and Resource Limits in the namespace or in the deployment/pod manifests.", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate", + "controlID": "C-0181", + "creationTime": "", + "description": "Setup TLS connection on the Kubelets.", + "remediation": "If using a Kubelet config file, edit the file to set tlsCertFile to the location of the certificate file to use to identify this Kubelet, and tlsPrivateKeyFile to the location of the corresponding private key file.\n\n If using command line arguments, edit the kubelet service file /etc/kubernetes/kubelet.conf on each worker node and set the below parameters in KUBELET\\_CERTIFICATE\\_ARGS variable.\n\n --tls-cert-file= --tls-private-key-file=\nBased on your system, restart the kubelet service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Ensure that the CNI in use supports Network Policies", + "controlID": "C-0205", + "creationTime": "", + "description": "There are a variety of CNI plugins available for Kubernetes. 
If the CNI in use does not support Network Policies it may not be possible to effectively restrict traffic in the cluster.", + "remediation": "If the CNI plugin in use does not support network policies, consideration should be given to making use of a different plugin, or finding an alternate mechanism for restricting traffic in the Kubernetes cluster.", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Network", + "id": "Cat-4" + } + }, + { + "guid": "", + "name": "Ensure that the API Server only makes use of Strong Cryptographic Ciphers", + "controlID": "C-0143", + "creationTime": "", + "description": "Ensure that the API server is configured to only use strong cryptographic ciphers.", + "remediation": "Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml on the Control Plane node and set the below parameter.\n\n \n```\n--tls-cipher-suites=TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_GCM_SHA384.\n\n```", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Forbidden Container Registries", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ], + "actionRequired": "configuration", + "microsoftMitreColumns": [ + "Initial Access" + ] + }, + "controlID": "C-0001", + "creationTime": "", + "description": "In cases where the Kubernetes cluster is provided by a CSP (e.g., AKS in Azure, GKE in GCP, or EKS in AWS), compromised cloud credential can lead to the cluster takeover. 
Attackers may abuse cloud account credentials or the IAM mechanism to gain access to the cluster’s management layer.",
+        "remediation": "Limit the registries from which you pull container images",
+        "rules": [],
+        "baseScore": 7,
+        "scanningScope": {
+          "matches": [
+            "cluster",
+            "file"
+          ]
+        },
+        "category": {
+          "name": "Workload",
+          "id": "Cat-5"
+        }
+      },
+      {
+        "guid": "",
+        "name": "Ensure that the --client-cert-auth argument is set to true",
+        "controlID": "C-0154",
+        "creationTime": "",
+        "description": "Enable client authentication on etcd service.",
+        "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--client-cert-auth=\"true\"\n\n```",
+        "rules": [],
+        "baseScore": 8,
+        "scanningScope": {
+          "matches": [
+            "cluster"
+          ]
+        },
+        "category": {
+          "name": "Control plane",
+          "id": "Cat-1"
+        }
+      },
+      {
+        "guid": "",
+        "name": "Ensure that the API Server --etcd-certfile and --etcd-keyfile arguments are set as appropriate",
+        "controlID": "C-0137",
+        "creationTime": "",
+        "description": "etcd should be configured to make use of TLS encryption for client connections.",
+        "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the etcd certificate and key file parameters.\n\n \n```\n--etcd-certfile= \n--etcd-keyfile=\n\n```",
+        "rules": [],
+        "baseScore": 8,
+        "scanningScope": {
+          "matches": [
+            "cluster"
+          ]
+        },
+        "category": {
+          "name": "Control plane",
+          "id": "Cat-1"
+        }
+      },
+      {
+        "guid": "",
+        "name": "Host PID/IPC privileges",
+        "attributes": {
+          "controlTypeTags": [
+            "security",
+            "compliance"
+          ]
+        },
+        "controlID": "C-0038",
+        "creationTime": "",
+        "description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all pods using hostPID or hostIPC privileges.",
+        "remediation": "Remove the hostPID and hostIPC privileges from the yaml file(s) unless they are absolutely necessary.",
+        "rules": [],
+        "baseScore": 7,
+        "scanningScope": {
+          "matches": [
+            "cluster",
+            "file"
+          ]
+        },
+        "category": {
+          "name": "Workload",
+          "id": "Cat-5",
+          "subCategory": {
+            "name": "Node escape",
+            "id": "Cat-9"
+          }
+        }
+      },
+      {
+        "guid": "",
+        "name": "Ensure that all Namespaces have Network Policies defined",
+        "controlID": "C-0206",
+        "creationTime": "",
+        "description": "Use network policies to isolate traffic in your cluster network.",
+        "remediation": "Follow the documentation and create `NetworkPolicy` objects as you need them.",
+        "rules": [],
+        "baseScore": 4,
+        "scanningScope": {
+          "matches": [
+            "cluster",
+            "file"
+          ]
+        },
+        "category": {
+          "name": "Network",
+          "id": "Cat-4"
+        }
+      },
+      {
+        "guid": "",
+        "name": "Ensure that the API server pod specification file ownership is set to root:root",
+        "controlID": "C-0093",
+        "creationTime": "",
+        "description": "Ensure that the API server pod specification file ownership is set to `root:root`.",
+        "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Hostile multi-tenant workloads", + "controlID": "C-0242", + "creationTime": "", + "description": "Currently, Kubernetes environments aren't safe for hostile multi-tenant usage. Extra security features, like Pod Security Policies or Kubernetes RBAC for nodes, efficiently block exploits. For true security when running hostile multi-tenant workloads, only trust a hypervisor. The security domain for Kubernetes becomes the entire cluster, not an individual node.\n\n For these types of hostile multi-tenant workloads, you should use physically isolated clusters. For more information on ways to isolate workloads, see Best practices for cluster isolation in AKS.", + "remediation": "", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "AKS" + ] + } + }, + { + "guid": "", + "name": "Ensure that the --streaming-connection-idle-timeout argument is not set to 0", + "controlID": "C-0176", + "creationTime": "", + "description": "Do not disable timeouts on streaming connections.", + "remediation": "If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a value other than 0.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--streaming-connection-idle-timeout=5m\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "rules": [], + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Ensure that the admin.conf file ownership is set to root:root", + "controlID": "C-0105", + "creationTime": "", + "description": "Ensure that the `admin.conf` file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchown root:root /etc/kubernetes/admin.conf\n\n```", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Minimize cluster access to read-only for Amazon ECR", + "controlID": "C-0223", + "creationTime": "", + "description": "Configure the Cluster Service Account with Storage Object Viewer Role to only allow read-only access to Amazon ECR.", + "remediation": "You can use your Amazon ECR images with Amazon EKS, but you need to satisfy the following prerequisites.\n\n The Amazon EKS worker node IAM role (NodeInstanceRole) that you use with your worker nodes must possess the following IAM policy permissions for Amazon ECR.\n\n \n```\n{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"ecr:BatchCheckLayerAvailability\",\n \"ecr:BatchGetImage\",\n \"ecr:GetDownloadUrlForLayer\",\n \"ecr:GetAuthorizationToken\"\n ],\n \"Resource\": \"*\"\n }\n ]\n}\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cloud" + ] + } + }, + { + "guid": "", + "name": "Ensure that the Kubelet is configured to limit pod PIDS", + "controlID": "C-0284", + "creationTime": "", + "description": "Ensure that the Kubelet sets limits on the number of PIDs that can be created by pods running on the node.", + "remediation": "Decide on an appropriate level for this parameter and set it, either via the `--pod-max-pids` command line parameter or the `PodPidsLimit` configuration file setting.", + "rules": [], + "baseScore": 2, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Cluster internal networking", + "attributes": { + "microsoftMitreColumns": [ + "Lateral movement" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0054", + "creationTime": "", + "description": "If no network policy is defined, attackers who gain access to a container may use it to move laterally in the cluster. This control lists namespaces in which no network policy is defined.", + "remediation": "Define Kubernetes network policies or use alternative products to protect cluster network.", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Network", + "id": "Cat-4" + } + }, + { + "guid": "", + "name": "Deprecated Kubernetes image registry", + "controlID": "C-0253", + "creationTime": "", + "description": "Kubernetes team has deprecated GCR (k8s.gcr.io) registry and recommends pulling Kubernetes components from the new registry (registry.k8s.io). 
This is mandatory from 1.27", + "remediation": "Change the images to be pulled from the new registry (registry.k8s.io).", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } + }, + { + "guid": "", + "name": "If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root", + "controlID": "C-0171", + "creationTime": "", + "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file is owned by root:root.", + "remediation": "Run the following command (using the config file location identied in the Audit step)\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Ensure that the Kubernetes PKI key file permissions are set to 600", + "controlID": "C-0112", + "creationTime": "", + "description": "Ensure that Kubernetes PKI key files have permissions of `600`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod -R 600 /etc/kubernetes/pki/*.key\n\n```", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Minimize user access to Amazon ECR", + "controlID": "C-0222", + "creationTime": "", + "description": "Restrict user access to Amazon ECR, limiting interaction with build images to only authorized personnel and service accounts.", + "remediation": "Before you use IAM to manage access to Amazon ECR, you should understand what IAM features are available to use with Amazon ECR. To get a high-level view of how Amazon ECR and other AWS services work with IAM, see AWS Services That Work with IAM in the IAM User Guide.\n\n **Topics**\n\n * Amazon ECR Identity-Based Policies\n* Amazon ECR Resource-Based Policies\n* Authorization Based on Amazon ECR Tags\n* Amazon ECR IAM Roles\n\n **Amazon ECR Identity-Based Policies**\n\n With IAM identity-based policies, you can specify allowed or denied actions and resources as well as the conditions under which actions are allowed or denied. Amazon ECR supports specific actions, resources, and condition keys. To learn about all of the elements that you use in a JSON policy, see IAM JSON Policy Elements Reference in the IAM User Guide.\n\n **Actions**\nThe Action element of an IAM identity-based policy describes the specific action or actions that will be allowed or denied by the policy. Policy actions usually have the same name as the associated AWS API operation. The action is used in a policy to grant permissions to perform the associated operation.\n\n Policy actions in Amazon ECR use the following prefix before the action: ecr:. For example, to grant someone permission to create an Amazon ECR repository with the Amazon ECR CreateRepository API operation, you include the ecr:CreateRepository action in their policy. Policy statements must include either an Action or NotAction element. Amazon ECR defines its own set of actions that describe tasks that you can perform with this service.\n\n To specify multiple actions in a single statement, separate them with commas as follows:\n\n `\"Action\": [ \"ecr:action1\", \"ecr:action2\"`\n\n You can specify multiple actions using wildcards (\\*). 
For example, to specify all actions that begin with the word Describe, include the following action:\n\n `\"Action\": \"ecr:Describe*\"`\n\n To see a list of Amazon ECR actions, see Actions, Resources, and Condition Keys for Amazon Elastic Container Registry in the IAM User Guide.\n\n **Resources**\nThe Resource element specifies the object or objects to which the action applies. Statements must include either a Resource or a NotResource element. You specify a resource using an ARN or using the wildcard (\\*) to indicate that the statement applies to all resources.\n\n An Amazon ECR repository resource has the following ARN:\n\n `arn:${Partition}:ecr:${Region}:${Account}:repository/${Repository-name}`\n\n For more information about the format of ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces.\n\n For example, to specify the my-repo repository in the us-east-1 Region in your statement, use the following ARN:\n\n `\"Resource\": \"arn:aws:ecr:us-east-1:123456789012:repository/my-repo\"`\n\n To specify all repositories that belong to a specific account, use the wildcard (\\*):\n\n `\"Resource\": \"arn:aws:ecr:us-east-1:123456789012:repository/*\"`\n\n To specify multiple resources in a single statement, separate the ARNs with commas.\n\n `\"Resource\": [ \"resource1\", \"resource2\"`\n\n To see a list of Amazon ECR resource types and their ARNs, see Resources Defined by Amazon Elastic Container Registry in the IAM User Guide. To learn with which actions you can specify the ARN of each resource, see Actions Defined by Amazon Elastic Container Registry.\n\n **Condition Keys**\nThe Condition element (or Condition block) lets you specify conditions in which a statement is in effect. The Condition element is optional. You can build conditional expressions that use condition operators, such as equals or less than, to match the condition in the policy with values in the request.\n\n If you specify multiple Condition elements in a statement, or multiple keys in a single Condition element, AWS evaluates them using a logical AND operation. If you specify multiple values for a single condition key, AWS evaluates the condition using a logical OR operation. All of the conditions must be met before the statement's permissions are granted.\n\n You can also use placeholder variables when you specify conditions. For example, you can grant an IAM user permission to access a resource only if it is tagged with their IAM user name. For more information, see IAM Policy Elements: Variables and Tags in the IAM User Guide.\n\n Amazon ECR defines its own set of condition keys and also supports using some global condition keys. To see all AWS global condition keys, see AWS Global Condition Context Keys in the IAM User Guide.\n\n Most Amazon ECR actions support the aws:ResourceTag and ecr:ResourceTag condition keys. For more information, see Using Tag-Based Access Control.\n\n To see a list of Amazon ECR condition keys, see Condition Keys Defined by Amazon Elastic Container Registry in the IAM User Guide. 
To learn with which actions and resources you can use a condition key, see Actions Defined by Amazon Elastic Container Registry.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "EKS" + ] + } + }, + { + "guid": "", + "name": "Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive", + "controlID": "C-0094", + "creationTime": "", + "description": "Ensure that the controller manager pod specification file has permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Ensure that the API Server --profiling argument is set to false", + "controlID": "C-0129", + "creationTime": "", + "description": "Disable profiling, if not needed.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```", + "rules": [], + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Ensure that the API Server --tls-cert-file and --tls-private-key-file arguments are set as appropriate", + "controlID": "C-0138", + "creationTime": "", + "description": "Setup TLS connection on the API server.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection on the apiserver. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the TLS certificate and private key file parameters.\n\n \n```\n--tls-cert-file= \n--tls-private-key-file=\n\n```", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Ensure CPU requests are set", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops" + ] + }, + "controlID": "C-0268", + "creationTime": "", + "description": "This control identifies all Pods for which the CPU requests are not set.", + "remediation": "Set the CPU requests or use exception mechanism to avoid unnecessary notifications.", + "rules": [], + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Resource management", + "id": "Cat-7" + } + } + }, + { + "guid": "", + "name": "Verify that the --read-only-port argument is set to 0", + "controlID": "C-0175", + "creationTime": "", + "description": "Disable the read-only port.", + "remediation": "If using a Kubelet config file, edit the file to set `readOnlyPort` to `0`.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--read-only-port=0\n\n```\n Based on your system, restart the `kubelet` service. 
For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Ingress uses TLS", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0263", + "creationTime": "", + "description": "This control detect Ingress resources that do not use TLS", + "remediation": "The user needs to implement TLS for the Ingress resource in order to encrypt the incoming traffic", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } + }, + { + "guid": "", + "name": "Ensure Image Vulnerability Scanning using Azure Defender image scanning or a third party provider", + "controlID": "C-0243", + "creationTime": "", + "description": "Scan images being deployed to Azure (AKS) for vulnerabilities.\n\n Vulnerability scanning for images stored in Azure Container Registry is generally available in Azure Security Center. This capability is powered by Qualys, a leading provider of information security.\n\n When you push an image to Container Registry, Security Center automatically scans it, then checks for known vulnerabilities in packages or dependencies defined in the file.\n\n When the scan completes (after about 10 minutes), Security Center provides details and a security classification for each vulnerability detected, along with guidance on how to remediate issues and protect vulnerable attack surfaces.", + "remediation": "", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "AKS" + ] + } + }, + { + "guid": "", + "name": "Ensure that the API Server --service-account-key-file argument is set as appropriate", + "controlID": "C-0136", + "creationTime": "", + "description": "Explicitly set a service account public key file for service accounts on the apiserver.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--service-account-key-file` parameter to the public key file for service accounts:\n\n \n```\n--service-account-key-file=\n\n```", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Ensure that the API Server --token-auth-file parameter is not set", + "controlID": "C-0114", + "creationTime": "", + "description": "Do not use token based authentication.", + "remediation": "Follow the documentation and configure alternate mechanisms for authentication. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and remove the `--token-auth-file=` parameter.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Minimize the admission of containers wishing to share the host IPC namespace", + "controlID": "C-0215", + "creationTime": "", + "description": "Do not generally permit containers to be run with the `hostIPC` flag set to true.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.hostIPC` field is omitted or set to false.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } + }, + { + "guid": "", + "name": "Ensure that the Container Network Interface file ownership is set to root:root", + "controlID": "C-0101", + "creationTime": "", + "description": "Ensure that the Container Network Interface files have ownership set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root \n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Network", + "id": "Cat-4" + } + }, + { + "guid": "", + "name": "Sudo in container entrypoint", + "attributes": { + "controlTypeTags": [ + "security" + ] + }, + "controlID": "C-0062", + "creationTime": "", + "description": "Adding sudo to a container entry point command may escalate process privileges and allow access to forbidden resources. This control checks all the entry point commands in all containers in the pod to find those that have sudo command.", + "remediation": "Remove sudo from the command line and use Kubernetes native root and capabilities controls to provide necessary privileges where they are required.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "Validate admission controller (mutating)", + "attributes": { + "microsoftMitreColumns": [ + "Persistence" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0039", + "creationTime": "", + "description": "Attackers may use mutating webhooks to intercept and modify all the resources in the cluster. This control lists all mutating webhook configurations that must be verified.", + "remediation": "Ensure all the webhooks are necessary. 
Use exception mechanism to prevent repititive notifications.", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "Minimize access to the proxy sub-resource of nodes", + "controlID": "C-0279", + "creationTime": "", + "description": "Users with access to the Proxy sub-resource of Node objects automatically have permissions to use the Kubelet API, which may allow for privilege escalation or bypass cluster security controls such as audit logs.", + "remediation": "Where possible, remove access to the proxy sub-resource of node objects.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "Ensure that the scheduler.conf file permissions are set to 600 or more restrictive", + "controlID": "C-0106", + "creationTime": "", + "description": "Ensure that the `scheduler.conf` file has permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/scheduler.conf\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Minimize the admission of containers with capabilities assigned", + "controlID": "C-0220", + "creationTime": "", + "description": "Do not generally permit containers with capabilities", + "remediation": "Review the use of capabilities in applications running on your cluster. Where a namespace contains applications which do not require any Linux capabilities to operate consider adding a PSP which forbids the admission of containers which do not drop all capabilities.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } + }, + { + "guid": "", + "name": "Ensure that a unique Certificate Authority is used for etcd", + "controlID": "C-0159", + "creationTime": "", + "description": "Use a different certificate authority for etcd from the one used for Kubernetes.", + "remediation": "Follow the etcd documentation and create a dedicated certificate authority setup for the etcd service.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--trusted-ca-file=\n\n```", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Allow privilege escalation", + "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ] + }, + "controlID": "C-0016", + "creationTime": "", + "description": "Attackers may gain access to a container and uplift its privilege to enable excessive capabilities.", + "remediation": "If your application does not need it, make sure the allowPrivilegeEscalation field of the securityContext is set to false.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + } + } + }, + { + "guid": "", + "name": "Ensure that the API Server --anonymous-auth argument is set to false", + 
"controlID": "C-0113", + "creationTime": "", + "description": "Disable anonymous requests to the API server.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--anonymous-auth=false\n\n```", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Ensure that the Scheduler --profiling argument is set to false", + "controlID": "C-0151", + "creationTime": "", + "description": "Disable profiling, if not needed.", + "remediation": "Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` file on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```", + "rules": [], + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Prevent containers from allowing command execution", + "attributes": { + "microsoftMitreColumns": [ + "Execution" + ], + "rbacQuery": "Show who can access into pods", + "controlTypeTags": [ + "compliance", + "security-impact" + ] + }, + "controlID": "C-0002", + "creationTime": "", + "description": "Attackers with relevant permissions can run malicious commands in the context of legitimate containers in the cluster using “kubectl exec” command. This control determines which subjects have permissions to use this command.", + "remediation": "It is recommended to prohibit “kubectl exec” command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "Ensure memory limits are set", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops", + "security" + ], + "attackTracks": [ + { + "attackTrack": "service-destruction", + "categories": [ + "Denial of service" + ] + } + ] + }, + "controlID": "C-0271", + "creationTime": "", + "description": "This control identifies all Pods for which the memory limits are not set.", + "remediation": "Set the memory limits or use exception mechanism to avoid unnecessary notifications.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Resource management", + "id": "Cat-7" + } + } + }, + { + "guid": "", + "name": "Ensure that the API Server --authorization-mode argument includes RBAC", + "controlID": "C-0120", + "creationTime": "", + "description": "Turn on Role Based Access Control.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to a value that includes `RBAC`, for example:\n\n \n```\n--authorization-mode=Node,RBAC\n\n```", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Ensure that the API Server --etcd-cafile argument is set as appropriate", + "controlID": "C-0140", + "creationTime": "", + "description": "etcd should be configured to make use of 
TLS encryption for client connections.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the etcd certificate authority file parameter.\n\n \n```\n--etcd-cafile=\n\n```", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Workload with administrative roles", + "controlID": "C-0272", + "creationTime": "", + "description": "This control identifies workloads where the associated service accounts have roles that grant administrative-level access across the cluster. Granting a workload such expansive permissions equates to providing it cluster admin roles. This level of access can pose a significant security risk, as it allows the workload to perform any action on any resource, potentially leading to unauthorized data access or cluster modifications.", + "remediation": "You should apply least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use service accounts with such high permissions for daily operations.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "Enforce Kubelet client TLS authentication", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0070", + "creationTime": "", + "description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", + "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", + "rules": [], + "baseScore": 9, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Ingress and Egress blocked", + "attributes": { + "controlTypeTags": [ + "compliance" + ] + }, + "controlID": "C-0030", + "creationTime": "", + "description": "Disable Ingress and Egress traffic on all pods wherever possible. 
It is recommended to define restrictive network policy on all new pods, and then enable sources/destinations that this pod must communicate with.", + "remediation": "Define a network policy that restricts ingress and egress connections.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Network", + "id": "Cat-4" + } + }, + { + "guid": "", + "name": "Ensure that the API Server --audit-log-maxbackup argument is set to 10 or as appropriate", + "controlID": "C-0132", + "creationTime": "", + "description": "Retain 10 or an appropriate number of old log files.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxbackup` parameter to 10 or to an appropriate value.\n\n \n```\n--audit-log-maxbackup=10\n\n```", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Ensure that the API Server --DenyServiceExternalIPs is not set", + "controlID": "C-0115", + "creationTime": "", + "description": "This admission controller rejects all net-new usage of the Service field externalIPs.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and remove the `--DenyServiceExternalIPs'parameter\n\n or\n\n The Kubernetes API server flag disable-admission-plugins takes a comma-delimited list of admission control plugins to be disabled, even if they are in the list of plugins enabled by default.\n\n `kube-apiserver --disable-admission-plugins=DenyServiceExternalIPs,AlwaysDeny ...`", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "CVE-2022-3172-aggregated-API-server-redirect", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [] + }, + "controlID": "C-0089", + "creationTime": "", + "description": "The API server allows an aggregated API to redirect client traffic to any URL. 
This could lead to the client performing unexpected actions as well as forwarding the client's API server credentials to third parties", + "remediation": "Upgrade the Kubernetes version to one of the following versions (or higher patchs): `v1.25.1`, `v1.24.5`, `v1.23.11`, `v1.22.14`", + "rules": [], + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "PSP enabled", + "attributes": { + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0068", + "creationTime": "", + "description": "PSP enable fine-grained authorization of pod creation and it is important to enable it", + "remediation": "Turn Pod Security Policies on in your cluster, if you use other admission controllers to control the behavior that PSP controls, exclude this control from your scans", + "rules": [], + "baseScore": 1, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Minimize the admission of containers which use HostPorts", + "controlID": "C-0204", + "creationTime": "", + "description": "Do not generally permit containers which require the use of HostPorts.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers which use `hostPort` sections.", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "Minimize the admission of Windows HostProcess Containers", + "controlID": "C-0202", + "creationTime": "", + "description": "Do not generally permit Windows containers to be run with the `hostProcess` flag set to true.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostProcess` containers.", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "Ensure that the audit policy covers key security concerns", + "controlID": "C-0161", + "creationTime": "", + "description": "Ensure that the audit policy created for the cluster covers key security concerns.", + "remediation": "Consider modification of the audit policy in use on the cluster to include these items, at a minimum.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Workloads with excessive amount of vulnerabilities", + "attributes": { + "actionRequired": "configuration", + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0085", + "creationTime": "", + "description": "Container images with multiple Critical and High sevirity vulnerabilities increase the risk of potential exploit. 
This control lists all such images according to the threashold provided by the customer.", + "remediation": "Update your workload images as soon as possible when fixes become available.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "Images from allowed registry", + "attributes": { + "actionRequired": "configuration", + "microsoftMitreColumns": [ + "Collection" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0078", + "creationTime": "", + "description": "This control is intended to ensure that all the used container images are taken from the authorized repositories. It allows user to list all the approved repositories and will fail all the images taken from any repository outside of this list.", + "remediation": "You should enable all trusted repositories in the parameters of this control.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Supply chain", + "id": "Cat-6" + } + } + }, + { + "guid": "", + "name": "Ensure that the API Server only makes use of Strong Cryptographic Ciphers", + "controlID": "C-0277", + "creationTime": "", + "description": "Ensure that the API server is configured to only use strong cryptographic ciphers.", + "remediation": "Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml on the Control Plane node and set the below parameter.\n\n \n```\n--tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_RSA_WITH_RC4_128_SHA, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA256, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_GCM_SHA384, TLS_RSA_WITH_RC4_128_SHA.\n\n```", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Consider Fargate for running untrusted workloads", + "controlID": "C-0233", + "creationTime": "", + "description": "It is Best Practice to restrict or fence untrusted workloads when running in a multi-tenant environment.", + "remediation": "**Create a Fargate profile for your cluster**\nBefore you can schedule pods running on Fargate in your cluster, you must define a Fargate profile that specifies which pods should use Fargate when they are launched. For more information, see AWS Fargate profile.\n\n **Note**\nIf you created your cluster with eksctl using the --fargate option, then a Fargate profile has already been created for your cluster with selectors for all pods in the kube-system and default namespaces. Use the following procedure to create Fargate profiles for any other namespaces you would like to use with Fargate.\n\n **via eksctl CLI**\nCreate your Fargate profile with the following eksctl command, replacing the variable text with your own values. 
You must specify a namespace, but the labels option is not required.\n\n \n```\neksctl create fargateprofile --cluster cluster_name --name fargate_profile_name --namespace kubernetes_namespace --labels key=value\n\n```\n **via AWS Management Console**\n\n To create a Fargate profile for a cluster with the AWS Management Console\n\n 1. Open the Amazon EKS console at .\n2. Choose the cluster to create a Fargate profile for.\n3. Under Fargate profiles, choose Add Fargate profile.\n4. On the Configure Fargate profile page, enter the following information and choose Next.\n\n * For Name, enter a unique name for your Fargate profile.\n* For Pod execution role, choose the pod execution role to use with your Fargate profile. Only IAM roles with the eks-fargate-pods.amazonaws.com service principal are shown. If you do not see any roles listed here, you must create one. For more information, see Pod execution role.\n* For Subnets, choose the subnets to use for your pods. By default, all subnets in your cluster's VPC are selected. Only private subnets are supported for pods running on Fargate; you must deselect any public subnets.\n* For Tags, you can optionally tag your Fargate profile. These tags do not propagate to other resources associated with the profile, such as its pods.\n\n 5. On the Configure pods selection page, enter the following information and choose Next.\n\n * list text hereFor Namespace, enter a namespace to match for pods, such as kube-system or default.\n* Add Kubernetes labels to the selector that pods in the specified namespace must have to match the selector. For example, you could add the label infrastructure: fargate to the selector so that only pods in the specified namespace that also have the infrastructure: fargate Kubernetes label match the selector.\n\n 6. On the Review and create page, review the information for your Fargate profile and choose Create.", + "rules": [], + "baseScore": 3, + "scanningScope": { + "matches": [ + "EKS" + ] + } + }, + { + "guid": "", + "name": "Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive", + "controlID": "C-0111", + "creationTime": "", + "description": "Ensure that Kubernetes PKI certificate files have permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod -R 600 /etc/kubernetes/pki/*.crt\n\n```", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Ensure that the --hostname-override argument is not set", + "controlID": "C-0179", + "creationTime": "", + "description": "Do not override node hostnames.", + "remediation": "Edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and remove the `--hostname-override` argument from the `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. 
For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "rules": [], + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Ensure that the scheduler pod specification file ownership is set to root:root", + "controlID": "C-0097", + "creationTime": "", + "description": "Ensure that the scheduler pod specification file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Ensure that encryption providers are appropriately configured", + "controlID": "C-0142", + "creationTime": "", + "description": "Where `etcd` encryption is used, appropriate providers should be configured.", + "remediation": "Follow the Kubernetes documentation and configure a `EncryptionConfig` file. In this file, choose `aescbc`, `kms` or `secretbox` as the encryption provider.", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", "name": "CVE-2021-25741 - Using symlink for arbitrary host file system access.", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security", "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Persistence", - "Impact - Data access in container" - ] - } ] }, "controlID": "C-0058", @@ -6987,27 +23890,353 @@ "description": "A user may be able to create a container with subPath or subPathExpr volume mounts to access files & directories anywhere on the host filesystem. Following Kubernetes versions are affected: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. This control checks the vulnerable versions and the actual usage of the subPath feature in all Pods in the cluster. 
If you want to learn more about the CVE, please refer to the CVE link: https://nvd.nist.gov/vuln/detail/CVE-2021-25741", "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, or remove any existing Pods using subPath or subPathExpr feature.", "rules": [], - "baseScore": 6 + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "Minimize the admission of containers with the NET_RAW capability", + "controlID": "C-0199", + "creationTime": "", + "description": "Do not generally permit containers with the potentially dangerous NET\\_RAW capability.", + "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers with the `NET_RAW` capability.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "system:authenticated user has elevated roles", + "controlID": "C-0265", + "creationTime": "", + "description": "Granting permissions to the system:authenticated group is generally not recommended and can introduce security risks. This control ensures that system:authenticated users do not have cluster risking permissions.", + "remediation": "Review and modify your cluster's RBAC configuration to ensure that system:authenticated will have minimal permissions.", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1", + "subCategory": { + "name": "Supply chain", + "id": "Cat-6" + } + } + }, + { + "guid": "", + "name": "Encrypt traffic to HTTPS load balancers with TLS certificates", + "controlID": "C-0231", + "creationTime": "", + "description": "Encrypt traffic to HTTPS load balancers using TLS certificates.", + "remediation": "", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "EKS" + ] + } + }, + { + "guid": "", + "name": "Minimize access to create pods", + "controlID": "C-0188", + "creationTime": "", + "description": "The ability to create pods in a namespace can provide a number of opportunities for privilege escalation, such as assigning privileged service accounts to these pods or mounting hostPaths with access to sensitive data (unless Pod Security Policies are implemented to restrict this access)\n\n As such, access to create new pods should be restricted to the smallest possible group of users.", + "remediation": "Where possible, remove `create` access to `pod` objects in the cluster.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "CVE-2022-39328-grafana-auth-bypass", + "attributes": { + "controlTypeTags": [ + "security" + ] + }, + "controlID": "C-0090", + "creationTime": "", + "description": "CVE-2022-39328 is a critical vulnerability in Grafana, it might enable attacker to access unauthorized endpoints under heavy load.", + "remediation": "Update your Grafana to 9.2.4 or above", + "rules": [], + "baseScore": 9, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "K8s common labels usage", + "attributes": { + 
"actionRequired": "configuration", + "controlTypeTags": [ + "devops" + ] + }, + "controlID": "C-0077", + "creationTime": "", + "description": "Kubernetes common labels help manage and monitor Kubernetes cluster using different tools such as kubectl, dashboard and others in an interoperable way. Refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ for more information. This control helps you find objects that don't have any of these labels defined.", + "remediation": "Define applicable labels or use the exception mechanism to prevent further notifications.", + "rules": [], + "baseScore": 2, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "Validate admission controller (validating)", + "attributes": { + "microsoftMitreColumns": [ + "Credential access" + ], + "controlTypeTags": [ + "security", + "compliance" + ] + }, + "controlID": "C-0036", + "creationTime": "", + "description": "Attackers can use validating webhooks to intercept and discover all the resources in the cluster. This control lists all the validating webhook configurations that must be verified.", + "remediation": "Ensure all the webhooks are necessary. Use exception mechanism to prevent repititive notifications.", + "rules": [], + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled", + "controlID": "C-0252", + "creationTime": "", + "description": "Disable access to the Kubernetes API from outside the node network if it is not required.", + "remediation": "To use a private endpoint, create a new private endpoint in your virtual network then create a link between your virtual network and a new private DNS zone", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "AKS" + ] + } + }, + { + "guid": "", + "name": "SSH server running inside container", + "attributes": { + "controlTypeTags": [ + "compliance" + ], + "microsoftMitreColumns": [ + "Execution" + ] + }, + "controlID": "C-0042", + "creationTime": "", + "description": "An SSH server that is running inside a container may be used by attackers to get remote access to the container. This control checks if pods have an open SSH port (22/2222).", + "remediation": "Remove SSH from the container image or limit the access to the SSH server using network policies.", + "rules": [], + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "Minimize access to create persistent volumes", + "controlID": "C-0278", + "creationTime": "", + "description": "The ability to create persistent volumes in a cluster can provide an opportunity for privilege escalation, via the creation of hostPath volumes. 
", + "remediation": "Where possible, remove `create` access to `persistentvolume` objects in the cluster.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Access control", + "id": "Cat-2" + } + }, + { + "guid": "", + "name": "Ensure that the etcd pod specification file permissions are set to 600 or more restrictive", + "controlID": "C-0098", + "creationTime": "", + "description": "Ensure that the `/etc/kubernetes/manifests/etcd.yaml` file has permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/etcd.yaml\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Ensure that the --protect-kernel-defaults argument is set to true", + "controlID": "C-0177", + "creationTime": "", + "description": "Protect tuned kernel parameters from overriding kubelet default kernel parameter values.", + "remediation": "If using a Kubelet config file, edit the file to set `protectKernelDefaults: true`.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--protect-kernel-defaults=true\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "rules": [], + "baseScore": 2, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Ensure Image Vulnerability Scanning using Amazon ECR image scanning or a third party provider", + "controlID": "C-0221", + "creationTime": "", + "description": "Scan images being deployed to Amazon EKS for vulnerabilities.", + "remediation": "To utilize AWS ECR for Image scanning please follow the steps below:\n\n To create a repository configured for scan on push (AWS CLI)\n\n \n```\naws ecr create-repository --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE\n\n```\n To edit the settings of an existing repository (AWS CLI)\n\n \n```\naws ecr put-image-scanning-configuration --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE\n\n```\n Use the following steps to start a manual image scan using the AWS Management Console.2. Open the Amazon ECR console at.\n3. From the navigation bar, choose the Region to create your repository in.\n4. In the navigation pane, choose Repositories.\n5. On the Repositories page, choose the repository that contains the image to scan.\n6. On the Images page, select the image to scan and then choose Scan.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } + }, + { + "guid": "", + "name": "Ensure that the Container Network Interface file permissions are set to 600 or more restrictive", + "controlID": "C-0100", + "creationTime": "", + "description": "Ensure that the Container Network Interface files have permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchmod 600 \n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Network", + "id": "Cat-4" + } + }, + { + "guid": "", + "name": "Ensure that the Controller Manager --profiling argument is set to false", + "controlID": "C-0145", + "creationTime": "", + "description": "Disable profiling, if not needed.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```", + "rules": [], + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "If proxy kubeconfig file exists ensure ownership is set to root:root", + "controlID": "C-0165", + "creationTime": "", + "description": "If `kube-proxy` is running, ensure that the file ownership of its kubeconfig file is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchown root:root \n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", "name": "Applications credentials in configuration files", "attributes": { - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Credential access" - ] - }, - { - "categories": [ - "Credential access" - ], - "attackTrack": "container" - } - ], - "armoBuiltin": true, + "actionRequired": "configuration", "microsoftMitreColumns": [ "Credential access", "Lateral Movement" @@ -7023,1230 +24252,76 @@ "description": "Attackers who have access to configuration files can steal the stored secrets and use them. This control checks if ConfigMaps or pod specifications have sensitive information in their configuration.", "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", "rules": [], - "baseScore": 8 - }, - { - "guid": "", - "name": "PSP enabled", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Impact - service injection" - ] - } - ], - "armoBuiltin": true - }, - "controlID": "C-0068", - "creationTime": "", - "description": "PSP enable fine-grained authorization of pod creation and it is important to enable it", - "remediation": "Turn Pod Security Policies on in your cluster, if you use other admission controllers to control the behavior that PSP controls, exclude this control from your scans", - "rules": [], - "baseScore": 1 - }, - { - "guid": "", - "name": "HostNetwork access", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Discovery", - "Lateral movement", - "Impact - service access" - ] - } + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" ] }, - "controlID": "C-0041", - "creationTime": "", - "description": "Potential attackers may gain access to a POD and inherit access to the entire host network. For example, in AWS case, they will have access to the entire VPC. This control identifies all the PODs with host network access enabled.", - "remediation": "Only connect PODs to host network when it is necessary. 
If not, set the hostNetwork field of the pod spec to false, or completely remove it (false is the default). Whitelist only those PODs that must have access to host network by design.", - "rules": [], - "baseScore": 7 + "category": { + "name": "Secrets", + "id": "Cat-3" + } }, { "guid": "", - "name": "Ensure that the API Server only makes use of Strong Cryptographic Ciphers", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0143", + "name": "Ensure that the Controller Manager --bind-address argument is set to 127.0.0.1", + "controlID": "C-0150", "creationTime": "", - "description": "Ensure that the API server is configured to only use strong cryptographic ciphers.", - "remediation": "Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml on the Control Plane node and set the below parameter.\n\n \n```\n--tls-cipher-suites=TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_GCM_SHA384.\n\n```", + "description": "Do not bind the Controller Manager service to non-loopback insecure addresses.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and ensure the correct value for the `--bind-address` parameter", "rules": [], - "baseScore": 5 - }, - { - "guid": "", - "name": "Ensure that the API Server --audit-log-maxage argument is set to 30 or as appropriate", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0131", - "creationTime": "", - "description": "Retain the logs for at least 30 days or as appropriate.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxage` parameter to 30 or as an appropriate number of days:\n\n \n```\n--audit-log-maxage=30\n\n```", - "rules": [], - "baseScore": 4 - }, - { - "guid": "", - "name": "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0181", - "creationTime": "", - "description": "Setup TLS connection on the Kubelets.", - "remediation": "If using a Kubelet config file, edit the file to set tlsCertFile to the location of the certificate file to use to identify this Kubelet, and tlsPrivateKeyFile to the location of the corresponding private key file.\n\n If using command line arguments, edit the kubelet service file /etc/kubernetes/kubelet.conf on each worker node and set the below parameters in KUBELET\\_CERTIFICATE\\_ARGS variable.\n\n --tls-cert-file= --tls-private-key-file=\nBased on your system, restart the kubelet service. 
For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "rules": [], - "baseScore": 7 - }, - { - "guid": "", - "name": "Exec into container", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Execution" - ], - "rbacQuery": "Show who can access into pods", - "controlTypeTags": [ - "compliance", - "security-impact" + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster" ] }, - "controlID": "C-0002", - "creationTime": "", - "description": "Attackers with relevant permissions can run malicious commands in the context of legitimate containers in the cluster using “kubectl exec” command. This control determines which subjects have permissions to use this command.", - "remediation": "It is recommended to prohibit “kubectl exec” command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", - "rules": [], - "baseScore": 5 + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", - "name": "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0156", + "name": "Ensure that the API server pod specification file permissions are set to 600 or more restrictive", + "controlID": "C-0092", "creationTime": "", - "description": "etcd should be configured to make use of TLS encryption for peer connections.", - "remediation": "Follow the etcd service documentation and configure peer TLS encryption as appropriate for your etcd cluster.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameters.\n\n \n```\n--peer-client-file=\n--peer-key-file=\n\n```", + "description": "Ensure that the API server pod specification file has permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```", "rules": [], - "baseScore": 7 - }, - { - "guid": "", - "name": "Sudo in container entrypoint", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" ] }, - "controlID": "C-0062", - "creationTime": "", - "description": "Adding sudo to a container entry point command may escalate process privileges and allow access to forbidden resources. 
This control checks all the entry point commands in all containers in the POD to find those that have sudo command.", - "remediation": "Remove sudo from the command line and use Kubernetes native root and capabilities controls to provide necessary privileges where they are required.", - "rules": [], - "baseScore": 5 + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", - "name": "Ensure that the admission control plugin AlwaysPullImages is set", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0123", + "name": "Minimize the admission of root containers", + "controlID": "C-0218", "creationTime": "", - "description": "Always pull images.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--enable-admission-plugins` parameter to include `AlwaysPullImages`.\n\n \n```\n--enable-admission-plugins=...,AlwaysPullImages,...\n\n```", + "description": "Do not generally permit containers to be run as the root user.", + "remediation": "Create a PSP as described in the Kubernetes documentation, ensuring that the `.spec.runAsUser.rule` is set to either `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0.", "rules": [], - "baseScore": 4 - }, - { - "guid": "", - "name": "Malicious admission controller (mutating)", - "attributes": { - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Impact - service injection" - ] - } - ], - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Persistence" - ], - "controlTypeTags": [ - "security", - "compliance" + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" ] - }, - "controlID": "C-0039", - "creationTime": "", - "description": "Attackers may use mutating webhooks to intercept and modify all the resources in the cluster. This control lists all mutating webhook configurations that must be verified.", - "remediation": "Ensure all the webhooks are necessary. Use exception mechanism to prevent repititive notifications.", - "rules": [], - "baseScore": 4 - }, - { - "guid": "", - "name": "Ensure that the cluster has at least one active policy control mechanism in place", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0192", - "creationTime": "", - "description": "Every Kubernetes cluster should have at least one policy control mechanism in place to enforce the other requirements in this section. 
This could be the in-built Pod Security Admission controller, or a third party policy control system.", - "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.", - "rules": [], - "baseScore": 4 - }, - { - "guid": "", - "name": "Minimize the admission of HostPath volumes", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0203", - "creationTime": "", - "description": "Do not generally admit containers which make use of `hostPath` volumes.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers which use `hostPath` volumes.", - "rules": [], - "baseScore": 6 - }, - { - "guid": "", - "name": "Non-root containers", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ], - "armoBuiltin": true - }, - "controlID": "C-0013", - "creationTime": "", - "description": "Potential attackers may gain access to a container and leverage its existing privileges to conduct an attack. Therefore, it is not recommended to deploy containers with root privileges unless it is absolutely necessary. This control identifies all the Pods running as root or can escalate to root.", - "remediation": "If your application does not need root privileges, make sure to define the runAsUser or runAsGroup under the PodSecurityContext and use user ID 1000 or higher. Do not turn on allowPrivlegeEscalation bit and make sure runAsNonRoot is true.", - "rules": [], - "baseScore": 6 - }, - { - "guid": "", - "name": "If proxy kubeconfig file exists ensure ownership is set to root:root", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0165", - "creationTime": "", - "description": "If `kube-proxy` is running, ensure that the file ownership of its kubeconfig file is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchown root:root \n\n```", - "rules": [], - "baseScore": 6 - }, - { - "guid": "", - "name": "Ensure that the API Server --DenyServiceExternalIPs is not set", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0115", - "creationTime": "", - "description": "This admission controller rejects all net-new usage of the Service field externalIPs.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and remove the `--DenyServiceExternalIPs'parameter\n\n or\n\n The Kubernetes API server flag disable-admission-plugins takes a comma-delimited list of admission control plugins to be disabled, even if they are in the list of plugins enabled by default.\n\n `kube-apiserver --disable-admission-plugins=DenyServiceExternalIPs,AlwaysDeny ...`", - "rules": [], - "baseScore": 4 - }, - { - "guid": "", - "name": "The default namespace should not be used", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0212", - "creationTime": "", - "description": "Kubernetes provides a default namespace, where objects are placed if no namespace is specified for them. 
Placing objects in this namespace makes application of RBAC and other controls more difficult.", - "remediation": "Ensure that namespaces are created to allow for appropriate segregation of Kubernetes resources and that all new resources are created in a specific namespace.", - "rules": [], - "baseScore": 4 - }, - { - "guid": "", - "name": "Ensure that the API Server --authorization-mode argument is not set to AlwaysAllow", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0118", - "creationTime": "", - "description": "Do not always authorize all requests.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to values other than `AlwaysAllow`. One such example could be as below.\n\n \n```\n--authorization-mode=RBAC\n\n```", - "rules": [], - "baseScore": 7 - }, - { - "guid": "", - "name": "Portforwarding privileges", - "attributes": { - "controlTypeTags": [ - "security-impact", - "compliance" - ], - "attackTracks": [ - { - "categories": [ - "Impact - data destruction", - "Discovery", - "Lateral movement" - ], - "attackTrack": "kubeapi" - } - ], - "armoBuiltin": true, - "rbacQuery": "Port Forwarding" - }, - "controlID": "C-0063", - "creationTime": "", - "description": "Attackers with relevant RBAC permission can use “kubectl portforward” command to establish direct communication with PODs from within the cluster or even remotely. Such communication will most likely bypass existing security measures in the cluster. This control determines which subjects have permissions to use this command.", - "remediation": "It is recommended to prohibit “kubectl portforward” command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.", - "rules": [], - "baseScore": 5 - }, - { - "guid": "", - "name": "Ensure that the CNI in use supports Network Policies", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0205", - "creationTime": "", - "description": "There are a variety of CNI plugins available for Kubernetes. If the CNI in use does not support Network Policies it may not be possible to effectively restrict traffic in the cluster.", - "remediation": "If the CNI plugin in use does not support network policies, consideration should be given to making use of a different plugin, or finding an alternate mechanism for restricting traffic in the Kubernetes cluster.", - "rules": [], - "baseScore": 4 - }, - { - "guid": "", - "name": "Ensure that the API Server --request-timeout argument is set as appropriate", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0134", - "creationTime": "", - "description": "Set global request timeout for API server requests as appropriate.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` and set the below parameter as appropriate and if needed. 
For example,\n\n \n```\n--request-timeout=300s\n\n```", - "rules": [], - "baseScore": 4 - }, - { - "guid": "", - "name": "Ensure that the etcd data directory permissions are set to 700 or more restrictive", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0102", - "creationTime": "", - "description": "Ensure that the etcd data directory has permissions of `700` or more restrictive.", - "remediation": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). For example,\n\n \n```\nchmod 700 /var/lib/etcd\n\n```", - "rules": [], - "baseScore": 7 - }, - { - "guid": "", - "name": "Minimize access to create pods", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0188", - "creationTime": "", - "description": "The ability to create pods in a namespace can provide a number of opportunities for privilege escalation, such as assigning privileged service accounts to these pods or mounting hostPaths with access to sensitive data (unless Pod Security Policies are implemented to restrict this access)\n\n As such, access to create new pods should be restricted to the smallest possible group of users.", - "remediation": "Where possible, remove `create` access to `pod` objects in the cluster.", - "rules": [], - "baseScore": 5 - }, - { - "guid": "", - "name": "Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0180", - "creationTime": "", - "description": "Security relevant information should be captured. The `--event-qps` flag on the Kubelet can be used to limit the rate at which events are gathered. Setting this too low could result in relevant events not being logged, however the unlimited setting of `0` could result in a denial of service on the kubelet.", - "remediation": "If using a Kubelet config file, edit the file to set `eventRecordQPS:` to an appropriate level.\n\n If using command line arguments, edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "rules": [], - "baseScore": 2 - }, - { - "guid": "", - "name": "Ensure that a minimal audit policy is created", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0160", - "creationTime": "", - "description": "Kubernetes can audit the details of requests made to the API server. 
The `--audit-policy-file` flag must be set for this logging to be enabled.", - "remediation": "Create an audit policy file for your cluster.", - "rules": [], - "baseScore": 5 - }, - { - "guid": "", - "name": "CVE-2022-39328-grafana-auth-bypass", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access", - "Execution" - ] - } - ] - }, - "controlID": "C-0090", - "creationTime": "", - "description": "CVE-2022-39328 is a critical vulnerability in Grafana, it might enable attacker to access unauthorized endpoints under heavy load.", - "remediation": "Update your Grafana to 9.2.4 or above", - "rules": [], - "baseScore": 9 - }, - { - "guid": "", - "name": "Ensure that the scheduler pod specification file ownership is set to root:root", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0097", - "creationTime": "", - "description": "Ensure that the scheduler pod specification file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```", - "rules": [], - "baseScore": 6 - }, - { - "guid": "", - "name": "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0124", - "creationTime": "", - "description": "The SecurityContextDeny admission controller can be used to deny pods which make use of some SecurityContext fields which could allow for privilege escalation in the cluster. This should be used where PodSecurityPolicy is not in place within the cluster.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--enable-admission-plugins` parameter to include `SecurityContextDeny`, unless `PodSecurityPolicy` is already in place.\n\n \n```\n--enable-admission-plugins=...,SecurityContextDeny,...\n\n```", - "rules": [], - "baseScore": 4 - }, - { - "guid": "", - "name": "Writable hostPath mount", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Persistence", - "Lateral Movement" - ], - "controlTypeTags": [ - "security", - "compliance", - "devops", - "security-impact" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Persistence", - "Impact - Data access in container" - ] - } - ] - }, - "controlID": "C-0045", - "creationTime": "", - "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host and gain persistence.", - "remediation": "Refrain from using the hostPath mount or use the exception mechanism to remove unnecessary notifications.", - "rules": [], - "baseScore": 8 - }, - { - "guid": "", - "name": "Ensure that the API Server --tls-cert-file and --tls-private-key-file arguments are set as appropriate", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0138", - "creationTime": "", - "description": "Setup TLS connection on the API server.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection on the apiserver. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the TLS certificate and private key file parameters.\n\n \n```\n--tls-cert-file= \n--tls-private-key-file=\n\n```", - "rules": [], - "baseScore": 8 - }, - { - "guid": "", - "name": "Ensure that the API Server --kubelet-certificate-authority argument is set as appropriate", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0117", - "creationTime": "", - "description": "Verify kubelet's certificate before establishing connection.", - "remediation": "Follow the Kubernetes documentation and setup the TLS connection between the apiserver and kubelets. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--kubelet-certificate-authority` parameter to the path to the cert file for the certificate authority.\n\n \n```\n--kubelet-certificate-authority=\n\n```", - "rules": [], - "baseScore": 8 - }, - { - "guid": "", - "name": "Verify that the RotateKubeletServerCertificate argument is set to true", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0183", - "creationTime": "", - "description": "Enable kubelet server certificate rotation.", - "remediation": "Edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_CERTIFICATE_ARGS` variable.\n\n \n```\n--feature-gates=RotateKubeletServerCertificate=true\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "rules": [], - "baseScore": 6 - }, - { - "guid": "", - "name": "Ensure that the scheduler.conf file permissions are set to 600 or more restrictive", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0106", - "creationTime": "", - "description": "Ensure that the `scheduler.conf` file has permissions of `600` or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchmod 600 /etc/kubernetes/scheduler.conf\n\n```", - "rules": [], - "baseScore": 6 - }, - { - "guid": "", - "name": "Minimize the admission of containers with the NET_RAW capability", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0199", - "creationTime": "", - "description": "Do not generally permit containers with the potentially dangerous NET\\_RAW capability.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers with the `NET_RAW` capability.", - "rules": [], - "baseScore": 6 - }, - { - "guid": "", - "name": "Ensure that the admission control plugin EventRateLimit is set", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0121", - "creationTime": "", - "description": "Limit the rate at which the API server accepts requests.", - "remediation": "Follow the Kubernetes documentation and set the desired limits in a configuration file.\n\n Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` and set the below parameters.\n\n \n```\n--enable-admission-plugins=...,EventRateLimit,...\n--admission-control-config-file=\n\n```", - "rules": [], - "baseScore": 4 - }, - { - "guid": "", - "name": "Images from allowed registry", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Collection" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access", - "Execution" - ] - } - ] - }, - "controlID": "C-0078", - "creationTime": "", - "description": "This control is intended to ensure that all the used container images are taken from the authorized repositories. It allows user to list all the approved repositories and will fail all the images taken from any repository outside of this list.", - "remediation": "You should enable all trusted repositories in the parameters of this control.", - "rules": [], - "baseScore": 5 - }, - { - "guid": "", - "name": "HostPath mount", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Privilege escalation" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Impact - Data access in container" - ] - } - ] - }, - "controlID": "C-0048", - "creationTime": "", - "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host. This control identifies all the PODs using hostPath mount.", - "remediation": "Remove hostPath mounts unless they are absolutely necessary and use exception mechanism to remove notifications.", - "rules": [], - "baseScore": 7 - }, - { - "guid": "", - "name": "Ensure that the scheduler.conf file ownership is set to root:root", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0107", - "creationTime": "", - "description": "Ensure that the `scheduler.conf` file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchown root:root /etc/kubernetes/scheduler.conf\n\n```", - "rules": [], - "baseScore": 6 - }, - { - "guid": "", - "name": "Insecure capabilities", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "controlID": "C-0046", - "creationTime": "", - "description": "Giving insecure or excessive capabilities to a container can increase the impact of the container compromise. This control identifies all the PODs with dangerous capabilities (see documentation pages for details).", - "remediation": "Remove all insecure capabilities which are not necessary for the container.", - "rules": [], - "baseScore": 7 - }, - { - "guid": "", - "name": "CVE-2022-0492-cgroups-container-escape", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "controlID": "C-0086", - "creationTime": "", - "description": "Linux Kernel vulnerability CVE-2022-0492 may allow malicious code running inside container to escape container isolation and gain root privileges on the entire node. When fixed Kernel version numbers will become available, this control will be modified to verify them and avoid false positive detections. This control identifies all the resources that don't deploy neither AppArmor nor SELinux, run as root or allow privileged escalation or have corresponding dangerous capabilities.", - "remediation": "Activate AppArmor or SELinux. Follow the least privilege principle and remove root privileges or privilege escalation option and CAP_DAC_OVERRIDE capability. Make sure you don't allow container images from potentially dangerous sources and that containers that must have high privileges are taken from protected repositories.", - "rules": [], - "baseScore": 4 - }, - { - "guid": "", - "name": "Ensure that the API Server --secure-port argument is not set to 0", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0128", - "creationTime": "", - "description": "Do not disable the secure port.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and either remove the `--secure-port` parameter or set it to a different (non-zero) desired port.", - "rules": [], - "baseScore": 8 - }, - { - "guid": "", - "name": "CoreDNS poisoning", - "attributes": { - "microsoftMitreColumns": [ - "Lateral Movement" - ], - "controlTypeTags": [ - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Impact - service injection" - ] - } - ], - "armoBuiltin": true - }, - "controlID": "C-0037", - "creationTime": "", - "description": "If attackers have permissions to modify the coredns ConfigMap they can change the behavior of the cluster’s DNS, poison it, and override the network identity of other services. This control identifies all subjects allowed to update the 'coredns' configmap.", - "remediation": "You should follow the least privilege principle. Monitor and approve all the subjects allowed to modify the 'coredns' configmap. 
It is also recommended to remove this permission from the users/service accounts used in the daily operations.", - "rules": [], - "baseScore": 4 - }, - { - "guid": "", - "name": "CVE-2022-3172-aggregated-API-server-redirect", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [] - }, - "controlID": "C-0089", - "creationTime": "", - "description": "The API server allows an aggregated API to redirect client traffic to any URL. This could lead to the client performing unexpected actions as well as forwarding the client's API server credentials to third parties", - "remediation": "Upgrade the Kubernetes version to one of the following versions (or higher patchs): `v1.25.1`, `v1.24.5`, `v1.23.11`, `v1.22.14`", - "rules": [], - "baseScore": 3 - }, - { - "guid": "", - "name": "Configured liveness probe", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "devops" - ] - }, - "controlID": "C-0056", - "creationTime": "", - "description": "Liveness probe is intended to ensure that workload remains healthy during its entire execution lifecycle, or otherwise restrat the container. It is highly recommended to define liveness probe for every worker container. This control finds all the PODs where the Liveness probe is not configured.", - "remediation": "Ensure Liveness probes are configured wherever possible.", - "rules": [], - "baseScore": 4 - }, - { - "guid": "", - "name": "Verify that the --read-only-port argument is set to 0", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0175", - "creationTime": "", - "description": "Disable the read-only port.", - "remediation": "If using a Kubelet config file, edit the file to set `readOnlyPort` to `0`.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--read-only-port=0\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "rules": [], - "baseScore": 4 - }, - { - "guid": "", - "name": "Workloads with Critical vulnerabilities exposed to external traffic", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access", - "Execution" - ] - } - ] - }, - "controlID": "C-0083", - "creationTime": "", - "description": "Container images with known critical vulnerabilities pose elevated risk if they are exposed to the external traffic. This control lists all images with such vulnerabilities if either LoadBalancer or NodePort service is assigned to them.", - "remediation": "Either update the container image to fix the vulnerabilities (if such fix is available) or reassess if this workload must be exposed to the outseide traffic. If no fix is available, consider periodic restart of the POD to minimize the risk of persistant intrusion. 
Use exception mechanism if you don't want to see this report again.", - "rules": [], - "baseScore": 8 - }, - { - "guid": "", - "name": "Network mapping", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Discovery" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Discovery" - ] - } - ] - }, - "controlID": "C-0049", - "creationTime": "", - "description": "If no network policy is defined, attackers who gain access to a single container may use it to probe the network. This control lists all namespaces in which no network policies are defined.", - "remediation": "Define network policies or use similar network protection mechanisms.", - "rules": [], - "baseScore": 3 - }, - { - "guid": "", - "name": "Ensure that the Scheduler --profiling argument is set to false", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0151", - "creationTime": "", - "description": "Disable profiling, if not needed.", - "remediation": "Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` file on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```", - "rules": [], - "baseScore": 3 - }, - { - "guid": "", - "name": "Ensure that the API Server --audit-log-maxbackup argument is set to 10 or as appropriate", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0132", - "creationTime": "", - "description": "Retain 10 or an appropriate number of old log files.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxbackup` parameter to 10 or to an appropriate value.\n\n \n```\n--audit-log-maxbackup=10\n\n```", - "rules": [], - "baseScore": 4 - }, - { - "guid": "", - "name": "Check if signature exists", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0237", - "creationTime": "", - "description": "Ensures that all images contain some signature", - "remediation": "Replace the image with a signed image", - "rules": [], - "baseScore": 7 - }, - { - "guid": "", - "name": "Ensure that the API Server --audit-log-path argument is set", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0130", - "creationTime": "", - "description": "Enable auditing on the Kubernetes API Server and set the desired audit log path.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-path` parameter to a suitable path and file where you would like audit logs to be written, for example:\n\n \n```\n--audit-log-path=/var/log/apiserver/audit.log\n\n```", - "rules": [], - "baseScore": 7 - }, - { - "guid": "", - "name": "Ensure that the --anonymous-auth argument is set to false", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0172", - "creationTime": "", - "description": "Disable anonymous requests to the Kubelet server.", - "remediation": "If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to `false`.\n\n If using executable arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--anonymous-auth=false\n\n```\n Based on your system, restart the `kubelet` service. 
For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "rules": [], - "baseScore": 7 - }, - { - "guid": "", - "name": "If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0171", - "creationTime": "", - "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file is owned by root:root.", - "remediation": "Run the following command (using the config file location identied in the Audit step)\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", - "rules": [], - "baseScore": 7 - }, - { - "guid": "", - "name": "Verify image signature", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0236", - "creationTime": "", - "description": "Verifies the signature of each image with given public keys", - "remediation": "Replace the image with an image that is signed correctly", - "rules": [], - "baseScore": 7 - }, - { - "guid": "", - "name": "Ensure that the API Server --etcd-cafile argument is set as appropriate", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0140", - "creationTime": "", - "description": "etcd should be configured to make use of TLS encryption for client connections.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the etcd certificate authority file parameter.\n\n \n```\n--etcd-cafile=\n\n```", - "rules": [], - "baseScore": 8 - }, - { - "guid": "", - "name": "Ensure that the API Server --authorization-mode argument includes Node", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0119", - "creationTime": "", - "description": "Restrict kubelet nodes to reading only objects associated with them.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to a value that includes `Node`.\n\n \n```\n--authorization-mode=Node,RBAC\n\n```", - "rules": [], - "baseScore": 5 - }, - { - "guid": "", - "name": "Ensure that the Scheduler --bind-address argument is set to 127.0.0.1", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0152", - "creationTime": "", - "description": "Do not bind the scheduler service to non-loopback insecure addresses.", - "remediation": "Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` on the Control Plane node and ensure the correct value for the `--bind-address` parameter", - "rules": [], - "baseScore": 5 - }, - { - "guid": "", - "name": "Automatic mapping of service account", - "attributes": { - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Credential access", - "Impact - K8s API access" - ] - } - ], - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "controlID": "C-0034", - "creationTime": "", - "description": "Potential attacker may gain access to a POD and steal its service account token. 
Therefore, it is recommended to disable automatic mapping of the service account tokens in service account configuration and enable it only for PODs that need to use them.", - "remediation": "Disable automatic mounting of service account tokens to PODs either at the service account level or at the individual POD level, by specifying the automountServiceAccountToken: false. Note that POD level takes precedence.", - "rules": [], - "baseScore": 6 - }, - { - "guid": "", - "name": "Ensure that the kubelet service file ownership is set to root:root", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0163", - "creationTime": "", - "description": "Ensure that the `kubelet` service file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchown root:root /etc/systemd/system/kubelet.service.d/kubeadm.conf\n\n```", - "rules": [], - "baseScore": 6 - }, - { - "guid": "", - "name": "Malicious admission controller (validating)", - "attributes": { - "microsoftMitreColumns": [ - "Credential access" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Impact - data destruction", - "Impact - service injection" - ] - } - ], - "armoBuiltin": true - }, - "controlID": "C-0036", - "creationTime": "", - "description": "Attackers can use validating webhooks to intercept and discover all the resources in the cluster. This control lists all the validating webhook configurations that must be verified.", - "remediation": "Ensure all the webhooks are necessary. Use exception mechanism to prevent repititive notifications.", - "rules": [], - "baseScore": 3 - }, - { - "guid": "", - "name": "Instance Metadata API", - "attributes": { - "microsoftMitreColumns": [ - "Discovery" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Credential access", - "Discovery", - "Impact - service access" - ] - } - ], - "armoBuiltin": true - }, - "controlID": "C-0052", - "creationTime": "", - "description": "Attackers who gain access to a container, may query the metadata API service for getting information about the underlying node. This control checks if there is access from the nodes to cloud providers instance metadata services.", - "remediation": "Disable metadata services for pods in cloud provider settings.", - "rules": [], - "baseScore": 7 - }, - { - "guid": "", - "name": "Ensure that the API Server --encryption-provider-config argument is set as appropriate", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0141", - "creationTime": "", - "description": "Encrypt etcd key-value store.", - "remediation": "Follow the Kubernetes documentation and configure a `EncryptionConfig` file. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the `--encryption-provider-config` parameter to the path of that file:\n\n \n```\n--encryption-provider-config=\n\n```", - "rules": [], - "baseScore": 7 - }, - { - "guid": "", - "name": "Enforce Kubelet client TLS authentication", - "attributes": { - "attackTracks": [ - { - "attackTrack": "node", - "categories": [ - "Initial access" - ] - } - ], - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "controlID": "C-0070", - "creationTime": "", - "description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.", - "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", - "rules": [], - "baseScore": 9 - }, - { - "guid": "", - "name": "Ensure that the API Server --audit-log-maxsize argument is set to 100 or as appropriate", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0133", - "creationTime": "", - "description": "Rotate log files on reaching 100 MB or as appropriate.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxsize` parameter to an appropriate size in MB. For example, to set it as 100 MB:\n\n \n```\n--audit-log-maxsize=100\n\n```", - "rules": [], - "baseScore": 4 - }, - { - "guid": "", - "name": "Naked PODs", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "devops" - ] - }, - "controlID": "C-0073", - "creationTime": "", - "description": "It is not recommended to create PODs without parental Deployment, ReplicaSet, StatefulSet etc.Manual creation if PODs may lead to a configuration drifts and other untracked changes in the system. Such PODs won't be automatically rescheduled by Kubernetes in case of a crash or infrastructure failure. This control identifies every POD that does not have corresponding parental object.", - "remediation": "Create necessary Deployment object for every POD making any POD a first class citizen in your IaC architecture.", - "rules": [], - "baseScore": 3 - }, - { - "guid": "", - "name": "Minimize the admission of containers wishing to share the host IPC namespace", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0195", - "creationTime": "", - "description": "Do not generally permit containers to be run with the `hostIPC` flag set to true.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostIPC` containers.", - "rules": [], - "baseScore": 5 - }, - { - "guid": "", - "name": "Ensure that the etcd pod specification file ownership is set to root:root", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0099", - "creationTime": "", - "description": "Ensure that the `/etc/kubernetes/manifests/etcd.yaml` file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/etcd.yaml\n\n```", - "rules": [], - "baseScore": 6 - }, - { - "guid": "", - "name": "Create administrative boundaries between resources using namespaces", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0209", - "creationTime": "", - "description": "Use namespaces to isolate your Kubernetes objects.", - "remediation": "Follow the documentation and create namespaces for objects in your deployment as you need them.", - "rules": [], - "baseScore": 5 - }, - { - "guid": "", - "name": "Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0108", - "creationTime": "", - "description": "Ensure that the `controller-manager.conf` file has permissions of 600 or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/controller-manager.conf\n\n```", - "rules": [], - "baseScore": 6 - }, - { - "guid": "", - "name": "Workloads with excessive amount of vulnerabilities", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access", - "Execution" - ] - } - ] - }, - "controlID": "C-0085", - "creationTime": "", - "description": "Container images with multiple Critical and High sevirity vulnerabilities increase the risk of potential exploit. This control lists all such images according to the threashold provided by the customer.", - "remediation": "Update your workload images as soon as possible when fixes become available.", - "rules": [], - "baseScore": 6 - }, - { - "guid": "", - "name": "Linux hardening", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "controlID": "C-0055", - "creationTime": "", - "description": "Containers may be given more privileges than they actually need. This can increase the potential impact of a container compromise.", - "remediation": "You can use AppArmor, Seccomp, SELinux and Linux Capabilities mechanisms to restrict containers abilities to utilize unwanted privileges.", - "rules": [], - "baseScore": 4 + } }, { "guid": "", "name": "Mount service principal", "attributes": { - "armoBuiltin": true, "microsoftMitreColumns": [ "Credential Access" ], @@ -8259,1611 +24334,983 @@ "description": "When a cluster is deployed in the cloud, in some cases attackers can leverage their access to a container in the cluster to gain cloud credentials. 
This control determines if any workload contains a volume with potential access to cloud credential.", "remediation": "Refrain from using path mount to known cloud credentials folders or files .", "rules": [], - "baseScore": 4 + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } }, { "guid": "", - "name": "Ensure that the API Server --anonymous-auth argument is set to false", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0113", + "name": "Ensure that the client certificate authorities file ownership is set to root:root", + "controlID": "C-0169", "creationTime": "", - "description": "Disable anonymous requests to the API server.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--anonymous-auth=false\n\n```", + "description": "Ensure that the certificate authorities file ownership is set to `root:root`.", + "remediation": "Run the following command to modify the ownership of the `--client-ca-file`.\n\n \n```\nchown root:root \n\n```", "rules": [], - "baseScore": 8 + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } }, { "guid": "", - "name": "Minimize the admission of containers with allowPrivilegeEscalation", + "name": "Configure Image Provenance using ImagePolicyWebhook admission controller", "attributes": { - "armoBuiltin": true + "actionRequired": "manual review" }, - "controlID": "C-0197", + "id": "CIS-5.5.1", + "controlID": "C-0289", "creationTime": "", - "description": "Do not generally permit containers to be run with the `allowPrivilegeEscalation` flag set to true. Allowing this right can lead to a process running a container getting more rights than it started with.\n\n It's important to note that these rights are still constrained by the overall container sandbox, and this setting does not relate to the use of privileged containers.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of conatiners with `.spec.allowPrivilegeEscalation`set to `true`.", + "description": "Configure Image Provenance for your deployment.", + "remediation": "Follow the Kubernetes documentation and setup image provenance.", "rules": [], - "baseScore": 6 + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + } }, { "guid": "", - "name": "Ensure that the API server pod specification file permissions are set to 600 or more restrictive", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0092", + "name": "Manage Kubernetes RBAC users with AWS IAM Authenticator for Kubernetes or Upgrade to AWS CLI v1.16.156", + "controlID": "C-0232", "creationTime": "", - "description": "Ensure that the API server pod specification file has permissions of `600` or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```", + "description": "Amazon EKS uses IAM to provide authentication to your Kubernetes cluster through the AWS IAM Authenticator for Kubernetes. 
You can configure the stock kubectl client to work with Amazon EKS by installing the AWS IAM Authenticator for Kubernetes and modifying your kubectl configuration file to use it for authentication.", + "remediation": "Refer to the '[Managing users or IAM roles for your cluster](https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html)' in Amazon EKS documentation.\n\n Note: If using AWS CLI version 1.16.156 or later there is no need to install the AWS IAM Authenticator anymore.\n\n The relevant AWS CLI commands, depending on the use case, are:\n\n \n```\naws eks update-kubeconfig\naws eks get-token\n\n```", "rules": [], - "baseScore": 6 + "baseScore": 7, + "scanningScope": { + "matches": [ + "EKS" + ] + } + }, + { + "guid": "", + "name": "Ensure that Service Account Tokens are only mounted where necessary", + "controlID": "C-0190", + "creationTime": "", + "description": "Service accounts tokens should not be mounted in pods except where the workload running in the pod explicitly needs to communicate with the API server", + "remediation": "Modify the definition of pods and service accounts which do not need to mount service account tokens to disable it.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } + }, + { + "guid": "", + "name": "Minimize the admission of containers with added capabilities", + "controlID": "C-0219", + "creationTime": "", + "description": "Do not generally permit containers with capabilities assigned beyond the default set.", + "remediation": "Ensure that `allowedCapabilities` is not present in PSPs for the cluster unless it is set to an empty array.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } + }, + { + "guid": "", + "name": "Ensure that the --rotate-certificates argument is not set to false", + "controlID": "C-0182", + "creationTime": "", + "description": "Enable kubelet client certificate rotation.", + "remediation": "If using a Kubelet config file, edit the file to add the line `rotateCertificates: true` or remove it altogether to use the default value.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and remove `--rotate-certificates=false` argument from the `KUBELET_CERTIFICATE_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "External facing", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Initial Access" + ] + }, + { + "categories": [ + "Initial Access" + ], + "attackTrack": "service-destruction" + }, + { + "categories": [ + "Initial Access" + ], + "attackTrack": "external-workload-with-cluster-takeover-roles" + }, + { + "attackTrack": "external-database-without-authentication", + "categories": [ + "Initial Access" + ] + }, + { + "attackTrack": "workload-unauthenticated-service", + "categories": [ + "Initial Access" + ] + } + ] + }, + "controlID": "C-0256", + "creationTime": "", + "description": "This control detect workloads that are exposed on Internet through a Service (NodePort or LoadBalancer) or Ingress. 
It fails in case it find workloads connected with these resources.", + "remediation": "The user can evaluate its exposed resources and apply relevant changes wherever needed.", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + } + }, + { + "guid": "", + "name": "HostPath mount", + "attributes": { + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" + ] + } + ] + }, + "controlID": "C-0048", + "creationTime": "", + "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host. This control identifies all the pods using hostPath mount.", + "remediation": "Remove hostPath mounts unless they are absolutely necessary and use exception mechanism to remove notifications.", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Storage", + "id": "Cat-8" + } + } + }, + { + "guid": "", + "name": "Ensure that the kubeconfig file permissions are set to 644 or more restrictive", + "controlID": "C-0238", + "creationTime": "", + "description": "If kubelet is running, and if it is configured by a kubeconfig file, ensure that the proxy kubeconfig file has permissions of 644 or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the each worker\nnode. For example,\n\n \n```\nchmod 644 \n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "EKS" + ] + } + }, + { + "guid": "", + "name": "Ensure that the --peer-client-cert-auth argument is set to true", + "controlID": "C-0157", + "creationTime": "", + "description": "etcd should be configured for peer authentication.", + "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter. ```--peer-client-cert-auth=true```", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive", + "controlID": "C-0166", + "creationTime": "", + "description": "Ensure that the `kubelet.conf` file has permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. 
For example,\n\n \n```\nchmod 600 /etc/kubernetes/kubelet.conf\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + }, + { + "guid": "", + "name": "Ensure that the Controller Manager RotateKubeletServerCertificate argument is set to true", + "controlID": "C-0149", + "creationTime": "", + "description": "Enable kubelet server certificate rotation on controller-manager.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--feature-gates` parameter to include `RotateKubeletServerCertificate=true`.\n\n \n```\n--feature-gates=RotateKubeletServerCertificate=true\n\n```", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Control plane", + "id": "Cat-1" + } + } + ], + "AttackTrackControls": [ + { + "guid": "", + "name": "Verify Authenticated Service", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-unauthenticated-service", + "categories": [ + "Execution" + ] + } + ] + }, + "controlID": "C-0274", + "creationTime": "", + "description": "Verifies if the service is authenticated", + "remediation": "Configure the service to require authentication.", + "rules": [], + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + }, + "category": { + "name": "Network", + "id": "Cat-4" + } + }, + { + "guid": "", + "name": "Workload with PVC access", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Data Collection" + ] + } + ] + }, + "controlID": "C-0257", + "creationTime": "", + "description": "This control detects workloads that have mounted PVC. Workloads with PVC access can potentially expose sensitive information and elevate the risk of unauthorized access to critical resources.", + "remediation": "Review the workloads identified by this control and assess whether it's necessary to mount these PVCs. 
Remove PVC access from workloads that don't require it or ensure appropriate access controls are in place to protect sensitive information.", + "rules": [], + "baseScore": 4, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Storage", + "id": "Cat-8" + } + } + }, + { + "guid": "", + "name": "Ensure CPU limits are set", + "attributes": { + "controlTypeTags": [ + "compliance", + "devops", + "security" + ], + "attackTracks": [ + { + "attackTrack": "service-destruction", + "categories": [ + "Denial of service" + ] + } + ] + }, + "controlID": "C-0270", + "creationTime": "", + "description": "This control identifies all Pods for which the CPU limits are not set.", + "remediation": "Set the CPU limits or use exception mechanism to avoid unnecessary notifications.", + "rules": [], + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Resource management", + "id": "Cat-7" + } + } + }, + { + "guid": "", + "name": "Missing network policy", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Lateral Movement (Network)" + ] + } + ], + "isFixedByNetworkPolicy": true + }, + "controlID": "C-0260", + "creationTime": "", + "description": "This control detects workloads that has no NetworkPolicy configured in labels. If a network policy is not configured, it means that your applications might not have necessary control over the traffic to and from the pods, possibly leading to a security vulnerability.", + "remediation": "Review the workloads identified by this control and assess whether it's necessary to configure a network policy for them.", + "rules": [], + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Network", + "id": "Cat-4" + } }, { "guid": "", "name": "Apply Security Context to Your Pods and Containers", "attributes": { - "armoBuiltin": true + "controlTypeTags": [ + "security", + "compliance" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" + ] + } + ] }, "controlID": "C-0211", "creationTime": "", "description": "Apply Security Context to Your Pods and Containers", "remediation": "Follow the Kubernetes documentation and apply security contexts to your pods. For a suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker Containers.", "rules": [], - "baseScore": 8 - }, - { - "guid": "", - "name": "Access Kubernetes dashboard", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Discovery", - "Lateral Movement" - ], - "rbacQuery": "Access k8s Dashboard", - "controlTypeTags": [ - "compliance" + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" ] }, - "controlID": "C-0014", - "creationTime": "", - "description": "Attackers who gain access to the dashboard service account or have its RBAC permissions can use its network access to retrieve information about resources in the cluster or change them. 
This control checks if a subject that is not dashboard service account is bound to dashboard role/clusterrole, or - if anyone that is not the dashboard pod is associated with dashboard service account.", - "remediation": "Make sure that the “Kubernetes Dashboard” service account is only bound to the Kubernetes dashboard following the least privilege principle.", - "rules": [], - "baseScore": 2 + "category": { + "name": "Workload", + "id": "Cat-5" + } }, { "guid": "", - "name": "Ensure that the Controller Manager --bind-address argument is set to 127.0.0.1", + "name": "Workload with ConfigMap access", "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0150", - "creationTime": "", - "description": "Do not bind the Controller Manager service to non-loopback insecure addresses.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and ensure the correct value for the `--bind-address` parameter", - "rules": [], - "baseScore": 5 - }, - { - "guid": "", - "name": "Ensure that default service accounts are not actively used", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0189", - "creationTime": "", - "description": "The `default` service account should not be used to ensure that rights granted to applications can be more easily audited and reviewed.", - "remediation": "Create explicit service accounts wherever a Kubernetes workload requires specific access to the Kubernetes API server.\n\n Modify the configuration of each default service account to include this value\n\n \n```\nautomountServiceAccountToken: false\n\n```", - "rules": [], - "baseScore": 5 - }, - { - "guid": "", - "name": "Minimize the admission of containers which use HostPorts", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0204", - "creationTime": "", - "description": "Do not generally permit containers which require the use of HostPorts.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers which use `hostPort` sections.", - "rules": [], - "baseScore": 4 - }, - { - "guid": "", - "name": "Resources CPU limit and request", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "compliance", - "devops" - ] - }, - "controlID": "C-0050", - "creationTime": "", - "description": "This control identifies all Pods for which the CPU limit is not set.", - "remediation": "Set the CPU limit or use exception mechanism to avoid unnecessary notifications.", - "rules": [], - "baseScore": 8 - }, - { - "guid": "", - "name": "Image pull policy on latest tag", - "attributes": { - "controlTypeTags": [ - "devops" - ], - "armoBuiltin": true - }, - "controlID": "C-0075", - "creationTime": "", - "description": "While usage of the latest tag is not generally recommended, in some cases this is necessary. If it is, the ImagePullPolicy must be set to Always, otherwise Kubernetes may run an older image with the same name that happens to be present in the node cache. Note that using Always will not cause additional image downloads because Kubernetes will check the image hash of the local local against the registry and only pull the image if this hash has changed, which is exactly what users want when use the latest tag. 
This control will identify all PODs with latest tag that have ImagePullSecret not set to Always.", - "remediation": "Set ImagePullPolicy to Always in all PODs found by this control.", - "rules": [], - "baseScore": 2 - }, - { - "guid": "", - "name": "Ensure that the API server pod specification file ownership is set to root:root", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0093", - "creationTime": "", - "description": "Ensure that the API server pod specification file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```", - "rules": [], - "baseScore": 6 - }, - { - "guid": "", - "name": "Ensure that the API Server --etcd-certfile and --etcd-keyfile arguments are set as appropriate", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0137", - "creationTime": "", - "description": "etcd should be configured to make use of TLS encryption for client connections.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the etcd certificate and key file parameters.\n\n \n```\n--etcd-certfile= \n--etcd-keyfile=\n\n```", - "rules": [], - "baseScore": 8 - }, - { - "guid": "", - "name": "Ensure that the Container Network Interface file ownership is set to root:root", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0101", - "creationTime": "", - "description": "Ensure that the Container Network Interface files have ownership set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root \n\n```", - "rules": [], - "baseScore": 6 - }, - { - "guid": "", - "name": "Ensure that the controller manager pod specification file ownership is set to root:root", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0095", - "creationTime": "", - "description": "Ensure that the controller manager pod specification file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```", - "rules": [], - "baseScore": 6 - }, - { - "guid": "", - "name": "Cluster-admin binding", - "attributes": { - "rbacQuery": "Show cluster_admin", - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Impact - data destruction", - "Impact - service injection" - ] - } - ], - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Privilege escalation" - ] - }, - "controlID": "C-0035", - "creationTime": "", - "description": "Attackers who have cluster admin permissions (can perform any action on any resource), can take advantage of their privileges for malicious activities. This control determines which subjects have cluster admin permissions.", - "remediation": "You should apply least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. 
Don't use subjects with such high permissions for daily operations.", - "rules": [], - "baseScore": 6 - }, - { - "guid": "", - "name": "Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0094", - "creationTime": "", - "description": "Ensure that the controller manager pod specification file has permissions of `600` or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```", - "rules": [], - "baseScore": 6 - }, - { - "guid": "", - "name": "Ensure that the seccomp profile is set to docker/default in your pod definitions", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0210", - "creationTime": "", - "description": "Enable `docker/default` seccomp profile in your pod definitions.", - "remediation": "Use security context to enable the `docker/default` seccomp profile in your pod definitions. An example is as below:\n\n \n```\n securityContext:\n seccompProfile:\n type: RuntimeDefault\n\n```", - "rules": [], - "baseScore": 4 - }, - { - "guid": "", - "name": "Kubernetes CronJob", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Persistence" - ], - "controlTypeTags": [ - "compliance" - ] - }, - "controlID": "C-0026", - "creationTime": "", - "description": "Attackers may use Kubernetes CronJob for scheduling execution of malicious code that would run as a POD in the cluster. This control lists all the CronJobs that exist in the cluster for the user to approve.", - "remediation": "Watch Kubernetes CronJobs and make sure they are legitimate.", - "rules": [], - "baseScore": 1 - }, - { - "guid": "", - "name": "Delete Kubernetes events", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Defense evasion" - ], - "rbacQuery": "Show who can delete k8s events", - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Defense evasion" - ] - } - ] - }, - "controlID": "C-0031", - "creationTime": "", - "description": "Attackers may delete Kubernetes events to avoid detection of their activity in the cluster. This control identifies all the subjects that can delete Kubernetes events.", - "remediation": "You should follow the least privilege principle. Minimize the number of subjects who can delete Kubernetes events. Avoid using these subjects in the daily operations.", - "rules": [], - "baseScore": 4 - }, - { - "guid": "", - "name": "Ensure that the --streaming-connection-idle-timeout argument is not set to 0", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0176", - "creationTime": "", - "description": "Do not disable timeouts on streaming connections.", - "remediation": "If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a value other than 0.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--streaming-connection-idle-timeout=5m\n\n```\n Based on your system, restart the `kubelet` service. 
For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "rules": [], - "baseScore": 3 - }, - { - "guid": "", - "name": "Ensure that the --peer-client-cert-auth argument is set to true", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0157", - "creationTime": "", - "description": "etcd should be configured for peer authentication.", - "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--peer-client-cert-auth=true\n\n```", - "rules": [], - "baseScore": 7 - }, - { - "guid": "", - "name": "Ensure that the client certificate authorities file ownership is set to root:root", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0169", - "creationTime": "", - "description": "Ensure that the certificate authorities file ownership is set to `root:root`.", - "remediation": "Run the following command to modify the ownership of the `--client-ca-file`.\n\n \n```\nchown root:root \n\n```", - "rules": [], - "baseScore": 7 - }, - { - "guid": "", - "name": "Minimize the admission of containers wishing to share the host process ID namespace", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0194", - "creationTime": "", - "description": "Do not generally permit containers to be run with the `hostPID` flag set to true.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostPID` containers.", - "rules": [], - "baseScore": 5 - }, - { - "guid": "", - "name": "CVE-2022-0185-linux-kernel-container-escape", - "attributes": { - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ], - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ] - }, - "controlID": "C-0079", - "creationTime": "", - "description": "CVE-2022-0185 is a kernel vulnerability enabling privilege escalation and it can lead attackers to escape containers and take control over nodes. This control alerts on vulnerable kernel versions of Kubernetes nodes", - "remediation": "Patch Linux kernel version to 5.16.2 or above", - "rules": [], - "baseScore": 4 - }, - { - "guid": "", - "name": "RBAC enabled", - "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Initial access", - "Privilege escalation" - ] - } - ], - "armoBuiltin": true - }, - "controlID": "C-0088", - "creationTime": "", - "description": "RBAC is the most advanced and well accepted mode of authorizing users of the Kubernetes API", - "remediation": "Enable RBAC either in the API server configuration or with the Kubernetes provider API", - "rules": [], - "baseScore": 7 - }, - { - "guid": "", - "name": "Ensure that the admin.conf file ownership is set to root:root", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0105", - "creationTime": "", - "description": "Ensure that the `admin.conf` file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchown root:root /etc/kubernetes/admin.conf\n\n```", - "rules": [], - "baseScore": 7 - }, - { - "guid": "", - "name": "Minimize wildcard use in Roles and ClusterRoles", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0187", - "creationTime": "", - "description": "Kubernetes Roles and ClusterRoles provide access to resources based on sets of objects and actions that can be taken on those objects. It is possible to set either of these to be the wildcard \"\\*\" which matches all items.\n\n Use of wildcards is not optimal from a security perspective as it may allow for inadvertent access to be granted when new resources are added to the Kubernetes API either as CRDs or in later versions of the product.", - "remediation": "Where possible replace any use of wildcards in clusterroles and roles with specific objects or actions.", - "rules": [], - "baseScore": 7 - }, - { - "guid": "", - "name": "Privileged container", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Privilege escalation" - ], "controlTypeTags": [ "security" ], "attackTracks": [ { - "attackTrack": "container", "categories": [ - "Privilege escalation" - ] + "Data Collection" + ], + "attackTrack": "workload-external-track" } ] }, - "controlID": "C-0057", + "controlID": "C-0258", "creationTime": "", - "description": "Potential attackers may gain access to privileged containers and inherit access to the host resources. Therefore, it is not recommended to deploy privileged containers unless it is absolutely necessary. This control identifies all the privileged Pods.", - "remediation": "Remove privileged capabilities by setting the securityContext.privileged to false. If you must deploy a Pod as privileged, add other restriction to it, such as network policy, Seccomp etc and still remove all unnecessary capabilities. Use the exception mechanism to remove unnecessary notifications.", + "description": "This control detects workloads that have mounted ConfigMaps. Workloads with ConfigMap access can potentially expose sensitive information and elevate the risk of unauthorized access to critical resources.", + "remediation": "Review the workloads identified by this control and assess whether it's necessary to mount these configMaps. Remove configMaps access from workloads that don't require it or ensure appropriate access controls are in place to protect sensitive information.", "rules": [], - "baseScore": 8 - }, - { - "guid": "", - "name": "Minimize the admission of Windows HostProcess Containers", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0202", - "creationTime": "", - "description": "Do not generally permit Windows containers to be run with the `hostProcess` flag set to true.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostProcess` containers.", - "rules": [], - "baseScore": 7 - }, - { - "guid": "", - "name": "Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0111", - "creationTime": "", - "description": "Ensure that Kubernetes PKI certificate files have permissions of `600` or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchmod -R 600 /etc/kubernetes/pki/*.crt\n\n```", - "rules": [], - "baseScore": 8 - }, - { - "guid": "", - "name": "Ensure that the Kubernetes PKI directory and file ownership is set to root:root", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0110", - "creationTime": "", - "description": "Ensure that the Kubernetes PKI directory and file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown -R root:root /etc/kubernetes/pki/\n\n```", - "rules": [], - "baseScore": 8 - }, - { - "guid": "", - "name": "Ensure that the admission control plugin ServiceAccount is set", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0125", - "creationTime": "", - "description": "Automate service accounts management.", - "remediation": "Follow the documentation and create `ServiceAccount` objects as per your environment. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and ensure that the `--disable-admission-plugins` parameter is set to a value that does not include `ServiceAccount`.", - "rules": [], - "baseScore": 3 - }, - { - "guid": "", - "name": "Ensure that the admin.conf file permissions are set to 600", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0104", - "creationTime": "", - "description": "Ensure that the `admin.conf` file has permissions of `600`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/admin.conf\n\n```", - "rules": [], - "baseScore": 7 - }, - { - "guid": "", - "name": "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0167", - "creationTime": "", - "description": "Ensure that the `kubelet.conf` file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", - "rules": [], - "baseScore": 6 - }, - { - "guid": "", - "name": "Ensure that the API Server --authorization-mode argument includes RBAC", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0120", - "creationTime": "", - "description": "Turn on Role Based Access Control.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to a value that includes `RBAC`, for example:\n\n \n```\n--authorization-mode=Node,RBAC\n\n```", - "rules": [], - "baseScore": 8 - }, - { - "guid": "", - "name": "Ensure that the Kubernetes PKI key file permissions are set to 600", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0112", - "creationTime": "", - "description": "Ensure that Kubernetes PKI key files have permissions of `600`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchmod -R 600 /etc/kubernetes/pki/*.key\n\n```", - "rules": [], - "baseScore": 8 - }, - { - "guid": "", - "name": "Audit logs enabled", - "attributes": { - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Defense evasion - KubeAPI" - ] - } - ], - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" + "baseScore": 5, + "scanningScope": { + "matches": [ + "cluster", + "file" ] - }, - "controlID": "C-0067", - "creationTime": "", - "description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. It is important to use it so the operator has a record of events happened in Kubernetes", - "remediation": "Turn on audit logging for your cluster. Look at the vendor guidelines for more details", - "rules": [], - "baseScore": 5 + } }, { "guid": "", - "name": "API server insecure port is enabled", + "name": "Workload with secret access", "attributes": { - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Initial access" - ] - } - ], - "armoBuiltin": true - }, - "controlID": "C-0005", - "creationTime": "", - "description": "Kubernetes control plane API is running with non-secure port enabled which allows attackers to gain unprotected access to the cluster.", - "remediation": "Set the insecure-port flag of the API server to zero.", - "rules": [], - "baseScore": 9 - }, - { - "guid": "", - "name": "Ensure that all Namespaces have Network Policies defined", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0206", - "creationTime": "", - "description": "Use network policies to isolate traffic in your cluster network.", - "remediation": "Follow the documentation and create `NetworkPolicy` objects as you need them.", - "rules": [], - "baseScore": 4 - }, - { - "guid": "", - "name": "CVE-2022-23648-containerd-fs-escape", - "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security" ], "attackTracks": [ { - "attackTrack": "container", + "attackTrack": "workload-external-track", "categories": [ - "Privilege escalation", - "Impact - Data access in container" + "Secret Access" ] } ] }, - "controlID": "C-0087", + "controlID": "C-0255", "creationTime": "", - "description": "CVE-2022-23648 is a vulnerability of containerd enabling attacker to gain access to read-only copies of arbitrary files from the host using aspecially-crafted POD configuration yamls", - "remediation": "Patch containerd to 1.6.1, 1.5.10, 1.4.12 or above", + "description": "This control identifies workloads that have mounted secrets. Workloads with secret access can potentially expose sensitive information and increase the risk of unauthorized access to critical resources.", + "remediation": "Review the workloads identified by this control and assess whether it's necessary to mount these secrets. 
Remove secret access from workloads that don't require it or ensure appropriate access controls are in place to protect sensitive information.", "rules": [], - "baseScore": 7 + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } }, { "guid": "", - "name": "Ensure that the --rotate-certificates argument is not set to false", + "name": "Insecure capabilities", "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0182", - "creationTime": "", - "description": "Enable kubelet client certificate rotation.", - "remediation": "If using a Kubelet config file, edit the file to add the line `rotateCertificates: true` or remove it altogether to use the default value.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and remove `--rotate-certificates=false` argument from the `KUBELET_CERTIFICATE_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "rules": [], - "baseScore": 6 - }, - { - "guid": "", - "name": "Ensure that the API Server --token-auth-file parameter is not set", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0114", - "creationTime": "", - "description": "Do not use token based authentication.", - "remediation": "Follow the documentation and configure alternate mechanisms for authentication. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and remove the `--token-auth-file=` parameter.", - "rules": [], - "baseScore": 8 - }, - { - "guid": "", - "name": "Minimize the admission of containers wishing to share the host network namespace", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0196", - "creationTime": "", - "description": "Do not generally permit containers to be run with the `hostNetwork` flag set to true.", - "remediation": "Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostNetwork` containers.", - "rules": [], - "baseScore": 5 - }, - { - "guid": "", - "name": "Container hostPort", - "attributes": { - "armoBuiltin": true, + "actionRequired": "configuration", "controlTypeTags": [ "security", "compliance", - "devops" + "smartRemediation" ], "attackTracks": [ { - "attackTrack": "container", + "attackTrack": "workload-external-track", "categories": [ - "Initial access" + "Privilege Escalation (Node)" ] } ] }, - "controlID": "C-0044", + "controlID": "C-0046", "creationTime": "", - "description": "Configuring hostPort requires a particular port number. If two objects specify the same HostPort, they could not be deployed to the same node. It may prevent the second object from starting, even if Kubernetes will try reschedule it on another node, provided there are available nodes with sufficient amount of resources. Also, if the number of replicas of such workload is higher than the number of nodes, the deployment will consistently fail.", - "remediation": "Avoid usage of hostPort unless it is absolutely necessary, in which case define appropriate exception. Use NodePort / ClusterIP instead.", + "description": "Giving insecure or excessive capabilities to a container can increase the impact of the container compromise. 
This control identifies all the pods with dangerous capabilities (see documentation pages for details).", + "remediation": "Remove all insecure capabilities which are not necessary for the container.", "rules": [], - "baseScore": 4 - }, - { - "guid": "", - "name": "Ensure that the Controller Manager --terminated-pod-gc-threshold argument is set as appropriate", - "attributes": { - "armoBuiltin": true + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] }, - "controlID": "C-0144", - "creationTime": "", - "description": "Activate garbage collector on pod termination, as appropriate.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--terminated-pod-gc-threshold` to an appropriate threshold, for example:\n\n \n```\n--terminated-pod-gc-threshold=10\n\n```", - "rules": [], - "baseScore": 4 + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + } + } }, { "guid": "", - "name": "Disable anonymous access to Kubelet service", + "name": "HostNetwork access", "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security", "compliance" ], "attackTracks": [ { - "attackTrack": "kubeapi", + "attackTrack": "workload-external-track", "categories": [ - "Initial access" + "Lateral Movement (Network)" ] } ] }, - "controlID": "C-0069", + "controlID": "C-0041", "creationTime": "", - "description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.", - "remediation": "Start the kubelet with the --anonymous-auth=false flag.", + "description": "Potential attackers may gain access to a pod and inherit access to the entire host network. For example, in AWS case, they will have access to the entire VPC. This control identifies all the pods with host network access enabled.", + "remediation": "Only connect pods to host network when it is necessary. If not, set the hostNetwork field of the pod spec to false, or completely remove it (false is the default). 
Whitelist only those pods that must have access to host network by design.", "rules": [], - "baseScore": 10 + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Network", + "id": "Cat-4" + } + } }, { "guid": "", - "name": "Ensure that the API Server --service-account-key-file argument is set as appropriate", + "name": "Workload with credential access", "attributes": { - "armoBuiltin": true + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Credential access" + ] + } + ] }, - "controlID": "C-0136", + "controlID": "C-0259", "creationTime": "", - "description": "Explicitly set a service account public key file for service accounts on the apiserver.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--service-account-key-file` parameter to the public key file for service accounts:\n\n \n```\n--service-account-key-file=\n\n```", + "description": "This control checks if workloads specifications have sensitive information in their environment variables.", + "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.", "rules": [], - "baseScore": 5 + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } + }, + { + "guid": "", + "name": "Workload with cluster takeover roles", + "attributes": { + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "clickableResourceKind": "ServiceAccount", + "attackTrack": "external-workload-with-cluster-takeover-roles", + "categories": [ + "Cluster Access" + ], + "displayRelatedResources": true + } + ] + }, + "controlID": "C-0267", + "creationTime": "", + "description": "Cluster takeover roles include workload creation or update and secret access. They can easily lead to super privileges in the cluster. If an attacker can exploit this workload then the attacker can take over the cluster using the RBAC privileges this workload is assigned to.", + "remediation": "You should apply least privilege principle. Make sure each service account has only the permissions that are absolutely necessary.", + "rules": [], + "baseScore": 6, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5" + } }, { "guid": "", "name": "Immutable container filesystem", "attributes": { + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ], "attackTracks": [ { - "attackTrack": "container", + "attackTrack": "workload-external-track", "categories": [ - "Execution", "Persistence" ] } - ], - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" ] }, "controlID": "C-0017", "creationTime": "", "description": "Mutable container filesystem can be abused to inject malicious code or data into containers. Use immutable (read-only) filesystem to limit potential attacks.", - "remediation": "Set the filesystem of the container to read-only when possible (POD securityContext, readOnlyRootFilesystem: true). If containers application needs to write into the filesystem, it is recommended to mount secondary filesystems for specific directories where application require write access.", + "remediation": "Set the filesystem of the container to read-only when possible (pod securityContext, readOnlyRootFilesystem: true). 
If containers application needs to write into the filesystem, it is recommended to mount secondary filesystems for specific directories where application require write access.", "rules": [], - "baseScore": 3 - }, - { - "guid": "", - "name": "Ensure that the admission control plugin AlwaysAdmit is not set", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0122", - "creationTime": "", - "description": "Do not allow all requests.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and either remove the `--enable-admission-plugins` parameter, or set it to a value that does not include `AlwaysAdmit`.", - "rules": [], - "baseScore": 8 - }, - { - "guid": "", - "name": "Forbidden Container Registries", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Initial Access" - ], - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access" - ] - } + "baseScore": 3, + "scanningScope": { + "matches": [ + "cluster", + "file" ] }, - "controlID": "C-0001", - "creationTime": "", - "description": "In cases where the Kubernetes cluster is provided by a CSP (e.g., AKS in Azure, GKE in GCP, or EKS in AWS), compromised cloud credential can lead to the cluster takeover. Attackers may abuse cloud account credentials or IAM mechanism to the cluster’s management layer.", - "remediation": "Limit the registries from which you pull container images from", - "rules": [], - "baseScore": 7 + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Node escape", + "id": "Cat-9" + } + } }, { "guid": "", - "name": "No impersonation", + "name": "ServiceAccount token mounted", "attributes": { - "armoBuiltin": true, - "rbacQuery": "Impersonation", - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "kubeapi", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "controlID": "C-0065", - "creationTime": "", - "description": "Impersonation is an explicit RBAC permission to use other roles rather than the one assigned to a user, group or service account. This is sometimes needed for testing purposes. However, it is highly recommended not to use this capability in the production environments for daily operations. This control identifies all subjects whose roles include impersonate verb.", - "remediation": "Either remove the impersonate verb from the role where it was found or make sure that this role is not bound to users, groups or service accounts used for ongoing cluster operations. If necessary, bind this role to a subject only for specific needs for limited time period.", - "rules": [], - "baseScore": 6 - }, - { - "guid": "", - "name": "Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0166", - "creationTime": "", - "description": "Ensure that the `kubelet.conf` file has permissions of `600` or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the each worker node. 
For example,\n\n \n```\nchmod 600 /etc/kubernetes/kubelet.conf\n\n```", - "rules": [], - "baseScore": 6 - }, - { - "guid": "", - "name": "Ensure that the API Server --profiling argument is set to false", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0129", - "creationTime": "", - "description": "Disable profiling, if not needed.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```", - "rules": [], - "baseScore": 3 - }, - { - "guid": "", - "name": "Prefer using secrets as files over secrets as environment variables", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0207", - "creationTime": "", - "description": "Kubernetes supports mounting secrets as data volumes or as environment variables. Minimize the use of environment variable secrets.", - "remediation": "If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables.", - "rules": [], - "baseScore": 4 - }, - { - "guid": "", - "name": "Resources memory limit and request", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "compliance", - "devops" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Impact - service destruction" - ] - } - ] - }, - "controlID": "C-0004", - "creationTime": "", - "description": "This control identifies all Pods for which the memory limit is not set.", - "remediation": "Set the memory limit or use exception mechanism to avoid unnecessary notifications.", - "rules": [], - "baseScore": 8 - }, - { - "guid": "", - "name": "Ensure that the Controller Manager --root-ca-file argument is set as appropriate", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0148", - "creationTime": "", - "description": "Allow pods to verify the API server's serving certificate before establishing connections.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--root-ca-file` parameter to the certificate bundle file`.\n\n \n```\n--root-ca-file=\n\n```", - "rules": [], - "baseScore": 7 - }, - { - "guid": "", - "name": "Pods in default namespace", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "compliance", - "devops" - ] - }, - "controlID": "C-0061", - "creationTime": "", - "description": "It is recommended to avoid running PODs in cluster without explicit namespace assignment. This control identifies all the PODs running in the default namespace.", - "remediation": "Create necessary namespaces and move all the PODs from default namespace there.", - "rules": [], - "baseScore": 3 - }, - { - "guid": "", - "name": "Ensure that the kubelet service file permissions are set to 600 or more restrictive", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0162", - "creationTime": "", - "description": "Ensure that the `kubelet` service file has permissions of `600` or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the each worker node. 
For example,\n\n \n```\nchmod 600 /etc/systemd/system/kubelet.service.d/kubeadm.conf\n\n```", - "rules": [], - "baseScore": 6 - }, - { - "guid": "", - "name": "Ensure that the audit policy covers key security concerns", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0161", - "creationTime": "", - "description": "Ensure that the audit policy created for the cluster covers key security concerns.", - "remediation": "Consider modification of the audit policy in use on the cluster to include these items, at a minimum.", - "rules": [], - "baseScore": 5 - }, - { - "guid": "", - "name": "Ensure that the API Server --service-account-lookup argument is set to true", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0135", - "creationTime": "", - "description": "Validate service account before validating token.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--service-account-lookup=true\n\n```\n Alternatively, you can delete the `--service-account-lookup` parameter from this file so that the default takes effect.", - "rules": [], - "baseScore": 6 - }, - { - "guid": "", - "name": "SSH server running inside container", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Execution" - ], - "controlTypeTags": [ - "compliance" - ] - }, - "controlID": "C-0042", - "creationTime": "", - "description": "An SSH server that is running inside a container may be used by attackers to get remote access to the container. This control checks if pods have an open SSH port (22/2222).", - "remediation": "Remove SSH from the container image or limit the access to the SSH server using network policies.", - "rules": [], - "baseScore": 3 - }, - { - "guid": "", - "name": "Configured readiness probe", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "devops" - ] - }, - "controlID": "C-0018", - "creationTime": "", - "description": "Readiness probe is intended to ensure that workload is ready to process network traffic. It is highly recommended to define readiness probe for every worker container. This control finds all the PODs where the readiness probe is not configured.", - "remediation": "Ensure Readiness probes are configured wherever possible.", - "rules": [], - "baseScore": 3 - }, - { - "guid": "", - "name": "Minimize the admission of containers with capabilities assigned", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0201", - "creationTime": "", - "description": "Do not generally permit containers with capabilities", - "remediation": "Review the use of capabilites in applications runnning on your cluster. 
Where a namespace contains applicaions which do not require any Linux capabities to operate consider adding a policy which forbids the admission of containers which do not drop all capabilities.", - "rules": [], - "baseScore": 5 - }, - { - "guid": "", - "name": "Ensure that the --protect-kernel-defaults argument is set to true", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0177", - "creationTime": "", - "description": "Protect tuned kernel parameters from overriding kubelet default kernel parameter values.", - "remediation": "If using a Kubelet config file, edit the file to set `protectKernelDefaults: true`.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n \n```\n--protect-kernel-defaults=true\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "rules": [], - "baseScore": 2 - }, - { - "guid": "", - "name": "Consider external secret storage", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0208", - "creationTime": "", - "description": "Consider the use of an external secrets storage and management system, instead of using Kubernetes Secrets directly, if you have more complex secret management needs. Ensure the solution requires authentication to access secrets, has auditing of access to and use of secrets, and encrypts secrets. Some solutions also make it easier to rotate secrets.", - "remediation": "Refer to the secrets management options offered by your cloud provider or a third-party secrets management solution.", - "rules": [], - "baseScore": 5 - }, - { - "guid": "", - "name": "Minimize the admission of root containers", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0198", - "creationTime": "", - "description": "Do not generally permit containers to be run as the root user.", - "remediation": "Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0, is set.", - "rules": [], - "baseScore": 6 - }, - { - "guid": "", - "name": "Ensure that the Controller Manager --profiling argument is set to false", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0145", - "creationTime": "", - "description": "Disable profiling, if not needed.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```", - "rules": [], - "baseScore": 3 - }, - { - "guid": "", - "name": "Ensure that encryption providers are appropriately configured", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0142", - "creationTime": "", - "description": "Where `etcd` encryption is used, appropriate providers should be configured.", - "remediation": "Follow the Kubernetes documentation and configure a `EncryptionConfig` file. 
In this file, choose `aescbc`, `kms` or `secretbox` as the encryption provider.", - "rules": [], - "baseScore": 7 - }, - { - "guid": "", - "name": "Allow privilege escalation", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "controlID": "C-0016", - "creationTime": "", - "description": "Attackers may gain access to a container and uplift its privilege to enable excessive capabilities.", - "remediation": "If your application does not need it, make sure the allowPrivilegeEscalation field of the securityContext is set to false.", - "rules": [], - "baseScore": 6 - }, - { - "guid": "", - "name": "Ensure that the --make-iptables-util-chains argument is set to true", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0178", - "creationTime": "", - "description": "Allow Kubelet to manage iptables.", - "remediation": "If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains: true`.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and remove the `--make-iptables-util-chains` argument from the `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "rules": [], - "baseScore": 3 - }, - { - "guid": "", - "name": "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0184", - "creationTime": "", - "description": "Ensure that the Kubelet is configured to only use strong cryptographic ciphers.", - "remediation": "If using a Kubelet config file, edit the file to set `TLSCipherSuites:` to `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256` or to a subset of these values.\n\n If using executable arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the `--tls-cipher-suites` parameter as follows, or to a subset of these values.\n\n \n```\n --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256\n\n```\n Based on your system, restart the `kubelet` service. 
For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "rules": [], - "baseScore": 5 - }, - { - "guid": "", - "name": "Ensure that the Controller Manager RotateKubeletServerCertificate argument is set to true", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0149", - "creationTime": "", - "description": "Enable kubelet server certificate rotation on controller-manager.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--feature-gates` parameter to include `RotateKubeletServerCertificate=true`.\n\n \n```\n--feature-gates=RotateKubeletServerCertificate=true\n\n```", - "rules": [], - "baseScore": 6 - }, - { - "guid": "", - "name": "K8s common labels usage", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "devops" - ] - }, - "controlID": "C-0077", - "creationTime": "", - "description": "Kubernetes common labels help manage and monitor Kubernetes cluster using different tools such as kubectl, dashboard and others in an interoperable way. Refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ for more information. This control helps you find objects that don't have any of these labels defined.", - "remediation": "Define applicable labels or use the exception mechanism to prevent further notifications.", - "rules": [], - "baseScore": 2 - }, - { - "guid": "", - "name": "Access container service account", - "attributes": { - "armoBuiltin": true, - "microsoftMitreColumns": [ - "Credential access" - ], - "rbacQuery": "Container service account mapping", - "controlTypeTags": [ - "compliance", - "security-impact" - ], - "attackTracks": [ - { - "categories": [ - "Credential access", - "Impact - K8s API access" - ], - "attackTrack": "container" - } - ] - }, - "controlID": "C-0053", - "creationTime": "", - "description": "Attackers who obtain access to a pod can use its SA token to communicate with KubeAPI server. All PODs with SA token mounted (if such token has a Role or a ClusterRole binding) are considerred potentially dangerous.", - "remediation": "Verify that RBAC is enabled. 
Follow the least privilege principle and ensure that only necessary PODs have SA token mounted into them.", - "rules": [], - "baseScore": 6 - }, - { - "guid": "", - "name": "Ensure that a unique Certificate Authority is used for etcd", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0159", - "creationTime": "", - "description": "Use a different certificate authority for etcd from the one used for Kubernetes.", - "remediation": "Follow the etcd documentation and create a dedicated certificate authority setup for the etcd service.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--trusted-ca-file=\n\n```", - "rules": [], - "baseScore": 8 - }, - { - "guid": "", - "name": "Ensure that the Controller Manager --service-account-private-key-file argument is set as appropriate", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0147", - "creationTime": "", - "description": "Explicitly set a service account private key file for service accounts on the controller manager.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--service-account-private-key-file` parameter to the private key file for service accounts.\n\n \n```\n--service-account-private-key-file=\n\n```", - "rules": [], - "baseScore": 6 - }, - { - "guid": "", - "name": "Ensure that the etcd data directory ownership is set to etcd:etcd", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0103", - "creationTime": "", - "description": "Ensure that the etcd data directory ownership is set to `etcd:etcd`.", - "remediation": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). For example,\n\n \n```\nchown etcd:etcd /var/lib/etcd\n\n```", - "rules": [], - "baseScore": 7 - }, - { - "guid": "", - "name": "Host PID/IPC privileges", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Privilege escalation" - ] - } - ] - }, - "controlID": "C-0038", - "creationTime": "", - "description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all PODs using hostPID or hostIPC privileges.", - "remediation": "Remove hostPID and hostIPC from the yaml file(s) privileges unless they are absolutely necessary.", - "rules": [], - "baseScore": 7 - }, - { - "guid": "", - "name": "Ensure that the cluster-admin role is only used where required", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0185", - "creationTime": "", - "description": "The RBAC role `cluster-admin` provides wide-ranging powers over the environment and should be used only where and when needed.", - "remediation": "Identify all clusterrolebindings to the cluster-admin role. 
Check if they are used and if they need this role or if they could use a role with fewer privileges.\n\n Where possible, first bind users to a lower privileged role and then remove the clusterrolebinding to the cluster-admin role :\n\n \n```\nkubectl delete clusterrolebinding [name]\n\n```", - "rules": [], - "baseScore": 8 - }, - { - "guid": "", - "name": "If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0170", - "creationTime": "", - "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file has permissions of 600 or more restrictive.", - "remediation": "Run the following command (using the config file location identied in the Audit step)\n\n \n```\nchmod 600 /var/lib/kubelet/config.yaml\n\n```", - "rules": [], - "baseScore": 7 - }, - { - "guid": "", - "name": "CVE-2022-47633-kyverno-signature-bypass", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security" - ], - "attackTracks": [ - { - "attackTrack": "container", - "categories": [ - "Initial access", - "Execution" - ] - } - ] - }, - "controlID": "C-0091", - "creationTime": "", - "description": "CVE-2022-47633 is a high severity vulnerability in Kyverno, it enables attackers to bypass the image signature validation of policies using a malicious image repository or MITM proxy", - "remediation": "Update your Grafana to 9.2.4 or above", - "rules": [], - "baseScore": 8 - }, - { - "guid": "", - "name": "Ensure that the --hostname-override argument is not set", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0179", - "creationTime": "", - "description": "Do not override node hostnames.", - "remediation": "Edit the kubelet service file `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` on each worker node and remove the `--hostname-override` argument from the `KUBELET_SYSTEM_PODS_ARGS` variable.\n\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "rules": [], - "baseScore": 3 - }, - { - "guid": "", - "name": "Ensure that the Controller Manager --use-service-account-credentials argument is set to true", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0146", - "creationTime": "", - "description": "Use individual service account credentials for each controller.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node to set the below parameter.\n\n \n```\n--use-service-account-credentials=true\n\n```", - "rules": [], - "baseScore": 4 - }, - { - "guid": "", - "name": "Minimize access to secrets", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0186", - "creationTime": "", - "description": "The Kubernetes API stores secrets, which may be service account tokens for the Kubernetes API or credentials used by workloads in the cluster. 
Access to these secrets should be restricted to the smallest possible group of users to reduce the risk of privilege escalation.", - "remediation": "Where possible, remove `get`, `list` and `watch` access to `secret` objects in the cluster.", - "rules": [], - "baseScore": 6 - }, - { - "guid": "", - "name": "Ensure that the --client-ca-file argument is set as appropriate", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0174", - "creationTime": "", - "description": "Enable Kubelet authentication using certificates.", - "remediation": "If using a Kubelet config file, edit the file to set `authentication: x509: clientCAFile` to the location of the client CA file.\n\n If using command line arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_AUTHZ_ARGS` variable.\n\n \n```\n--client-ca-file=\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", - "rules": [], - "baseScore": 6 - }, - { - "guid": "", - "name": "Ensure that the certificate authorities file permissions are set to 600 or more restrictive", - "attributes": { - "armoBuiltin": true - }, - "controlID": "C-0168", - "creationTime": "", - "description": "Ensure that the certificate authorities file has permissions of `600` or more restrictive.", - "remediation": "Run the following command to modify the file permissions of the `--client-ca-file`\n\n \n```\nchmod 600 \n\n```", - "rules": [], - "baseScore": 7 - }, - { - "guid": "", - "name": "Read-only port enabled", - "attributes": { - "armoBuiltin": true, - "controlTypeTags": [ - "security", - "compliance" - ], - "attackTracks": [ - { - "attackTrack": "node", - "categories": [ - "Initial access" - ] - } - ] - }, - "controlID": "C-0082", - "creationTime": "", - "description": "", - "remediation": "Start the kubelet with the --read-only-port flag set to 0.", - "rules": [], - "baseScore": 9 - }, - { - "guid": "", - "name": "CVE-2022-24348-argocddirtraversal", - "attributes": { - "armoBuiltin": true, "controlTypeTags": [ "security" ], "attackTracks": [ { "categories": [ - "Privilege escalation" + "Credential access" ], - "attackTrack": "container" + "attackTrack": "workload-external-track" } ] }, - "controlID": "C-0081", + "controlID": "C-0261", "creationTime": "", - "description": "CVE-2022-24348 is a major software supply chain 0-day vulnerability in the popular open source CD platform Argo CD which can lead to privilege escalation and information disclosure.", - "remediation": "Update your ArgoCD deployment to fixed versions (v2.1.9,v2.2.4 or v2.3.0)", + "description": "Potential attacker may gain access to a workload and steal its ServiceAccount token. Therefore, it is recommended to disable automatic mapping of the ServiceAccount tokens in ServiceAccount configuration. Enable it only for workloads that need to use them and ensure that this ServiceAccount is not bound to an unnecessary ClusterRoleBinding or RoleBinding.", + "remediation": "Disable automatic mounting of service account tokens to pods at the workload level, by specifying automountServiceAccountToken: false. 
Enable it only for workloads that need to use them and ensure that this ServiceAccount doesn't have unnecessary permissions", "rules": [], - "baseScore": 4 + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + } }, { "guid": "", - "name": "Label usage for resources", + "name": "Writable hostPath mount", "attributes": { - "armoBuiltin": true, + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" + ] + } + ], + "microsoftMitreColumns": [ + "Persistence", + "Lateral Movement" + ], "controlTypeTags": [ - "devops" + "security", + "compliance", + "devops", + "security-impact", + "smartRemediation" ] }, - "controlID": "C-0076", + "controlID": "C-0045", "creationTime": "", - "description": "It is recommended to set labels that identify semantic attributes of your application or deployment. For example, { app: myapp, tier: frontend, phase: test, deployment: v3 }. These labels can used to assign policies to logical groups of the deployments as well as for presentation and tracking purposes. This control helps you find deployments without any of the expected labels.", - "remediation": "Define labels that are most suitable to your needs of use the exceptions to prevent further notifications.", + "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host and gain persistence.", + "remediation": "Refrain from using the hostPath mount or use the exception mechanism to remove unnecessary notifications.", "rules": [], - "baseScore": 2 + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Storage", + "id": "Cat-8" + } + } }, { "guid": "", - "name": "Minimize the admission of containers with added capabilities", + "name": "Exposure to internet via Gateway API or Istio Ingress", "attributes": { - "armoBuiltin": true + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Initial Access" + ] + }, + { + "attackTrack": "service-destruction", + "categories": [ + "Initial Access" + ] + }, + { + "attackTrack": "external-workload-with-cluster-takeover-roles", + "categories": [ + "Initial Access" + ] + }, + { + "attackTrack": "workload-unauthenticated-service", + "categories": [ + "Initial Access" + ] + } + ] }, - "controlID": "C-0200", + "controlID": "C-0266", "creationTime": "", - "description": "Do not generally permit containers with capabilities assigned beyond the default set.", - "remediation": "Ensure that `allowedCapabilities` is not present in policies for the cluster unless it is set to an empty array.", + "description": "This control detect workloads that are exposed on Internet through a Gateway API (HTTPRoute,TCPRoute, UDPRoute) or Istio Gateway. 
It fails in case it find workloads connected with these resources.", + "remediation": "The user can evaluate its exposed resources and apply relevant changes wherever needed.", "rules": [], - "baseScore": 5 + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + } }, { "guid": "", - "name": "Ensure that the --authorization-mode argument is not set to AlwaysAllow", + "name": "Ensure memory limits are set", "attributes": { - "armoBuiltin": true + "controlTypeTags": [ + "compliance", + "devops", + "security" + ], + "attackTracks": [ + { + "attackTrack": "service-destruction", + "categories": [ + "Denial of service" + ] + } + ] }, - "controlID": "C-0173", + "controlID": "C-0271", "creationTime": "", - "description": "Do not allow all requests. Enable explicit authorization.", - "remediation": "If using a Kubelet config file, edit the file to set `authorization: mode` to `Webhook`.\n\n If using executable arguments, edit the kubelet service file `/etc/kubernetes/kubelet.conf` on each worker node and set the below parameter in `KUBELET_AUTHZ_ARGS` variable.\n\n \n```\n--authorization-mode=Webhook\n\n```\n Based on your system, restart the `kubelet` service. For example:\n\n \n```\nsystemctl daemon-reload\nsystemctl restart kubelet.service\n\n```", + "description": "This control identifies all Pods for which the memory limits are not set.", + "remediation": "Set the memory limits or use exception mechanism to avoid unnecessary notifications.", "rules": [], - "baseScore": 6 + "baseScore": 8, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] + }, + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Resource management", + "id": "Cat-7" + } + } }, { "guid": "", - "name": "Ensure that the admission control plugin NodeRestriction is set", + "name": "External facing", "attributes": { - "armoBuiltin": true + "controlTypeTags": [ + "security" + ], + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Initial Access" + ] + }, + { + "attackTrack": "service-destruction", + "categories": [ + "Initial Access" + ] + }, + { + "attackTrack": "external-workload-with-cluster-takeover-roles", + "categories": [ + "Initial Access" + ] + }, + { + "categories": [ + "Initial Access" + ], + "attackTrack": "external-database-without-authentication" + }, + { + "attackTrack": "workload-unauthenticated-service", + "categories": [ + "Initial Access" + ] + } + ] }, - "controlID": "C-0127", + "controlID": "C-0256", "creationTime": "", - "description": "Limit the `Node` and `Pod` objects that a kubelet could modify.", - "remediation": "Follow the Kubernetes documentation and configure `NodeRestriction` plug-in on kubelets. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the `--enable-admission-plugins` parameter to a value that includes `NodeRestriction`.\n\n \n```\n--enable-admission-plugins=...,NodeRestriction,...\n\n```", + "description": "This control detect workloads that are exposed on Internet through a Service (NodePort or LoadBalancer) or Ingress. 
It fails in case it find workloads connected with these resources.", + "remediation": "The user can evaluate its exposed resources and apply relevant changes wherever needed.", "rules": [], - "baseScore": 4 + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster" + ] + } }, { "guid": "", - "name": "Ensure that the --peer-auto-tls argument is not set to true", + "name": "HostPath mount", "attributes": { - "armoBuiltin": true + "attackTracks": [ + { + "attackTrack": "workload-external-track", + "categories": [ + "Privilege Escalation (Node)" + ] + } + ], + "microsoftMitreColumns": [ + "Privilege escalation" + ], + "controlTypeTags": [ + "security", + "compliance", + "smartRemediation" + ] }, - "controlID": "C-0158", + "controlID": "C-0048", "creationTime": "", - "description": "Do not use automatically generated self-signed certificates for TLS connections between peers.", - "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and either remove the `--peer-auto-tls` parameter or set it to `false`.\n\n \n```\n--peer-auto-tls=false\n\n```", + "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host. This control identifies all the pods using hostPath mount.", + "remediation": "Remove hostPath mounts unless they are absolutely necessary and use exception mechanism to remove notifications.", "rules": [], - "baseScore": 6 - }, - { - "guid": "", - "name": "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive", - "attributes": { - "armoBuiltin": true + "baseScore": 7, + "scanningScope": { + "matches": [ + "cluster", + "file" + ] }, - "controlID": "C-0164", - "creationTime": "", - "description": "If `kube-proxy` is running, and if it is using a file-based kubeconfig file, ensure that the proxy kubeconfig file has permissions of `600` or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the each worker node. 
For example,\n\n \n```\nchmod 600 \n\n```", - "rules": [], - "baseScore": 6 + "category": { + "name": "Workload", + "id": "Cat-5", + "subCategory": { + "name": "Storage", + "id": "Cat-8" + } + } } ], "Rules": [ { "guid": "", - "name": "pod-specific-version-tag", + "name": "rule-can-access-proxy-subresource", "attributes": { - "armoBuiltin": true + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" }, "creationTime": "", - "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n is_latest_image_tag(container)\n\tpath := sprintf(\"spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in pod: %v has latest image tag.\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n is_latest_image_tag(container)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has latest image tag.\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n is_latest_image_tag(container)\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has latest image tag.\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\nis_latest_image_tag(container) {\n endswith(container.image, \":latest\")\n}", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user has access to proxy subresources\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"get\", \"create\", \"connect\",\"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"nodes/proxy\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := 
array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can access proxy subresources\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n", "resourceEnumerator": "", "ruleLanguage": "Rego", "match": [ { "apiGroups": [ - "" + "rbac.authorization.k8s.io" ], "apiVersions": [ "v1" ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "Fails if container has image tag set to latest", - "remediation": "Make sure you define a specific image tag for container and not 'latest'.", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "rule-can-create-bind-escalate-role", - "attributes": { - "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", - "armoBuiltin": true, - "useUntilKubescapeVersion": "v1.0.133" - }, - "creationTime": "", - "rule": "\npackage armo_builtins\nimport data.cautils as cautils\n\n# ================= create/update ===============================\n\n# fails if user has access to create/update rolebindings/clusterrolebindings\n# RoleBinding to Role\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n can_create_update_to_role_resource(rule)\n can_create_update_to_role_verb(rule)\n \n rolebinding.roleRef.kind == \"Role\"\n rolebinding.roleRef.name == role.metadata.name\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n \tmsga := {\n\t\t\"alertMessage\": sprintf(\"The following %v: %v, can create/update rolebinding/clusterrolebinding\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 3,\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n# fails if user has access to create/update rolebindings/clusterrolebindings\n# RoleBinding to ClusterRole\ndeny [msga]{\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n can_create_update_to_role_resource(rule)\n 
can_create_update_to_role_verb(rule)\n\n rolebinding.roleRef.kind == \"ClusterRole\"\n rolebinding.roleRef.name == role.metadata.name\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n \tmsga := {\n\t\t\"alertMessage\": sprintf(\"The following %v: %v, can create/update rolebinding/clusterrolebinding\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 3,\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n# fails if user has access to create/update rolebindings/clusterrolebindings\n# ClusterRoleBinding to ClusterRole\ndeny [msga]{\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n clusterrolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n clusterrolebinding := clusterrolebindings[_]\n\n rule:= role.rules[_]\n can_create_update_to_role_resource(rule)\n can_create_update_to_role_verb(rule)\n\n clusterrolebinding.roleRef.kind == \"ClusterRole\"\n clusterrolebinding.roleRef.name == role.metadata.name\n subject := clusterrolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n \tmsga := {\n\t\t\"alertMessage\": sprintf(\"The following %v: %v, can create/update rolebinding/clusterrolebinding\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 3,\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,clusterrolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n# ================= bind ===============================\n\n# fails if user has access to bind clusterroles/roles\n# RoleBinding to Role\ndeny [msga] {\n roles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\n can_bind_to_role_resource(rule)\n can_bind_to_role_verb(rule)\n\n rolebinding.roleRef.kind == \"Role\"\n rolebinding.roleRef.name == role.metadata.name\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n \tmsga := {\n\t\t\"alertMessage\": sprintf(\"The following %v: %v, can bind roles/clusterroles\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 3,\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\n# fails if user has access to bind clusterroles/roles\n# RoleBinding to ClusterRole\ndeny [msga]{\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n \n can_bind_to_role_resource(rule)\n can_bind_to_role_verb(rule)\n\n rolebinding.roleRef.kind == \"ClusterRole\"\n rolebinding.roleRef.name == role.metadata.name\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"The following %v: %v, can bind roles/clusterroles\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 3,\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n 
\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\n# fails if user has access to bind clusterroles/roles\n# ClusterRoleBinding to ClusterRole\ndeny [msga]{\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n clusterrolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n clusterrolebinding := clusterrolebindings[_]\n\n rule:= role.rules[_]\ncan_bind_to_role_resource(rule)\n can_bind_to_role_verb(rule)\n\n clusterrolebinding.roleRef.kind == \"ClusterRole\"\n clusterrolebinding.roleRef.name == role.metadata.name\n subject := clusterrolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n \t\n msga := {\n\t\t\"alertMessage\": sprintf(\"The following %v: %v, can bind roles/clusterroles\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 3,\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,clusterrolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\n# ================= escalate ===============================\n\n\n# fails if user has access to escalate rolebindings/clusterrolebindings\n# RoleBinding to Role\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n can_escalate_to_role_resource(rule)\n can_escalate_to_role_verb(rule)\n \n rolebinding.roleRef.kind == \"Role\"\n rolebinding.roleRef.name == role.metadata.name\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n \tmsga := {\n\t\t\"alertMessage\": sprintf(\"The following %v: %v, can escalate rolebinding/clusterrolebinding\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 3,\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n# fails if user has access to escalate rolebindings/clusterrolebindings\n# RoleBinding to ClusterRole\ndeny [msga]{\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n can_escalate_to_role_resource(rule)\n can_escalate_to_role_verb(rule)\n\n rolebinding.roleRef.kind == \"ClusterRole\"\n rolebinding.roleRef.name == role.metadata.name\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n \tmsga := {\n\t\t\"alertMessage\": sprintf(\"The following %v: %v, can escalate rolebinding/clusterrolebinding\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 3,\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n# fails if user has access to escalate rolebindings/clusterrolebindings\n# ClusterRoleBinding to ClusterRole\ndeny [msga]{\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n clusterrolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == 
\"ClusterRoleBinding\"]\n role:= roles[_]\n clusterrolebinding := clusterrolebindings[_]\n\n rule:= role.rules[_]\n can_escalate_to_role_resource(rule)\n can_escalate_to_role_verb(rule)\n\n clusterrolebinding.roleRef.kind == \"ClusterRole\"\n clusterrolebinding.roleRef.name == role.metadata.name\n subject := clusterrolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n \t\n \tmsga := {\n\t\t\"alertMessage\": sprintf(\"The following %v: %v, can escalate rolebinding/clusterrolebinding\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 3,\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,clusterrolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\n# ============== escalate =====================\n\ncan_escalate_to_role_resource(rule){\n cautils.list_contains(rule.resources,\"clusterroles\")\n}\n\ncan_escalate_to_role_resource(rule){\n cautils.list_contains(rule.resources,\"roles\")\n}\n\ncan_escalate_to_role_resource(rule){\n is_api_group(rule)\n cautils.list_contains(rule.resources,\"*\")\n}\n\ncan_escalate_to_role_verb(rule) {\n cautils.list_contains(rule.verbs, \"escalate\")\n}\n\ncan_escalate_to_role_verb(rule) {\n cautils.list_contains(rule.verbs, \"*\")\n}\n\n\n# ============== bind =====================\n\ncan_bind_to_role_resource(rule){\n cautils.list_contains(rule.resources,\"clusterroles\")\n}\n\ncan_bind_to_role_resource(rule){\n cautils.list_contains(rule.resources,\"roles\")\n}\n\ncan_bind_to_role_resource(rule){\n is_api_group(rule)\n cautils.list_contains(rule.resources,\"*\")\n}\n\n\ncan_bind_to_role_verb(rule) {\n cautils.list_contains(rule.verbs, \"bind\")\n}\n\ncan_bind_to_role_verb(rule) {\n cautils.list_contains(rule.verbs, \"*\")\n}\n\n# ============== create/update =====================\n\ncan_create_update_to_role_resource(rule) {\n cautils.list_contains(rule.resources,\"rolebindings\")\n}\n\ncan_create_update_to_role_resource(rule) {\n cautils.list_contains(rule.resources,\"clusterrolebindings\")\n}\n\ncan_create_update_to_role_resource(rule) {\n is_api_group(rule)\n cautils.list_contains(rule.resources,\"*\")\n}\n\n\ncan_create_update_to_role_verb(rule) {\n cautils.list_contains(rule.verbs, \"create\")\n}\n\ncan_create_update_to_role_verb(rule) {\n cautils.list_contains(rule.verbs, \"update\")\n}\n\ncan_create_update_to_role_verb(rule) {\n cautils.list_contains(rule.verbs, \"patch\")\n}\n\ncan_create_update_to_role_verb(rule) {\n cautils.list_contains(rule.verbs, \"*\")\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"rbac.authorization.k8s.io\"\n}", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], "resources": [ "Role", "ClusterRole", @@ -9872,63 +25319,18 @@ ] } ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "configInputs": null, + "ruleDependencies": [], "controlConfigInputs": null, - "description": "determines which users can create/update rolebindings/clusterrolebindings or bind roles/clusterroles", + "description": "determines which users can access proxy subresources", "remediation": "", "ruleQuery": "armo_builtins", "relevantCloudProviders": null }, { "guid": "", - "name": "ensure-that-the-controller-manager-pod-specification-file-permissions-are-set-to-600-or-more-restrictive", - "attributes": { - 
"hostSensorRule": "true", - "armoBuiltin": true - }, + "name": "ensure-that-the-api-server-DenyServiceExternalIPs-is-set", "creationTime": "", - "rule": "package armo_builtins\n\nimport data.cautils as cautils\nimport future.keywords.in\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"controllerManagerInfo\", \"specsFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test \n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t]) \n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "configInputs": null, - "controlConfigInputs": null, - "description": "Ensure that the controller manager pod specification file has permissions of `600` or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "ensure-that-the-api-server-etcd-cafile-argument-is-set-as-appropriate", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"API server is not configured to use SSL Certificate Authority file for etcd\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--etcd-cafile\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--etcd-cafile=\",\n\t\t}],\n\t}\n}\n", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"admission control plugin DenyServiceExternalIPs is not enabled.\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--enable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tnot \"DenyServiceExternalIPs\" in flag.values\n\n\t# get fixed and failed paths\n\tresult = get_retsult(i)\n}\n\nget_retsult(i) = result {\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": sprintf(\"--enable-admission-plugins=%v\", [\"DenyServiceExternalIPs\"]),\n\t\t}],\n\t}\n}\n", "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", "ruleLanguage": "Rego", "match": [ @@ -9945,21 +25347,17 @@ } ], 
"ruleDependencies": [], - "configInputs": null, "controlConfigInputs": null, - "description": "etcd should be configured to make use of TLS encryption for client connections.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the etcd certificate authority file parameter.\n\n \n```\n--etcd-cafile=\n\n```\n\n#### Impact Statement\nTLS and client certificate authentication must be configured for etcd.\n\n#### Default Value\nBy default, `--etcd-cafile` is not set.", + "description": "This admission controller rejects all net-new usage of the Service field externalIPs.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and add the `--enable-admission-plugins=DenyServiceExternalIPs` parameter\n\n or\n\n The Kubernetes API server flag disable-admission-plugins takes a comma-delimited list of admission control plugins to be disabled, even if they are in the list of plugins enabled by default.\n\n `kube-apiserver --disable-admission-plugins=DenyServiceExternalIPs,AlwaysDeny ...`\n\n#### Impact Statement\nWhen enabled, users of the cluster may not create new Services which use externalIPs and may not add new values to externalIPs on existing Service objects.\n\n#### Default Value\nBy default, `--token-auth-file` argument is not set.", "ruleQuery": "", "relevantCloudProviders": null }, { "guid": "", "name": "naked-pods", - "attributes": { - "armoBuiltin": true - }, "creationTime": "", - "rule": "package armo_builtins\n\n\n# Fails if workload is Pod\ndeny[msga] {\n pod := input[_]\n\tpod.kind == \"Pod\"\n\tnot pod.metadata.ownerReferences\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v not associated with ReplicaSet or Deployment\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"metadata.ownerReferences\", \"value\": \"YOUR_VALUE\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n", + "rule": "package armo_builtins\n\n\n# Fails if workload is Pod\ndeny[msga] {\n pod := input[_]\n\tpod.kind == \"Pod\"\n\tnot pod.metadata.ownerReferences\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v not associated with ReplicaSet or Deployment\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n", "resourceEnumerator": "", "ruleLanguage": "Rego", "match": [ @@ -9976,22 +25374,18 @@ } ], "ruleDependencies": [], - "configInputs": null, "controlConfigInputs": null, "description": "Don't use naked Pods (that is, Pods not bound to a ReplicaSet or Deployment) if you can avoid it. Naked Pods will not be rescheduled in the event of a node failure.", - "remediation": "", + "remediation": "Create necessary deployment object for every Pod making any Pod a first class citizen in your IaC architecture. 
Example command: kubectl create deployment nginx-depl --image=nginx:1.19", "ruleQuery": "armo_builtins", "relevantCloudProviders": null }, { "guid": "", - "name": "alert-mount-potential-credentials-paths", - "attributes": { - "armoBuiltin": true - }, + "name": "ensure-https-loadbalancers-encrypted-with-tls-aws", "creationTime": "", - "rule": "package armo_builtins\nimport future.keywords.if\n\n\ndeny[msga] {\n\tprovider := data.dataControlInputs.cloudProvider\n\tprovider != \"\"\n\tresources := input[_]\n\tvolumes_data := get_volumes(resources)\n volumes := volumes_data[\"volumes\"]\n volume := volumes[i]\n\tbeggining_of_path := volumes_data[\"beggining_of_path\"]\n result := is_unsafe_paths(volume, beggining_of_path, provider,i)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as volume with potential credentials access.\", [resources.kind, resources.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resources]\n\t\t}\n\t}\t\n}\n\n\t\n# get_volume - get resource volumes paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_volumes(resources) := result {\n\tresources_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresources_kinds[resources.kind]\n\tresult = {\"volumes\": resources.spec.template.spec.volumes, \"beggining_of_path\": \"spec.template.spec.\"}\n}\n\n# get_volume - get resource volumes paths for \"Pod\"\nget_volumes(resources) := result {\n\tresources.kind == \"Pod\"\n\tresult = {\"volumes\": resources.spec.volumes, \"beggining_of_path\": \"spec.\"}\n}\n\n# get_volume - get resource volumes paths for \"CronJob\"\nget_volumes(resources) := result {\n\tresources.kind == \"CronJob\"\n\tresult = {\"volumes\": resources.spec.jobTemplate.spec.template.spec.volumes, \"beggining_of_path\": \"spec.jobTemplate.spec.template.spec.\"}\n}\n\n\n# is_unsafe_paths - looking for cloud provider (eks/gke/aks) paths that have the potential of accessing credentials\nis_unsafe_paths(volume, beggining_of_path, provider, i) = result {\n\tunsafe := unsafe_paths(provider)\n\tunsafe[_] == fix_path(volume.hostPath.path)\n\tresult= sprintf(\"%vvolumes[%d].hostPath.path\", [beggining_of_path, i])\n}\n\n\n# fix_path - adding \"/\" at the end of the path if doesn't exist and if not a file path.\nfix_path(path) := result if {\n\n\t# filter file path\n not regex.match(`[\\\\w-]+\\\\.`, path)\n\n\t# filter path that doesn't end with \"/\"\n not endswith(path, \"/\")\n\n\t# adding \"/\" to the end of the path\n result = sprintf(\"%v/\", [path])\n} else := path\n\n\n\n# eks unsafe paths\nunsafe_paths(x) := [\"/.aws/\", \n\t\t\t\t\t\"/.aws/config/\", \n\t\t\t\t\t\"/.aws/credentials/\"] if {x==\"eks\"}\n\n# aks unsafe paths\nunsafe_paths(x) := [\"/etc/\",\n\t\t\t\t\t\"/etc/kubernetes/\",\n\t\t\t\t\t\"/etc/kubernetes/azure.json\", \n\t\t\t\t\t\"/.azure/\",\n\t\t\t\t\t\"/.azure/credentials/\", \n\t\t\t\t\t\"/etc/kubernetes/azure.json\"] if {x==\"aks\"}\n\n# gke unsafe paths\nunsafe_paths(x) := [\"/.config/gcloud/\", \n\t\t\t\t\t\"/.config/\", \n\t\t\t\t\t\"/gcloud/\", \n\t\t\t\t\t\"/.config/gcloud/application_default_credentials.json\",\n\t\t\t\t\t\"/gcloud/application_default_credentials.json\"] if {x==\"gke\"}\n\n", - "resourceEnumerator": "", + "rule": "package armo_builtins\n\nimport data.kubernetes.api.client\n\n# deny LoadBalancer services that are configured for ssl connection (port: 
443), but don't have TLS certificate set.\ndeny[msga] {\n\n\twl_kind := \"Service\"\n\twl_type := \"LoadBalancer\"\n\twl_required_annotation := \"service.beta.kubernetes.io/aws-load-balancer-ssl-cert\"\n\n\t# filterring LoadBalancers\n\twl := \tinput[_]\n\twl.kind == wl_kind\n\twl.spec.type == wl_type\n\n\t# filterring loadbalancers with port 443.\n\twl.spec.ports[_].port == 443\n\n\t# filterring annotations without ssl cert confgiured.\n\tannotations := object.get(wl, [\"metadata\", \"annotations\"], [])\n\tssl_cert_annotations := [annotations[i] | annotation = i; startswith(i, wl_required_annotation)]\n\tcount(ssl_cert_annotations) == 0\n\n\t# prepare message data.\n\talert_message := sprintf(\"LoadBalancer '%v' has no TLS configured\", [wl.metadata.name])\n\tfailed_paths := []\n\tfixed_paths := [{\"path\": sprintf(\"metadata.annotations['%v']\", [wl_required_annotation]), \"value\": \"AWS_LOADBALANCER_SSL_CERT\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": alert_message,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_paths,\n\t\t\"fixPaths\": fixed_paths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": wl\n\t\t}\n\t}\n}\n\n", + "resourceEnumerator": "package armo_builtins\n\nimport data.kubernetes.api.client\n\ndeny[msga] {\n\tobj := input[_]\n\tobj.kind == \"Service\"\n\tobj.spec.type == \"LoadBalancer\"\n\tmsga := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n", "ruleLanguage": "Rego", "match": [ { @@ -10002,208 +25396,24 @@ "v1" ], "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" + "Service" ] } ], "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "determines if any workload contains a hostPath volume", - "remediation": "Try to refrain from using hostPath mounts", - "ruleQuery": "", - "relevantCloudProviders": [ - "EKS", - "GKE", - "AKS" - ] - }, - { - "guid": "", - "name": "has-image-signature", - "attributes": { - "armoBuiltin": true, - "useFromKubescapeVersion": "v2.0.184" - }, - "creationTime": "", - "rule": "package armo_builtins\nimport data\n\ndeny[msga] {\n\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n not cosign.has_signature(container.image)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image: %v is not signed\", [ container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [container.image],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t},\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\twl_kinds[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n not cosign.has_signature(container.image)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"image: %v is not signed\", [ container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [container.image],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t},\n\t}\n}\n\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\n not cosign.has_signature(container.image)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"image: 
%v is not signed\", [ container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [container.image],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t},\n\t}\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "Ensures that all images contain some signature", - "remediation": "Replace the image with a signed image", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "rule-can-create-pod", - "attributes": { - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user has create access to pods\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can create pods\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": 
null, - "description": "determines which users can create pods", - "remediation": "", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "sudo-in-container-entrypoint", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n result := is_sudo_entrypoint(container, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v have sudo in entrypoint\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n result := is_sudo_entrypoint(container, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v have sudo in entrypoint\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_sudo_entrypoint(container, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in cronjob: %v have sudo in entrypoint\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_sudo_entrypoint(container, beggining_of_path, i) = path {\n\tpath = [sprintf(\"%vcontainers[%v].command[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | command = container.command[k]; contains(command, \"sudo\")]\n\tcount(path) > 0\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, "controlConfigInputs": null, "description": "", "remediation": "", "ruleQuery": "armo_builtins", - "relevantCloudProviders": null + "relevantCloudProviders": [ + "EKS" + ] }, { "guid": "", - "name": "container-image-repository", - "attributes": { - "m$K8sThreatMatrix": "Collection::Images from private registry", - "armoBuiltin": true - }, + "name": "ensure-external-secrets-storage-is-in-use", "creationTime": "", - "rule": "package armo_builtins\nimport data\nimport future.keywords.if\n# import data.kubernetes.api.client as client\n\nuntrusted_image_repo[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\timage := container.image\n\tnot 
image_in_allowed_list(image)\n\tpath := sprintf(\"spec.containers[%v].image\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\nuntrusted_image_repo[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\timage := container.image\n not image_in_allowed_list(image)\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nuntrusted_image_repo[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\timage := container.image\n not image_in_allowed_list(image)\n\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# image_in_allowed_list - rule to check if an image complies with imageRepositoryAllowList.\nimage_in_allowed_list(image){\n\n\t# see default-config-inputs.json for list values\n\tallowedlist := data.postureControlInputs.imageRepositoryAllowList\n\tregistry := allowedlist[_]\n\n\tregex.match(regexify(registry), docker_host_wrapper(image))\n}\n\n\n# docker_host_wrapper - wrap an image without a host with a docker hub host 'docker.io'. 
\n# An image that doesn't contain '/' is assumed to not having a host and therefore associated with docker hub.\ndocker_host_wrapper(image) := result if {\n\tnot contains(image, \"/\")\n\tresult := sprintf(\"docker.io/%s\", [image])\n} else := image\n\n\n# regexify - returns a registry regex to be searched only for the image host.\nregexify(registry) := result {\n\tendswith(registry, \"/\")\n\tresult = sprintf(\"^%s.*$\", [registry])\n} else := sprintf(\"^%s\\/.*$\", [registry])\n", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.kubernetes.api.client\n\n# deny workloads that doesn't support external service provider (secretProviderClass)\n# reference - https://secrets-store-csi-driver.sigs.k8s.io/concepts.html\ndeny[msga] {\n\n resources := input[_]\n\n\t# get volume paths for each resource\n\tvolumes_path := get_volumes_path(resources)\n\n\t# get volumes for each resources\n\tvolumes := object.get(resources, volumes_path, [])\n\n\t# continue if secretProviderClass not found in resource\n\thaving_secretProviderClass := {i | volumes[i].csi.volumeAttributes.secretProviderClass}\n \tcount(having_secretProviderClass) == 0\n\n\n\t# prepare message data.\n\talert_message := sprintf(\"%s: %v is not using external secret storage\", [resources.kind, resources.metadata.name])\n\tfailed_paths := []\n\tfixed_paths := [{\"path\":sprintf(\"%s[0].csi.volumeAttributes.secretProviderClass\",[concat(\".\", volumes_path)]), \"value\":\"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": alert_message,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_paths,\n\t\t\"fixPaths\": fixed_paths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resources]\n\t\t}\n\t}\n}\n\n\n# get_volume_path - get resource volumes paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_volumes_path(resources) := result {\n\tresources_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresources_kinds[resources.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"Pod\"\nget_volumes_path(resources) := result {\n\tresources.kind == \"Pod\"\n\tresult = [\"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"CronJob\"\nget_volumes_path(resources) := result {\n\tresources.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"volumes\"]\n}\n", "resourceEnumerator": "", "ruleLanguage": "Rego", "match": [ @@ -10246,61 +25456,20 @@ } ], "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.imageRepositoryAllowList" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.imageRepositoryAllowList", - "name": "Allowed image repositories", - "description": "Kubescape checks that all the containers are using images from the allowed repositories provided in the following list." 
- } - ], - "description": "Fails if image is not from allowed repository", - "remediation": "", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "ensure-that-the-admission-control-plugin-EventRateLimit-is-set", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsg := {\n\t\t\"alertMessage\": \"The API server is not configured to limit the rate at which it accepts requests. This could lead to a denial of service attack\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--enable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tnot \"EventRateLimit\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, [\"EventRateLimit\"])\n\tfixed_flag = sprintf(\"%s=%s\", [\"--enable-admission-plugins\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--enable-admission-plugins\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--enable-admission-plugins=EventRateLimit\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, "controlConfigInputs": null, - "description": "Limit the rate at which the API server accepts requests.", - "remediation": "Follow the Kubernetes documentation and set the desired limits in a configuration file.\n\n Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` and set the below parameters.\n\n \n```\n--enable-admission-plugins=...,EventRateLimit,...\n--admission-control-config-file=\n\n```\n\n#### Impact Statement\nYou need to carefully tune in limits as per 
your environment.\n\n#### Default Value\nBy default, `EventRateLimit` is not set.", - "ruleQuery": "", - "relevantCloudProviders": null + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": [] }, { "guid": "", - "name": "ensure-that-the-controller-manager.conf-file-permissions-are-set-to-600-or-more-restrictive", + "name": "ensure-that-the-API-server-pod-specification-file-permissions-are-set-to-600-or-more-restrictive", "attributes": { - "armoBuiltin": true, "hostSensorRule": "true" }, "creationTime": "", - "rule": "package armo_builtins\n\nimport data.cautils as cautils\nimport future.keywords.in\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"controllerManagerInfo\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test \n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"APIServerInfo\", \"specsFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n", "resourceEnumerator": "", "ruleLanguage": "Rego", "match": [], @@ -10322,104 +25491,22 @@ "packageName": "cautils" } ], - "configInputs": null, "controlConfigInputs": null, - "description": "Ensure that the `controller-manager.conf` file has permissions of 600 or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchmod 600 /etc/kubernetes/controller-manager.conf\n\n```", + "description": "Ensure that the API server pod specification file has permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```", "ruleQuery": "", "relevantCloudProviders": null }, { "guid": "", - "name": "rule-exposed-dashboard", + "name": "kubelet-strong-cryptographics-ciphers", "attributes": { - "m$K8sThreatMatrix": "Initial Access::Exposed Dashboard", - "armoBuiltin": true, - "useUntilKubescapeVersion": "v1.0.133" + "hostSensorRule": "true" }, "creationTime": "", - "rule": "\tpackage armo_builtins\n\n\t# input: pods\n\t# apiversion: v1\n\t# fails if dashboard exists and is exposed\n\n\tdeny[msga] {\n\t\tdeployment := input[_]\n\t\tstartswith(deployment.metadata.name, \"kubernetes-dashboard\")\n\t\tcontainer := deployment.spec.template.spec.containers[j]\n\t\tversion := trim_prefix(container.image, \"kubernetesui/dashboard:v\")\n\t\tto_number(replace(version, \".\", \"\")) < 201\n\t\t\n\t\tservice := input[_]\n\t\tservice.kind == \"Service\"\n\t\tisNodePortLbService(service)\n\t\tcount({x | service.spec.selector[x]; deployment.metadata.labels[x]}) == count(service.spec.selector)\n\t\tpath := sprintf(\"spec.template.spec.containers[%v]\", [format_int(j, 10)])\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"dashboard exists and is exposed %s\", [container.image]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [deployment]\n\t\t\t}\n\t\t}\n\t}\n\n\n\nisNodePortLbService(service) {\n\tservice.spec.type == \"NodePort\"\n}\n\nisNodePortLbService(service) {\n\tservice.spec.type == \"LoadBalancer\"\n}", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.13 https://workbench.cisecurity.org/sections/1126668/recommendations/1838663\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--tls-cipher-suites\")\n\n\tnot has_strong_cipher_set_via_cli(command)\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Kubelet is not configured to only use strong cryptographic ciphers\",\n\t\t\"alertScore\": 5,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--tls-cipher-suites\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.TLSCipherSuites\n\n\tnot is_value_in_strong_cliphers_set(yamlConfig.TLSCipherSuites)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Kubelet is not configured to only use strong cryptographic ciphers\",\n\t\t\"alertScore\": 5,\n\t\t\"reviewPaths\": [\"TLSCipherSuites\"],\n\t\t\"failedPaths\": [\"TLSCipherSuites\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": 
{\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--tls-cipher-suites\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Kubelet is not configured to only use strong cryptographic ciphers\",\n\t\t\"alertScore\": 5,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\nhas_strong_cipher_set_via_cli(command) {\n\tcontains(command, \"--tls-cipher-suites=\")\n\n\tstrong_cliphers := [\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_RSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_RSA_WITH_AES_128_GCM_SHA256\",\n\t]\n\n\tsome i\n\tcontains(command, sprintf(\"%v%v\", [\"--tls-cipher-suites=\", strong_cliphers[i]]))\n}\n\nis_value_in_strong_cliphers_set(value) {\n\tstrong_cliphers := [\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_RSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_RSA_WITH_AES_128_GCM_SHA256\",\n\t]\n\n\tsome x\n\tstrong_cliphers[x] == value\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n", "resourceEnumerator": "", "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Deployment", - "Service" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "fails if dashboard exists and is exposed", - "remediation": "", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "label-usage-for-resources", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tfixPath := no_label_or_no_label_usage(pod, \"\")\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"in the following pods a certain set of labels is not defined: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tpodSpec := wl.spec.template\n\tbeggining_of_pod_path := \"spec.template.\"\n\tfixPath := no_label_usage(wl, podSpec, beggining_of_pod_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v a certain set of labels is not defined:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n 
\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n#handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tpodSpec := wl.spec.jobTemplate.spec.template\n\tbeggining_of_pod_path := \"spec.jobTemplate.spec.template.\"\n\tfixPath := no_label_usage(wl, podSpec, beggining_of_pod_path)\n\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs a certain set of labels is not defined: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n# There is no label-usage in WL and also for his Pod\nno_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tpath1 := no_label_or_no_label_usage(wl, \"\")\n\tpath2 := no_label_or_no_label_usage(podSpec, beggining_of_pod_path)\n\tpath = array.concat(path1, path2)\n}\n \n# There is label-usage for WL but not for his Pod\nno_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tnot no_label_or_no_label_usage(wl, \"\")\n\tpath := no_label_or_no_label_usage(podSpec, beggining_of_pod_path)\n}\n\n# There is no label-usage for WL but there is for his Pod\nno_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tnot no_label_or_no_label_usage(podSpec, beggining_of_pod_path)\n\tpath := no_label_or_no_label_usage(wl, \"\")\n}\n\nno_label_or_no_label_usage(wl, beggining_of_path) = path{\n\tnot wl.metadata\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels\", [beggining_of_path]), \"value\": \"YOUR_VALUE\"}]\n}\n\nno_label_or_no_label_usage(wl, beggining_of_path) = path{\n\tmetadata := wl.metadata\n\tnot metadata.labels\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels\", [beggining_of_path]), \"value\": \"YOUR_VALUE\"}]\n}\n\nno_label_or_no_label_usage(wl, beggining_of_path) = path{\n\tlabels := wl.metadata.labels\n\tnot is_desired_label(labels)\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels\", [beggining_of_path]), \"value\": \"YOUR_VALUE\"}]\n}\n\nis_desired_label(labels) {\n\trecommended_labels := data.postureControlInputs.recommendedLabels\n\trecommended_label := recommended_labels[_]\n\tlabels[recommended_label]\n}\n\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Pod", - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet", - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.recommendedLabels" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.recommendedLabels", - "name": "Recommended Labels", - "description": "Kubescape checks that workloads have at least one of the following labels." - } - ], - "description": "check if a certain set of labels is defined, this is a configurable control. 
Initial list: app, tier, phase, version, owner, env.", - "remediation": "", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "strict-file-permissions-600", - "attributes": { - "hostSensorRule": "true", - "useFromKubescapeVersion": "v2.0.159", - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport data.cautils as cautils\nimport future.keywords.in\n\n# Fail for every file in data.postureControlInputs.fileObjPath\n# if the permissions of the file are more permissive that 600.\n# Expect (supposed to be fixed per control, not user configurable): \n# \t(required) data.postureControlInputs.fileObjPath - list of paths strings. The item delimiter is `.`.\n# \t(optional) data.postureControlInputs.kindFilter \n# \t(optional) data.postureControlInputs.pathGlob \ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tfilter_kind(obj.kind)\n\n\t# Get the file info using the input object-paths\n\trawObjPath = data.postureControlInputs.fileObjPath[_]\n\tobjPath := split(rawObjPath, \"/\")\n\tsubject := object.get(obj, objPath, false)\n\tsubject != false\n\n\t# Run the test for every file\n\tfiles := get_files(subject)\n\tfile = files[file_index]\n\tfile_path_glob(file.path)\n\n\t# Actual permissions test \n\tallowed_perms := 384 # 0o600 == 384\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Filter out irrelevant data from the alert object\n\tfile_filtered := filter_file(obj, objPath, file_index)\n\tobj_filtered := json.filter(obj, [\"apiVersion\", \"kind\", \"metadata\"])\n\toutput := object.union(file_filtered, obj_filtered)\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. 
actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": output},\n\t}\n}\n\n# Return always a list\nget_files(obj) = files {\n\tis_array(obj)\n\tfiles = obj\n}\n\nget_files(obj) = files {\n\tnot is_array(obj)\n\tfiles = [obj]\n}\n\n# Filter only kinds that are in data.postureControlInputs.kindFilter.\n# If no kindFilter - match everything\nfilter_kind(kind) {\n\tkind in data.postureControlInputs.kindFilter\n}\n\nfilter_kind(kind) {\n\tnot data.postureControlInputs.kindFilter\n}\n\n# Filter file path globs from data.postureControlInputs.pathGlob\nfile_path_glob(path) {\n\tpatterns = data.postureControlInputs.pathGlob\n\tcount({true | patterns[i]; glob.match(patterns[i], null, path)}) > 0\n}\n\nfile_path_glob(path) {\n\tnot data.postureControlInputs.pathGlob\n}\n\n# Filter only the current file\nfilter_file(obj, objPath, file_index) = ret {\n\tis_array(object.get(obj, objPath, false))\n\tfull_path := array.concat(objPath, [format_int(file_index, 10)])\n\tfinal_path := concat(\"/\", full_path)\n\tret := json.filter(obj, [final_path])\n}\n\nfilter_file(obj, objPath, file_index) = ret {\n\tnot is_array(object.get(obj, objPath, false))\n\tret = object.filter(obj, objPath)\n}", - "resourceEnumerator": "package armo_builtins\n\nimport future.keywords.in\n\n# Filter only kinds that are in data.postureControlInputs.kindFilter.\n# Filter out non-host-sensor as well.\n# If no kindFilter - match every kind\ndeny[msg] {\n\tobj = input[_]\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tfilter_kind(obj.kind)\n\tmsg := {\"alertObject\": {\"externalObjects\": obj}}\n}\n\n# Filter only kinds that are in data.postureControlInputs.kindFilter.\n# If no kindFilter - match everything\nfilter_kind(kind) {\n\tkind in data.postureControlInputs.kindFilter\n}\n\nfilter_kind(kind) {\n\tnot data.postureControlInputs.kindFilter\n}\n", - "ruleLanguage": "Rego", "match": [ { "apiGroups": [], @@ -10436,32 +25523,146 @@ "v1beta0" ], "resources": [ - "KubeletInfo", - "KubeProxyInfo", - "ControlPlaneInfo" + "KubeletInfo" ] } ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "configInputs": null, + "ruleDependencies": [], "controlConfigInputs": null, - "description": "Ensure that file has permissions of 600 or more restrictive", - "remediation": "Set the permission of the failed file file to 600 or more restrictive", + "description": "Determines if the Kubelet is configured to only use strong cryptographic ciphers.", + "remediation": "Change --tls-cipher-suites value of TLSCipherSuites property of config file to use strong cryptographics ciphers", "ruleQuery": "", "relevantCloudProviders": null }, { "guid": "", - "name": "etcd-tls-enabled", + "name": "exposure-to-internet-via-gateway-api", "attributes": { - "armoBuiltin": true + "useFromKubescapeVersion": "v3.0.9" }, "creationTime": "", - "rule": "package armo_builtins\n\n# Check if tls is configured in a etcd service\ndeny[msga] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsga := {\n\t\t\"alertMessage\": \"etcd encryption is not enabled\",\n\t\t\"alertScore\": 8,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"alertObject\": 
{\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\twanted = [\n\t\t[\"--cert-file\", \"\"],\n\t\t[\"--key-file\", \"\"],\n\t]\n\n\tfix_paths = [{\n\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd) + i]),\n\t\t\"value\": sprintf(\"%s=%s\", wanted[i]),\n\t} |\n\t\tnot contains(full_cmd, wanted[i][0])\n\t]\n\n\tcount(fix_paths) > 0\n\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": fix_paths,\n\t}\n}\n", + "rule": "package armo_builtins\nimport future.keywords.in\n\n\ndeny[msga] {\n httproute := input[_]\n httproute.kind in [\"HTTPRoute\", \"TCPRoute\", \"UDPRoute\"]\n\n svc := input[_]\n svc.kind == \"Service\"\n\n # Make sure that they belong to the same namespace\n svc.metadata.namespace == httproute.metadata.namespace\n\n # avoid duplicate alerts\n # if service is already exposed through NodePort or LoadBalancer workload will fail on that\n not is_exposed_service(svc)\n\n wl := input[_]\n is_same_namespace(wl.metadata, svc.metadata)\n spec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Pod\", \"Job\", \"CronJob\"}\n spec_template_spec_patterns[wl.kind]\n pod := get_pod_spec(wl)[\"spec\"]\n wl_connected_to_service(pod, svc)\n\n result := svc_connected_to_httproute(svc, httproute)\n\n msga := {\n \"alertMessage\": sprintf(\"workload '%v' is exposed through httproute '%v'\", [wl.metadata.name, httproute.metadata.name]),\n \"packagename\": \"armo_builtins\",\n \"failedPaths\": [],\n \"fixPaths\": [],\n \"alertScore\": 7,\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n \"relatedObjects\": [\n\t\t{\n\t \"object\": httproute,\n\t\t \"reviewPaths\": result,\n\t \"failedPaths\": result,\n\t },\n\t\t{\n\t \"object\": svc,\n\t\t}\n ]\n }\n}\n\n# ====================================================================================\n\nis_exposed_service(svc) {\n svc.spec.type == \"NodePort\"\n}\n\nis_exposed_service(svc) {\n svc.spec.type == \"LoadBalancer\"\n}\n\nwl_connected_to_service(wl, svc) { \n \n\n count({x | svc.spec.selector[x] == wl.metadata.labels[x]}) == count(svc.spec.selector)\n}\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}\n\n\n\n# get_volume - get resource spec paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_pod_spec(resources) := result {\n\tresources_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresources_kinds[resources.kind]\n\tresult = {\"spec\": resources.spec.template, \"start_of_path\": \"spec.template.\"}\n}\n\n# get_volume - get resource spec paths for \"Pod\"\nget_pod_spec(resources) := result {\n\tresources.kind == \"Pod\"\n\tresult = {\"spec\": resources, \"start_of_path\": \"\"}\n}\n\n# get_volume - get resource spec paths for \"CronJob\"\nget_pod_spec(resources) := result {\n\tresources.kind == \"CronJob\"\n\tresult = {\"spec\": resources.spec.jobTemplate.spec.template.spec, 
\"start_of_path\": \"spec.jobTemplate.spec.template.spec.\"}\n}\n\n\n\n\nsvc_connected_to_httproute(svc, httproute) = result {\n rule := httproute.spec.rules[i]\n ref := rule.backendRefs[j]\n ref.kind == \"Service\"\n svc.metadata.name == ref.name\n result := [sprintf(\"spec.rules[%d].backendRefs[%d].name\", [i,j])]\n}\n\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "Service" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + }, + { + "apiGroups": [ + "gateway.networking.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "HTTPRoute", + "TCPRoute", + "UDPRoute" + ] + } + ], + "ruleDependencies": null, + "controlConfigInputs": null, + "description": "fails if the running workload is bound to a Service that is exposed to the Internet through a Gateway.", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "automount-service-account", + "creationTime": "", + "rule": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n result := is_auto_mount(service_account)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n\n\n # -- ---- For workloads -- ---- \n# Fails if pod mount tokens by default (either by its config or by its SA config)\n\n # POD \ndeny [msga]{\n pod := input[_]\n\tpod.kind == \"Pod\"\n\n\tstart_of_path := \"spec.\"\n\twl_namespace := pod.metadata.namespace\n\tresult := is_sa_auto_mounted(pod.spec, start_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"Pod: %v in the following namespace: %v mounts service account tokens by default\", [pod.metadata.name, pod.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n} \n\n# WORKLOADS\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tstart_of_path := \"spec.template.spec.\"\n\n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.template.spec, start_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, 
wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# CRONJOB\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n \n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.jobTemplate.spec.template.spec, start_of_path, wl.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n # -- ---- For workloads -- ---- \nis_sa_auto_mounted(spec, start_of_path, wl_metadata) = [failed_path, fix_path] {\n\t# automountServiceAccountToken not in pod spec\n\tnot spec.automountServiceAccountToken == false\n\tnot spec.automountServiceAccountToken == true\n\n\t# check if SA automount by default\n\tsa := input[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata , wl_metadata)\n\tnot sa.automountServiceAccountToken == false\n\n\t# path is pod spec\n\tfix_path = { \"path\": sprintf(\"%vautomountServiceAccountToken\", [start_of_path]), \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\nis_sa_auto_mounted(spec, start_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# SA automount by default\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) > 0\n\tsa := service_accounts[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata , wl_namespace)\n\tnot sa.automountServiceAccountToken == false\n\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [start_of_path])\n\tfix_path = \"\"\n}\n\nis_sa_auto_mounted(spec, start_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# No SA (yaml scan)\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) == 0\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [start_of_path])\n\tfix_path = \"\"\n}\n\n\n\n # -- ---- For SAs -- ---- \nis_auto_mount(service_account) = [failed_path, fix_path] {\n\tservice_account.automountServiceAccountToken == true\n\tfailed_path = \"automountServiceAccountToken\"\n\tfix_path = \"\"\n}\n\nis_auto_mount(service_account)= [failed_path, fix_path] {\n\tnot service_account.automountServiceAccountToken == false\n\tnot service_account.automountServiceAccountToken == true\n\tfix_path = {\"path\": \"automountServiceAccountToken\", \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nis_same_sa(spec, serviceAccountName) 
{\n\tspec.serviceAccountName == serviceAccountName\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tnot spec.serviceAccountName \n\tserviceAccountName == \"default\"\n}\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "ServiceAccount" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "fails if service account and workloads mount service account token by default", + "remediation": "Make sure that the automountServiceAccountToken field on the service account spec if set to false", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "etcd-tls-enabled", + "creationTime": "", + "rule": "package armo_builtins\n\n# Check if tls is configured in a etcd service\ndeny[msga] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsga := {\n\t\t\"alertMessage\": \"etcd encryption is not enabled\",\n\t\t\"alertScore\": 8,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\twanted = [\n\t\t[\"--cert-file\", \"\"],\n\t\t[\"--key-file\", \"\"],\n\t]\n\n\tfix_paths = [{\n\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd) + i]),\n\t\t\"value\": sprintf(\"%s=%s\", wanted[i]),\n\t} |\n\t\tnot contains(full_cmd, wanted[i][0])\n\t]\n\n\tcount(fix_paths) > 0\n\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": fix_paths,\n\t}\n}\n", "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n", "ruleLanguage": "Rego", "match": [ @@ -10478,7 +25679,6 @@ } ], "ruleDependencies": [], - "configInputs": null, "controlConfigInputs": null, "description": "Configure TLS encryption for the etcd service.", "remediation": "Follow the etcd service documentation and configure TLS encryption.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameters.\n\n \n```\n--cert-file=\n--key-file=\n\n```\n\n#### Impact Statement\nClient connections only over TLS would be served.\n\n#### Default 
Value\nBy default, TLS encryption is not set.", @@ -10487,384 +25687,9 @@ }, { "guid": "", - "name": "rule-access-dashboard", - "attributes": { - "m$K8sThreatMatrix": "Lateral Movement::Access Kubernetes dashboard, Discovery::Access Kubernetes dashboard", - "armoBuiltin": true, - "useUntilKubescapeVersion": "v1.0.133" - }, + "name": "ensure-endpointprivateaccess-is-enabled", "creationTime": "", - "rule": "package armo_builtins\n\n# input: roleBinding\n# apiversion: v1\n# fails if a subject that is not dashboard service account is bound to dashboard role\n\ndeny[msga] {\n\troleBinding := input[_]\n roleBinding.kind == \"RoleBinding\"\n roleBinding.roleRef.name == \"kubernetes-dashboard\"\n subject := roleBinding.subjects[_]\n subject.name != \"kubernetes-dashboard\"\n subject.kind != \"ServiceAccount\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following subjects: %s are bound to dashboard role/clusterrole\", [subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [\"\"],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [roleBinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n# input: clusterRoleBinding\n# apiversion: v1\n# fails if a subject that is not dashboard service account is bound to dashboard role\n\ndeny[msga] {\n\troleBinding := input[_]\n roleBinding.kind == \"ClusterRoleBinding\"\n roleBinding.roleRef.name == \"kubernetes-dashboard\"\n subject := roleBinding.subjects[_]\n subject.name != \"kubernetes-dashboard\"\n subject.kind != \"ServiceAccount\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following subjects: %s are bound to dashboard role/clusterrole\", [subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [\"\"],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [roleBinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n# input: \n# apiversion: \n# fails if pod that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n pod := input[_]\n pod.spec.serviceaccountname == \"kubernetes-dashboard\"\n not startswith(pod.metadata.name, \"kubernetes-dashboard\")\n\tpath := \"spec.serviceaccountname\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following pods: %s are associated with dashboard service account\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# input: \n# apiversion: \n# fails if workload that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n wl.spec.template.spec.serviceaccountname == \"kubernetes-dashboard\"\n not startswith(wl.metadata.name, \"kubernetes-dashboard\")\n\tpath := \"spec.template.spec.serviceaccountname\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is associated with dashboard service account\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# input: \n# apiversion: \n# fails if CronJob that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n 
wl.spec.jobTemplate.spec.template.spec.serviceaccountname == \"kubernetes-dashboard\"\n not startswith(wl.metadata.name, \"kubernetes-dashboard\")\n\tpath := \"spec.jobTemplate.spec.template.spec.serviceaccountname\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjob: %s is associated with dashboard service account\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "RoleBinding", - "ClusterRoleBinding" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "fails if subject that is not dashboard service account is bound to dashboard role/clusterrole, or- if anyone that is not dashboard pod is associated with its service account.", - "remediation": "", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "verify-image-signature", - "attributes": { - "useFromKubescapeVersion": "v2.0.184", - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\nimport data\n\ndeny[msga] {\n\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\t\n verified_keys := [trusted_key | trusted_key = data.postureControlInputs.trustedCosignPublicKeys[_]; cosign.verify(container.image, trusted_key)]\n count(verified_keys) == 0\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"signature not verified for image: %v\", [container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [container.image],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t},\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\tverified_keys := [trusted_key | trusted_key = data.postureControlInputs.trustedCosignPublicKeys[_]; cosign.verify(container.image, trusted_key)]\n count(verified_keys) == 0\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"signature not verified for image: %v\", [container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [container.image],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t},\n\t}\n}\n\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\n verified_keys := [trusted_key | trusted_key = data.postureControlInputs.trustedCosignPublicKeys[_]; cosign.verify(container.image, trusted_key)]\n count(verified_keys) == 0\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"signature not verified for image: %v\", [container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [container.image],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t},\n\t}\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - 
"apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.trustedCosignPublicKeys", - "name": "Trusted Cosign public keys", - "description": "Trusted Cosign public keys" - } - ], - "description": "Verifies the signature of each image with given public keys", - "remediation": "Replace the image with an image that is signed correctly", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "rule-can-portforward-v1", - "attributes": { - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods/portforward\", \"pods/*\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can do port forwarding\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "rule-can-bind-escalate", - "attributes": { - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# ================= bind 
===============================\n\n# fails if user has access to bind clusterroles/roles\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"bind\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"rbac.authorization.k8s.io\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"clusterroles\", \"roles\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can bind roles/clusterroles\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# ================= escalate ===============================\n\n# fails if user has access to escalate roles/clusterroles\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\tis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"escalate\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"rbac.authorization.k8s.io\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"clusterroles\", \"roles\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can escalate roles/clusterroles\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == 
subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "determines which users can or bind escalate roles/clusterroles", - "remediation": "", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "ensure-that-the-controller-manager-use-service-account-credentials-argument-is-set-to-true", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"--use-service-account-credentials is set to false in the controller manager\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcmd[i] == \"--use-service-account-credentials=false\"\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--use-service-account-credentials=true\"}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--use-service-account-credentials\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--use-service-account-credentials=true\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "Use individual service account credentials for each controller.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node to set the below parameter.\n\n \n```\n--use-service-account-credentials=true\n\n```\n\n#### Impact Statement\nWhatever authorizer is configured for the cluster, it must grant sufficient 
permissions to the service accounts to perform their intended tasks. When using the RBAC authorizer, those roles are created and bound to the appropriate service accounts in the `kube-system` namespace automatically with default roles and rolebindings that are auto-reconciled on startup.\n\n If using other authorization methods (ABAC, Webhook, etc), the cluster deployer is responsible for granting appropriate permissions to the service accounts (the required permissions can be seen by inspecting the `controller-roles.yaml` and `controller-role-bindings.yaml` files for the RBAC roles.\n\n#### Default Value\nBy default, `--use-service-account-credentials` is set to false.", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "container-hostPort", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\n\n# Fails if pod has container with hostPort\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n\tpath := is_host_port(container, i, beggining_of_path)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v has Host-port\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with hostPort\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n path := is_host_port(container, i, beggining_of_path)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has container with hostPort\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n path := is_host_port(container, i, beggining_of_path)\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_host_port(container, i, beggining_of_path) = path {\n\tpath = [sprintf(\"%vcontainers[%v].ports[%v].hostPort\", [beggining_of_path, format_int(i, 10), format_int(j, 10)]) | port = container.ports[j]; port.hostPort]\n\tcount(path) > 0\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "fails if container has hostPort", - "remediation": 
"Make sure you do not configure hostPort for the container, if necessary use NodePort / ClusterIP", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "ensure-that-the-controller-manager.conf-file-ownership-is-set-to-root-root", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport data.cautils as cautils\nimport future.keywords.in\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"controllerManagerInfo\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "configInputs": null, - "controlConfigInputs": null, - "description": "Ensure that the `controller-manager.conf` file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchown root:root /etc/kubernetes/controller-manager.conf\n\n```", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "ensure-that-the-api-server-encryption-provider-config-argument-is-set-as-appropriate", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Encryption config is not set at all \ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\n\tcmd := obj.spec.containers[0].command\n\tnot contains(concat(\" \", cmd), \"--encryption-provider-config\")\n\n\tmsg := {\n\t\t\"alertMessage\": \"Encryption provider config file not set\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--encryption-provider-config=\",\n\t\t}],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\n# Encryption config is set but not covering secrets\ndeny[msg] {\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\tconfig_file := obj.data.APIServerInfo.encryptionProviderConfigFile\n\tconfig_file_content = decode_config_file(base64.decode(config_file.content))\n\n\t# Check if the config conver secrets\n\tcount({true | \"secrets\" in config_file_content.resources[_].resources}) == 0\n\t\n\t# Add name to the failed object so that\n\t# it fit the format of the alert object\n\tfailed_obj := json.patch(config_file_content, [{\n\t\t\"op\": \"add\",\n\t\t\"path\": \"name\",\n\t\t\"value\": \"encryption-provider-config\",\n\t}])\n\n\tmsg := {\n\t\t\"alertMessage\": \"Encryption provider config is not covering secrets\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": failed_obj},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\ndecode_config_file(content) := data {\n\tdata := yaml.unmarshal(content)\n} else := json.unmarshal(content)\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tfilter_input(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nfilter_input(obj){\n\tis_api_server(obj)\n}\nfilter_input(obj){\n\tis_control_plane_info(obj)\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": null, - "configInputs": null, - "controlConfigInputs": null, - "description": "Encrypt etcd key-value store.", - "remediation": 
"Follow the Kubernetes documentation and configure a `EncryptionConfig` file. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the `--encryption-provider-config` parameter to the path of that file:\n\n \n```\n--encryption-provider-config=\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `--encryption-provider-config` is not set.", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "ensure-that-the-api-server-request-timeout-argument-is-set-as-appropriate", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": result.alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--request-timeout\")\n\tresult = {\n\t\t\"alert\": \"Please validate the request timeout flag is set to an appropriate value\",\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "Set global request timeout for API server requests as appropriate.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` and set the below parameter as appropriate and if needed. 
For example,\n\n \n```\n--request-timeout=300s\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `--request-timeout` is set to 60 seconds.", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "ensure-that-the-api-server-etcd-data-directory-ownership-is-set-to-etcd-etcd", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n\n# Fail if etcd data dir not owned by etcd:etcd\ndeny[msg] {\n\tobj = input[_]\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n\n\t# Test\n\tfile := obj.data.etcdDataDir\n\tnot valid_ownership(file.ownership)\n\n\t# Add name to match the externalObject structure\n\toutput := json.patch(obj, [{\"op\": \"add\", \"path\": \"name\", \"value\": \"ControlPlaneInfo\"}])\n\n\tmsg := {\n\t\t\"alertMessage\": sprintf(\"%s is not owned by `etcd:etcd`\", [file.path]),\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown etcd:etcd %s\", [file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": output},\n\t}\n}\n\n\nvalid_ownership(ownership) {\n\townership.err != \"\" # Don't fail if host-sensor can't get ownership\n}\nvalid_ownership(ownership) {\n\townership.username == \"etcd\"\n\townership.groupname == \"etcd\"\n}", + "rule": "package armo_builtins\n\n\n# Check if EndpointPrivateAccess in disabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\t\n\tconfig = cluster_config.data\n\n\tconfig.Cluster.ResourcesVpcConfig.EndpointPrivateAccess == false \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"endpointPrivateAccess is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true,endpointPublicAccess=false\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n", "resourceEnumerator": "", "ruleLanguage": "Rego", "match": [ @@ -10877,1686 +25702,6 @@ "dynamicMatch": [ { "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "configInputs": null, - "controlConfigInputs": null, - "description": "Ensure that the etcd data directory ownership is set to `etcd:etcd`.", - "remediation": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). 
For example,\n\n \n```\nchown etcd:etcd /var/lib/etcd\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, etcd data directory ownership is set to `etcd:etcd`.", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "ensure-that-the-controller-manager-profiling-argument-is-set-to-false", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"profiling is enabled for the kube-controller-manager\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcmd[i] == \"--profiling=true\"\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--profiling=false\"}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--profiling\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--profiling=false\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "Disable profiling, if not needed.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```\n\n#### Impact Statement\nProfiling information would not be available.\n\n#### Default Value\nBy default, profiling is enabled.", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "rule-can-create-modify-pod-v1", - "attributes": { - "m$K8sThreatMatrix": "Execution::New container, Persistence::Backdoor container", - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if subject has create/modify access to pods \ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, 
\"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"patch\", \"update\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods\", \"deployments\", \"daemonsets\", \"replicasets\", \"statefulsets\", \"jobs\", \"cronjobs\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can create/modify workloads\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "determines which users have create/modify permissions on pods", - "remediation": "", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "CVE-2022-3172", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\t# find aggregated API APIServices\n\tobj = input[_]\n\tobj.apiVersion == \"apiregistration.k8s.io/v1\"\n\tobj.kind == \"APIService\"\n\tapi_service := obj.spec.service\n\n\t# check API server version vulnerability\n\tapi_infos = [api_info |\n\t\tapi_info := input[i]\n\t\tapi_info.apiVersion == \"apiserverinfo.kubescape.cloud/v1beta0\"\n\t\tapi_info.kind == \"APIServerInfo\"\n\t\tapi_info.metadata.name == \"version\"\n\t]\n\n\tversion = get_api_server_version(api_infos)\n\tis_api_server_version_affected(version)\n\n\t# Find the service that exposes the extended API\n\tservices = [obj |\n\t\tobj := input[j]\n\t\tobj.apiVersion == \"v1\"\n\t\tobj.kind == \"Service\"\n\t\tobj.metadata.name == api_service.name\n\t]\n\n\tcount(services) == 1\n\tservice = services[0]\n\n\tmsg := {\n\t\t\"alertMessage\": \"the following pair of APIService and Service may redirect client traffic to any URL\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj, service]},\n\t}\n}\n\n# current kubescpae version (v2.0.171) still not support this resource\nget_api_server_version(api_infos) = version {\n\tcount(api_infos) == 1\n\tv = replace(split(api_infos[0].data.gitVersion, \"-\")[0], \"v\", \"\")\n\tsemver.is_valid(v)\n\tversion = v\n}\n\nget_api_server_version(api_infos) = version {\n\tcount(api_infos) == 1\n\tv = replace(split(api_infos[0].data.gitVersion, \"-\")[0], \"v\", \"\")\n\tnot semver.is_valid(v)\n\tversion := \"\"\n}\n\nget_api_server_version(api_infos) = version {\n\tcount(api_infos) != 1\n\tversion = \"\"\n}\n\nis_api_server_version_affected(version) {\n\tversion == \"\"\n}\n\nis_api_server_version_affected(version) {\n\tsemver.compare(version, \"1.25.0\") == 0\n}\n\nis_api_server_version_affected(version) {\n\tsemver.compare(version, \"1.24.0\") >= 0\n\tsemver.compare(version, \"1.24.4\") <= 0\n}\n\nis_api_server_version_affected(version) {\n\tsemver.compare(version, \"1.23.0\") >= 0\n\tsemver.compare(version, \"1.23.10\") <= 0\n}\n\nis_api_server_version_affected(version) {\n\tsemver.compare(version, \"1.22.0\") >= 0\n\tsemver.compare(version, \"1.22.13\") <= 0\n}\n\nis_api_server_version_affected(version) {\n\tsemver.compare(version, \"1.21.14\") <= 0\n}\n", - "resourceEnumerator": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\t# find aggregated API APIServices\n\tobj = input[_]\n\tobj.apiVersion == \"apiregistration.k8s.io/v1\"\n\tobj.kind == \"APIService\"\n\tapi_service := obj.spec.service\n\n\t# check API server version vulnerability\n\tapi_infos = [api_info |\n\t\tapi_info := input[i]\n\t\tapi_info.apiVersion == \"apiserverinfo.kubescape.cloud/v1beta0\"\n\t\tapi_info.kind == \"APIServerInfo\"\n\t\tapi_info.metadata.name == \"version\"\n\t]\n\n\t# Find the service that exposes the extended API\n\tservices = [ obj |\n\t\tobj := input[j]\n\t\tobj.apiVersion == \"v1\"\n\t\tobj.kind == \"Service\"\n\t\tobj.metadata.name == api_service.name\n\t]\n\n\n\tmsg := {\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\n", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "apiregistration.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "APIService" - ] - }, - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Service" - ] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "apiserverinfo.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "APIServerInfo" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "List aggregated API server APIServices if kube-api-server version is vulnerable to CVE-2022-3172", - "remediation": "Upgrade the Kubernetes version to one of the fixed versions. 
The following versions are fixed: `v1.25.1`, `v1.24.5`, `v1.23.11`, `v1.22.14`", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "access-tiller-endpoint", - "attributes": { - "microsoftK8sThreatMatrix": "Lateral movement::Access tiller endpoint", - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\n# input: deployment\n# fails if tiller exists in cluster\n\ndeny[msga] {\n\tdeployment := \tinput[_]\n\tdeployment.kind == \"Deployment\"\n deployment.metadata.name == \"tiller-deploy\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"tiller exists in namespace: %v\", [deployment.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [\"metadata.name\"],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [deployment]\n\t\t}\n\t}\n}", - "resourceEnumerator": "package armo_builtins\n\n# input: deployment\n# fails if tiller exists in cluster\n\ndeny[msga] {\n\tdeployment := \tinput[_]\n\tdeployment.kind == \"Deployment\"\n deployment.metadata.name == \"tiller-deploy\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"tiller exists in namespace: %v\", [deployment.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [\"metadata.name\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [deployment]\n\t\t}\n\t}\n}", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Deployment" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "fails if tiller exists in cluster", - "remediation": "", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "rule-list-all-cluster-admins", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::Cluster-admin binding", - "armoBuiltin": true, - "useUntilKubescapeVersion": "v1.0.133" - }, - "creationTime": "", - "rule": "package armo_builtins\nimport data.cautils as cautils\n\n# input: roles\n# apiversion: v1\n# does: returns roles+ related subjects in rolebinding\n\ndeny[msga] {\n\troles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[i]\n\tcanCreate(rule, i)\n\tcanCreateResources(rule, i)\n\n\trolebinding.roleRef.kind == \"Role\"\n\trolebinding.roleRef.name == role.metadata.name\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"The following %v: %v have high privileges, such as cluster-admin\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n# input: ClusterRole\n# apiversion: v1\n# does: returns clusterroles+ related subjects in rolebinding\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[i]\n\tcanCreate(rule, i)\n\tcanCreateResources(rule, i)\n\n\trolebinding.roleRef.kind == 
\"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n \n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"The following %v: %v have high privileges, such as cluster-admin\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n# input: ClusterRole\n# apiversion: v1\n# does:\treturns clusterroles+ related subjects in clusterrolebinding\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[i]\n\tcanCreate(rule, i)\n canCreateResources(rule, i)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\t\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"The following %v: %v have high privileges, such as cluster-admin\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\ncanCreate(rule, i) {\n\tverb := rule.verbs[j]\n\tverb == \"*\"\n}\n\ncanCreateResources(rule, i){\n\tis_api_group(rule)\n\tresource := rule.resources[j]\n\tresource == \"*\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "configInputs": null, - "controlConfigInputs": null, - "description": "determines which users have cluster admin permissions", - "remediation": "", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "ensure-that-the-cni-in-use-supports-network-policies", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "creationTime": "", - "rule": "package armo_builtins\n\n\n# Deny CNIs that don't support Network Policies.\n# Deny when CNIName in input and in CNINotSupportsNetworkPolicies list.\n# Pass when CNIName not in input, or when CNIName in input but not in CNINotSupportsNetworkPolicies\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n is_control_plane_info(obj)\n \n\t# list of CNIs not supporting support Network Policies\n \tCNINotSupportsNetworkPolicies := [\"Flannel\"]\n\n\t# filter CNIs not supporting Network Policies\n CNINotSupportsNetworkPolicies[_] = obj.data.CNIName\n\n\t# filter out irrelevant host-sensor data\n obj_filtered := json.filter(obj, [\"apiVersion\", \"kind\", \"metadata\", \"data/CNIName\"])\n \n\talert := sprintf(\"''%s' CNI doesn't support Network Policies.\", [obj.data.CNIName])\n\n msg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 
2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "There are a variety of CNI plugins available for Kubernetes. If the CNI in use does not support Network Policies it may not be possible to effectively restrict traffic in the cluster.", - "remediation": "If the CNI plugin in use does not support network policies, consideration should be given to making use of a different plugin, or finding an alternate mechanism for restricting traffic in the Kubernetes cluster.", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "ensure-that-the-API-Server-only-makes-use-of-Strong-Cryptographic-Ciphers", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\twanted = [\n\t\t\"TLS_AES_128_GCM_SHA256\",\n\t\t\"TLS_AES_256_GCM_SHA384\",\n\t\t\"TLS_CHACHA20_POLY1305_SHA256\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\",\n\t\t\"TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\",\n\t\t\"TLS_RSA_WITH_3DES_EDE_CBC_SHA\",\n\t\t\"TLS_RSA_WITH_AES_128_CBC_SHA\",\n\t\t\"TLS_RSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_RSA_WITH_AES_256_CBC_SHA\",\n\t\t\"TLS_RSA_WITH_AES_256_GCM_SHA384\",\n\t]\n\tresult = invalid_flag(obj.spec.containers[0].command, wanted)\n\tmsg := {\n\t\t\"alertMessage\": \"The API server is not configured to use strong cryptographic ciphers\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--tls-cipher-suites=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n\n# Assume flag set only once\ninvalid_flag(cmd, wanted) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tmissing = [x | x = wanted[_]; not x in flag.values]\n\tcount(missing) > 0\n\n\t# get fixed and failed 
paths\n\tfixed_values := array.concat(flag.values, missing)\n\tfixed_flag = sprintf(\"%s=%s\", [\"--tls-cipher-suites\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd, wanted) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--tls-cipher-suites\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": sprintf(\"--tls-cipher-suites=%s\", [concat(\",\", wanted)]),\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "Ensure that the API server is configured to only use strong cryptographic ciphers.", - "remediation": "Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml on the Control Plane node and set the below parameter.\n\n \n```\n--tls-cipher-suites=TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_GCM_SHA384.\n\n```\n\n#### Impact Statement\nAPI server clients that cannot support modern cryptographic ciphers will not be able to make connections to the API server.\n\n#### Default Value\nBy default the Kubernetes API server supports a wide range of TLS ciphers", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "ensure-that-the-api-server-DenyServiceExternalIPs-is-not-set", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"admission control plugin DenyServiceExternalIPs is enabled. 
This is equal to turning off all admission controllers\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--enable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\t\"DenyServiceExternalIPs\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := [val | val := flag.values[j]; val != \"DenyServiceExternalIPs\"]\n\tresult = get_retsult(fixed_values, i)\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) == 0\n\tresult = {\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) > 0\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": sprintf(\"--enable-admission-plugins=%v\", [concat(\",\", fixed_values)]),\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "This admission controller rejects all net-new usage of the Service field externalIPs.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and remove the `--DenyServiceExternalIPs'parameter\n\n or\n\n The Kubernetes API server flag disable-admission-plugins takes a comma-delimited list of admission control plugins to be disabled, even if they are in the list of plugins enabled by default.\n\n `kube-apiserver --disable-admission-plugins=DenyServiceExternalIPs,AlwaysDeny ...`\n\n#### Impact Statement\nWhen enabled, users of the cluster may not create new Services which use externalIPs and may not add new values to externalIPs on existing Service objects.\n\n#### Default Value\nBy default, `--token-auth-file` argument is not set.", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "exposed-rce-pods", - "attributes": { - "m$K8sThreatMatrix": "exposed-rce-pods", - "armoBuiltin": true, - "useFromKubescapeVersion": "v2.0.150", - "imageScanRelated": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\ndeny[msga] {\n services := [ x | x = input[_]; x.kind == 
\"Service\" ]\n pods := [ x | x = input[_]; x.kind == \"Pod\" ]\n vulns := [ x | x = input[_]; x.kind == \"ImageVulnerabilities\" ]\n\n pod := pods[_]\n service := services[_]\n vuln := vulns[_]\n\n # vuln data is relevant \n count(vuln.data) > 0 \n\n # service is external-facing\n filter_external_access(service)\n\n # pod has the current service\n service_to_pod(service, pod) > 0\n\n # get container image name\n container := pod.spec.containers[i]\n\n # image has vulnerabilities\n container.image == vuln.metadata.name\n\n # At least one rce vulnerability\n filter_rce_vulnerabilities(vuln)\n\n related_objects := [pod, vuln]\n\n path := sprintf(\"status.containerStatuses[%v].imageID\", [format_int(i, 10)])\n\n metadata = {\n \"name\": pod.metadata.name,\n \"namespace\": pod.metadata.namespace\n }\n\n external_objects = { \n \"apiVersion\": \"result.vulnscan.com/v1\",\n \"kind\": pod.kind,\n \"metadata\": metadata,\n \"relatedObjects\": related_objects\n }\n\n msga := {\n \"alertMessage\": sprintf(\"pod '%v' exposed with rce vulnerability\", [pod.metadata.name]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 8,\n \"failedPaths\": [path],\n \"fixPaths\": [],\n \"alertObject\": {\n \"externalObjects\": external_objects\n }\n }\n}\n\nfilter_rce_vulnerabilities(vuln) {\n data := vuln.data[_]\n data.categories.isRce == true\n}\n\nfilter_external_access(service) {\n service.spec.type != \"ClusterIP\"\n}\n\nservice_to_pod(service, pod) = res {\n # Make sure we're looking on the same namespace\n service.metadata.namespace == pod.metadata.namespace\n\n service_selectors := [ x | x = service.spec.selector[_] ]\n\n res := count([ x | x = pod.metadata.labels[_]; x == service_selectors[_] ])\n}", - "resourceEnumerator": "package armo_builtins\n \ndeny[msga] {\n services := [ x | x = input[_]; x.kind == \"Service\" ; x.apiVersion == \"v1\"]\n pods := [ x | x = input[_]; x.kind == \"Pod\" ; x.apiVersion == \"v1\"]\n vulns := [ x | x = input[_]; x.kind == \"ImageVulnerabilities\"] # TODO: x.apiVersion == \"--input--\" || x.apiVersion == \"--input--\" ]\n\n pod := pods[_]\n service := services[_]\n vuln := vulns[_]\n\n # vuln data is relevant \n count(vuln.data) > 0 \n\n # service is external-facing\n filter_external_access(service)\n\n # pod has the current service\n service_to_pod(service, pod) > 0\n\n # get container image name\n container := pod.spec.containers[i]\n\n # image has vulnerabilities\n container.image == vuln.metadata.name\n\n related_objects := [pod, vuln]\n\n path := sprintf(\"status.containerStatuses[%v].imageID\", [format_int(i, 10)])\n\n metadata = {\n \"name\": pod.metadata.name,\n \"namespace\": pod.metadata.namespace\n }\n\n external_objects = { \n \"apiVersion\": \"result.vulnscan.com/v1\",\n \"kind\": pod.kind,\n \"metadata\": metadata,\n \"relatedObjects\": related_objects\n }\n\n msga := {\n \"alertMessage\": sprintf(\"pod '%v' exposed with rce vulnerability\", [pod.metadata.name]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 8,\n \"failedPaths\": [path],\n \"fixPaths\": [],\n \"alertObject\": {\n \"externalObjects\": external_objects\n }\n }\n}\n\nfilter_external_access(service) {\n service.spec.type != \"ClusterIP\"\n}\n\nservice_to_pod(service, pod) = res {\n # Make sure we're looking on the same namespace\n service.metadata.namespace == pod.metadata.namespace\n\n service_selectors := [ x | x = service.spec.selector[_] ]\n\n res := count([ x | x = pod.metadata.labels[_]; x == service_selectors[_] ])\n}", - "ruleLanguage": "Rego", - "match": [ - { - 
"apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Service", - "Pod" - ] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "armo.vuln.images", - "image.vulnscan.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ImageVulnerabilities" - ] - } - ], - "ruleDependencies": null, - "configInputs": null, - "controlConfigInputs": null, - "description": "fails if known pods have exposed services and known vulnerabilities with remote code execution", - "remediation": "The image of the listed pods might have a fix in a newer version. Alternatively, the pod service might not need to be external facing", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "CVE-2022-0492", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\n\n# Case 1: \n# -\tContainer runs as root OR allows privilege escalation (allowPrivilegeEscalation = true or not present), AND\n# -\tNo AppArmor , AND\n# -\tNo SELinux, AND\n# -\tNo Seccomp\n# If container is privileged or has CAP_SYS_ADMIN, don't fail\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\t\n\t# Path to send\n\tbeggining_of_path := \"spec\"\n\t\n\t# If container is privileged or has CAP_SYS_ADMIN, pass\n not container.securityContext.privileged == true\n\tnot is_cap_sys_admin(container, beggining_of_path)\n\n\t\n\tis_no_SELinux_No_AppArmor_Pod(pod)\n is_no_seccomp_pod(pod)\n\n is_no_SELinux_container(container)\n is_no_Seccomp_Container(container)\n\n\t# Check if is running as root\n alertInfo := evaluate_workload_non_root_container(container, pod, beggining_of_path)\n\n\t# CAP_DAC_OVERRIDE will fail on second check\n\tnot isCAP_DAC_OVERRIDE(container, beggining_of_path, i)\n\n # Get paths\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i) \n\n\n\tmsga := {\n\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-0492\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tbeggining_of_path := \"spec.template.spec\"\n\n pod := wl.spec.template\n container := pod.spec.containers[i]\n\n # If container is privileged or has CAP_SYS_ADMIN, pass\n not container.securityContext.privileged == true\n\tnot is_cap_sys_admin(container, beggining_of_path)\n\n\t\n\tis_no_SELinux_No_AppArmor_Pod(pod)\n is_no_seccomp_pod(pod)\n\n is_no_SELinux_container(container)\n is_no_Seccomp_Container(container)\n\n\t# Check if is running as root\n alertInfo := evaluate_workload_non_root_container(container, pod, beggining_of_path)\n\n\t# CAP_DAC_OVERRIDE will fail on second check\n\tnot isCAP_DAC_OVERRIDE(container, beggining_of_path, i)\n\n # Get paths\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i)\n\n\n\tmsga := {\n\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-0492\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec\"\n\n\tpod := 
wl.spec.jobTemplate.spec.template\n container = pod.spec.containers[i]\n \n \t# If container is privileged or has CAP_SYS_ADMIN, pass\n not container.securityContext.privileged == true\n\tnot is_cap_sys_admin(container, beggining_of_path)\n\n\t\n\tis_no_SELinux_No_AppArmor_Pod(pod)\n is_no_seccomp_pod(pod)\n\n is_no_SELinux_container(container)\n is_no_Seccomp_Container(container)\n\n\t# Check if is running as root\n alertInfo := evaluate_workload_non_root_container(container, pod, beggining_of_path)\n\n\t# CAP_DAC_OVERRIDE will fail on second check\n\tnot isCAP_DAC_OVERRIDE(container, beggining_of_path, i)\n\n # Get paths\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i)\n\n msga := {\n\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-0492\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n#################################################################################\n# Case 2: \n# - Container has CAP_DAC_OVERRIDE capability, AND\n# - No AppArmor, AND\n# - No SELinux\n# If container is privileged or has CAP_SYS_ADMIN, don't fail\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\n\tbeggining_of_path := \"spec.\"\n\t\n result := isCAP_DAC_OVERRIDE(container, beggining_of_path, i)\n\n\t# If container is privileged or has CAP_SYS_ADMIN, pass\n not container.securityContext.privileged == true\n\tnot is_cap_sys_admin(container, beggining_of_path)\n\n\tis_no_SELinux_No_AppArmor_Pod(pod)\n is_no_SELinux_container(container)\n\n\tmsga := {\n\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-0492\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [result],\n \"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\n pod := wl.spec.template\n container := pod.spec.containers[i]\n\n\tbeggining_of_path := \"spec.template.spec.\"\n\n result := isCAP_DAC_OVERRIDE(container, beggining_of_path, i)\n\n\t# If container is privileged or has CAP_SYS_ADMIN, pass\n not container.securityContext.privileged == true\n\tnot is_cap_sys_admin(container, beggining_of_path)\n\n\tis_no_SELinux_No_AppArmor_Pod(pod)\n is_no_SELinux_container(container)\n\n\tmsga := {\n\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-0492\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [result],\n \"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n \n pod := wl.spec.jobTemplate.spec.template\n container = pod.spec.containers[i]\n\t\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\n \tresult := isCAP_DAC_OVERRIDE(container, beggining_of_path, i)\n\n\t# If container is privileged or has CAP_SYS_ADMIN, pass\n not container.securityContext.privileged == true\n\tnot is_cap_sys_admin(container, beggining_of_path)\n\n\tis_no_SELinux_No_AppArmor_Pod(pod)\n is_no_SELinux_container(container)\n\n msga := {\n\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-0492\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [result],\n \"fixPaths\": 
[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n\nis_cap_sys_admin(container, beggining_of_path) {\n\tcapability = container.securityContext.capabilities.add[k]\n capability == \"SYS_ADMIN\"\n}\n\nisCAP_DAC_OVERRIDE(container, beggining_of_path, i) = path {\n\tcapability = container.securityContext.capabilities.add[k]\n capability == \"DAC_OVERRIDE\"\n path = sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) \n}\n\n\n\n#################################################################################\n\nget_failed_path(alertInfo, i) = [replace(alertInfo.failed_path,\"container_ndx\",format_int(i,10))] {\n\talertInfo.failed_path != \"\"\n} else = []\n\n\nget_fixed_path(alertInfo, i) = [{\"path\":replace(alertInfo.fixPath[0].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[0].value}, {\"path\":replace(alertInfo.fixPath[1].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[1].value}]{\n\tcount(alertInfo.fixPath) == 2\n} else = [{\"path\":replace(alertInfo.fixPath[0].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[0].value}] {\n\tcount(alertInfo.fixPath) == 1\n} else = []\n\n\n\n\n\n#################################################################################\n\n# Check if appArmor or SELinux or seccompProfile is used\n# Fails if none of them is used\nis_no_SELinux_No_AppArmor_Pod(pod){\n not pod.spec.securityContext.seLinuxOptions\n\tannotations := [pod.metadata.annotations[i] | annotaion = i; startswith(i, \"container.apparmor.security.beta.kubernetes.io\")]\n\tnot count(annotations) > 0\n}\n\nis_no_SELinux_container(container){\n not container.securityContext.seLinuxOptions\n}\n\nis_no_seccomp_pod(pod) {\n not pod.spec.securityContext.seccompProfile\n}\n\nis_no_Seccomp_Container(container) {\n not container.securityContext.seccompProfile\n}\n\n\n\n\n\n\n#################################################################################\n# Workload evaluation \n\nevaluate_workload_non_root_container(container, pod, beggining_of_path) = alertInfo {\n\trunAsNonRootValue := get_run_as_non_root_value(container, pod, beggining_of_path)\n\trunAsNonRootValue.value == false\n\t\n\trunAsUserValue := get_run_as_user_value(container, pod, beggining_of_path)\n\trunAsUserValue.value == 0\n\n\talertInfo := choose_first_if_defined(runAsUserValue, runAsNonRootValue)\n} else = alertInfo {\n allowPrivilegeEscalationValue := get_allow_privilege_escalation(container, pod, beggining_of_path)\n allowPrivilegeEscalationValue.value == true\n\n alertInfo := allowPrivilegeEscalationValue\n}\n\n\n#################################################################################\n\n# Checking for non-root and allowPrivilegeEscalation enabled\nget_run_as_non_root_value(container, pod, beggining_of_path) = runAsNonRoot {\n failed_path := sprintf(\"%v.container[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]) \n runAsNonRoot := {\"value\" : container.securityContext.runAsNonRoot, \"failed_path\" : failed_path, \"fixPath\": [] ,\"defined\" : true}\n} else = runAsNonRoot {\n\tfailed_path := sprintf(\"%v.securityContext.runAsNonRoot\", [beggining_of_path]) \n runAsNonRoot := {\"value\" : pod.spec.securityContext.runAsNonRoot, \"failed_path\" : failed_path, \"fixPath\": [], \"defined\" : true}\n} else = {\"value\" : false, \"failed_path\" : \"\", \"fixPath\": [{\"path\": \"spec.securityContext.runAsNonRoot\", \"value\":\"true\"}], 
\"defined\" : false} {\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : false, \"failed_path\" : \"\", \"fixPath\": [{\"path\": sprintf(\"%v.securityContext.runAsNonRoot\", [beggining_of_path]) , \"value\":\"true\"}, {\"path\":sprintf(\"%v.securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}], \"defined\" : false}\n\nget_run_as_user_value(container, pod, beggining_of_path) = runAsUser {\n\tfailed_path := sprintf(\"%v.container[container_ndx].securityContext.runAsUser\", [beggining_of_path]) \n runAsUser := {\"value\" : container.securityContext.runAsUser, \"failed_path\" : failed_path, \"fixPath\": [], \"defined\" : true}\n} else = runAsUser {\n\tfailed_path := sprintf(\"%v.securityContext.runAsUser\", [beggining_of_path]) \n runAsUser := {\"value\" : pod.spec.securityContext.runAsUser, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = {\"value\" : 0, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"}],\"defined\" : false}{\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : 0, \"failed_path\": \"\", \n\t\"fixPath\": [{\"path\": sprintf(\"%v.securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"},{\"path\": sprintf(\"%v.securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}],\n\t\"defined\" : false}\n\nget_run_as_group_value(container, pod, beggining_of_path) = runAsGroup {\n\tfailed_path := sprintf(\"%v.container[container_ndx].securityContext.runAsGroup\", [beggining_of_path])\n runAsGroup := {\"value\" : container.securityContext.runAsGroup, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = runAsGroup {\n\tfailed_path := sprintf(\"%v.securityContext.runAsGroup\", [beggining_of_path])\n runAsGroup := {\"value\" : pod.spec.securityContext.runAsGroup, \"failed_path\" : failed_path, \"fixPath\":[], \"defined\" : true}\n} else = {\"value\" : 0, \"failed_path\": \"\", \"fixPath\": [{\"path\": \"spec.securityContext.runAsNonRoot\", \"value\":\"true\"}], \"defined\" : false}{\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : 0, \"failed_path\": \"\", \n\t\"fixPath\": [{\"path\": sprintf(\"%v.securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"},{\"path\": sprintf(\"%v.securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}],\n \t\"defined\" : false\n}\n\nget_allow_privilege_escalation(container, pod, beggining_of_path) = allowPrivilegeEscalation {\n\tfailed_path := sprintf(\"%v.container[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path])\n allowPrivilegeEscalation := {\"value\" : container.securityContext.allowPrivilegeEscalation, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = allowPrivilegeEscalation {\n\tfailed_path := sprintf(\"%v.securityContext.allowPrivilegeEscalation\", [beggining_of_path])\n allowPrivilegeEscalation := {\"value\" : pod.spec.securityContext.allowPrivilegeEscalation, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = {\"value\" : true, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}], \"defined\" : false}\n\nchoose_first_if_defined(l1, l2) = c {\n l1.defined\n c := l1\n} else = l2\n\n\nis_allow_privilege_escalation_field(container, pod) 
{\n\tcontainer.securityContext.allowPrivilegeEscalation == false\n}\n\nis_allow_privilege_escalation_field(container, pod) {\n\tpod.spec.securityContext.allowPrivilegeEscalation == false\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "rule-can-ssh-to-pod-v1", - "attributes": { - "microsoftK8sThreatMatrix": "Execution::SSH server running inside container", - "armoBuiltin": true, - "useFromKubescapeVersion": "v1.0.133" - }, - "creationTime": "", - "rule": "package armo_builtins\n\n# input: pod\n# apiversion: v1\n# does:\treturns the external facing services of that pod\n\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tpodns := pod.metadata.namespace\n\tpodname := pod.metadata.name\n\tlabels := pod.metadata.labels\n\tfiltered_labels := json.remove(labels, [\"pod-template-hash\"])\n path := \"metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == podns\n\tservice.spec.selector == filtered_labels\n \n\thasSSHPorts(service)\n\n\twlvector = {\"name\": pod.metadata.name,\n\t\t\t\t\"namespace\": pod.metadata.namespace,\n\t\t\t\t\"kind\": pod.kind,\n\t\t\t\t\"relatedObjects\": service}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod %v/%v exposed by SSH services: %v\", [podns, podname, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tlabels := wl.spec.template.metadata.labels\n path := \"spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\thasSSHPorts(service)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": service}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tlabels := wl.spec.jobTemplate.spec.template.metadata.labels\n path := \"spec.jobTemplate.spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\thasSSHPorts(service)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": service}\n\n\tmsga 
:= {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.port == 22\n}\n\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.port == 2222\n}\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.targetPort == 22\n}\n\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.targetPort == 2222\n}\n", - "resourceEnumerator": "package armo_builtins\n\n# input: pod\n# apiversion: v1\n# does:\treturns the external facing services of that pod\n\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tpodns := pod.metadata.namespace\n\tpodname := pod.metadata.name\n\tlabels := pod.metadata.labels\n\tfiltered_labels := json.remove(labels, [\"pod-template-hash\"])\n path := \"metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == podns\n\tservice.spec.selector == filtered_labels\n\n\n\twlvector = {\"name\": pod.metadata.name,\n\t\t\t\t\"namespace\": pod.metadata.namespace,\n\t\t\t\t\"kind\": pod.kind,\n\t\t\t\t\"relatedObjects\": service}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod %v/%v exposed by SSH services: %v\", [podns, podname, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tlabels := wl.spec.template.metadata.labels\n path := \"spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": service}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tlabels := wl.spec.jobTemplate.spec.template.metadata.labels\n path := \"spec.jobTemplate.spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": service}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "Service" - ] - }, - { - 
"apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "denies pods with SSH ports opened(22/222)", - "remediation": "", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "strict-file-permissions-700", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true", - "useFromKubescapeVersion": "v2.0.159" - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport data.cautils as cautils\nimport future.keywords.in\n\n# Fail for every file in data.postureControlInputs.fileObjPath\n# if the permissions of the file are more permissive that 600.\n# Expect (supposed to be fixed per control, not user configurable): \n# \t(required) data.postureControlInputs.fileObjPath - list of paths strings. The item delimiter is `.`.\n# \t(optional) data.postureControlInputs.kindFilter \n# \t(optional) data.postureControlInputs.pathGlob \ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tfilter_kind(obj.kind)\n\n\t# Get the file info using the input object-paths\n\trawObjPath = data.postureControlInputs.fileObjPath[_]\n\tobjPath := split(rawObjPath, \"/\")\n\tsubject := object.get(obj, objPath, false)\n\tsubject != false\n\n\t# Run the test for every file\n\tfiles := get_files(subject)\n\tfile = files[file_index]\n\tfile_path_glob(file.path)\n\n\t# Actual permissions test \n\tallowed_perms := 448 # 0o700 == 448\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Filter out irrelevant data from the alert object\n\tfile_filtered := filter_file(obj, objPath, file_index)\n\tobj_filtered := json.filter(obj, [\"apiVersion\", \"kind\", \"metadata\"])\n\toutput := object.union(file_filtered, obj_filtered)\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. 
actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": output},\n\t}\n}\n\n# Return always a list\nget_files(obj) = files {\n\tis_array(obj)\n\tfiles = obj\n}\n\nget_files(obj) = files {\n\tnot is_array(obj)\n\tfiles = [obj]\n}\n\n# Filter only kinds that are in data.postureControlInputs.kindFilter.\n# If no kindFilter - match everything\nfilter_kind(kind) {\n\tkind in data.postureControlInputs.kindFilter\n}\n\nfilter_kind(kind) {\n\tnot data.postureControlInputs.kindFilter\n}\n\n# Filter file path globs from data.postureControlInputs.pathGlob\nfile_path_glob(path) {\n\tpatterns = data.postureControlInputs.pathGlob\n\tcount({true | patterns[i]; glob.match(patterns[i], null, path)}) > 0\n}\n\nfile_path_glob(path) {\n\tnot data.postureControlInputs.pathGlob\n}\n\n# Filter only the current file\nfilter_file(obj, objPath, file_index) = ret {\n\tis_array(object.get(obj, objPath, false))\n\tfull_path := array.concat(objPath, [format_int(file_index, 10)])\n\tfinal_path := concat(\"/\", full_path)\n\tret := json.filter(obj, [final_path])\n}\n\nfilter_file(obj, objPath, file_index) = ret {\n\tnot is_array(object.get(obj, objPath, false))\n\tret = object.filter(obj, objPath)\n}", - "resourceEnumerator": "package armo_builtins\n\nimport future.keywords.in\n\n# Filter only kinds that are in data.postureControlInputs.kindFilter.\n# Filter out non-host-sensor as well.\n# If no kindFilter - match every kind\ndeny[msg] {\n\tobj = input[_]\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tfilter_kind(obj.kind)\n\tmsg := {\"alertObject\": {\"externalObjects\": obj}}\n}\n\n# Filter only kinds that are in data.postureControlInputs.kindFilter.\n# If no kindFilter - match everything\nfilter_kind(kind) {\n\tkind in data.postureControlInputs.kindFilter\n}\n\nfilter_kind(kind) {\n\tnot data.postureControlInputs.kindFilter\n}\n", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo", - "KubeProxyInfo", - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "configInputs": null, - "controlConfigInputs": null, - "description": "Ensure that file has permissions of 700 or more restrictive", - "remediation": "Set the permission of the failed file file to 700 or more restrictive", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "set-seccomp-profile", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\n# Fails if pod does not define seccompProfile\ndeny[msga] {\n wl := input[_]\n wl.kind == \"Pod\"\n spec := wl.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := 
{\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if workload does not define seccompProfile\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n spec := wl.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if CronJob does not define seccompProfile\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n spec := wl.spec.jobTemplate.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nseccompProfile_not_defined(spec, path_to_search){\n\tobject.get(spec, path_to_search, \"\") == \"\"\n}", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "fails if container does not define seccompProfile", - "remediation": "Make sure you define seccompProfile at workload or container lever.", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "ensure-that-the-api-server-encryption-providers-are-appropriately-configured", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Encryption config is set but not using one of the 
recommended providers\ndeny[msg] {\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\tconfig_file := obj.data.APIServerInfo.encryptionProviderConfigFile\n\tconfig_file_content = decode_config_file(base64.decode(config_file.content))\n\n\t# For each resource check if it does not have allowed provider\n\tfix_paths := [{\n\t\t\"path\": sprintf(\"resources[%d].providers[%d]\", [i, count(resource.providers)]),\n\t\t\"value\": \"{\\\"aescbc\\\" | \\\"secretbox\\\" | \\\"kms\\\" : }\", # must be string\n\t} |\n\t\tresource := config_file_content.resources[i]\n\t\tcount({true |\n\t\t\tsome provider in resource.providers\n\t\t\thas_one_of_keys(provider, [\"aescbc\", \"secretbox\", \"kms\"])\n\t\t}) == 0\n\t]\n\n\tcount(fix_paths) > 0\n\n\t# Add name to the failed object so that\n\t# it fit the format of the alert object\n\tfailed_obj := json.patch(config_file_content, [{\n\t\t\"op\": \"add\",\n\t\t\"path\": \"name\",\n\t\t\"value\": \"encryption-provider-config\",\n\t}])\n\n\tmsg := {\n\t\t\"alertMessage\": \"Encryption provider config is not using one of the allowed providers (aescbc, secretbox, kms)\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": failed_obj},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\ndecode_config_file(content) := data {\n\tdata := yaml.unmarshal(content)\n} else := json.unmarshal(content)\n\nhas_key(x, k) {\n\t_ = x[k]\n}\n\nhas_one_of_keys(x, keys) {\n\thas_key(x, keys[_])\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": null, - "configInputs": null, - "controlConfigInputs": null, - "description": "Where `etcd` encryption is used, appropriate providers should be configured.", - "remediation": "Follow the Kubernetes documentation and configure a `EncryptionConfig` file. 
In this file, choose `aescbc`, `kms` or `secretbox` as the encryption provider.\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, no encryption provider is set.", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "audit-policy-content", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.if\nimport future.keywords.in\n\n# CIS 3.2.2 https://workbench.cisecurity.org/sections/1126657/recommendations/1838583\n\ndeny[msga] {\n\tobj := input[_]\n\tis_api_server_info(obj)\n\tapi_server_info := obj.data.APIServerInfo\n\n\tnot contains(api_server_info.cmdLine, \"--audit-policy-file\")\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs are not enabled\",\n\t\t\"alertScore\": 5,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": api_server_info.cmdLine,\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_api_server_info(obj)\n\n\tapi_server_info := obj.data.APIServerInfo\n\n\tcontains(api_server_info.cmdLine, \"--audit-policy-file\")\n\n\trawPolicyFile := api_server_info.auditPolicyFile\n\tpolicyFile = yaml.unmarshal(base64.decode(rawPolicyFile.content))\n\n\tare_audit_file_rules_valid(policyFile.rules)\n\n\tfailed_obj := json.patch(policyFile, [{\n\t\t\"op\": \"add\",\n\t\t\"path\": \"metadata\",\n\t\t\"value\": {\"name\": sprintf(\"%s - Audit policy file\", [obj.metadata.name])},\n\t}])\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit policy rules do not cover key security areas or audit levels are invalid\",\n\t\t\"alertScore\": 5,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"externalObjects\": failed_obj},\n\t}\n}\n\n# Sample rules object\n#rules:\n# - level: RequestResponse\n# resources:\n# - group: \"\"\n# resources: [\"pods\"]\nare_audit_file_rules_valid(rules) if {\n\tseeked_resources_with_audit_level := {\n\t\t\"secrets\": {\n\t\t\t\"auditLevel\": \"Metadata\",\n\t\t\t\"mode\": \"equal\",\n\t\t},\n\t\t\"configmaps\": {\n\t\t\t\"auditLevel\": \"Metadata\",\n\t\t\t\"mode\": \"equal\",\n\t\t},\n\t\t\"tokenreviews\": {\n\t\t\t\"auditLevel\": \"Metadata\",\n\t\t\t\"mode\": \"equal\",\n\t\t},\n\t\t\"pods\": {\n\t\t\t\"auditLevel\": \"None\",\n\t\t\t\"mode\": \"not-equal\",\n\t\t},\n\t\t\"deployments\": {\n\t\t\t\"auditLevel\": \"None\",\n\t\t\t\"mode\": \"not-equal\",\n\t\t},\n\t\t\"pods/exec\": {\n\t\t\t\"auditLevel\": \"None\",\n\t\t\t\"mode\": \"not-equal\",\n\t\t},\n\t\t\"pods/portforward\": {\n\t\t\t\"auditLevel\": \"None\",\n\t\t\t\"mode\": \"not-equal\",\n\t\t},\n\t\t\"pods/proxy\": {\n\t\t\t\"auditLevel\": \"None\",\n\t\t\t\"mode\": \"not-equal\",\n\t\t},\n\t\t\"services/proxy\": {\n\t\t\t\"auditLevel\": \"None\",\n\t\t\t\"mode\": \"not-equal\",\n\t\t},\n\t}\n\n\t# Policy file must contain every resource\n\tsome resource, config in seeked_resources_with_audit_level\n\n\t# Every seeked resource mu have valid audit levels \n\tnot test_all_rules_against_one_seeked_resource(resource, config, rules)\n}\n\ntest_all_rules_against_one_seeked_resource(seeked_resource, value_of_seeked_resource, rules) if {\n\t# Filter down rules to only those concerning a seeked resource\n\trules_with_seeked_resource := [rule | rule := rules[_]; is_rule_concering_seeked_resource(rule, 
seeked_resource)]\n\trules_count := count(rules_with_seeked_resource)\n\n\t# Move forward only if there are some\n\trules_count > 0\n\n\t# Check if rules concerning seeked resource have valid audit levels \n\tvalid_rules := [rule | rule := rules_with_seeked_resource[_]; validate_rule_audit_level(rule, value_of_seeked_resource)]\n\tvalid_rules_count := count(valid_rules)\n\n\tvalid_rules_count > 0\n\n\t# Compare all rules for that specififc resource with those with valid rules, if amount of them differs,\n\t# it means that there are also some rules which invalid audit level\n\tvalid_rules_count == rules_count\n}\n\nis_rule_concering_seeked_resource(rule, seeked_resource) if {\n\trule.resources[_].resources[_] == seeked_resource\n}\n\n# Sample single rule:\n# \t level: RequestResponse\n# resources:\n# - group: \"\"\n# resources: [\"pods\"]\nvalidate_rule_audit_level(rule, value_of_seeked_resource) := result if {\n\tvalue_of_seeked_resource.mode == \"equal\"\n\tresult := rule.level == value_of_seeked_resource.auditLevel\n} else := result {\n\tresult := rule.level != value_of_seeked_resource.auditLevel\n}\n\nis_api_server_info(obj) if {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "APIServerInfo" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "Kubernetes can audit the details of requests made to the API server. The `--audit-policy-file` flag must be set for this logging to be enabled.", - "remediation": "Create an audit policy file for your cluster.", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "kubelet-hostname-override", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.8 https://workbench.cisecurity.org/sections/1126668/recommendations/1838654\n\ndeny[msga] {\n\tkubelet_info := input[_]\n\tkubelet_info.kind == \"KubeletInfo\"\n\tkubelet_info.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tcommand := kubelet_info.data.cmdLine\n\n\tcontains(command, \"--hostname-override\")\n\n\texternal_obj := json.filter(kubelet_info, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --hostname-override is set.\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "Ensure that the --hostname-override argument is not set.", - "remediation": "Unset the --hostname-override argument.", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "ensure-that-the-api-server-authorization-mode-argument-includes-Node", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport 
future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"kubelet nodes can read objects that are not associated with them\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--authorization-mode=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tnot \"Node\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, [\"Node\"])\n\tfixed_flag = sprintf(\"%s=%s\", [\"--authorization-mode\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--authorization-mode\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--authorization-mode=Node\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "Restrict kubelet nodes to reading only objects associated with them.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to a value that includes `Node`.\n\n \n```\n--authorization-mode=Node,RBAC\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `Node` authorization is not enabled.", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "security-context-in-pod", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n is_not_security_context(pod, container)\n\tfixPaths := [{\"path\": sprintf(\"spec.containers[%v].securityContext\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n msga := 
{\n\t\t\"alertMessage\": sprintf(\"Container: %v in pod: %v does not define a securityContext.\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertScore\": 7,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\t\n}\n\n\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tis_not_security_context(wl.spec.template, container)\n\tfixPaths := [{\"path\": sprintf(\"spec.template.spec.containers[%v].securityContext\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not define a securityContext.\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertScore\": 7,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tis_not_security_context(wl.spec.jobTemplate.spec.template, container)\n\tfixPaths := [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].securityContext\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not define a securityContext.\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertScore\": 7,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\nis_not_security_context(pod, container) {\n\tnot pod.spec.securityContext \n\tnot container.securityContext\n}\n\nis_not_security_context(pod, container) {\n\tcount(pod.spec.securityContext) == 0\n\tnot container.securityContext\n}\n\n\nis_not_security_context(pod, container) {\n\tnot pod.spec.securityContext \n\tcontainer.securityContext\n\tcount(container.securityContext) == 0\n}\n\nis_not_security_context(pod, container) {\n \tcount(pod.spec.securityContext) == 0\n \tcontainer.securityContext\n \tcount(container.securityContext) == 0\n}", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "fails if pod/container does not define a security context.", - "remediation": "Make sure that the securityContext field is defined for pod/container.", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "rule-can-delete-create-service", - "attributes": { - "m$K8sThreatMatrix": "Discovery::Access the K8s API server", - "armoBuiltin": true, - "useUntilKubescapeVersion": "v1.0.133" - }, - "creationTime": "", - "rule": "\npackage armo_builtins\nimport data.cautils as cautils\n\n\n\n# fails if user has create/delete access to services\n# RoleBinding to Role\ndeny[msga] {\n roles := [role | role= input[_]; 
role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n canCreateDeleteToServiceResource(rule)\n canCreateDeleteToServiceVerb(rule)\n\n rolebinding.roleRef.kind == \"Role\"\n rolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n \tmsga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can create/delete services\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n# fails if user has create/delete access to services\n# RoleBinding to ClusterRole\ndeny [msga]{\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\n canCreateDeleteToServiceResource(rule)\n canCreateDeleteToServiceVerb(rule)\n\n rolebinding.roleRef.kind == \"ClusterRole\"\n rolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n \tmsga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can create/delete services\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n\n}\n\n# fails if user has create/delete access to services\n# ClusterRoleBinding to ClusterRole\ndeny [msga]{\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n clusterrolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n clusterrolebinding := clusterrolebindings[_]\n\n rule:= role.rules[_]\n\n canCreateDeleteToServiceResource(rule)\n canCreateDeleteToServiceVerb(rule)\n\n clusterrolebinding.roleRef.kind == \"ClusterRole\"\n clusterrolebinding.roleRef.name == role.metadata.name\n subject := clusterrolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n \tmsga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can create/delete services\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,clusterrolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\ncanCreateDeleteToServiceResource(rule) {\n cautils.list_contains(rule.resources, \"services\")\n}\n\ncanCreateDeleteToServiceResource(rule) {\n is_api_group(rule)\n cautils.list_contains(rule.resources, \"*\")\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}\n\ncanCreateDeleteToServiceVerb(rule) {\n cautils.list_contains(rule.verbs, \"create\")\n}\n\ncanCreateDeleteToServiceVerb(rule) {\n cautils.list_contains(rule.verbs, \"delete\")\n}\n\ncanCreateDeleteToServiceVerb(rule) {\n cautils.list_contains(rule.verbs, 
\"deletecollection\")\n}\n\ncanCreateDeleteToServiceVerb(rule) {\n cautils.list_contains(rule.verbs, \"*\")\n}", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "configInputs": null, - "controlConfigInputs": null, - "description": "determines which users have create/delete permissions on services", - "remediation": "", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "rule-privilege-escalation", - "attributes": { - "mitreCode": "TA0004", - "armoBuiltin": true, - "m$K8sThreatMatrix": "Privilege Escalation::privileged container", - "mitre": "Privilege Escalation" - }, - "creationTime": "", - "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n#privileged pods\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following pods are defined as privileged: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\n#handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is defined as privileged:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n#handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined as privileged: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n\n# Only SYS_ADMIN capabilite\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tnot container.securityContext.privileged == true\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path) > 0\n}\n\n# Only securityContext.privileged == true\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tcontainer.securityContext.privileged == true\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == 
\"SYS_ADMIN\"]\n\tcount(path1) < 1\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.privileged\", [beggining_of_path, format_int(i, 10)])]\n}\n\n# SYS_ADMIN capabilite && securityContext.privileged == true\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) > 0\n\tcontainer.securityContext.privileged == true\n\tpath = array.concat(path1, [sprintf(\"%vcontainers[%v].securityContext.privileged\", [beggining_of_path, format_int(i, 10)])])\n}", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "determines if pods/deployments defined as privileged true", - "remediation": "avoid defining pods as privilleged", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "internal-networking", - "attributes": { - "m$K8sThreatMatrix": "Lateral Movement::Container internal networking, Discovery::Network mapping", - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\n# input: network policies\n# apiversion: networking.k8s.io/v1\n# fails if no network policies are defined in a certain namespace\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\tpolicy_names := [policy.metadata.namespace | policy = input[_]; policy.kind == \"NetworkPolicy\"]\n\tnot list_contains(policy_names, namespace.metadata.name)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\nlist_contains(list, element) {\n some i\n list[i] == element\n}", - "resourceEnumerator": "package armo_builtins\n\n# input: network policies + namespaces\n# apiversion: networking.k8s.io/v1\n# returns all namespaces\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [\"\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "networking.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "NetworkPolicy" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "lists namespaces in which no network policies are defined", - "remediation": "", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null 
- }, - { - "guid": "", - "name": "if-proxy-kubeconfig-file-exists-ensure-permissions-are-set-to-600-or-more-restrictive", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport data.cautils as cautils\nimport future.keywords.in\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubproxy_info(obj)\n\n\tfile_obj_path := [\"data\", \"kubeConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test \n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubproxy_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeProxyInfo\"\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeProxyInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "configInputs": null, - "controlConfigInputs": null, - "description": "If `kube-proxy` is running, and if it is using a file-based kubeconfig file, ensure that the proxy kubeconfig file has permissions of `600` or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the each worker node. 
For example,\n\n \n```\nchmod 600 \n\n```", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "k8s-audit-logs-enabled-native-cis", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\n# CIS 3.2.1 https://workbench.cisecurity.org/sections/1126657/recommendations/1838582\ndeny[msga] {\n\tobj := input[_]\n\tis_api_server(obj)\n\tcmd := obj.spec.containers[0].command\n\taudit_policy := [command | command := cmd[_]; contains(command, \"--audit-policy-file=\")]\n\tcount(audit_policy) < 1\n\tpath := sprintf(\"spec.containers[0].command[%v]\", [count(cmd)])\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs are not enabled\",\n\t\t\"alertScore\": 5,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "Kubernetes can audit the details of requests made to the API server. The `--audit-policy-file` flag must be set for this logging to be enabled.", - "remediation": "Create an audit policy file for your cluster.", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "ensure-that-the-admission-control-plugin-NamespaceLifecycle-is-set", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"admission control plugin AlwaysAdmit is enabled. 
This is equal to turning off all admission controllers\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--disable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\t\"NamespaceLifecycle\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := [val | val := flag.values[j]; val != \"NamespaceLifecycle\"]\n\tresult = get_retsult(fixed_values, i)\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) == 0\n\tresult = {\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) > 0\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": sprintf(\"--disable-admission-plugins=%v\", [concat(\",\", fixed_values)]),\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "Reject creating objects in a namespace that is undergoing termination.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--disable-admission-plugins` parameter to ensure it does not include `NamespaceLifecycle`.\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `NamespaceLifecycle` is set.", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "anonymous-requests-to-kubelet-service", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "creationTime": "", - "rule": "package armo_builtins\nimport data.kubernetes.api.client as client\n\ndeny[msga] {\n\t\tkubelet_config := input[_]\n\t\tkubelet_config.kind == \"KubeletConfiguration\"\n\t\tkubelet_config.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\n\t\tkubelet_cli := input[_] \n\t\tkubelet_cli.kind == \"KubeletCommandLine\"\n\t\tkubelet_cli.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\t\t\n\t\tresult := is_anonymou_requests_disabled_both(kubelet_config, kubelet_cli)\n\t\texternal_obj := result.obj\n\t\tfailed_paths := result.failedPaths\n\t\tfix_paths := 
result.fixPaths\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": \"anonymous requests is enabled\",\n\t\t\t\"alertScore\": 2,\n\t\t\t\"failedPaths\": failed_paths,\n\t\t\t\"fixPaths\": fix_paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"externalObjects\": external_obj\n\t\t\t}\n\t\t}\n\t}\n\n\ndeny[msga] {\n\t\tresult := is_anonymou_requests_disabled_single(input)\n\t\texternal_obj := result.obj\n\t\tfailed_paths := result.failedPaths\n\t\tfix_paths := result.fixPaths\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": \"anonymous requests is enabled\",\n\t\t\t\"alertScore\": 2,\n\t\t\t\"failedPaths\": failed_paths,\n\t\t\t\"fixPaths\": fix_paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"externalObjects\": external_obj\n\t\t\t}\n\t\t}\n\t}\n\n# CLI overrides config\nis_anonymou_requests_disabled_both(kubelet_config, kubelet_cli) = {\"obj\": obj,\"failedPaths\": [], \"fixPaths\": []} {\n\tkubelet_cli_data := kubelet_cli.data\n\tcontains(kubelet_cli_data[\"fullCommand\"], \"anonymous-auth=true\")\n\tobj = kubelet_cli\n}\n\nis_anonymou_requests_disabled_both(kubelet_config, kubelet_cli) = {\"obj\": obj,\"failedPaths\": [\"data.authentication.anonymous.enabled\"], \"fixPaths\": []} {\n\tkubelet_config.data.authentication.anonymous.enabled == true\n\tkubelet_cli_data := kubelet_cli.data\n\tnot contains(kubelet_cli_data[\"fullCommand\"], \"anonymous-auth=false\")\n not contains(kubelet_cli_data[\"fullCommand\"], \"anonymous-auth=true\")\n\tobj = kubelet_config\n}\n\n# only kubelet config\nis_anonymou_requests_disabled_single(resources) = {\"obj\": obj,\"failedPaths\": [\"data.authentication.anonymous.enabled\"], \"fixPaths\": []} {\n\tkubelet_config := resources[_]\n\tkubelet_config.kind == \"KubeletConfiguration\"\n\tkubelet_config.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\n\tkubelet_cli := [cli | cli = resources[_]; cli.kind == \"KubeletCommandLine\"]\n\tcount(kubelet_cli) == 0\n\n\tobj = isAnonymouRequestsDisabledKubeletConfig(kubelet_config) \n}\n\n# only kubelet cli\nis_anonymou_requests_disabled_single(resources) = {\"obj\": obj,\"failedPaths\": [], \"fixPaths\": []} {\n\tkubelet_cli := resources[_] \n\tkubelet_cli.kind == \"KubeletCommandLine\"\n\tkubelet_cli.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\n\tkubelet_config := [config | config = resources[_]; config.kind == \"KubeletConfiguration\"]\n\tcount(kubelet_config) == 0\n\n\tobj = isAnonymouRequestsDisabledKubeletCli(kubelet_cli)\n}\n\n\nisAnonymouRequestsDisabledKubeletConfig(kubelet_config) = obj {\n\tkubelet_config.data.authentication.anonymous.enabled == true\n\tobj = kubelet_config\n}\n\n\nisAnonymouRequestsDisabledKubeletCli(kubelet_cli) = obj {\n\tkubelet_cli_data := kubelet_cli.data\n\tcontains(kubelet_cli_data[\"fullCommand\"], \"anonymous-auth=true\")\n obj = kubelet_cli\n}", - "resourceEnumerator": "package armo_builtins\nimport data.kubernetes.api.client as client\n\ndeny[msga] {\n\t\tkubelet_config := input[_]\n\t\tkubelet_config.kind == \"KubeletConfiguration\"\n\t\tkubelet_config.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\n\t\tkubelet_cli := input[_] \n\t\tkubelet_cli.kind == \"KubeletCommandLine\"\n\t\tkubelet_cli.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\n\t\texternal_obj := getObjBoth(kubelet_config, kubelet_cli)\n\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": \"anonymous requests is enabled\",\n\t\t\t\"alertScore\": 2,\n\t\t\t\"failedPaths\": [],\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": 
{\n\t\t\t\t\"externalObjects\": external_obj\n\t\t\t}\n\t\t}\n\t}\n\n\n\t\ndeny[msga] {\n\n\t\texternal_obj := getObjSingle(input)\n\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": \"anonymous requests is enabled\",\n\t\t\t\"alertScore\": 2,\n\t\t\t\"failedPaths\": [],\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"externalObjects\": external_obj\n\t\t\t}\n\t\t}\n\t}\n\n\n# Both cli and config present. Return only relevant (priority to cli)\ngetObjBoth(kubelet_config, kubelet_cli) = obj {\n\tkubelet_cli_data := kubelet_cli.data\n\tcontains(kubelet_cli_data[\"fullCommand\"], \"anonymous-auth=\")\n obj = kubelet_cli\n}\n\n\ngetObjBoth(kubelet_config, kubelet_cli) = obj {\n\tkubelet_cli_data := kubelet_cli.data\n\tnot contains(kubelet_cli_data[\"fullCommand\"], \"anonymous-auth=\")\n obj = kubelet_config\n}\n\n# Only cli or only config\ngetObjSingle(resources) = obj {\n\tkubelet_cli := resources[_] \n\tkubelet_cli.kind == \"KubeletCommandLine\"\n\tkubelet_cli.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\n\tkubelet_config := [config | config = resources[_]; config.kind == \"KubeletConfiguration\"]\n\tcount(kubelet_config) == 0\n\n\tobj = kubelet_cli\n}\n\ngetObjSingle(resources) = obj {\n\tkubelet_config := resources[_]\n\tkubelet_config.kind == \"KubeletConfiguration\"\n\tkubelet_config.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\n\tkubelet_cli := [cli | cli = resources[_]; cli.kind == \"KubeletCommandLine\"]\n\tcount(kubelet_cli) == 0\n\n\tobj = kubelet_config\n}", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletConfiguration", - "KubeletCommandLine" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - }, - { - "packageName": "kubernetes.api.client" - } - ], - "configInputs": null, - "controlConfigInputs": null, - "description": "Determines if anonymous requests are to kubelet service are allowed.", - "remediation": "Disable anonymous requests by setting the anonymous-auth flag to false, or using the kubelet configuration file.", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "namespace-without-service-account", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\n\n# Fails if namespace does not have service accounts (not incluiding default)\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tserviceAccounts := [serviceaccount | serviceaccount= input[_]; is_good_sa(serviceaccount, namespace.metadata.name)]\n\tcount(serviceAccounts) < 1\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not have any service accounts besides 'default'\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\t\n\t\nis_good_sa(sa, namespace) { \n\tsa.kind == \"ServiceAccount\"\n\tsa.metadata.namespace == namespace\n\tsa.metadata.name != \"default\"\n}", - "resourceEnumerator": "package armo_builtins\n\n\n# Fails if namespace does not have service accounts (not incluiding default)\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not have any service accounts besides 
'default'\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Namespace", - "ServiceAccount" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "fails if namespace does not have service accounts (not incluiding default)", - "remediation": "", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "k8s-audit-logs-enabled-native", - "attributes": { - "armoBuiltin": true, - "resourcesAggregator": "apiserver-pod", - "useFromKubescapeVersion": "v1.0.133" - }, - "creationTime": "", - "rule": "package armo_builtins\nimport data.cautils as cautils\n\n# Check if audit logs is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command\n\taudit_policy := [ command |command := cmd[_] ; contains(command, \"--audit-policy-file=\")]\n count(audit_policy) < 1\n\tpath := \"spec.containers[0].command\"\t\n\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\t\t\n\t\t}\n\t}\n}", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "rule-can-create-bind-escalate-role-v1", - "attributes": { - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133", - "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server" - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# ================= create/update ===============================\n\n# fails if user has access to create/update rolebindings/clusterrolebindings\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"update\", \"patch\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"rbac.authorization.k8s.io\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"rolebindings\", \"clusterrolebindings\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, 
[\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can create/update rolebinding/clusterrolebinding\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# ================= bind ===============================\n\n# fails if user has access to bind clusterroles/roles\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"bind\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"rbac.authorization.k8s.io\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"clusterroles\", \"roles\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can bind roles/clusterroles\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# ================= escalate ===============================\n\n# fails if user has access to escalate rolebindings/clusterrolebindings\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"escalate\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"rbac.authorization.k8s.io\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"rolebindings\", \"clusterrolebindings\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, 
k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can escalate rolebinding/clusterrolebinding\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "determines which users can create/update rolebindings/clusterrolebindings or bind roles/clusterroles", - "remediation": "", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "ensure-that-the-kubeconfig-kubelet.conf-file-permissions-are-set-to-600-or-more-restrictive", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport data.cautils as cautils\nimport future.keywords.in\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"kubeConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test \n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "configInputs": null, - "controlConfigInputs": null, - "description": "Ensure that the `kubelet.conf` file has permissions of `600` or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the each worker node. 
For example,\n\n \n```\nchmod 600 /etc/kubernetes/kubelet.conf\n\n```", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "ensure-that-the-etcd-pod-specification-file-permissions-are-set-to-600-or-more-restrictive", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport data.cautils as cautils\nimport future.keywords.in\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"etcdConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test \n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "configInputs": null, - "controlConfigInputs": null, - "description": "Ensure that the `/etc/kubernetes/manifests/etcd.yaml` file has permissions of `600` or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/etcd.yaml\n\n```", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "ensure-that-the-Kubernetes-PKI-key-file-permissions-are-set-to-600", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport data.cautils as cautils\nimport future.keywords.in\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"PKIFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\tendswith(file.path, \".key\")\n\n\t# Actual permissions test \n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]), \"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. 
actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "configInputs": null, - "controlConfigInputs": null, - "description": "Ensure that Kubernetes PKI key files have permissions of `600`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod -R 600 /etc/kubernetes/pki/*.key\n\n```", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "rule-can-create-pod-kube-system", - "attributes": { - "armoBuiltin": true, - "useUntilKubescapeVersion": "v1.0.133", - "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server" - }, - "creationTime": "", - "rule": "\npackage armo_builtins\nimport data.cautils as cautils\n\n# fails if user has create access to pods within kube-system namespace\n# RoleBinding to Role\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n can_create_to_pod_namespace(role)\n can_create_to_pod_resource(rule)\n can_create_to_pod_verb(rule)\n\n rolebinding.roleRef.kind == \"Role\"\n rolebinding.roleRef.name == role.metadata.name\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can create pods in kube-system\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 3,\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\n\n\n# fails if user has create access to pods within kube-system namespace\n# RoleBinding to ClusterRole\ndeny [msga]{\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n can_create_to_pod_namespace(rolebinding)\n can_create_to_pod_resource(rule)\n can_create_to_pod_verb(rule)\n\n\n\n rolebinding.roleRef.kind == \"ClusterRole\"\n rolebinding.roleRef.name == role.metadata.name\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n \t\"alertMessage\": sprintf(\"The following %v: %v can create pods in kube-system\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 3,\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\n\n\n# fails if user has create 
access to pods within kube-system namespace\n# ClusterRoleBinding to ClusterRole\ndeny [msga]{\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n clusterrolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n clusterrolebinding := clusterrolebindings[_]\n\n rule:= role.rules[_]\n can_create_to_pod_resource(rule)\n can_create_to_pod_verb(rule)\n\n clusterrolebinding.roleRef.kind == \"ClusterRole\"\n clusterrolebinding.roleRef.name == role.metadata.name\n\n subject := clusterrolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can create pods in kube-system\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 3,\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,clusterrolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\ncan_create_to_pod_resource(rule){\n cautils.list_contains(rule.resources,\"pods\")\n}\n\ncan_create_to_pod_resource(rule){\n is_api_group(rule)\n cautils.list_contains(rule.resources,\"*\")\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}\n\ncan_create_to_pod_verb(rule) {\n cautils.list_contains(rule.verbs, \"create\")\n}\n\n\ncan_create_to_pod_verb(rule) {\n cautils.list_contains(rule.verbs, \"*\")\n}\n\ncan_create_to_pod_namespace(role) {\n role.metadata.namespace == \"kube-system\"\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "configInputs": null, - "controlConfigInputs": null, - "description": "determines which users can create pods in kube-system namespace", - "remediation": "", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "alert-any-hostpath", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::hostPath mount", - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volumes := pod.spec.volumes\n volume := volumes[i]\n\tbeggining_of_path := \"spec.\"\n\tresult := is_dangerous_host_path(volume, beggining_of_path, i)\n podname := pod.metadata.name\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod: %v has: %v as hostPath volume\", [podname, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n#handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volumes := wl.spec.template.spec.volumes\n volume := volumes[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n result := is_dangerous_host_path(volume, beggining_of_path, i)\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": 
[result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n#handles CronJobs\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n volumes := wl.spec.jobTemplate.spec.template.spec.volumes\n volume := volumes[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n result := is_dangerous_host_path(volume, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_dangerous_host_path(volume, beggining_of_path, i) = path {\n startswith(volume.hostPath.path, \"/etc\")\n\tpath = sprintf(\"%vvolumes[%v].hostPath.path\", [beggining_of_path, format_int(i, 10)])\n}\n\nis_dangerous_host_path(volume, beggining_of_path, i) = path {\n startswith(volume.hostPath.path, \"/var\")\n\tpath = sprintf(\"%vvolumes[%v].hostPath.path\", [beggining_of_path, format_int(i, 10)])\n}", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "determines if any workload contains a hostPath volume", - "remediation": "Try to refrain from using hostPath mounts", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "resources-notpods-in-default-namespace", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresource_kinds := {\"ConfigMap\",\"Endpoints\",\"Event\",\"LimitRange\",\"PersistentVolumeClaim\",\"PodTemplate\",\n\t\t\t\t\t\t\"ReplicationController\",\"ResourceQuota\",\"Secret\",\"ServiceAccount\",\"Service\",\n\t\t\t\t\t\t\"ControllerRevision\",\"HorizontalPodAutoscaler\",\"Lease\",\"EndpointSlice\",\"Event\",\n\t\t\t\t\t\t\"Ingress\",\"NetworkPolicy\",\"PodDisruptionBudget\",\"RoleBinding\",\"Role\",\"CSIStorageCapacity\"}\n\tresource_kinds[resource.kind]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - 
{ - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ConfigMap", - "Endpoints", - "Event", - "LimitRange", - "PersistentVolumeClaim", - "PodTemplate", - "ReplicationController", - "ResourceQuota", - "Secret", - "ServiceAccount", - "Service" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ControllerRevision" - ] - }, - { - "apiGroups": [ - "autoscaling" - ], - "apiVersions": [ - "v2" - ], - "resources": [ - "HorizontalPodAutoscaler" - ] - }, - { - "apiGroups": [ - "coordination.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Lease" - ] - }, - { - "apiGroups": [ - "discovery.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "EndpointSlice" - ] - }, - { - "apiGroups": [ - "events.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Event" - ] - }, - { - "apiGroups": [ - "networking.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Ingress", - "NetworkPolicy" - ] - }, - { - "apiGroups": [ - "policy" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "PodDisruptionBudget" - ] - }, - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "RoleBinding", - "Role" - ] - }, - { - "apiGroups": [ - "storage.k8s.io" - ], - "apiVersions": [ - "v1beta1" - ], - "resources": [ - "CSIStorageCapacity" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "access-container-service-account-v1", - "attributes": { - "m$K8sThreatMatrix": "Credential Access::Access container service account, Lateral Movement::Container service account", - "armoBuiltin": true, - "useFromKubescapeVersion": "v1.0.133" - }, - "creationTime": "", - "rule": "package armo_builtins\n\n\n# Returns the rbac permission of each service account\ndeny[msga] {\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n service_account_name := service_account.metadata.name\n\n not saTokenNotAutoMount(service_account)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == service_account_name\n rolesubject.namespace == service_account.metadata.namespace\n\n roles := [role | role = input[_]; role.kind == \"Role\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n savector = {\"name\": service_account.metadata.name,\n\t\t\t\t\"namespace\": service_account.metadata.namespace,\n\t\t\t\t\"kind\": service_account.kind,\n\t\t\t\t\"relatedObjects\": [role, rolebinding]}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service account: %v has the following permissions in the cluster: %v\", [service_account_name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [],\n \"fixPaths\":[],\n\t\t\"alertScore\": 7,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": savector\n\t\t}\n\t}\n}\n\n# Returns the rbac permission of each service account\ndeny[msga] {\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n service_account_name := service_account.metadata.name\n\n not 
saTokenNotAutoMount(service_account)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == service_account_name\n rolesubject.namespace == service_account.metadata.namespace\n\n roles := [role | role = input[_]; role.kind == \"ClusterRole\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n savector = {\"name\": service_account.metadata.name,\n\t\t\t\t\"namespace\": service_account.metadata.namespace,\n\t\t\t\t\"kind\": service_account.kind,\n\t\t\t\t\"relatedObjects\": [role, rolebinding]}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service account: %v has the following permissions in the cluster: %v\", [service_account_name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [],\n \"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": savector\n\t\t}\n\t}\n}\n\n# Returns the rbac permission of each service account\ndeny[msga] {\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n service_account_name := service_account.metadata.name\n\n not saTokenNotAutoMount(service_account)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == service_account_name\n rolesubject.namespace == service_account.metadata.namespace\n\n roles := [role | role = input[_]; role.kind == \"ClusterRole\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n savector = {\"name\": service_account.metadata.name,\n\t\t\t\t\"namespace\": service_account.metadata.namespace,\n\t\t\t\t\"kind\": service_account.kind,\n\t\t\t\t\"relatedObjects\": [role, rolebinding]}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service account: %v has the following permissions in the cluster: %v\", [service_account_name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [],\n \"fixPaths\":[],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": savector\n\t\t}\n\t}\n}\n\n# ===============================================================\n\nsaTokenNotAutoMount(service_account) {\n service_account.automountServiceAccountToken == false\n}\n\n", - "resourceEnumerator": "package armo_builtins\n\n\n# Returns the rbac permission of each service account\ndeny[msga] {\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n service_account_name := service_account.metadata.name\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == service_account_name\n rolesubject.namespace == service_account.metadata.namespace\n\n roles := [role | role = input[_]; role.kind == \"Role\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n savector = {\"name\": service_account.metadata.name,\n\t\t\t\t\"namespace\": service_account.metadata.namespace,\n\t\t\t\t\"kind\": service_account.kind,\n\t\t\t\t\"relatedObjects\": [role, rolebinding]}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service account: %v has 
the following permissions in the cluster: %v\", [service_account_name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [\"\"],\n\t\t\"alertScore\": 7,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": savector\n\t\t}\n\t}\n}\n\n# Returns the rbac permission of each service account\ndeny[msga] {\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n service_account_name := service_account.metadata.name\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == service_account_name\n rolesubject.namespace == service_account.metadata.namespace\n\n roles := [role | role = input[_]; role.kind == \"ClusterRole\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n savector = {\"name\": service_account.metadata.name,\n\t\t\t\t\"namespace\": service_account.metadata.namespace,\n\t\t\t\t\"kind\": service_account.kind,\n\t\t\t\t\"relatedObjects\": [role, rolebinding]}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service account: %v has the following permissions in the cluster: %v\", [service_account_name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [\"\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": savector\n\t\t}\n\t}\n}\n\n# Returns the rbac permission of each service account\ndeny[msga] {\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n service_account_name := service_account.metadata.name\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == service_account_name\n rolesubject.namespace == service_account.metadata.namespace\n\n roles := [role | role = input[_]; role.kind == \"ClusterRole\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n savector = {\"name\": service_account.metadata.name,\n\t\t\t\t\"namespace\": service_account.metadata.namespace,\n\t\t\t\t\"kind\": service_account.kind,\n\t\t\t\t\"relatedObjects\": [role, rolebinding]}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service account: %v has the following permissions in the cluster: %v\", [service_account_name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [\"\"],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": savector\n\t\t}\n\t}\n}", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "ServiceAccount" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - }, - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "RoleBinding", - "ClusterRoleBinding", - "Role", - "ClusterRole" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "determines which service 
accounts can be used to access other resources in the cluster", - "remediation": "", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "automount-default-service-account", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n\tservice_account.metadata.name == \"default\"\n result := is_auto_mount(service_account)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n\n # -- ---- For SAs -- ---- \nis_auto_mount(service_account) = [failed_path, fix_path] {\n\tservice_account.automountServiceAccountToken == true\n\tfailed_path = \"automountServiceAccountToken\"\n\tfix_path = \"\"\n}\n\nis_auto_mount(service_account)= [failed_path, fix_path] {\n\tnot service_account.automountServiceAccountToken == false\n\tnot service_account.automountServiceAccountToken == true\n\tfix_path = {\"path\": \"automountServiceAccountToken\", \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n", - "resourceEnumerator": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n\tservice_account.metadata.name == \"default\"\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ServiceAccount" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "fails if default service account mounts service account token by default", - "remediation": "Make sure that the automountServiceAccountToken field on the default service account spec is set to false", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "enforce-kubelet-client-tls-authentication-updated", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "creationTime": "", - "rule": "package armo_builtins\n\n#CIS 4.2.3 https://workbench.cisecurity.org/sections/1126668/recommendations/1838643\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, 
\"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.x509.clientCAFile\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [\"authentication.x509.clientCAFile\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tnot contains(command, \"--config\")\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": {\"cmdLine\": command},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "Determines if kubelet client tls authentication is enabled.", - "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "secret-etcd-encryption-cloud", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\n\n# Check if encryption in etcd in enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\t\n\tconfig = cluster_config.data\n\n\tis_not_encrypted_EKS(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"eksctl utils enable-secrets-encryption --cluster= --key-arn=arn:aws:kms:::key/ --region=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n\n# Check if 
encryption in etcd in enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n\n\tnot is_encrypted_GKE(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [\"data.database_encryption.state\"],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"gcloud container clusters update --region= --database-encryption-key=/locations//keyRings//cryptoKeys/ --project=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == \"1\"\n}\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == \"ENCRYPTED\"\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n goodResources := [resource | resource = cluster_config.Cluster.EncryptionConfig.Resources[_]; resource == \"secrets\"]\n\tcount(goodResources) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcluster_config.Cluster.EncryptionConfig == null\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcount(cluster_config.Cluster.EncryptionConfig) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n count(encryptionConfig.Resources) == 0\n}", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "container.googleapis.com", "eks.amazonaws.com" ], "apiVersions": [ @@ -12568,551 +25713,50 @@ } ], "ruleDependencies": [], - "configInputs": null, "controlConfigInputs": null, "description": "", "remediation": "", "ruleQuery": "armo_builtins", "relevantCloudProviders": [ - "EKS", - "GKE" + "EKS" ] }, { "guid": "", - "name": "kubelet-ip-tables", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, + "name": "endpointslice-in-default-namespace", "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.7 https://workbench.cisecurity.org/sections/1126668/recommendations/1838651\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--make-iptables-util-chains\")\n\tnot contains(command, \"--make-iptables-util-chains=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --make-iptables-util-chains is not set to true.\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--make-iptables-util-chains\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.makeIPTablesUtilChains == true\n\n\tmsga := {\n\t\t\"alertMessage\": \"Property makeIPTablesUtilChains is not set to true\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [\"makeIPTablesUtilChains\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--make-iptables-util-chains\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "Ensures that the --make-iptables-util-chains argument is set to true.", - "remediation": "Set --make-iptables-util-chains to true or if using a config file set the makeIPTablesUtilChains as true", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "instance-metadata-api-access", - "attributes": { - "m$K8sThreatMatrix": "Credential Access::Instance Metadata API", - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "creationTime": "", - "rule": "package armo_builtins\n\n\ndeny[msg] {\n\tobj = input[_]\n\tis_cloud_provider_info(obj)\n\n\tobj.data.providerMetaDataAPIAccess == true\n\n\n\tmsg := {\n\t\t\"alertMessage\": sprintf(\"Node '%s' has access to Instance Metadata Services of cloud provider.\", [obj.metadata.name]),\n\t\t\"alert\": true,\n\t\t\"alertScore\": 1,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": obj\n\t\t},\n\t\t\"packagename\": \"armo_builtins\"\n\t}\n\n}\n\n\n\nis_cloud_provider_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"cloudProviderInfo\"\n}", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "cloudProviderInfo" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "Checks if there is access from the nodes to cloud prividers instance metadata services", - "remediation": "", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "rule-can-delete-k8s-events-v1", - "attributes": { - "microsoftK8sThreatMatrix": "Defense Evasion::Delete K8S events", - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can delete events\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := 
subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"delete\", \"deletecollection\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"events\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can delete events\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n", "resourceEnumerator": "", "ruleLanguage": "Rego", "match": [ { "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "determines which users can delete events", - "remediation": "", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "ensure-that-the-kubeconfig-kubelet.conf-file-ownership-is-set-to-root-root", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": 
"true" - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport data.cautils as cautils\nimport future.keywords.in\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"kubeConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "configInputs": null, - "controlConfigInputs": null, - "description": "Ensure that the `kubelet.conf` file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the each worker node. 
For example,\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "rule-can-list-get-secrets-v1", - "attributes": { - "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can list/get secrets \ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"get\", \"list\", \"watch\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"secrets\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can read secrets\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "determines which users can list/get secrets", - "remediation": "", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "rule-secrets-in-env-var", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\nimport data\n\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\n\tcontainer := pod.spec.containers[i]\n\tenv := container.env[j]\n\tenv.valueFrom.secretKeyRef\n\n\tpath := sprintf(\"spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has secrets in environment variables\", [pod.metadata.name]),\n\t\t\"alertScore\": 
9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tenv := container.env[j]\n\tenv.valueFrom.secretKeyRef\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\t\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has secrets in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tenv := container.env[j]\n\tenv.valueFrom.secretKeyRef\n\t\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v has secrets in environment variables\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" + "discovery.k8s.io" ], "apiVersions": [ "v1" ], "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" + "EndpointSlice" ] } ], "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "fails if Pods have secrets in environment variables", - "remediation": "If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables.", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "rule-can-create-pod-kube-system-v1", - "attributes": { - "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user has create access to pods within kube-system namespace\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\tcan_create_to_pod_namespace(rolebinding)\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in 
api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can create pods in kube-system\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# 1. rolebinding in kubesystem ns + role in kubesystem ns\n# 2. rolebinding in kubesystem ns + clusterrole\ncan_create_to_pod_namespace(rolebinding) {\n\trolebinding.metadata.namespace == \"kube-system\"\n}\n\n# 3. clusterrolebinding + clusterrole\ncan_create_to_pod_namespace(rolebinding) {\n\trolebinding.kind == \"ClusterRoleBinding\"\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "determines which users can create pods in kube-system namespace", - "remediation": "", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "exposed-sensitive-interfaces-v1", - "attributes": { - "microsoftK8sThreatMatrix": "Initial access::Exposed sensitive interfaces", - "armoBuiltin": true, - "useFromKubescapeVersion": "v1.0.133" - }, - "creationTime": "", - "rule": "package armo_builtins\nimport data.kubernetes.api.client as client\nimport data\n\n# loadbalancer\ndeny[msga] {\n\twl := input[_]\n\tworkload_types = {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\", \"Pod\", \"CronJob\"}\n\tworkload_types[wl.kind]\n\n # see default-config-inputs.json for list values\n wl_names := data.postureControlInputs.sensitiveInterfaces\n\twl_name := wl_names[_]\n\tcontains(wl.metadata.name, wl_name)\n\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"LoadBalancer\"\n\n\tresult := wl_connectedto_service(wl, service)\n \n # externalIP := service.spec.externalIPs[_]\n\texternalIP := service.status.loadBalancer.ingress[0].ip\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": [service]}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": wlvector\n\t\t}\n\t}\n}\n\n\n# nodePort\n# get a 
pod connected to that service, get nodeIP (hostIP?)\n# use ip + nodeport\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"Pod\"\n \n # see default-config-inputs.json for list values\n wl_names := data.postureControlInputs.sensitiveInterfaces\n\twl_name := wl_names[_]\n\tcontains(wl.metadata.name, wl_name)\n \n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"NodePort\"\n\n\tresult := wl_connectedto_service(wl, service)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": [service]}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": wlvector\n\t\t}\n\t}\n} \n\n# nodePort\n# get a workload connected to that service, get nodeIP (hostIP?)\n# use ip + nodeport\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\", \"CronJob\"}\n\tspec_template_spec_patterns[wl.kind]\n \n # see default-config-inputs.json for list values\n wl_names := data.postureControlInputs.sensitiveInterfaces\n\twl_name := wl_names[_]\n\tcontains(wl.metadata.name, wl_name)\n \n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"NodePort\"\n\n\tresult := wl_connectedto_service(wl, service)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": [service]}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": wlvector\n\t\t}\n\t}\n}\n\n# ====================================================================================\n\nwl_connectedto_service(wl, service) = paths{\n\tcount({x | service.spec.selector[x] == wl.metadata.labels[x]}) == count(service.spec.selector)\n\tpaths = [\"spec.selector.matchLabels\", \"service.spec.selector\"]\n}\n\nwl_connectedto_service(wl, service) = paths {\n\twl.spec.selector.matchLabels == service.spec.selector\n\tpaths = [\"spec.selector.matchLabels\", \"service.spec.selector\"]\n}", - "resourceEnumerator": "package armo_builtins\nimport data.kubernetes.api.client as client\nimport data\n\ndeny[msga] {\n\twl := input[_]\n\tworkload_types = {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\", \"Pod\", \"CronJob\"}\n\tworkload_types[wl.kind]\n\n\t# see default-config-inputs.json for list values\n\twl_names := data.postureControlInputs.sensitiveInterfaces\n\twl_name := wl_names[_]\n\tcontains(wl.metadata.name, wl_name)\n\n\tsrvc := get_wl_connectedto_service(wl)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": srvc}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"wl: %v is in the cluster\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [\"\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n\t}\n}\n\nget_wl_connectedto_service(wl) = s {\n\tservice := 
\tinput[_]\n\tservice.kind == \"Service\"\n\twl_connectedto_service(wl, service)\n\ts = [service]\n}\n\nget_wl_connectedto_service(wl) = s {\n\tservices := [service | service = input[_]; service.kind == \"Service\"]\n\tcount({i | services[i]; wl_connectedto_service(wl, services[i])}) == 0\n\ts = []\n}\n\nwl_connectedto_service(wl, service){\n\tcount({x | service.spec.selector[x] == wl.metadata.labels[x]}) == count(service.spec.selector)\n}", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "Service" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "kubernetes.api.client" - } - ], - "configInputs": [ - "settings.postureControlInputs.sensitiveInterfaces" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.sensitiveInterfaces", - "name": "Sensitive interfaces", - "description": "The following interfaces were seen exploited. Kubescape checks it they are externally exposed." - } - ], - "description": "fails if known interfaces have exposed services", - "remediation": "", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "ensure-that-the-scheduler-pod-specification-file-permissions-are-set-to-600-or-more-restrictive", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport data.cautils as cautils\nimport future.keywords.in\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"schedulerInfo\", \"specsFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test \n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "configInputs": null, - "controlConfigInputs": null, - "description": "Ensure that the scheduler pod specification file has permissions of `600` or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "etcd-encryption-native", - "attributes": { - "armoBuiltin": true, - "resourcesAggregator": "apiserver-pod", - "useFromKubescapeVersion": "v1.0.133" - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport data.cautils as cautils\n\n# Check if encryption in etcd is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n\tcmd := apiserverpod.spec.containers[0].command\n\tenc_command := [command | command := cmd[_]; contains(command, \"--encryption-provider-config=\")]\n\tcount(enc_command) < 1\n\tpath := \"spec.containers[0].command\"\n\n\tmsga := {\n\t\t\"alertMessage\": \"etcd encryption is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [apiserverpod]},\n\t}\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, "controlConfigInputs": null, "description": "", "remediation": "", "ruleQuery": "armo_builtins", "relevantCloudProviders": null }, - { - "guid": "", - "name": "ensure-that-the-api-server-client-ca-file-argument-is-set-as-appropriate", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"API server communication is not encrypted properly\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--client-ca-file\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--client-ca-file=\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "Setup TLS connection on the API server.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection on the apiserver. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the client certificate authority file.\n\n \n```\n--client-ca-file=\n\n```\n\n#### Impact Statement\nTLS and client certificate authentication must be configured for your Kubernetes cluster deployment.\n\n#### Default Value\nBy default, `--client-ca-file` argument is not set.", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "access-container-service-account", - "attributes": { - "m$K8sThreatMatrix": "Credential Access::Access container service account, Lateral Movement::Container service account", - "armoBuiltin": true, - "useUntilKubescapeVersion": "v1.0.133" - }, - "creationTime": "", - "rule": "package armo_builtins\n\n\n# Returns for each Pod, what are the permission of its service account\n\ndeny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n \n pods := [pod | pod=input[_]; pod.kind ==\"Pod\"]\n pod := pods[_]\n pod.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, pod)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"Role\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has the following permissions in the cluster: %v\", [pod.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [\"\"],\n\t\t\"alertScore\": 7,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, pod]\n\t\t}\n\t}\n}\n\n# Returns for each Pod, what are the permission of its service account\n deny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n pods := [pod | pod=input[_]; pod.kind ==\"Pod\"]\n pod := pods[_]\n pod.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, pod)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"ClusterRole\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has the following permissions in the cluster: %v\", [pod.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [\"\"],\n\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [rolebinding, role, pod]\n\t\t}\n\t}\n}\n\n# Returns for each Pod, what are the permission of its service account\n\n deny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n pods := [pod | pod=input[_]; pod.kind ==\"Pod\"]\n pod := pods[_]\n pod.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, pod)\n\n rolebindings := 
[rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"ClusterRole\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has the following permissions in the cluster: %v\", [pod.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [\"\"],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, pod]\n\t\t}\n\t}\n}\n\n\n\n\n### ---------------- #####\n\n \n\n# Returns for each Workloads, what are the permission of its service account\ndeny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\n wl.spec.template.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, wl.spec.template)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"Role\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has the following permissions in the cluster: %v\", [wl.kind, wl.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [\"\"],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, wl]\n\t\t}\n\t}\n}\n\n\n# Returns for each Workloads, what are the permission of its service account\ndeny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\n wl.spec.template.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, wl.spec.template)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"ClusterRole\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has the following permissions in the cluster: %v\", [wl.kind, wl.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [\"\"],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, wl]\n\t\t}\n\t}\n}\n\n\n\n# Returns for each Workloads, what are the permission of its service account\ndeny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n wl := 
input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\n wl.spec.template.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, wl.spec.template)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"ClusterRole\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has the following permissions in the cluster: %v\", [wl.kind, wl.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [\"\"],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, wl]\n\t\t}\n\t}\n}\n\n\n\n\n### ---------------- #####\n\n\n# Returns for each Cronjob, what are the permission of its service account\n\ndeny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\twl.spec.jobTemplate.spec.template.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, wl.spec.jobTemplate.spec.template)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"Role\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v has the following permissions in the cluster: %v\", [wl.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [\"\"],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, wl]\n\t\t}\n\t}\n}\n\n\n\n# Returns for each Cronjob, what are the permission of its service account\ndeny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\twl.spec.jobTemplate.spec.template.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, wl.spec.jobTemplate.spec.template)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"ClusterRole\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v has the following permissions in the cluster: %v\", [wl.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [\"\"],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, wl]\n\t\t}\n\t}\n}\n\n\n# Returns for each Cronjob, what are the permission of its service account\ndeny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == 
\"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\twl.spec.jobTemplate.spec.template.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, wl.spec.jobTemplate.spec.template)\n \n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"ClusterRole\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v has the following permissions in the cluster: %v\", [wl.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [\"\"],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, wl]\n\t\t}\n\t}\n}\n\n# ===============================================================\n\nisNotAutoMount(serviceaccount, pod) {\n pod.spec.automountServiceAccountToken == false\n}\nisNotAutoMount(serviceaccount, pod) {\n serviceaccount.automountServiceAccountToken == false\n not pod.spec[\"automountServiceAccountToken\"]\n}\n\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "ServiceAccount" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - }, - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "RoleBinding", - "ClusterRoleBinding", - "Role", - "ClusterRole" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "determines which service accounts can be used to access other resources in the cluster", - "remediation": "", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, { "guid": "", "name": "rbac-enabled-native", "attributes": { - "armoBuiltin": true, "resourcesAggregator": "apiserver-pod", "useFromKubescapeVersion": "v1.0.133" }, "creationTime": "", - "rule": "package armo_builtins\n\n\n# Check if psp is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command[j]\n contains(cmd, \"--authorization-mode=\")\n output := split(cmd, \"=\")\n not contains(output[1], \"RBAC\")\n\tpath := sprintf(\"spec.containers[0].command[%v]\", [format_int(j, 10)])\t\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"RBAC is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\t\t}\n\t}\n}", + "rule": "package armo_builtins\n\n\n# Check if psp is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command[j]\n contains(cmd, \"--authorization-mode=\")\n output := split(cmd, \"=\")\n not contains(output[1], \"RBAC\")\n\tpath := sprintf(\"spec.containers[0].command[%v]\", [format_int(j, 10)])\t\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"RBAC is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\t\t}\n\t}\n}", "resourceEnumerator": "", "ruleLanguage": "Rego", "match": [ @@ -13129,7 +25773,6 @@ } ], "ruleDependencies": [], - "configInputs": null, "controlConfigInputs": null, "description": "", "remediation": "", @@ -13138,1037 +25781,12 @@ }, { "guid": "", - "name": "rule-excessive-delete-rights-v1", + "name": "ensure-that-the-kubelet-configuration-file-ownership-is-set-to-root-root", "attributes": { - "m$K8sThreatMatrix": "Impact::Data Destruction", - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can can delete important resources\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"delete\", \"deletecollection\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\", \"apps\", \"batch\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"secrets\", \"pods\", \"services\", \"deployments\", \"replicasets\", \"daemonsets\", \"statefulsets\", \"jobs\", \"cronjobs\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can delete important resources\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "fails if user can delete important resources", - "remediation": "", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "enforce-kubelet-client-tls-authentication", - "attributes": { 
- "armoBuiltin": true, "hostSensorRule": "true" }, "creationTime": "", - "rule": "package armo_builtins\nimport data.kubernetes.api.client as client\n\n# Both config and cli present\ndeny[msga] {\n\t\tkubelet_config := input[_]\n\t\tkubelet_config.kind == \"KubeletConfiguration\"\n\t\tkubelet_config.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\n\t\tkubelet_cli := input[_] \n\t\tkubelet_cli.kind == \"KubeletCommandLine\"\n\t\tkubelet_cli.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\t\tkubelet_cli_data := kubelet_cli.data\n\n\t\tresult := is_client_tls_disabled_both(kubelet_config, kubelet_cli_data)\n\t\texternal_obj := result.obj\n\t\tfailed_paths := result.failedPaths\n\t\tfixPaths := result.fixPaths\n\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\t\"alertScore\": 2,\n\t\t\t\"failedPaths\": failed_paths,\n\t\t\t\"fixPaths\": fixPaths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [kubelet_config, kubelet_cli]\n\t\t\t},\n\t\t}\n\t}\n\n\n# Only of them present\ndeny[msga] {\n\t\tresult := is_client_tls_disabled_single(input)\n\t\texternal_obj := result.obj\n\t\tfailed_paths := result.failedPaths\n\t\tfixPaths := result.fixPaths\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\t\"alertScore\": 2,\n\t\t\t\"failedPaths\": failed_paths,\n\t\t\t\"fixPaths\": fixPaths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [external_obj]\n\t\t\t},\n\t\t}\n\t}\n\n# CLI overrides config\nis_client_tls_disabled_both(kubelet_config, kubelet_cli_data) = {\"obj\": obj,\"failedPaths\": [], \"fixPaths\": [{\"path\": \"data.authentication.x509.clientCAFile\", \"value\": \"YOUR_VALUE\"}]} {\n\tnot contains(kubelet_cli_data[\"fullCommand\"], \"client-ca-file\")\n not kubelet_config.data.authentication.x509.clientCAFile\n\tobj = kubelet_config\n}\n\n# Only cli\nis_client_tls_disabled_single(resources) = {\"obj\": obj,\"failedPaths\": [], \"fixPaths\": []} {\n\tkubelet_cli := resources[_] \n\tkubelet_cli.kind == \"KubeletCommandLine\"\n\tkubelet_cli.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\n\tkubelet_config := [config | config = resources[_]; config.kind == \"KubeletConfiguration\"]\n\tcount(kubelet_config) == 0\n\n\tobj = isClientTlsDisabledCli(kubelet_cli)\n\t\n}\n\n# Only config\nis_client_tls_disabled_single(resources) = {\"obj\": obj,\"failedPaths\": [], \"fixPaths\": [{\"path\": \"data.authentication.x509.clientCAFile\", \"value\": \"YOUR_VALUE\"}]} {\n\tkubelet_config := resources[_] \n\tkubelet_config.kind == \"KubeletConfiguration\"\n\tkubelet_config.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\n\tkubelet_cmd := [cmd | cmd = resources[_]; cmd.kind == \"KubeletCommandLine\"]\n\tcount(kubelet_cmd) == 0\n\n\tobj = is_Client_tls_disabled_config(kubelet_config)\n}\n\n\nis_Client_tls_disabled_config(kubelet_config) = obj {\n\tnot kubelet_config.data.authentication.x509.clientCAFile\n\tobj = kubelet_config\n}\n\nisClientTlsDisabledCli(kubelet_cli) = obj {\n\tkubelet_cli_data = kubelet_cli.data\n\tnot contains(kubelet_cli_data[\"fullCommand\"], \"client-ca-file\")\n\tobj = kubelet_cli\n}", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletConfiguration", 
- "KubeletCommandLine" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - }, - { - "packageName": "kubernetes.api.client" - } - ], - "configInputs": null, - "controlConfigInputs": null, - "description": "Determines if kubelet client tls authentication is enabled.", - "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "ensure-that-the-api-server-service-account-key-file-argument-is-set-as-appropriate", - "attributes": { - "useFromKubescapeVersion": "v2.0.159", - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"TLS certificate authority\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--service-account-key-file\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--service-account-key-file=\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "configInputs": null, - "controlConfigInputs": null, - "description": "Explicitly set a service account public key file for service accounts on the apiserver.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--service-account-key-file` parameter to the public key file for service accounts:\n\n \n```\n--service-account-key-file=\n\n```\n\n#### Impact Statement\nThe corresponding private key must be provided to the controller manager. 
You would need to securely maintain the key file and rotate the keys based on your organization's key rotation policy.\n\n#### Default Value\nBy default, `--service-account-key-file` argument is not set.", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "ensure-that-the-scheduler.conf-file-ownership-is-set-to-root-root", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport data.cautils as cautils\nimport future.keywords.in\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"schedulerInfo\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "configInputs": null, - "controlConfigInputs": null, - "description": "Ensure that the `scheduler.conf` file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchown root:root /etc/kubernetes/scheduler.conf\n\n```", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "deny-vuln-image-pods", - "attributes": { - "mitre": "Exploit Public-Facing Application", - "mitreCode": "T1190", - "armoBuiltin": true, - "armoOpa": "true", - "m$K8sThreatMatrix": "Initial Access::Application Vulnerability" - }, - "creationTime": "", - "rule": "package armo_builtins\nimport data.cautils as cautils\n \ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tpath := sprintf(\"spec.containers[%v]\", [format_int(i, 10)])\n res := armo.get_image_scan_summary({\"type\":\"imageTag\",\"value\":container.image,\"size\":1})\n\tscan := res[_]\n\n is_unsafe_image(scan)\n\tscan.containersScanID\n\tvulnerabilities := armo.get_image_scan_details({\"containersScanID\":scan.containersScanID, \"fieldCreteria\":{\"description\":\"RCE|like,Remote Code Execution|like,remote code execution|like,remote command execution|like,Remote Command Execution|like,arbitrary code|like,code execution|like,Arbitrary Code|like,Code Execution|like,code injection|like,Code Injection|like,execute code|like,Execute Code|like,arbitrary command|like,Arbitrary Command|like,arbitrary commands|like,Arbitrary Commands|like,command injection|like,Command Injection|like,command execution|like,Command Execution|like,inject arbitrary commands|like,Inject Arbitrary Commands|like\"} })\n\tcount(vulnerabilities) > 0\n\n\tlabels := pod.metadata.labels\n\tfiltered_labels := json.remove(labels, [\"pod-template-hash\"])\n\tservice := input[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == pod.metadata.namespace\n\tnp_or_lb := {\"NodePort\", \"LoadBalancer\"}\n\tnp_or_lb[service.spec.type]\n\tcautils.is_subobject(service.spec.selector,filtered_labels)\n\n msga := {\n \"alertMessage\": sprintf(\"pod %v/%v has vulnerabilities\", [pod.metadata.namespace,pod.metadata.name]),\n \"alertScore\": 2,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n \"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t},\n\t\t\"externalObjects\": {\n\t\t\t\"vulnerabilities\" : [vulnerabilities]\n\t\t}\n }\n}\n\n# covers most workloads\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tpath := sprintf(\"spec.template.spec.containers[%v]\", [format_int(i, 10)])\n res := armo.get_image_scan_summary({\"type\":\"imageTag\",\"value\":container.image,\"size\":1})\n\tscan := res[_]\n\n is_unsafe_image(scan)\n\tscan.containersScanID\n\tvulnerabilities := armo.get_image_scan_details({\"containersScanID\":scan.containersScanID, \"fieldCreteria\":{\"description\":\"RCE|like,Remote Code Execution|like,remote code execution|like,remote command execution|like,Remote Command Execution|like,arbitrary code|like,code execution|like,Arbitrary Code|like,Code Execution|like,code injection|like,Code Injection|like,execute code|like,Execute Code|like,arbitrary command|like,Arbitrary Command|like,arbitrary commands|like,Arbitrary Commands|like,command injection|like,Command Injection|like,command execution|like,Command Execution|like,inject arbitrary commands|like,Inject Arbitrary Commands|like\"} })\n\tcount(vulnerabilities) > 0\n\n\tlabels := wl.spec.template.metadata.labels\n\tservice := input[_]\n\tservice.kind == 
\"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tnp_or_lb := {\"NodePort\", \"LoadBalancer\"}\n\tnp_or_lb[service.spec.type]\n\tcautils.is_subobject(service.spec.selector,labels)\n\n msga := {\n \"alertMessage\": sprintf(\"%v: %v/%v has vulnerabilities\", [wl.kind, wl.metadata.namespace, wl.metadata.name]),\n \"alertScore\": 2,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n \"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t},\n\t\t\"externalObjects\": {\n\t\t\t\"vulnerabilities\" : [vulnerabilities]\n\t\t}\n }\n}\n\n# covers cronjobs\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v]\", [format_int(i, 10)])\n res := armo.get_image_scan_summary({\"type\":\"imageTag\",\"value\":container.image,\"size\":1})\n\tscan := res[_]\n\n is_unsafe_image(scan)\n\tscan.containersScanID\n\tvulnerabilities := armo.get_image_scan_details({\"containersScanID\":scan.containersScanID, \"fieldCreteria\":{\"description\":\"RCE|like,Remote Code Execution|like,remote code execution|like,remote command execution|like,Remote Command Execution|like,arbitrary code|like,code execution|like,Arbitrary Code|like,Code Execution|like,code injection|like,Code Injection|like,execute code|like,Execute Code|like,arbitrary command|like,Arbitrary Command|like,arbitrary commands|like,Arbitrary Commands|like,command injection|like,Command Injection|like,command execution|like,Command Execution|like,inject arbitrary commands|like,Inject Arbitrary Commands|like\"} })\n\tcount(vulnerabilities) > 0\n\n\tlabels := wl.spec.jobTemplate.spec.template.metadata.labels\n\tservice := input[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tnp_or_lb := {\"NodePort\", \"LoadBalancer\"}\n\tnp_or_lb[service.spec.type]\n\tcautils.is_subobject(service.spec.selector,labels)\n\t\n\n msga := {\n \"alertMessage\": sprintf(\"%v: %v/%v has vulnerabilities\", [wl.kind, wl.metadata.namespace, wl.metadata.name]),\n \"alertScore\": 2,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n \"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t},\n\t\t\"externalObjects\": {\n\t\t\t\"vulnerabilities\" : [vulnerabilities]\n\t\t}\n }\n}\n\n\n#treat as potentially critical\nis_unsafe_image(scanresult) {\n\tscanresult.numOfUnknownSeverity > 0\n}\nis_unsafe_image(scanresult) {\n\tscanresult.numOfNegligibleSeverity > 0\n}\n\nis_unsafe_image(scanresult) {\n\tscanresult.numOfLowSeverity > 0\n}\n\nis_unsafe_image(scanresult) {\n\tscanresult.numOfMeduiumSeverity > 0\n}\n\nis_unsafe_image(scanresult) {\n\tscanresult.numOfHighSeverity > 0\n}\n\nis_unsafe_image(scanresult) {\n\tscanresult.numOfCriticalSeverity > 0\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "Service" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "configInputs": null, - "controlConfigInputs": null, - "description": "determines if pods/deployments has vulnerable image", - "remediation": "Isolate such 
deployments in sandboxes if possible. Otherwise, keep scanning frequently for in case a patch will be available - MAKE SURE it has least privileges as necessary!", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "pods-in-default-namespace", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\", \"Job\", \"CronJob\", \"Pod\"}\n\tspec_template_spec_patterns[wl.kind]\n\tresult := is_default_namespace(wl.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has pods running in the 'default' namespace\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"} \n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "ensure-that-the-admin.conf-file-permissions-are-set-to-600", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport data.cautils as cautils\nimport future.keywords.in\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"adminConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test \n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. 
actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "configInputs": null, - "controlConfigInputs": null, - "description": "Ensure that the `admin.conf` file has permissions of `600`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/admin.conf\n\n```", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "rule-can-impersonate-users-groups", - "attributes": { - "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", - "armoBuiltin": true, - "useUntilKubescapeVersion": "v1.0.133" - }, - "creationTime": "", - "rule": "\npackage armo_builtins\nimport data.cautils as cautils\n\n\ndeny[msga] {\n\t roles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcanImpersonateVerb(rule)\n canImpersonateResource(rule)\n\n\trolebinding.roleRef.kind == \"Role\"\n\trolebinding.roleRef.name == role.metadata.name\n\t\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can impersonate users\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcanImpersonateVerb(rule)\n canImpersonateResource(rule)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\t\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can impersonate users\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcanImpersonateVerb(rule)\n 
canImpersonateResource(rule)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\t\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can impersonate users\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\ncanImpersonateVerb(rule) {\n\t\tcautils.list_contains(rule.verbs, \"impersonate\")\n}\ncanImpersonateVerb(rule) {\n\t\tcautils.list_contains(rule.verbs, \"*\")\n}\n\n\ncanImpersonateResource(rule) {\n\tcautils.list_contains(rule.resources,\"users\")\n}\n\ncanImpersonateResource(rule) {\n\tcautils.list_contains(rule.resources,\"serviceaccounts\")\n}\n\ncanImpersonateResource(rule) {\n\tcautils.list_contains(rule.resources,\"groups\")\n}\n\ncanImpersonateResource(rule) {\n\tcautils.list_contains(rule.resources,\"uids\")\n}\n\ncanImpersonateResource(rule) {\n\tis_api_group(rule)\n\tcautils.list_contains(rule.resources,\"*\")\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "configInputs": null, - "controlConfigInputs": null, - "description": "determines which users can impersonate users/groups", - "remediation": "", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "rule-access-dashboard-subject-v1", - "attributes": { - "m$K8sThreatMatrix": "Lateral Movement::Access Kubernetes dashboard, Discovery::Access Kubernetes dashboard", - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "creationTime": "", - "rule": "package armo_builtins\n\n# input: regoResponseVectorObject\n# fails if a subject that is not dashboard service account is bound to dashboard role\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(subjectVector.relatedObjects[i].kind, \"Role\")\n\tendswith(subjectVector.relatedObjects[j].kind, \"Binding\")\n\n\trole.metadata.name == \"kubernetes-dashboard\"\n\tsubjectVector.name != \"kubernetes-dashboard\"\n\n\tsubject := rolebinding.subjects[k]\n path := [sprintf(\"relatedObjects[%v].subjects[%v]\", [format_int(j, 10), format_int(k, 10)])]\n\tfinalpath := array.concat(path, [sprintf(\"relatedObjects[%v].roleRef.name\", [format_int(j, 10)])])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %v-%v is bound to dashboard role/clusterrole\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector\n\t\t}\n\t}\n}", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Role", - 
"ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "fails if subject that is not dashboard service account is bound to dashboard role/clusterrole, or- if anyone that is not dashboard pod is associated with its service account.", - "remediation": "", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "ensure-that-the-controller-manager-root-ca-file-argument-is-set-as-appropriate", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"the controller manager is not configured to inject the trusted ca.crt file into pods so that they can verify TLS connections to the API server\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--root-ca-file\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--root-ca-file=\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "Allow pods to verify the API server's serving certificate before establishing connections.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--root-ca-file` parameter to the certificate bundle file`.\n\n \n```\n--root-ca-file=\n\n```\n\n#### Impact Statement\nYou need to setup and maintain root certificate authority file.\n\n#### Default Value\nBy default, `--root-ca-file` is not set.", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "kubelet-streaming-connection-idle-timeout", - "attributes": { - "hostSensorRule": "true", - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.5 https://workbench.cisecurity.org/sections/1126668/recommendations/1838646\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\t\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--streaming-connection-idle-timeout\")\n\tcontains(command, 
\"--streaming-connection-idle-timeout=0\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Timeouts on streaming connections are enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": external_obj\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--streaming-connection-idle-timeout\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.streamingConnectionIdleTimeout == 0\n\n\tmsga := {\n\t\t\"alertMessage\": \"Timeouts on streaming connections are enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [\"streamingConnectionIdleTimeout\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}}\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--streaming-connection-idle-timeout\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data\n\t\t}}\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "Determines if a kubelet has not disabled timeouts on streaming connections", - "remediation": "Change value of a --streaming-connection-idle-timeout argument or if using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a value other than 0.", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "kubelet-rotate-certificates", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.11 https://workbench.cisecurity.org/sections/1126668/recommendations/1838658\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--rotate-certificates\")\n\tnot contains(command, \"--rotate-certificates=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Kubelet client certificates rotation is disabled\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": 
external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--rotate-certificates\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.rotateCertificates == false\n\n\tmsga := {\n\t\t\"alertMessage\": \"Kubelet client certificates rotation is disabled\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [\"rotateCertificates\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--rotate-certificates\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "Ensure that the --rotate-certificates argument is not set to false.", - "remediation": "Verify that the --rotate-certificates argument is not present, or is set to true. 
If the --rotate-certificates argument is not present, verify that if there is a Kubelet config file specified by --config, that file does not contain rotateCertificates: false.", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "ingress-and-egress-blocked", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\n\n# For pods\ndeny[msga] {\n \t\tpods := [pod | pod= input[_]; pod.kind == \"Pod\"]\n\t\tnetworkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\t\tpod := pods[_]\n\t\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; pod_connected_to_network_policy(pod, networkpolicie)]\n\t\tcount(network_policies_connected_to_pod) > 0\n goodPolicies := [goodpolicie | goodpolicie= network_policies_connected_to_pod[_]; is_ingerss_egress_policy(goodpolicie)]\n\t\tcount(goodPolicies) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not have ingress/egress defined\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n\n}\n\n# For pods\ndeny[msga] {\n \t\tpods := [pod | pod= input[_]; pod.kind == \"Pod\"]\n\t\tnetworkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\t\tpod := pods[_]\n\t\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; pod_connected_to_network_policy(pod, networkpolicie)]\n\t\tcount(network_policies_connected_to_pod) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not have ingress/egress defined\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n\n}\n\n# For workloads\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; wlConnectedToNetworkPolicy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) > 0\n goodPolicies := [goodpolicie | goodpolicie= network_policies_connected_to_pod[_]; is_ingerss_egress_policy(goodpolicie)]\n\tcount(goodPolicies) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# For workloads\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; wlConnectedToNetworkPolicy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, 
wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# For Cronjobs\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; cronjob_connected_to_network_policy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) > 0\n goodPolicies := [goodpolicie | goodpolicie= network_policies_connected_to_pod[_]; is_ingerss_egress_policy(goodpolicie)]\n\tcount(goodPolicies) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# For Cronjobs\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; cronjob_connected_to_network_policy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}\n\npod_connected_to_network_policy(pod, networkpolicie){\n\tis_same_namespace(networkpolicie.metadata, pod.metadata)\n count(networkpolicie.spec.podSelector) > 0\n count({x | networkpolicie.spec.podSelector.matchLabels[x] == pod.metadata.labels[x]}) == count(networkpolicie.spec.podSelector.matchLabels)\n}\n\npod_connected_to_network_policy(pod, networkpolicie){\n\tis_same_namespace(networkpolicie.metadata ,pod.metadata)\n count(networkpolicie.spec.podSelector) == 0\n}\n\nwlConnectedToNetworkPolicy(wl, networkpolicie){\n\tis_same_namespace(wl.metadata , networkpolicie.metadata)\n count(networkpolicie.spec.podSelector) == 0\n}\n\n\nwlConnectedToNetworkPolicy(wl, networkpolicie){\n\tis_same_namespace(wl.metadata, networkpolicie.metadata)\n\tcount(networkpolicie.spec.podSelector) > 0\n count({x | networkpolicie.spec.podSelector.matchLabels[x] == wl.spec.template.metadata.labels[x]}) == count(networkpolicie.spec.podSelector.matchLabels)\n}\n\n\ncronjob_connected_to_network_policy(cj, networkpolicie){\n\tis_same_namespace(cj.metadata , networkpolicie.metadata)\n count(networkpolicie.spec.podSelector) == 0\n}\n\ncronjob_connected_to_network_policy(cj, networkpolicie){\n\tis_same_namespace(cj.metadata , networkpolicie.metadata)\n\tcount(networkpolicie.spec.podSelector) > 0\n count({x | networkpolicie.spec.podSelector.matchLabels[x] == 
cj.spec.jobTemplate.spec.template.metadata.labels[x]}) == count(networkpolicie.spec.podSelector.matchLabels)\n}\n\nis_ingerss_egress_policy(networkpolicie) {\n list_contains(networkpolicie.spec.policyTypes, \"Ingress\")\n list_contains(networkpolicie.spec.policyTypes, \"Egress\")\n }\n\nlist_contains(list, element) {\n some i\n list[i] == element\n}", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - }, - { - "apiGroups": [ - "networking.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "NetworkPolicy" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "fails if there are no ingress and egress defined for pod", - "remediation": "Make sure you define ingress and egress policies for all your Pods", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "rule-name-similarity", - "attributes": { - "m$K8sThreatMatrix": "Defense evasion::Pod / container name similarity", - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\nimport data\n# import data.cautils as cautils\n# import data.kubernetes.api.client as client\n\n# input: pods\n# apiversion: v1\n# fails if object has similar name to known workload (but is not from that workload)\n\ndeny[msga] {\n\tobject := input[_]\n\twanted_kinds := {\"Pod\", \"ReplicaSet\", \"Job\"}\n\twanted_kinds[object.kind]\n\n\t# see default-config-inputs.json for list values\n wl_known_names := data.postureControlInputs.wlKnownNames\n wl_name := wl_known_names[_]\n contains(object.metadata.name, wl_name)\n\tpath := \"metadata.name\"\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this %v has a similar name to %v\", [object.kind, wl_name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [object]\n\t\t}\n }\n}", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Pod", - "ReplicaSet", - "Job" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.wlKnownNames" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.wlKnownNames", - "name": "Kubescape will look for the following deployment names in your cluster, it will make sure that no one is trying to create similar pod names to hide their attack. 
", - "description": "Deployment names" - } - ], - "description": "fails if there are objects with names similar to system pods, or other known deployments", - "remediation": "", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "linux-hardening", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Fails if pod does not define linux security hardening \ndeny[msga] {\n\tobj := input[_]\n\tfix_paths := is_unsafe_obj(obj)\n\tcount(fix_paths) > 0\n\n\t# final_fix_pathes := array.concat(fix_paths) # -> produce only one failed result\n\tfinal_fix_pathes := fix_paths[_] # -> produce failed result for each container\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%s: %s does not define any linux security hardening\", [obj.kind, obj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": final_fix_pathes,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_unsafe_obj(obj) := fix_paths {\n\tobj.kind == \"Pod\"\n\tfix_paths := are_unsafe_specs(obj, [\"spec\"], [\"metadata\", \"annotations\"])\n} else := fix_paths {\n\tobj.kind == \"CronJob\"\n\tfix_paths := are_unsafe_specs(obj, [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\"], [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"metadata\", \"annotations\"])\n} else := fix_paths {\n\tobj.kind in [\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"]\n\tfix_paths := are_unsafe_specs(obj, [\"spec\", \"template\", \"spec\"], [\"spec\", \"template\", \"metadata\", \"annotations\"])\n}\n\nare_unsafe_specs(obj, specs_path, anotation_path) := paths {\n\t# spec\n\tspecs := object.get(obj, specs_path, null)\n\tspecs != null\n\tare_seccomp_and_selinux_disabled(specs)\n\n\t# annotation\n\tannotations := object.get(obj, anotation_path, [])\n\tapp_armor_annotations := [annotations[i] | annotation = i; startswith(i, \"container.apparmor.security.beta.kubernetes.io\")]\n\tcount(app_armor_annotations) == 0\n\n\t# container\n\tcontainers_path := array.concat(specs_path, [\"containers\"])\n\tcontainers := object.get(obj, containers_path, [])\n\n\t# Psuedo code explanation:\n\t# for i, container in containers\n\t# \t\tif is_unsafe_container:\n\t# \t\t\tfix_paths += [(containers_path[i] + field) for j, field in fix_fields]\n\t# \n\t# At the end we get [[, , ...], ...]\n\tcontainers_fix_path := concat(\".\", containers_path)\n\tfix_fields := [\"seccompProfile\", \"seLinuxOptions\", \"capabilities.drop[0]\"]\n\tpaths := [[{\n\t\t\"path\": sprintf(\"%s[%d].securityContext.%s\", [containers_fix_path, i, field]),\n\t\t\"value\": \"YOUR_VALUE\",\n\t} |\n\t\tfield := fix_fields[j]\n\t] |\n\t\tcontainer = containers[i]\n\t\tis_unsafe_container(container)\n\t]\n\n\tcount(paths) > 0\n}\n\nare_seccomp_and_selinux_disabled(obj) {\n\tnot obj.securityContext.seccompProfile\n\tnot obj.securityContext.seLinuxOptions\n}\n\nis_unsafe_container(container) {\n\tare_seccomp_and_selinux_disabled(container)\n\tnot container.securityContext.capabilities.drop\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - 
"resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "fails if container does not define any linux security hardening", - "remediation": "Make sure you define at least one linux security hardening property out of Seccomp, SELinux or Capabilities.", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "ensure-that-the-api-server-token-auth-file-parameter-is-not-set", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"API server TLS is not configured\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tre := \" ?--token-auth-file=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd[i], -1)\n\tcount(matchs) > 0\n\tfixed = replace(cmd[i], matchs[0][0], \"\")\n\tresult = get_result(sprintf(\"spec.containers[0].command[%d]\", [i]), fixed)\n}\n\n# Get fix and failed paths\nget_result(path, fixed) = result {\n\tfixed == \"\"\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [],\n\t}\n}\n\nget_result(path, fixed) = result {\n\tfixed != \"\"\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed,\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "Do not use token based authentication.", - "remediation": "Follow the documentation and configure alternate mechanisms for authentication. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and remove the `--token-auth-file=` parameter.\n\n#### Impact Statement\nYou will have to configure and use alternate authentication mechanisms such as certificates. 
Static token based authentication could not be used.\n\n#### Default Value\nBy default, `--token-auth-file` argument is not set.", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "ensure-that-the-etcd-data-directory-permissions-are-set-to-700-or-more-restrictive", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport data.cautils as cautils\nimport future.keywords.in\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"etcdDataDir\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test \n\tallowed_perms := 448 # == 0o700\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "configInputs": null, - "controlConfigInputs": null, - "description": "Ensure that the etcd data directory has permissions of `700` or more restrictive.", - "remediation": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). 
For example,\n\n \n```\nchmod 700 /var/lib/etcd\n\n```", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "ensure-that-the-API-server-pod-specification-file-ownership-is-set-to-root-root", - "attributes": { - "hostSensorRule": "true", - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport data.cautils as cautils\nimport future.keywords.in\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"APIServerInfo\", \"specsFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "configInputs": null, - "controlConfigInputs": null, - "description": "Ensure that the API server pod specification file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "exec-into-container-v1", - "attributes": { - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133", - "m$K8sThreatMatrix": "Privilege Escalation::Exec into container", - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# input: regoResponseVectorObject\n# returns subjects that can exec into container\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods/exec\", \"pods/*\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can exec into containers\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "RoleBinding", - "ClusterRoleBinding", - "Role", - "ClusterRole" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "determines which users have permissions to exec into pods", - "remediation": "", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "rule-can-ssh-to-pod", - "attributes": { - "microsoftK8sThreatMatrix": "Execution::SSH server running inside container", - "armoBuiltin": true, - "useUntilKubescapeVersion": "v1.0.133" - }, - "creationTime": "", - "rule": "package armo_builtins\n\n# input: pod\n# apiversion: v1\n# does:\treturns the external facing services of that pod\n\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tpodns := pod.metadata.namespace\n\tpodname := pod.metadata.name\n\tlabels := 
pod.metadata.labels\n\tfiltered_labels := json.remove(labels, [\"pod-template-hash\"])\n path := \"metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == podns\n\tservice.spec.selector == filtered_labels\n \n\thasSSHPorts(service)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod %v/%v exposed by SSH services: %v\", [podns, podname, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod,service]\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tlabels := wl.spec.template.metadata.labels\n path := \"spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\thasSSHPorts(service)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl,service]\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tlabels := wl.spec.jobTemplate.spec.template.metadata.labels\n path := \"spec.jobTemplate.spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\thasSSHPorts(service)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl,service]\n\t\t}\n }\n}\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.port == 22\n}\n\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.port == 2222\n}\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.targetPort == 22\n}\n\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.targetPort == 2222\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "Service" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "denies pods with SSH ports opened(22/222)", - "remediation": "", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "ensure-that-the-Container-Network-Interface-file-permissions-are-set-to-600-or-more-restrictive", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport data.cautils as cautils\nimport future.keywords.in\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"CNIConfigFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := 
files[file_index]\n\n\t# Actual permissions test \n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "configInputs": null, - "controlConfigInputs": null, - "description": "Ensure that the Container Network Interface files have permissions of `600` or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 \n\n```", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "insecure-port-flag", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\nimport data.cautils as cautils\n\n# Fails if pod has insecure-port flag enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontains(pod.metadata.name, \"kube-apiserver\")\n container := pod.spec.containers[i]\n\tpath = is_insecure_port_flag(container, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"The API server container: %v has insecure-port flag enabled\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\t\nis_insecure_port_flag(container, i) = path {\n\tcommand := container.command[j]\n\tcontains(command, \"--insecure-port=1\")\n\tpath := sprintf(\"spec.containers[%v].command[%v]\", [format_int(i, 10), format_int(j, 10)])\n}", - "resourceEnumerator": "package armo_builtins\nimport data.cautils as cautils\n\n# Fails if pod has insecure-port flag enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontains(pod.metadata.name, \"kube-apiserver\")\n container := pod.spec.containers[_]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"The API server container: %v has insecure-port flag enabled\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [\"\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "fails if the api server has insecure-port enabled", - "remediation": "Make sure that the insecure-port flag of the api server is 
set to 0", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "ensure-that-the-scheduler-profiling-argument-is-set-to-false", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_scheduler(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"profiling is enabled for the kube-scheduler\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_scheduler(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-scheduler\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcmd[i] == \"--profiling=true\"\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--profiling=false\"}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--profiling\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--profiling=false\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_scheduler(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_scheduler(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-scheduler\")\n}\n", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "Disable profiling, if not needed.", - "remediation": "Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` file on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```\n\n#### Impact Statement\nProfiling information would not be available.\n\n#### Default Value\nBy default, profiling is enabled.", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "rule-credentials-configmap", - "attributes": { - "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files", - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n# import data.cautils as cautils\n# import data.kubernetes.api.client as client\nimport data\n\n# fails if config map has keys with suspicious name\ndeny[msga] {\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n # see default-config-inputs.json for list values\n sensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n key_name := sensitive_key_names[_]\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n \n contains(lower(map_key), lower(key_name))\n # check that value wasn't allowed by user\n not 
is_allowed_value(map_secret)\n \n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - not base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n regex.match(value , map_secret)\n # check that value wasn't allowed by user\n not is_allowed_value(map_secret)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n decoded_secret := base64.decode(map_secret)\n \n # check that value wasn't allowed by user\n not is_allowed_value(map_secret)\n\n regex.match(value , decoded_secret)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n value == allow_val\n}", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "ConfigMap" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.sensitiveValues", - "settings.postureControlInputs.sensitiveKeyNames", - "settings.postureControlInputs.sensitiveValuesAllowed" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.sensitiveValues", - "name": "Values", - "description": "Secrets are stored as a key/value pair. The names of the keys/values may change from one company to the other. Below you can find some examples of popular value phrases that Kubescape is searching for" - }, - { - "path": "settings.postureControlInputs.sensitiveKeyNames", - "name": "Keys", - "description": "Secrets are stored as a key/value pair. The names of the keys/values may change from one company to the other. 
Here you can find some examples of popular key phrases that Kubescape is searching for" - }, - { - "path": "settings.postureControlInputs.sensitiveValuesAllowed", - "name": "AllowedValues", - "description": "Allowed values" - } - ], - "description": "fails if ConfigMaps have sensitive information in configuration", - "remediation": "", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "if-the-kubelet-config.yaml-configuration-file-is-being-used-validate-file-ownership-is-set-to-root-root", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport data.cautils as cautils\nimport future.keywords.in\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not 
found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n", "resourceEnumerator": "", "ruleLanguage": "Rego", "match": [], @@ -14190,7 +25808,6 @@ "packageName": "cautils" } ], - "configInputs": null, "controlConfigInputs": null, "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file is owned by root:root.", "remediation": "Run the following command (using the config file location identied in the Audit step)\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", @@ -14199,165 +25816,9 @@ }, { "guid": "", - "name": "validate-kubelet-tls-configuration-updated", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, + "name": "ensure-that-the-admission-control-plugin-AlwaysAdmit-is-not-set", "creationTime": "", - "rule": "package armo_builtins\n\n#CIS 4.2.10 https://workbench.cisecurity.org/sections/1126668/recommendations/1838657\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--config\")\n\n\tres := not_set_arguments(command)\n\tcount(res) != 0\n\n\tfailed_args := extract_failed_object(res, \"cliArg\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v should be set\", [failed_args]),\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--config\")\n\n\tres := not_set_arguments(command)\n\tcount(res) == 2\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\n\tpropsResult := not_set_props(yamlConfig)\n\tcount(propsResult) != 0\n\n\tfailed_props := extract_failed_object(propsResult, \"configProp\")\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v must be set\", [failed_props]),\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--config\")\n\n\t# only 1 argument is set via cli\n\tres := not_set_arguments(command)\n\tcount(res) == 1\n\n\t#get yaml config equivalent\n\tnot_set_prop := res[0].configProp\n\n\tfailed_args := extract_failed_object(res, \"cliArg\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\n\tnot yamlConfig[not_set_prop]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v should be set\", [failed_args]),\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\nextract_failed_object(resultList, keyField) = failed_objects {\n\tfailed_objects_array = [mapped 
|\n\t\tsingleResult := resultList[_]\n\t\tmapped := singleResult[keyField]\n\t]\n\n\tfailed_objects = concat(\", \", failed_objects_array)\n}\n\nnot_set_arguments(cmd) = result {\n\twanted = [\n\t\t[\"--tls-cert-file\", \"tlsCertFile\"],\n\t\t[\"--tls-private-key-file\", \"tlsPrivateKeyFile\"],\n\t]\n\n\tresult = [{\n\t\t\"cliArg\": wanted[i][0],\n\t\t\"configProp\": wanted[i][1],\n\t} |\n\t\tnot contains(cmd, wanted[i][0])\n\t]\n}\n\nnot_set_props(yamlConfig) = result {\n\twanted = [\n\t\t[\"tlsCertFile\", \"--tls-cert-file\"],\n\t\t[\"tlsPrivateKeyFile\", \"--tls-private-key-file\"],\n\t]\n\n\tresult = [{\n\t\t\"cliArg\": wanted[i][1],\n\t\t\"configProp\": wanted[i][0],\n\t} |\n\t\tnot yamlConfig[wanted[i][0]]\n\t]\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletConfiguration", - "KubeletCommandLine" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate.", - "remediation": "Start the kubelet with the --tls-cert-file and --tls-private-key-file flags, providing the X509 certificate and its matching private key or if using config file set tlsCertFile and tlsPrivateKeyFile properties to the locations of the corresponding files.", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "ensure-that-the-controller-manager-pod-specification-file-ownership-is-set-to-root-root", - "attributes": { - "hostSensorRule": "true", - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport data.cautils as cautils\nimport future.keywords.in\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"controllerManagerInfo\", \"specsFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [], - 
"dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "configInputs": null, - "controlConfigInputs": null, - "description": "Ensure that the controller manager pod specification file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "ensure-that-the-etcd-pod-specification-file-ownership-is-set-to-root-root", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport data.cautils as cautils\nimport future.keywords.in\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"etcdConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "configInputs": null, - "controlConfigInputs": null, - "description": "Ensure that the `/etc/kubernetes/manifests/etcd.yaml` file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/etcd.yaml\n\n```", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "kubelet-strong-cryptographics-ciphers", - "attributes": { - "hostSensorRule": "true", - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.13 https://workbench.cisecurity.org/sections/1126668/recommendations/1838663\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--tls-cipher-suites\")\n\n\tnot has_strong_cipher_set_via_cli(command)\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Kubelet is not configured to only use strong cryptographic ciphers\",\n\t\t\"alertScore\": 5,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--tls-cipher-suites\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.TLSCipherSuites\n\n\tnot is_value_in_strong_cliphers_set(yamlConfig.TLSCipherSuites)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Kubelet is not configured to only use strong cryptographic ciphers\",\n\t\t\"alertScore\": 5,\n\t\t\"failedPaths\": [\"TLSCipherSuites\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--tls-cipher-suites\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Kubelet is not configured to only use strong cryptographic ciphers\",\n\t\t\"alertScore\": 5,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\nhas_strong_cipher_set_via_cli(command) {\n\tcontains(command, \"--tls-cipher-suites=\")\n\n\tstrong_cliphers := [\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_RSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_RSA_WITH_AES_128_GCM_SHA256\",\n\t]\n\n\tsome i\n\tcontains(command, sprintf(\"%v%v\", [\"--tls-cipher-suites=\", strong_cliphers[i]]))\n}\n\nis_value_in_strong_cliphers_set(value) {\n\tstrong_cliphers := [\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_RSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_RSA_WITH_AES_128_GCM_SHA256\",\n\t]\n\n\tsome 
x\n\tstrong_cliphers[x] == value\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "Determines if the Kubelet is configured to only use strong cryptographic ciphers.", - "remediation": "Change --tls-cipher-suites value of TLSCipherSuites property of config file to use strong cryptographics ciphers", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "ensure-that-the-api-server-audit-log-path-argument-is-set", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"kubernetes API Server is not audited\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--audit-log-path\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--audit-log-path=/var/log/apiserver/audit.log\",\n\t\t}],\n\t}\n}\n", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"admission control plugin AlwaysAdmit is enabled. 
This is equal to turning off all admission controllers\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--enable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\t\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\t\"AlwaysAdmit\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := [val | val := flag.values[j]; val != \"AlwaysAdmit\"]\n\tresult = get_retsult(fixed_values, i)\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) == 0\n\tresult = {\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) > 0\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": sprintf(\"--enable-admission-plugins=%v\", [concat(\",\", fixed_values)]),\n\t\t}],\n\t}\n}\n", "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", "ruleLanguage": "Rego", "match": [ @@ -14374,906 +25835,155 @@ } ], "ruleDependencies": [], - "configInputs": null, "controlConfigInputs": null, - "description": "Enable auditing on the Kubernetes API Server and set the desired audit log path.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-path` parameter to a suitable path and file where you would like audit logs to be written, for example:\n\n \n```\n--audit-log-path=/var/log/apiserver/audit.log\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, auditing is not enabled.", + "description": "Do not allow all requests.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and either remove the `--enable-admission-plugins` parameter, or set it to a value that does not include `AlwaysAdmit`.\n\n#### Impact Statement\nOnly requests explicitly allowed by the admissions control plugins would be served.\n\n#### Default Value\n`AlwaysAdmit` is not in the list of default admission plugins.", "ruleQuery": "", "relevantCloudProviders": null }, { "guid": "", - "name": "rule-deny-cronjobs", - "attributes": { - "m$K8sThreatMatrix": "Persistence::Kubernetes Cronjob", - "armoBuiltin": true - }, + "name": "list-role-definitions-in-acr", 
"creationTime": "", - "rule": "package armo_builtins\n\n# alert cronjobs\n\n#handles cronjob\ndeny[msga] {\n\n\twl := input[_]\n\twl.kind == \"CronJob\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined: %v\", [wl.metadata.name]),\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [\"\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "determines if it's cronjob", - "remediation": "", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "ensure-that-the-client-certificate-authorities-file-ownership-is-set-to-root-root", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport data.cautils as cautils\nimport future.keywords.in\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"clientCAFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n", + "rule": "package armo_builtins\n\n# return ListEntitiesForPolicies resource in azure\ndeny[msg] {\n\tresources := input[_]\n\tresources.kind == \"ListEntitiesForPolicies\"\n\tresources.apiVersion == \"management.azure.com/v1\"\n\tresources.metadata.provider == \"aks\"\n\n\tmsg := {\n\t\t\"alertMessage\": \"\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": resources\n\t\t}\n\t}\n}\n", "resourceEnumerator": "", "ruleLanguage": "Rego", - "match": [], + "match": null, "dynamicMatch": [ { "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "configInputs": null, - "controlConfigInputs": null, - "description": "Ensure that the certificate authorities file ownership is set to `root:root`.", - "remediation": "Run the following 
command to modify the ownership of the `--client-ca-file`.\n\n \n```\nchown root:root \n\n```", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "ensure-that-the-api-server-kubelet-certificate-authority-argument-is-set-as-appropriate", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"TLS certificate authority file is not specified\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--kubelet-certificate-authority\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--kubelet-certificate-authority=\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" + "management.azure.com" ], "apiVersions": [ "v1" ], "resources": [ - "Pod" + "ListEntitiesForPolicies" ] } ], "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "Verify kubelet's certificate before establishing connection.", - "remediation": "Follow the Kubernetes documentation and setup the TLS connection between the apiserver and kubelets. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--kubelet-certificate-authority` parameter to the path to the cert file for the certificate authority.\n\n \n```\n--kubelet-certificate-authority=\n\n```\n\n#### Impact Statement\nYou require TLS to be configured on apiserver as well as kubelets.\n\n#### Default Value\nBy default, `--kubelet-certificate-authority` argument is not set.", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "ensure-that-the-controller-manager-bind-address-argument-is-set-to-127.0.0.1", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsg := {\n\t\t\"alertMessage\": \"the Controller Manager API service is not bound to a localhost interface only\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\nget_flag_value(cmd) = value {\n\tre := \" ?--bind-address=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, 1)\n\tcount(matchs) == 1\n\tvalue =matchs[0][1]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tval = get_flag_value(cmd[i])\n\tval != \"127.0.0.1\"\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--bind-address=127.0.0.1\"}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--bind-address\")\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--bind-address=127.0.0.1\"}],\n\t}\n}", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "Do not bind the Controller Manager service to non-loopback insecure addresses.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and ensure the correct value for the `--bind-address` parameter\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, the `--bind-address` parameter is set to 0.0.0.0", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "exposed-sensitive-interfaces", - "attributes": { - 
"useUntilKubescapeVersion": "v1.0.133", - "microsoftK8sThreatMatrix": "Initial access::Exposed sensitive interfaces", - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\nimport data.kubernetes.api.client as client\nimport data\n\n# loadbalancer\ndeny[msga] {\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"LoadBalancer\"\n\n\twl := input[_]\n\tworkload_types = {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\", \"Pod\", \"CronJob\"}\n\tworkload_types[wl.kind]\n\tresult := wl_connectedto_service(wl, service)\n \n # see default-config-inputs.json for list values\n services_names := data.postureControlInputs.servicesNames\n\tservices_names[service.metadata.name]\n # externalIP := service.spec.externalIPs[_]\n\texternalIP := service.status.loadBalancer.ingress[0].ip\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl, service]\n\t\t}\n\t}\n}\n\n\n# nodePort\n# get a pod connected to that service, get nodeIP (hostIP?)\n# use ip + nodeport\ndeny[msga] {\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"NodePort\"\n \n # see default-config-inputs.json for list values\n services_names := data.postureControlInputs.servicesNames\n\tservices_names[service.metadata.name]\n \n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\n\tresult := wl_connectedto_service(pod, service)\n\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod, service]\n\t\t}\n\t}\n} \n\n# nodePort\n# get a workload connected to that service, get nodeIP (hostIP?)\n# use ip + nodeport\ndeny[msga] {\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"NodePort\"\n \n # see default-config-inputs.json for list values\n services_names := data.postureControlInputs.servicesNames\n\tservices_names[service.metadata.name]\n \n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\", \"CronJob\"}\n\tspec_template_spec_patterns[wl.kind]\n\n\tresult := wl_connectedto_service(wl, service)\n\n\tpods_resource := client.query_all(\"pods\")\n\tpod := pods_resource.body.items[_]\n\tmy_pods := [pod | startswith(pod.metadata.name, wl.metadata.name)]\n\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl, service]\n\t\t}\n\t}\n}\n\n# ====================================================================================\n\nwl_connectedto_service(wl, service) = paths{\n\tcount({x | service.spec.selector[x] == wl.metadata.labels[x]}) == count(service.spec.selector)\n\tpaths = [\"spec.selector.matchLabels\", \"service.spec.selector\"]\n}\n\nwl_connectedto_service(wl, service) = paths {\n\twl.spec.selector.matchLabels == service.spec.selector\n\tpaths = [\"spec.selector.matchLabels\", \"service.spec.selector\"]\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - 
"Service" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "kubernetes.api.client" - } - ], - "configInputs": [ - "settings.postureControlInputs.servicesNames" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.servicesNames", - "name": "Service names", - "description": "Kubescape will look for the following services that exposes sensitive interfaces of common K8s projects/applications" - } - ], - "description": "fails if known interfaces have exposed services", - "remediation": "", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "ensure-that-the-api-server-audit-log-maxsize-argument-is-set-to-100-or-as-appropriate", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true", - "useFromKubescapeVersion": "v2.0.159" - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": result.alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--audit-log-maxsize\")\n\tresult = {\n\t\t\"alert\": \"Please validate that audit-log-maxsize has an appropriate value\",\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--audit-log-maxsize\")\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [count(cmd)])\n\tresult = {\n\t\t\"alert\": \"Audit log max size not set\",\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--audit-log-maxsize=YOUR_VALUE\"}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "Rotate log files on reaching 100 MB or as appropriate.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxsize` parameter to an appropriate size in MB. 
For example, to set it as 100 MB:\n\n \n```\n--audit-log-maxsize=100\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, auditing is not enabled.", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "if-proxy-kubeconfig-file-exists-ensure-ownership-is-set-to-root-root", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport data.cautils as cautils\nimport future.keywords.in\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubproxy_info(obj)\n\n\tfile_obj_path := [\"data\", \"kubeConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\n\nis_kubproxy_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeProxyInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeProxyInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "configInputs": null, - "controlConfigInputs": null, - "description": "If `kube-proxy` is running, ensure that the file ownership of its kubeconfig file is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the each worker node. 
For example,\n\n \n```\nchown root:root \n\n```", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "validate-kubelet-tls-configuration", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "creationTime": "", - "rule": "package armo_builtins\nimport data.kubernetes.api.client as client\n\ndeny[msga] {\n\t\tkubelet_config := input[_]\n\t\tkubelet_config.kind == \"KubeletConfiguration\"\n\t\tkubelet_config.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\n\t\tkubelet_cli := input[_] \n\t\tkubelet_cli.kind == \"KubeletCommandLine\"\n\t\tkubelet_cli.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\t\tkubelet_cli_data := kubelet_cli.data\n\n\t\tis_tls_disabled_both(kubelet_config, kubelet_cli_data)\n\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\t\"alertScore\": 2,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"failedPaths\": [],\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n \"k8sApiObjects\": [kubelet_config, kubelet_cli]\n\t\t\t},\n\t\t}\n\t}\n\n\ndeny[msga] {\n\t\texternal_obj := is_tls_disabled_single(input)\n\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\t\"alertScore\": 2,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"failedPaths\": [],\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n \"k8sApiObjects\": [external_obj]\n\t\t\t},\n\t\t}\n\t}\n\n\n\n# CLI overrides config\nis_tls_disabled_both(kubelet_config, kubelet_cli) {\n is_not_tls_cli(kubelet_cli)\n is_not_tls_config(kubelet_config)\n}\n\nis_tls_disabled_single(resources) = obj {\n\tkubelet_cli := resources[_] \n\tkubelet_cli.kind == \"KubeletCommandLine\"\n\tkubelet_cli.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\n\tkubelet_config := [config | config = resources[_]; config.kind == \"KubeletConfiguration\"]\n\tcount(kubelet_config) == 0\n\n\tis_not_tls_cli(kubelet_cli)\n\n\tobj = kubelet_cli\n}\n\n\nis_tls_disabled_single(resources) = obj {\n\tkubelet_config := resources[_]\n\tkubelet_config.kind == \"KubeletConfiguration\"\n\tkubelet_config.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\n\tkubelet_cli := [cli | cli = resources[_]; cli.kind == \"KubeletCommandLine\"]\n\tcount(kubelet_cli) == 0\n\n\tis_not_tls_config(kubelet_config)\n\n\tobj = kubelet_config\n}\n\n\nis_not_tls_cli(kubelet_cli) {\n\tkubelet_cli_data := kubelet_cli.data\n\tnot contains(kubelet_cli_data[\"fullCommand\"], \"tls-cert-file\")\n\tnot contains(kubelet_cli_data[\"fullCommand\"], \"tls-private-key-file\")\n}\n\nis_not_tls_config(kubelet_config){\n not kubelet_config.data.tlsCertFile\n not kubelet_config.data.tlsPrivateKeyFile\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletConfiguration", - "KubeletCommandLine" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - }, - { - "packageName": "kubernetes.api.client" - } - ], - "configInputs": null, - "controlConfigInputs": null, - "description": "Determines if kubelet has tls configuration.", - "remediation": "Start the kubelet with the --tls-cert-file and --tls-private-key-file flags, providing the X509 certificate and its matching private key.", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": 
"rule-access-dashboard-wl-v1", - "attributes": { - "m$K8sThreatMatrix": "Lateral Movement::Access Kubernetes dashboard, Discovery::Access Kubernetes dashboard", - "armoBuiltin": true, - "useFromKubescapeVersion": "v1.0.133" - }, - "creationTime": "", - "rule": "package armo_builtins\n\n# input: \n# apiversion: \n# fails if pod that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n pod := input[_]\n pod.spec.serviceAccountName == \"kubernetes-dashboard\"\n not startswith(pod.metadata.name, \"kubernetes-dashboard\")\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following pods: %s are associated with dashboard service account\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [\"spec.serviceaccountname\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# input: \n# apiversion: \n# fails if workload that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n wl.spec.template.spec.serviceAccountName == \"kubernetes-dashboard\"\n not startswith(wl.metadata.name, \"kubernetes-dashboard\")\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is associated with dashboard service account\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [\"spec.template.spec.serviceaccountname\"],\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# input: \n# apiversion: \n# fails if CronJob that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n wl.spec.jobTemplate.spec.template.spec.serviceAccountName == \"kubernetes-dashboard\"\n not startswith(wl.metadata.name, \"kubernetes-dashboard\")\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjob: %s is associated with dashboard service account\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [\"spec.jobTemplate.spec.template.spec.serviceaccountname\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "fails if subject that is not dashboard service account is bound to dashboard role/clusterrole, or- if anyone that is not dashboard pod is associated with its service account.", - "remediation": "", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "ensure-that-the-api-server-audit-log-maxage-argument-is-set-to-30-or-as-appropriate", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = 
invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": result.alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_value(cmd) = {\"origin\": origin, \"value\": value} {\n\tre := \" ?--audit-log-maxage=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalue = to_number(matchs[0][1])\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag = get_flag_value(cmd[i])\n\tflag.value < 30\n\tfixed = replace(cmd[i], flag.origin, \"--audit-log-maxage=30\")\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [i])\n\tresult = {\n\t\t\"alert\": sprintf(\"Audit log retention period is %v days, which is too small (should be at least 30 days)\", [flag.value]),\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--audit-log-maxage\")\n\tresult = {\n\t\t\"alert\": \"Audit log retention period is not set\",\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%v]\", [count(cmd)]),\n\t\t\t\"value\": \"--audit-log-maxage=30\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "Retain the logs for at least 30 days or as appropriate.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxage` parameter to 30 or as an appropriate number of days:\n\n \n```\n--audit-log-maxage=30\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, auditing is not enabled.", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "etcd-auto-tls-disabled", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\n# Check if --auto-tls is not set to true\ndeny[msga] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\n\tcommands := obj.spec.containers[0].command\n\tresult := invalid_flag(commands)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Auto tls is enabled. 
Clients are able to use self-signed certificates for TLS.\",\n\t\t\"alertScore\": 6,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--auto-tls=true\")\n\tfixed = replace(cmd[i], \"--auto-tls=true\", \"--auto-tls=false\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "Do not use self-signed certificates for TLS.", - "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and either remove the `--auto-tls` parameter or set it to `false`.\n\n \n```\n--auto-tls=false\n\n```\n\n#### Impact Statement\nClients will not be able to use self-signed certificates for TLS.\n\n#### Default Value\nBy default, `--auto-tls` is set to `false`.", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "rule-can-delete-logs-v1", - "attributes": { - "microsoftK8sThreatMatrix": "Defense Evasion::Clear container logs", - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can delete logs of pod \ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"delete\", \"deletecollection\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods/log\", \"pods/*\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can delete logs\", 
[subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "determines which users can delete logs inside a container", - "remediation": "", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "rule-can-list-get-secrets", - "attributes": { - "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", - "armoBuiltin": true, - "useUntilKubescapeVersion": "v1.0.133" - }, - "creationTime": "", - "rule": "package armo_builtins\nimport data.cautils as cautils\n\n\n# fails if user can list/get secrets \n#RoleBinding to Role\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n canViewSecretsResource(rule)\n canViewSecretsVerb(rule)\n\n rolebinding.roleRef.kind == \"Role\"\n rolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can read secrets\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\n# fails if user can list/get secrets \n#RoleBinding to ClusterRole\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n canViewSecretsResource(rule)\n canViewSecretsVerb(rule)\n\n rolebinding.roleRef.kind == \"ClusterRole\"\n rolebinding.roleRef.name == role.metadata.name\n\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can read secrets\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n# fails if user can list/get secrets \n# ClusterRoleBinding to ClusterRole\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n clusterrolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == 
\"ClusterRoleBinding\"]\n role:= roles[_]\n clusterrolebinding := clusterrolebindings[_]\n\n rule:= role.rules[_]\n canViewSecretsResource(rule)\n canViewSecretsVerb(rule)\n\n clusterrolebinding.roleRef.kind == \"ClusterRole\"\n clusterrolebinding.roleRef.name == role.metadata.name\n\n subject := clusterrolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can read secrets\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,clusterrolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\n\n\ncanViewSecretsVerb(rule) {\n cautils.list_contains(rule.verbs,\"get\")\n}\n\ncanViewSecretsVerb(rule) {\n cautils.list_contains(rule.verbs,\"list\")\n}\n\ncanViewSecretsVerb(rule) {\n cautils.list_contains(rule.verbs,\"watch\")\n}\n\n\ncanViewSecretsVerb(rule) {\n cautils.list_contains(rule.verbs,\"*\")\n}\n\n\ncanViewSecretsResource(rule) {\n cautils.list_contains(rule.resources,\"secrets\")\n}\n\ncanViewSecretsResource(rule) {\n is_api_group(rule)\n cautils.list_contains(rule.resources,\"*\")\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "configInputs": null, - "controlConfigInputs": null, - "description": "determines which users can list/get secrets", - "remediation": "", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "kubelet-authorization-mode-alwaysAllow", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.2 https://workbench.cisecurity.org/sections/1126668/recommendations/1838640\n\n# has cli\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--authorization-mode\")\n\tcontains(command, \"--authorization-mode=AlwaysAllow\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests are enabled\",\n\t\t\"alertScore\": 10,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n# has config\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--authorization-mode\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.authorization.mode == \"AlwaysAllow\"\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests are enabled\",\n\t\t\"alertScore\": 10,\n\t\t\"failedPaths\": [\"authorization.mode\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": 
obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n# has no config and cli\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--authorization-mode\")\n\tnot contains(command, \"--config\")\n\t\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests are enabled\",\n\t\t\"alertScore\": 10,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--authorization-mode\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data\n\t\t}}\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "Do not allow all requests. 
Enable explicit authorization.", - "remediation": "Change authorization mode to Webhook.", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "etcd-peer-client-auth-cert", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\n# Check if --client-cert-auth is set to true\ndeny[msga] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Etcd server is not requiring a valid client certificate.\",\n\t\t\"alertScore\": 7,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--peer-client-cert-auth\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--peer-client-cert-auth=true\",\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--peer-client-cert-auth=false\")\n\tfixed = replace(cmd[i], \"--peer-client-cert-auth=false\", \"--peer-client-cert-auth=true\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "etcd should be configured for peer authentication.", - "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--peer-client-cert-auth=true\n\n```\n\n#### Impact Statement\nAll peers attempting to communicate with the etcd server will require a valid client certificate for authentication.\n\n#### Default Value\n**Note:** This recommendation is applicable only for etcd clusters. 
If you are using only one etcd server in your environment then this recommendation is not applicable.\n\n By default, `--peer-client-cert-auth` argument is set to `false`.", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "pod-security-admission-baseline-applied", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\nimport future.keywords.in\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"baseline\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", - "resourceEnumerator": "package armo_builtins\nimport future.keywords.in\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the 
required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Namespace" - ] - }, - { - "apiGroups": [ - "admissionregistration.k8s.io" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "ValidatingWebhookConfiguration", - "MutatingWebhookConfiguration" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "Checks that every namespace enabled baseline pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", - "remediation": "", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "host-pid-ipc-privileges", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\n\n# Fails if pod has hostPID enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tis_host_pid(pod.spec)\n\tpath := \"spec.hostPID\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has hostPID enabled\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if pod has hostIPC enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tis_host_ipc(pod.spec)\n\tpath := \"spec.hostIPC\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has hostIPC enabled\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has hostPID enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_pid(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostPID\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod with hostPID enabled\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has hostIPC enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_ipc(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostIPC\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod with hostIPC enabled\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails 
if cronjob has hostPID enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_pid(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostPID\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod with hostPID enabled\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has hostIPC enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_ipc(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostIPC\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod with hostIPC enabled\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Check that hostPID and hostIPC are set to false. Default is false. Only in pod spec\n\n\nis_host_pid(podspec){\n podspec.hostPID == true\n}\n\nis_host_ipc(podspec){\n podspec.hostIPC == true\n}", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "Containers should be as isolated as possible from the host machine. 
The hostPID and hostIPC fields in Kubernetes may excessively expose the host to potentially malicious actions.", - "remediation": "Make sure that the fields hostIPC and hostPID in the pod spec are not set to true (set to false or not present)", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "non-root-containers", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\n\n################################################################################\n# Rules\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n\tbeggining_of_path := \"spec\"\n\talertInfo := evaluate_workload_non_root_container(container, pod, beggining_of_path)\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i) \n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v may run as root\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\tbeggining_of_path := \"spec.template.spec\"\n\talertInfo := evaluate_workload_non_root_container(container, wl.spec.template, beggining_of_path)\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i) \n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has a container configured to run as root\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec\"\n\talertInfo := evaluate_workload_non_root_container(container, wl.spec.jobTemplate.spec.template, beggining_of_path)\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i) \n\t\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nget_failed_path(alertInfo, i) = [replace(alertInfo.failed_path,\"container_ndx\",format_int(i,10))] {\n\talertInfo.failed_path != \"\"\n} else = []\n\n\nget_fixed_path(alertInfo, i) = [{\"path\":replace(alertInfo.fixPath[0].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[0].value}, {\"path\":replace(alertInfo.fixPath[1].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[1].value}]{\n\tcount(alertInfo.fixPath) == 2\n} else = [{\"path\":replace(alertInfo.fixPath[0].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[0].value}] {\n\tcount(alertInfo.fixPath) == 1\n} else = []\n\n#################################################################################\n# Workload evaluation 
\n\nevaluate_workload_non_root_container(container, pod, beggining_of_path) = alertInfo {\n\trunAsNonRootValue := get_run_as_non_root_value(container, pod, beggining_of_path)\n\trunAsNonRootValue.value == false\n\t\n\trunAsUserValue := get_run_as_user_value(container, pod, beggining_of_path)\n\trunAsUserValue.value == 0\n\n\talertInfo := choose_first_if_defined(runAsUserValue, runAsNonRootValue)\n} else = alertInfo {\n allowPrivilegeEscalationValue := get_allow_privilege_escalation(container, pod, beggining_of_path)\n allowPrivilegeEscalationValue.value == true\n\n alertInfo := allowPrivilegeEscalationValue\n}\n\n\n#################################################################################\n# Value resolution functions\n\n\nget_run_as_non_root_value(container, pod, beggining_of_path) = runAsNonRoot {\n failed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]) \n runAsNonRoot := {\"value\" : container.securityContext.runAsNonRoot, \"failed_path\" : failed_path, \"fixPath\": [] ,\"defined\" : true}\n} else = runAsNonRoot {\n\tfailed_path := sprintf(\"%v.securityContext.runAsNonRoot\", [beggining_of_path]) \n runAsNonRoot := {\"value\" : pod.spec.securityContext.runAsNonRoot, \"failed_path\" : failed_path, \"fixPath\": [], \"defined\" : true}\n} else = {\"value\" : false, \"failed_path\" : \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"}], \"defined\" : false} {\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : false, \"failed_path\" : \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]) , \"value\":\"true\"}, {\"path\":sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}], \"defined\" : false}\n\nget_run_as_user_value(container, pod, beggining_of_path) = runAsUser {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsUser\", [beggining_of_path]) \n runAsUser := {\"value\" : container.securityContext.runAsUser, \"failed_path\" : failed_path, \"fixPath\": [], \"defined\" : true}\n} else = runAsUser {\n\tfailed_path := sprintf(\"%v.securityContext.runAsUser\", [beggining_of_path]) \n runAsUser := {\"value\" : pod.spec.securityContext.runAsUser, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = {\"value\" : 0, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"}],\"defined\" : false}{\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : 0, \"failed_path\": \"\", \n\t\"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"},{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}],\n\t\"defined\" : false}\n\nget_run_as_group_value(container, pod, beggining_of_path) = runAsGroup {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [beggining_of_path])\n runAsGroup := {\"value\" : container.securityContext.runAsGroup, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = runAsGroup {\n\tfailed_path := sprintf(\"%v.securityContext.runAsGroup\", [beggining_of_path])\n runAsGroup := 
{\"value\" : pod.spec.securityContext.runAsGroup, \"failed_path\" : failed_path, \"fixPath\":[], \"defined\" : true}\n} else = {\"value\" : 0, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"}], \"defined\" : false}{\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : 0, \"failed_path\": \"\", \n\t\"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"},{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}],\n \t\"defined\" : false\n}\n\nget_allow_privilege_escalation(container, pod, beggining_of_path) = allowPrivilegeEscalation {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path])\n allowPrivilegeEscalation := {\"value\" : container.securityContext.allowPrivilegeEscalation, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = allowPrivilegeEscalation {\n\tfailed_path := sprintf(\"%v.securityContext.allowPrivilegeEscalation\", [beggining_of_path])\n allowPrivilegeEscalation := {\"value\" : pod.spec.securityContext.allowPrivilegeEscalation, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = {\"value\" : true, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}], \"defined\" : false}\n\nchoose_first_if_defined(l1, l2) = c {\n l1.defined\n c := l1\n} else = l2\n\n\nis_allow_privilege_escalation_field(container, pod) {\n\tcontainer.securityContext.allowPrivilegeEscalation == false\n}\n\nis_allow_privilege_escalation_field(container, pod) {\n\tpod.spec.securityContext.allowPrivilegeEscalation == false\n}\n\n\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "fails if container can run as root", - "remediation": "Make sure that the user/group in the securityContext of pod/container is set to an id less than 1000, or the runAsNonRoot flag is set to true. 
Also make sure that the allowPrivilegeEscalation field is set to false", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "ensure-that-the-controller-manager-service-account-private-key-file-argument-is-set-as-appropriate", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"service account token can not be rotated as needed\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--service-account-private-key-file\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--service-account-private-key-file=\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "Explicitly set a service account private key file for service accounts on the controller manager.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--service-account-private-key-file` parameter to the private key file for service accounts.\n\n \n```\n--service-account-private-key-file=\n\n```\n\n#### Impact Statement\nYou would need to securely maintain the key file and rotate the keys based on your organization's key rotation policy.\n\n#### Default Value\nBy default, `--service-account-private-key-file` it not set.", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "CVE-2022-24348", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"argocd:v\")\n\tis_vulnerable_image(image)\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-24348\",\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n 
},\n\t\t}\n}\n\nis_vulnerable_image(image) {\n\tversion := split(image, \":v\")[1]\n\tversionTriplet := split(version, \".\")\n\tcount(versionTriplet) == 3\n\tmajor_version := to_number(versionTriplet[0])\n\tminorVersion := to_number(versionTriplet[1])\n\tsubVersion := to_number(versionTriplet[2]) \n\tisVulnerableVersion(major_version,minorVersion,subVersion)\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 1\n} \n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 2\n\tminorVersion == 0\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 2\n\tminorVersion == 1\n\tsubVersion < 9\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 2\n\tminorVersion == 2\n\tsubVersion < 4\n}\t\n\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"argocd:v\")\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-24348\",\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "a", - "remediation": "a", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "rule-exposed-dashboard-v1", - "attributes": { - "m$K8sThreatMatrix": "Initial Access::Exposed Dashboard", - "armoBuiltin": true, - "useFromKubescapeVersion": "v1.0.133" - }, - "creationTime": "", - "rule": "package armo_builtins\n\n# input: deployment, service\n# apiversion: v1\n# fails if dashboard exists and is exposed\n\ndeny[msga] {\n\tdeployment := input[_]\n\tstartswith(deployment.metadata.name, \"kubernetes-dashboard\")\n\tcontainer := deployment.spec.template.spec.containers[j]\n\tversion := trim_prefix(container.image, \"kubernetesui/dashboard:v\")\n\tto_number(replace(version, \".\", \"\")) < 201\n\t\n\tservice := input[_]\n\tservice.kind == \"Service\"\n\tisNodePortLbService(service)\n\tcount({x | service.spec.selector[x]; deployment.metadata.labels[x]}) == count(service.spec.selector)\n\tpath := sprintf(\"spec.template.spec.containers[%v]\", [format_int(j, 10)])\n\n\tdeploymentvector = {\"name\": deployment.metadata.name,\n\t\t\t\t\t\t\"namespace\": deployment.metadata.namespace,\n\t\t\t\t\t\t\"kind\": deployment.kind,\n\t\t\t\t\t\t\"relatedObjects\": [service]}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"dashboard exists and is exposed %s\", [container.image]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": deploymentvector\n\t\t}\n\t}\n}\n\n\n\nisNodePortLbService(service) {\n\tservice.spec.type == \"NodePort\"\n}\n\nisNodePortLbService(service) {\n\tservice.spec.type == \"LoadBalancer\"\n}", - "resourceEnumerator": "package armo_builtins\n\n# input: deployment, service\n# apiversion: v1\n# fails if dashboard exists and is exposed\n\ndeny[msga] {\n\tdeployment := input[_]\n\tstartswith(deployment.metadata.name, 
\"kubernetes-dashboard\")\n\tcontainer := deployment.spec.template.spec.containers[j]\n\tversion := trim_prefix(container.image, \"kubernetesui/dashboard:v\")\n\tto_number(replace(version, \".\", \"\")) < 201\n\t\n\tservice := input[_]\n\tservice.kind == \"Service\"\n\tcount({x | service.spec.selector[x]; deployment.metadata.labels[x]}) == count(service.spec.selector)\n\tpath := sprintf(\"spec.template.spec.containers[%v]\", [format_int(j, 10)])\n\n\tdeploymentvector = {\"name\": deployment.metadata.name,\n\t\t\t\t\t\t\"namespace\": deployment.metadata.namespace,\n\t\t\t\t\t\t\"kind\": deployment.kind,\n\t\t\t\t\t\t\"relatedObjects\": [service]}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"dashboard exists and is exposed %s\", [container.image]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": deploymentvector\n\t\t}\n\t}\n}\n\n\n\nisNodePortLbService(service) {\n\tservice.spec.type == \"NodePort\"\n}\n\nisNodePortLbService(service) {\n\tservice.spec.type == \"LoadBalancer\"\n}", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Deployment", - "Service" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "fails if dashboard exists and is exposed", - "remediation": "", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "CVE-2022-0185", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tnode := input[_]\n node.kind == \"Node\"\n kernel_version_match := regex.find_all_string_submatch_n(\"[0-9]+\\\\.[0-9]+\\\\.[0-9]+\", node.status.nodeInfo.kernelVersion, -1)\n kernelVersion := kernel_version_match[0][0]\n \n kernel_version_arr := split(kernelVersion, \".\")\n to_number(kernel_version_arr[0]) == 5\n to_number(kernel_version_arr[1]) >= 1\n to_number(kernel_version_arr[1]) <= 16\n to_number(kernel_version_arr[2]) < 2 \n \n node.status.nodeInfo.operatingSystem == \"linux\"\n path := \"status.nodeInfo.kernelVersion\"\n\n linux_kernel_vars := [linux_kernel_var | linux_kernel_var = input[_]; linux_kernel_var.kind == \"LinuxKernelVariables\"]\n linux_kernel_vars_for_node := [linux_kernel_var | linux_kernel_var = linux_kernel_vars[_]; linux_kernel_var.metadata.name == node.metadata.name]\n data_userns_clones := [linux_kernel_var | linux_kernel_var = linux_kernel_vars_for_node[_].data[_]; is_unprivileged_userns_clone_enabled(linux_kernel_var)]\n count(data_userns_clones) > 0\n\n external_vector := {\n \"name\": node.metadata.name,\n\t\t\"namespace\": \"\",\n\t\t\"kind\": node.kind,\n\t\t\"relatedObjects\": linux_kernel_vars_for_node,\n \"kernelVersion\": node.status.nodeInfo.kernelVersion\n }\n\n\n \tmsga := {\n\t\t\t\"alertMessage\": \"You are vulnerable to CVE-2022-0185\",\n \t\t\"alertObject\": {\n \"externalObjects\": external_vector\n },\n\t\t\t\"failedPaths\": [\"kernelVersion\"],\n \"fixPaths\":[],\n\t}\n}\n\nis_unprivileged_userns_clone_enabled(linux_kernel_var) {\n\tlinux_kernel_var.key == \"unprivileged_userns_clone\"\n linux_kernel_var.value == \"1\\n\"\n}", - "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tnode := input[_]\n node.kind == \"Node\"\n \n node.status.nodeInfo.operatingSystem == \"linux\"\n\n linux_kernel_vars := [linux_kernel_var | linux_kernel_var = input[_]; 
linux_kernel_var.kind == \"LinuxKernelVariables\"]\n linux_kernel_vars_for_node := [linux_kernel_var | linux_kernel_var = linux_kernel_vars[_]; linux_kernel_var.metadata.name == node.metadata.name]\n\n external_vector := {\n \"name\": node.metadata.name,\n\t\t\"namespace\": \"\",\n\t\t\"kind\": node.kind,\n\t\t\"relatedObjects\": linux_kernel_vars_for_node,\n \"kernelVersion\": node.status.nodeInfo.kernelVersion\n }\n\n \tmsga := {\n\t\t\t\"alertMessage\": \"You are vulnerable to CVE-2022-0185\",\n \t\t\"alertObject\": {\n \"externalObjects\": external_vector\n },\n\t\t\t\"failedPaths\": [],\n \"fixPaths\":[],\n\t}\n}\n", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Node" - ] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "LinuxKernelVariables" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, "controlConfigInputs": null, "description": "", "remediation": "", "ruleQuery": "armo_builtins", + "relevantCloudProviders": [ + "AKS" + ] + }, + { + "guid": "", + "name": "ensure-that-the-controller-manager-terminated-pod-gc-threshold-argument-is-set-as-appropriate", + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": result.alert,\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--terminated-pod-gc-threshold\")\n\tresult = {\n\t\t\"alert\": \"Please validate that --terminated-pod-gc-threshold is set to an appropriate value\",\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--terminated-pod-gc-threshold\")\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [count(cmd)])\n\tresult = {\n\t\t\"alert\": \"--terminated-pod-gc-threshold flag not set to an appropriate value\",\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--terminated-pod-gc-threshold=YOUR_VALUE\"}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Activate garbage collector on pod termination, as 
appropriate.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--terminated-pod-gc-threshold` to an appropriate threshold, for example:\n\n \n```\n--terminated-pod-gc-threshold=10\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `--terminated-pod-gc-threshold` is set to `12500`.", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-that-the-api-server-DenyServiceExternalIPs-is-not-set", + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"admission control plugin DenyServiceExternalIPs is enabled. This is equal to turning off all admission controllers\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--enable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\t\"DenyServiceExternalIPs\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := [val | val := flag.values[j]; val != \"DenyServiceExternalIPs\"]\n\tresult = get_retsult(fixed_values, i)\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) == 0\n\tresult = {\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) > 0\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": sprintf(\"--enable-admission-plugins=%v\", [concat(\",\", fixed_values)]),\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "This admission controller rejects all net-new usage of the Service field externalIPs.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and remove the `--DenyServiceExternalIPs'parameter\n\n or\n\n The Kubernetes API server flag 
disable-admission-plugins takes a comma-delimited list of admission control plugins to be disabled, even if they are in the list of plugins enabled by default.\n\n `kube-apiserver --disable-admission-plugins=DenyServiceExternalIPs,AlwaysDeny ...`\n\n#### Impact Statement\nWhen enabled, users of the cluster may not create new Services which use externalIPs and may not add new values to externalIPs on existing Service objects.\n\n#### Default Value\nBy default, `--token-auth-file` argument is not set.", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-that-the-api-server-request-timeout-argument-is-set-as-appropriate", + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": result.alert,\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--request-timeout\")\n\tresult = {\n\t\t\"alert\": \"Please validate the request timeout flag is set to an appropriate value\",\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Set global request timeout for API server requests as appropriate.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` and set the below parameter as appropriate and if needed. 
For example,\n\n \n```\n--request-timeout=300s\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `--request-timeout` is set to 60 seconds.", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-that-the-api-server-tls-cert-file-and-tls-private-key-file-arguments-are-set-as-appropriate", + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"API server is not configured to serve only HTTPS traffic\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\twanted = [\n\t\t[\"--tls-cert-file\", \"\"],\n\t\t[\"--tls-private-key-file\", \"\"],\n\t]\n\n\tfix_paths = [{\n\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd) + i]),\n\t\t\"value\": sprintf(\"%s=%s\", wanted[i]),\n\t} |\n\t\tnot contains(full_cmd, wanted[i][0])\n\t]\n\n\tcount(fix_paths) > 0\n\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": fix_paths,\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Setup TLS connection on the API server.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection on the apiserver. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the TLS certificate and private key file parameters.\n\n \n```\n--tls-cert-file= \n--tls-private-key-file=\n\n```\n\n#### Impact Statement\nTLS and client certificate authentication must be configured for your Kubernetes cluster deployment.\n\n#### Default Value\nBy default, `--tls-cert-file` and `--tls-private-key-file` arguments are not set.", + "ruleQuery": "", "relevantCloudProviders": null }, { "guid": "", "name": "rule-allow-privilege-escalation", - "attributes": { - "armoBuiltin": true - }, "creationTime": "", - "rule": "package armo_builtins\n\n\n# Fails if pod has container that allow privilege escalation\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n result := is_allow_privilege_escalation_container(container, i, beggining_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v allow privilege escalation\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has a container that allow privilege escalation\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n result := is_allow_privilege_escalation_container(container, i, beggining_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has a container that allow privilege escalation\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_allow_privilege_escalation_container(container, i, beggining_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_allow_privilege_escalation_container(container, i, beggining_of_path) = [failed_path, fixPath] {\n not container.securityContext.allowPrivilegeEscalation == false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n\tfailed_path = \"\"\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [beggining_of_path, format_int(i, 10)]), \"value\":\"false\"} 
\n}\n\nis_allow_privilege_escalation_container(container, i, beggining_of_path) = [failed_path, fixPath] {\n not container.securityContext.allowPrivilegeEscalation == false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) > 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == false\n\tfailed_path = \"\"\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [beggining_of_path, format_int(i, 10)]), \"value\":\"false\"} \n}\n\n\nis_allow_privilege_escalation_container(container, i, beggining_of_path) = [failed_path, fixPath] {\n container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n\tfixPath = \"\"\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [beggining_of_path, format_int(i, 10)])\n}\n\nis_allow_privilege_escalation_container(container, i, beggining_of_path)= [failed_path, fixPath] {\n container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) > 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == false\n\tfixPath = \"\"\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [beggining_of_path, format_int(i, 10)])\n}\n\n get_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n", + "rule": "package armo_builtins\n\n\n# Fails if pod has container that allow privilege escalation\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n is_allow_privilege_escalation_container(container)\n\tfixPath := get_fix_path(i, start_of_path)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v allow privilege escalation\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has a container that allow privilege escalation\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n is_allow_privilege_escalation_container(container)\n\tfixPath := get_fix_path(i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has a container that allow privilege escalation\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tis_allow_privilege_escalation_container(container)\n\tfixPath := get_fix_path(i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 
7,\n\t\t\"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_allow_privilege_escalation_container(container) {\n not container.securityContext.allowPrivilegeEscalation == false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n}\n\nis_allow_privilege_escalation_container(container) {\n not container.securityContext.allowPrivilegeEscalation == false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) > 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == false\n}\n\n\nis_allow_privilege_escalation_container(container) {\n container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n}\n\nis_allow_privilege_escalation_container(container) {\n container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) > 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == false\n}\n\nget_fix_path(i, start_of_path) = [{\"path\": sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [start_of_path, i]), \"value\":\"false\"},\n\t{\"path\": sprintf(\"%vcontainers[%v].securityContext.privileged\", [start_of_path, i]), \"value\":\"false\"}]\n", "resourceEnumerator": "", "ruleLanguage": "Rego", "match": [ @@ -15327,7 +26037,6 @@ } ], "ruleDependencies": [], - "configInputs": null, "controlConfigInputs": null, "description": "fails if container allows privilege escalation", "remediation": "Make sure that the allowPrivilegeEscalation field in the securityContext of pod/container is set to false", @@ -15336,23 +26045,22 @@ }, { "guid": "", - "name": "rule-excessive-delete-rights", + "name": "rule-can-create-pod", "attributes": { - "armoBuiltin": true, - "useUntilKubescapeVersion": "v1.0.133", - "m$K8sThreatMatrix": "Impact::Data Destruction" + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" }, "creationTime": "", - "rule": "package armo_builtins\nimport data.cautils as cautils\n\n\n# fails if user can can delete important resources\n#RoleBinding to Role\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n canDeleteResource(rule)\n canDeleteVerb(rule)\n\n rolebinding.roleRef.kind == \"Role\"\n rolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can delete important resources\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\n# fails if user can can delete important resources\n#RoleBinding to ClusterRole\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= 
role.rules[_]\n canDeleteResource(rule)\n canDeleteVerb(rule)\n\n rolebinding.roleRef.kind == \"ClusterRole\"\n rolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can delete important resources\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n# fails if user can can delete important resources\n# ClusterRoleBinding to ClusterRole\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n clusterrolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n clusterrolebinding := clusterrolebindings[_]\n\n rule:= role.rules[_]\n canDeleteResource(rule)\n canDeleteVerb(rule)\n\n clusterrolebinding.roleRef.kind == \"ClusterRole\"\n clusterrolebinding.roleRef.name == role.metadata.name\n\n\n subject := clusterrolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can delete important resources\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,clusterrolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\ncanDeleteVerb(rule) {\n\tcautils.list_contains(rule.verbs, \"delete\")\n}\n\ncanDeleteVerb(rule) {\n\tcautils.list_contains(rule.verbs, \"deletecollection\")\n}\n\ncanDeleteVerb(rule) {\n\tcautils.list_contains(rule.verbs, \"*\")\n}\n\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"secrets\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"pods\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"services\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"deployments\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"replicasets\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"daemonsets\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"statefulsets\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"jobs\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"cronjobs\")\n}\ncanDeleteResource(rule) {\n is_api_group(rule)\n\tcautils.list_contains(rule.resources, \"*\")\n}\n\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"apps\"\n}\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"batch\"\n}\n\n", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user has create access to pods\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, 
subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can create pods\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n", "resourceEnumerator": "", "ruleLanguage": "Rego", "match": [ { "apiGroups": [ - "*" + "rbac.authorization.k8s.io" ], "apiVersions": [ - "*" + "v1" ], "resources": [ "Role", @@ -15363,60 +26071,17 @@ } ], "ruleDependencies": [], - "configInputs": null, "controlConfigInputs": null, - "description": "fails if user can delete important resources", + "description": "determines which users can create pods", "remediation": "", "ruleQuery": "armo_builtins", "relevantCloudProviders": null }, { "guid": "", - "name": "cluster-admin-role", - "attributes": { - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, + "name": "ensure-that-the-api-server-service-account-lookup-argument-is-set-to-true", "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin role\ndeny[msga] {\n\tsubjectVector := input[_]\n\n\trole := subjectVector.relatedObjects[i]\n\tendswith(role.kind, \"Role\")\n\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\t# check only cluster-admin role and only clusterrolebinding\n\trole.metadata.name == \"cluster-admin\"\n\trolebinding.kind == \"ClusterRoleBinding\"\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, 
l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s is bound to cluster-admin role\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "configInputs": null, - "controlConfigInputs": null, - "description": "determines which users have cluster admin permissions", - "remediation": "", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "ensure-that-the-api-server-etcd-certfile-and-etcd-keyfile-arguments-are-set-as-appropriate", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"etcd is not configured to use TLS properly\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\twanted = [\n\t\t[\"--etcd-certfile\", \"\"],\n\t\t[\"--etcd-keyfile\", \"\"],\n\t]\n\n\tfix_paths = [{\n\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd) + i]),\n\t\t\"value\": sprintf(\"%s=%s\", wanted[i]),\n\t} |\n\t\tnot contains(full_cmd, wanted[i][0])\n\t]\n\n\tcount(fix_paths) > 0\n\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": fix_paths,\n\t}\n}\n", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"anonymous requests is enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == 
\"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) := invalid_flags[0] {\n\tinvalid_flags := [flag |\n\t\tsome i, c in cmd\n\t\tflag := get_result(c, i)\n\t]\n}\n\nget_result(cmd, i) = result {\n\tcmd == \"--service-account-lookup=false\"\n\tresult = {\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\nget_result(cmd, i) = result {\n\tcmd != \"--service-account-lookup=false\"\n\tcontains(cmd, \"--service-account-lookup=false\")\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": replace(cmd, \"--service-account-lookup=false\", \"--service-account-lookup=true\"),\n\t\t}],\n\t}\n}\n", "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", "ruleLanguage": "Rego", "match": [ @@ -15433,374 +26098,9 @@ } ], "ruleDependencies": [], - "configInputs": null, "controlConfigInputs": null, - "description": "etcd should be configured to make use of TLS encryption for client connections.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the etcd certificate and key file parameters.\n\n \n```\n--etcd-certfile= \n--etcd-keyfile=\n\n```\n\n#### Impact Statement\nTLS and client certificate authentication must be configured for etcd.\n\n#### Default Value\nBy default, `--etcd-certfile` and `--etcd-keyfile` arguments are not set", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "alert-rw-hostpath", - "attributes": { - "armoBuiltin": true, - "m$K8sThreatMatrix": "Persistence::Writable hostPath mount, Lateral Movement::Writable volume mounts on the host" - }, - "creationTime": "", - "rule": "package armo_builtins\n\n# Fails if container has a hostPath volume which is not readOnly\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volumes := pod.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := pod.spec.containers[i]\n\tvolume_mount := container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tbeggining_of_path := \"spec.\"\n\tresult := is_rw_mount(volume_mount, beggining_of_path, i, k)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n podname := pod.metadata.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod: %v has: %v as hostPath volume\", [podname, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n#handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volumes := wl.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := wl.spec.template.spec.containers[i]\n\tvolume_mount := container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tbeggining_of_path := \"spec.template.spec.\"\n\tresult := is_rw_mount(volume_mount, beggining_of_path, i, k)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t\n\t}\n}\n\n#handles CronJobs\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n volumes := wl.spec.jobTemplate.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tvolume_mount := container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_rw_mount(volume_mount, beggining_of_path, i, k) \n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\n\tmsga := {\n\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\"packagename\": \"armo_builtins\",\n\t\"alertScore\": 7,\n\t\"fixPaths\": fixed_path,\n\t\"failedPaths\": failed_path,\n\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = 
[]\n\n\nis_rw_mount(mount, beggining_of_path, i, k) = [failed_path, fix_path] {\n\tnot mount.readOnly == true\n \tnot mount.readOnly == false\n\tfailed_path = \"\"\n fix_path = {\"path\": sprintf(\"%vcontainers[%v].volumeMounts[%v].readOnly\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]), \"value\":\"true\"}\n}\n\nis_rw_mount(mount, beggining_of_path, i, k) = [failed_path, fix_path] {\n \tmount.readOnly == false\n \tfailed_path = sprintf(\"%vcontainers[%v].volumeMounts[%v].readOnly\", [beggining_of_path, format_int(i, 10), format_int(k, 10)])\n fix_path = \"\"\n} ", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - }, - { - "packageName": "kubernetes.api.client" - } - ], - "configInputs": null, - "controlConfigInputs": null, - "description": "determines if any workload contains a hostPath volume with rw permissions", - "remediation": "Set the readOnly field of the mount to true", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "resource-policies", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\n\n# Check if container has limits\ndeny[msga] {\n \tpods := [pod | pod = input[_]; pod.kind == \"Pod\"]\n pod := pods[_]\n\tcontainer := pod.spec.containers[i]\n\t\n\t\n\tbeggining_of_path := \"spec.\"\n\tfixPath := is_no_cpu_and_memory_limits_defined(container, beggining_of_path, i)\n\t\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"there are no cpu and memory limits defined for container : %v\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPath,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Check if container has limits - for workloads\n# If there is no limits specified in the workload, we check the namespace, since if limits are only specified for namespace\n# and not in workload, it won't be on the yaml\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\t\n\tbeggining_of_path\t:= \"spec.template.spec.\"\n\tfixPath := is_no_cpu_and_memory_limits_defined(container, beggining_of_path, i)\n\t\n\t\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"there are no cpu and memory limits defined for container : %v\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPath,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n\t\n}\n\n# Check if container has limits - for cronjobs\n# If there is no limits specified in the cronjob, we check the namespace, since if limits are only specified for namespace\n# and not in cronjob, it won't be on the yaml\ndeny [msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tfixPath := 
is_no_cpu_and_memory_limits_defined(container, beggining_of_path, i)\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"there are no cpu and memory limits defined for container : %v\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPath,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# no limits at all\nis_no_cpu_and_memory_limits_defined(container, beggining_of_path, i) = fixPath {\n\tnot container.resources.limits\n\tfixPath = [{\"path\": sprintf(\"%vcontainers[%v].resources.limits.cpu\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}, {\"path\": sprintf(\"%vcontainers[%v].resources.limits.memory\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}]\n}\n\n# only memory limit\nis_no_cpu_and_memory_limits_defined(container, beggining_of_path, i) = fixPath {\n\tcontainer.resources.limits\n\tnot container.resources.limits.cpu\n\tcontainer.resources.limits.memory\n\tfixPath = [{\"path\": sprintf(\"%vcontainers[%v].resources.limits.cpu\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}]\n}\n\n# only cpu limit\nis_no_cpu_and_memory_limits_defined(container, beggining_of_path, i) =fixPath {\n\tcontainer.resources.limits\n\tnot container.resources.limits.memory\n\tcontainer.resources.limits.cpu\n\tfixPath = [{\"path\": sprintf(\"%vcontainers[%v].resources.limits.memory\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}]\n\tfailed_path = \"\"\n}\n# limits but without capu and memory \nis_no_cpu_and_memory_limits_defined(container, beggining_of_path, i) = fixPath {\n\tcontainer.resources.limits\n\tnot container.resources.limits.memory\n\tnot container.resources.limits.cpu\n\tfixPath = [{\"path\": sprintf(\"%vcontainers[%v].resources.limits.cpu\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}, {\"path\": sprintf(\"%vcontainers[%v].resources.limits.memory\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}]\n}", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "fails if namespace has no resource policies defined", - "remediation": "Make sure that you definy resource policies (LimitRange or ResourceQuota) which limit the usage of resources for all the namespaces", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "etcd-peer-auto-tls-disabled", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\n# Check if --auto-tls is not set to true\ndeny[msga] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tcommands := obj.spec.containers[0].command\n\tresult := invalid_flag(commands)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Peer auto tls is enabled. 
Peer clients are able to use self-signed certificates for TLS.\",\n\t\t\"alertScore\": 6,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--peer-auto-tls=true\")\n\tfixed = replace(cmd[i], \"--peer-auto-tls=true\", \"--peer-auto-tls=false\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "Do not use automatically generated self-signed certificates for TLS connections between peers.", - "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and either remove the `--peer-auto-tls` parameter or set it to `false`.\n\n \n```\n--peer-auto-tls=false\n\n```\n\n#### Impact Statement\nAll peers attempting to communicate with the etcd server will require a valid client certificate for authentication.\n\n#### Default Value\n**Note:** This recommendation is applicable only for etcd clusters. 
If you are using only one etcd server in your environment then this recommendation is not applicable.\n\n By default, `--peer-auto-tls` argument is set to `false`.", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "rule-access-kubelet-API", - "attributes": { - "m$K8sThreatMatrix": "Discovery::Access Kubelet API", - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\n# input: network policies\n# apiversion: networking.k8s.io/v1\n# fails if no network policies are defined\n\ndeny[msga] {\n\tnetworkpolicies := input\n count(networkpolicies) == 0\n\n\tmsga := {\n\t\t\"alertMessage\": \"no network policy is defined\",\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\n\t\t}\n\t}\n}", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "networkpolicies" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "fails if no network policy exists", - "remediation": "", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "image-pull-secrets", - "attributes": { - "m$K8sThreatMatrix": "Collection::Images from private registry", - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\n# input: service accounts\n# apiversion: v1 \n# returns ImagePullSecrets that more than one service account have access to\n\ndeny[msga] {\n\n image = input[i].imagePullSecrets[k] == input[j].imagePullSecrets[_]\n\tpath := sprintf(\"imagePullSecrets[%v]\", [format_int(k, 10)])\n\ti > j\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following ImagePullSecret: %v, is exposed to more than one serviceaccount\", [image]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t \"alertObject\": {\n\t\t}\n\t}\n}", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ServiceAccount" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "Checks if more than on service account have access to an ImagePullSecrets", - "remediation": "", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "rule-can-delete-k8s-events", - "attributes": { - "microsoftK8sThreatMatrix": "Defense Evasion::Delete K8S events", - "armoBuiltin": true, - "useUntilKubescapeVersion": "v1.0.133" - }, - "creationTime": "", - "rule": "package armo_builtins\nimport data.cautils as cautils\n\n# fails if user can delete events\n#RoleBinding to Role\ndeny [msga] {\n roles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n canDeleteEventsResource(rule)\n canDeleteEventsVerb(rule)\n\n rolebinding.roleRef.kind == \"Role\"\n rolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can delete events\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 6,\n \"failedPaths\": 
[path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\n# fails if user can delete events\n#RoleBinding to ClusterRole\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n canDeleteEventsResource(rule)\n canDeleteEventsVerb(rule)\n\n rolebinding.roleRef.kind == \"ClusterRole\"\n rolebinding.roleRef.name == role.metadata.name\n\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can delete events\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 6,\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\n# fails if user can delete events\n# ClusterRoleBinding to ClusterRole\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n clusterrolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n clusterrolebinding := clusterrolebindings[_]\n\n rule:= role.rules[_]\n canDeleteEventsResource(rule)\n canDeleteEventsVerb(rule)\n\n clusterrolebinding.roleRef.kind == \"ClusterRole\"\n clusterrolebinding.roleRef.name == role.metadata.name\n\n\n subject := clusterrolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can delete events\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 6,\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,clusterrolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\ncanDeleteEventsResource(rule) {\n cautils.list_contains(rule.resources,\"events\")\n}\ncanDeleteEventsResource(rule) {\n is_api_group(rule)\n cautils.list_contains(rule.resources,\"*\")\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}\n\ncanDeleteEventsVerb(rule) {\n cautils.list_contains(rule.verbs,\"delete\")\n}\n\ncanDeleteEventsVerb(rule) {\n cautils.list_contains(rule.verbs,\"deletecollection\")\n}\n\ncanDeleteEventsVerb(rule) {\n cautils.list_contains(rule.verbs,\"*\")\n}", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "configInputs": null, - "controlConfigInputs": null, - "description": "determines which users can delete events", - "remediation": "", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "rule-can-update-configmap-v1", - "attributes": { - "microsoftK8sThreatMatrix": "Lateral Movement::CoreDNS poisoning", - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "creationTime": "", - "rule": "package 
armo_builtins\n\nimport future.keywords.in\n\n# Fails if user can modify all configmaps\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"update\", \"patch\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"configmaps\", \"*\"]\n\tnot rule.resourceNames\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can modify 'coredns' configmap\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# Fails if user can modify the 'coredns' configmap (default for coredns)\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"update\", \"patch\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"configmaps\", \"*\"]\n\t\"coredns\" in rule.resourceNames\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can modify 'coredns' configmap\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for 
users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "determines which users can update/patch the 'coredns' configmap", - "remediation": "", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "external-secret-storage", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.every\n\n# Encryption config is not using a recommended provider for KMS\ndeny[msg] {\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\tconfig_file := obj.data.APIServerInfo.encryptionProviderConfigFile\n\tconfig_file_content = decode_config_file(base64.decode(config_file.content))\n\n\tresources := config_file_content.resources\n\tevery resource in resources{\n\t\tnot has_recommended_provider(resource)\n\t}\n\n\tfix_paths := [\n\t{\"path\": sprintf(\"resources[%d].resources[%d]\", [count(resources), 0]),\t\"value\": \"secrets\"},\n\t{\"path\": sprintf(\"resources[%d].providers[%d].kms\", [count(resources), 0]),\t\"value\": \"YOUR_EXTERNAL_KMS\"},\n\t]\n\n\t# Add name to the failed object so that\n\t# it fit the format of the alert object\n\tfailed_obj := json.patch(config_file_content, [{\n\t\t\"op\": \"add\",\n\t\t\"path\": \"name\",\n\t\t\"value\": \"encryption-provider-config\",\n\t}])\n\n\tmsg := {\n\t\t\"alertMessage\": \"Encryption provider config is not using a recommended provider for KMS\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": failed_obj},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\ndecode_config_file(content) := data {\n\tdata := yaml.unmarshal(content)\n} else := json.unmarshal(content)\n\nhas_recommended_provider(resource) {\n\trecommended_providers := {\"akeyless\", \"azurekmsprovider\", \"aws-encryption-provider\"}\n\tsome provider in resource.providers\n\trecommended_providers[provider.kms.name]\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": null, - "configInputs": null, - "controlConfigInputs": null, - "description": "Consider the use of an external secrets storage and management system, instead of using Kubernetes Secrets directly, if you have more complex secret management needs. Ensure the solution requires authentication to access secrets, has auditing of access to and use of secrets, and encrypts secrets. 
Some solutions also make it easier to rotate secrets.", - "remediation": "Refer to the secrets management options offered by your cloud provider or a third-party secrets management solution.", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "ensure-that-the-Container-Network-Interface-file-ownership-is-set-to-root-root", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport data.cautils as cautils\nimport future.keywords.in\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"CNIConfigFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]), \"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "configInputs": null, - "controlConfigInputs": null, - "description": "Ensure that the Container Network Interface files have ownership set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchown root:root \n\n```", + "description": "Validate service account before validating token.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--service-account-lookup=true\n\n```\n Alternatively, you can delete the `--service-account-lookup` parameter from this file so that the default takes effect.\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `--service-account-lookup` argument is set to `true`.", "ruleQuery": "", "relevantCloudProviders": null }, @@ -15808,11 +26108,10 @@ "guid": "", "name": "kubelet-rotate-kubelet-server-certificate", "attributes": { - "armoBuiltin": true, "hostSensorRule": "true" }, "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n\tkubelet_info := input[_]\n\tkubelet_info.kind == \"KubeletInfo\"\n\tkubelet_info.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\n\tnot should_skip_check(kubelet_info)\n\n\tcommand := kubelet_info.data.cmdLine\n\n\tnot is_RotateKubeletServerCertificate_enabled_via_cli(command)\n\n\texternal_obj := json.filter(kubelet_info, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"RotateKubeletServerCertificate is not set to true\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n## Inner rules\nshould_skip_check(kubelet_info) {\n\tcommand := kubelet_info.data.cmdLine\n\tcontains(command, \"--rotate-server-certificates\")\n}\n\nshould_skip_check(kubelet_info) {\n\tyamlConfigContent := yaml.unmarshal(base64.decode(kubelet_info.data.configFile.content))\n\tyamlConfigContent.serverTLSBootstrap == true\n}\n\nis_RotateKubeletServerCertificate_enabled_via_cli(command) {\n\tcontains(command, \"--feature-gates=\")\n\targs := regex.split(\" +\", command)\n\tsome i\n\tregex.match(\"RotateKubeletServerCertificate=true\", args[i])\n}\n", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n\tkubelet_info := input[_]\n\tkubelet_info.kind == \"KubeletInfo\"\n\tkubelet_info.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\n\tnot should_skip_check(kubelet_info)\n\n\tcommand := kubelet_info.data.cmdLine\n\n\tnot is_RotateKubeletServerCertificate_enabled_via_cli(command)\n\n\texternal_obj := json.filter(kubelet_info, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"RotateKubeletServerCertificate is not set to true\",\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n## Inner rules\nshould_skip_check(kubelet_info) {\n\tcommand := kubelet_info.data.cmdLine\n\tcontains(command, \"--rotate-server-certificates\")\n}\n\nshould_skip_check(kubelet_info) {\n\tyamlConfigContent := yaml.unmarshal(base64.decode(kubelet_info.data.configFile.content))\n\tyamlConfigContent.serverTLSBootstrap == true\n}\n\nis_RotateKubeletServerCertificate_enabled_via_cli(command) {\n\tcontains(command, \"--feature-gates=\")\n\targs := regex.split(` +`, command)\n\tsome i\n\tregex.match(`RotateKubeletServerCertificate=true`, args[i])\n}\n", "resourceEnumerator": "", "ruleLanguage": "Rego", "match": [ @@ -15836,7 +26135,6 @@ } ], "ruleDependencies": [], - "configInputs": null, 
"controlConfigInputs": null, "description": "Verify that the RotateKubeletServerCertificate argument is set to true.", "remediation": "Verify that the --rotate-certificates argument is not present, or is set to true. If the --rotate-certificates argument is not present, verify that if there is a Kubelet config file specified by --config, that file does not contain rotateCertificates: false.", @@ -15845,24 +26143,23 @@ }, { "guid": "", - "name": "rule-can-impersonate-users-groups-v1", + "name": "rule-access-dashboard-subject-v1", "attributes": { - "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", - "armoBuiltin": true, + "m$K8sThreatMatrix": "Lateral Movement::Access Kubernetes dashboard, Discovery::Access Kubernetes dashboard", "resourcesAggregator": "subject-role-rolebinding", "useFromKubescapeVersion": "v1.0.133" }, "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"impersonate\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"users\", \"serviceaccounts\", \"groups\", \"uids\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can impersonate users\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n", + "rule": "package armo_builtins\n\n# input: regoResponseVectorObject\n# fails if a subject that is not dashboard service account is bound to dashboard role\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(subjectVector.relatedObjects[i].kind, \"Role\")\n\tendswith(subjectVector.relatedObjects[j].kind, \"Binding\")\n\n\trole.metadata.name == \"kubernetes-dashboard\"\n\tsubjectVector.name != \"kubernetes-dashboard\"\n\n\tsubject := rolebinding.subjects[k]\n path := 
[sprintf(\"relatedObjects[%v].subjects[%v]\", [format_int(j, 10), format_int(k, 10)])]\n\tfinalpath := array.concat(path, [sprintf(\"relatedObjects[%v].roleRef.name\", [format_int(j, 10)])])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %v-%v is bound to dashboard role/clusterrole\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 9,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector\n\t\t}\n\t}\n}", "resourceEnumerator": "", "ruleLanguage": "Rego", "match": [ { "apiGroups": [ - "*" + "rbac.authorization.k8s.io" ], "apiVersions": [ - "*" + "v1" ], "resources": [ "Role", @@ -15873,22 +26170,115 @@ } ], "ruleDependencies": [], - "configInputs": null, "controlConfigInputs": null, - "description": "determines which users can impersonate users/groups", + "description": "fails if subject that is not dashboard service account is bound to dashboard role/clusterrole, or- if anyone that is not dashboard pod is associated with its service account.", + "remediation": "", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "exposed-sensitive-interfaces-v1", + "attributes": { + "microsoftK8sThreatMatrix": "Initial access::Exposed sensitive interfaces", + "useFromKubescapeVersion": "v1.0.133" + }, + "creationTime": "", + "rule": "package armo_builtins\n\nimport data.kubernetes.api.client\n\n# loadbalancer\ndeny[msga] {\n\twl := input[_]\n\tworkload_types = {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\", \"Pod\", \"CronJob\"}\n\tworkload_types[wl.kind]\n\n # see default-config-inputs.json for list values\n wl_names := data.postureControlInputs.sensitiveInterfaces\n\twl_name := wl_names[_]\n\tcontains(wl.metadata.name, wl_name)\n\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"LoadBalancer\"\n\n\tresult := wl_connectedto_service(wl, service)\n\n # externalIP := service.spec.externalIPs[_]\n\texternalIP := service.status.loadBalancer.ingress[0].ip\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": [service]}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": wlvector\n\t\t}\n\t}\n}\n\n\n# nodePort\n# get a pod connected to that service, get nodeIP (hostIP?)\n# use ip + nodeport\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"Pod\"\n\n # see default-config-inputs.json for list values\n wl_names := data.postureControlInputs.sensitiveInterfaces\n\twl_name := wl_names[_]\n\tcontains(wl.metadata.name, wl_name)\n\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"NodePort\"\n\n\tresult := wl_connectedto_service(wl, service)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": [service]}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": result,\n\t\t\"failedPaths\": 
result,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": wlvector\n\t\t}\n\t}\n}\n\n# nodePort\n# get a workload connected to that service, get nodeIP (hostIP?)\n# use ip + nodeport\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\", \"CronJob\"}\n\tspec_template_spec_patterns[wl.kind]\n\n # see default-config-inputs.json for list values\n wl_names := data.postureControlInputs.sensitiveInterfaces\n\twl_name := wl_names[_]\n\tcontains(wl.metadata.name, wl_name)\n\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"NodePort\"\n\n\tresult := wl_connectedto_service(wl, service)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": [service]}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": wlvector\n\t\t}\n\t}\n}\n\n# ====================================================================================\n\nwl_connectedto_service(wl, service) = paths{\n\tcount({x | service.spec.selector[x] == wl.metadata.labels[x]}) == count(service.spec.selector)\n\tpaths = [\"spec.selector.matchLabels\", \"spec.selector\"]\n}\n\nwl_connectedto_service(wl, service) = paths {\n\twl.spec.selector.matchLabels == service.spec.selector\n\tpaths = [\"spec.selector.matchLabels\", \"spec.selector\"]\n}", + "resourceEnumerator": "package armo_builtins\n\nimport data.kubernetes.api.client\n\ndeny[msga] {\n\twl := input[_]\n\tworkload_types = {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\", \"Pod\", \"CronJob\"}\n\tworkload_types[wl.kind]\n\n\t# see default-config-inputs.json for list values\n\twl_names := data.postureControlInputs.sensitiveInterfaces\n\twl_name := wl_names[_]\n\tcontains(wl.metadata.name, wl_name)\n\n\tsrvc := get_wl_connectedto_service(wl)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": srvc}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"wl: %v is in the cluster\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n\t}\n}\n\nget_wl_connectedto_service(wl) = s {\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\twl_connectedto_service(wl, service)\n\ts = [service]\n}\n\nget_wl_connectedto_service(wl) = s {\n\tservices := [service | service = input[_]; service.kind == \"Service\"]\n\tcount({i | services[i]; wl_connectedto_service(wl, services[i])}) == 0\n\ts = []\n}\n\nwl_connectedto_service(wl, service){\n\tcount({x | service.spec.selector[x] == wl.metadata.labels[x]}) == count(service.spec.selector)\n}", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "Service" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + 
"Job", + "CronJob" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "kubernetes.api.client" + } + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.sensitiveInterfaces", + "name": "Sensitive interfaces", + "description": "List of known software interfaces that should not generally be exposed to the Internet." + } + ], + "description": "fails if known interfaces have exposed services", "remediation": "", "ruleQuery": "armo_builtins", "relevantCloudProviders": null }, { "guid": "", - "name": "ensure-that-the-admin.conf-file-ownership-is-set-to-root-root", + "name": "etcd-peer-auto-tls-disabled", + "creationTime": "", + "rule": "package armo_builtins\n\n# Check if --auto-tls is not set to true\ndeny[msga] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tcommands := obj.spec.containers[0].command\n\tresult := invalid_flag(commands)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Peer auto tls is enabled. Peer clients are able to use self-signed certificates for TLS.\",\n\t\t\"alertScore\": 6,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--peer-auto-tls=true\")\n\tfixed = replace(cmd[i], \"--peer-auto-tls=true\", \"--peer-auto-tls=false\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Do not use automatically generated self-signed certificates for TLS connections between peers.", + "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and either remove the `--peer-auto-tls` parameter or set it to `false`.\n\n \n```\n--peer-auto-tls=false\n\n```\n\n#### Impact Statement\nAll peers attempting to communicate with the etcd server will require a valid client certificate for authentication.\n\n#### Default Value\n**Note:** This recommendation is applicable only for etcd clusters. 
If you are using only one etcd server in your environment then this recommendation is not applicable.\n\n By default, `--peer-auto-tls` argument is set to `false`.", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-that-the-admin.conf-file-permissions-are-set-to-600", "attributes": { - "armoBuiltin": true, "hostSensorRule": "true" }, "creationTime": "", - "rule": "package armo_builtins\n\nimport data.cautils as cautils\nimport future.keywords.in\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"adminConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"adminConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n", "resourceEnumerator": "", "ruleLanguage": "Rego", "match": [], @@ -15910,22 +26300,229 @@ "packageName": "cautils" } ], - "configInputs": null, "controlConfigInputs": null, - "description": "Ensure that the `admin.conf` file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchown root:root /etc/kubernetes/admin.conf\n\n```", + "description": "Ensure that the `admin.conf` file has permissions of `600`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/admin.conf\n\n```", "ruleQuery": "", "relevantCloudProviders": null }, { "guid": "", - "name": "rule-credentials-in-env-var", - "attributes": { - "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files", - "armoBuiltin": true - }, + "name": "etcd-peer-tls-enabled", "creationTime": "", - "rule": "\tpackage armo_builtins\n\t# import data.cautils as cautils\n\t# import data.kubernetes.api.client as client\n\timport data\n\n\tdeny[msga] {\n\t\tpod := input[_]\n\t\tpod.kind == \"Pod\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := pod.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), key_name)\n\t\tenv.value != \"\"\n\t\t# check that value wasn't allowed by user\n\t\tnot is_allowed_value(env.value) \n\n\t\tis_not_reference(env)\n\n\t\tpath := sprintf(\"spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Pod: %v has sensitive information in environment variables\", [pod.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [pod]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\t\tspec_template_spec_patterns[wl.kind]\n\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), key_name)\n\t\tenv.value != \"\"\n\t\t# check that value wasn't allowed by user\n\t\tnot is_allowed_value(env.value) \n\n\t\tis_not_reference(env)\n\n\t\tpath := sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\t\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"%v: %v has sensitive information in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\twl.kind == \"CronJob\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), key_name)\n\n\t\tenv.value != \"\"\n\t\t# check that value wasn't allowed by user\n\t\tnot is_allowed_value(env.value) \n\t\t\n\t\tis_not_reference(env)\n\t\t\n\t\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": 
sprintf(\"Cronjob: %v has sensitive information in environment variables\", [wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\n\nis_not_reference(env)\n{\n\tnot env.valueFrom.secretKeyRef\n\tnot env.valueFrom.configMapKeyRef\n}\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n value == allow_val\n}", + "rule": "package armo_builtins\n\n# Check if peer tls is enabled in etcd cluster\ndeny[msga] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Etcd encryption for peer connection is not enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\twanted = [\n\t\t[\"--peer-cert-file\", \"\"],\n\t\t[\"--peer-key-file\", \"\"],\n\t]\n\n\tfix_paths = [{\n\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd) + i]),\n\t\t\"value\": sprintf(\"%s=%s\", wanted[i]),\n\t} |\n\t\tnot contains(full_cmd, wanted[i][0])\n\t]\n\n\tcount(fix_paths) > 0\n\n\tresult = {\n\t\t\"failed_paths\": [\"spec.containers[0].command\"],\n\t\t\"fix_paths\": fix_paths,\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "etcd should be configured to make use of TLS encryption for peer connections.", + "remediation": "Follow the etcd service documentation and configure peer TLS encryption as appropriate for your etcd cluster.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameters.\n\n \n```\n--peer-client-file=\n--peer-key-file=\n\n```\n\n#### Impact Statement\netcd cluster peers would need to set up TLS for their communication.\n\n#### Default Value\n**Note:** This recommendation is applicable only for etcd clusters. 
If you are using only one etcd server in your environment then this recommendation is not applicable.\n\n By default, peer communication over TLS is not configured.", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "etcd-client-auth-cert", + "creationTime": "", + "rule": "package armo_builtins\n\n# Check if --client-cert-auth is set to true\ndeny[msga] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Etcd server is not requiring a valid client certificate\",\n\t\t\"alertScore\": 8,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--client-cert-auth\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--client-cert-auth=true\",\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--client-cert-auth=false\")\n\tfixed = replace(cmd[i], \"--client-cert-auth=false\", \"--client-cert-auth=true\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Enable client authentication on etcd service.", + "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--client-cert-auth=\"true\"\n\n```\n\n#### Impact Statement\nAll clients attempting to access the etcd server will require a valid client certificate.\n\n#### Default Value\nBy default, the etcd service can be queried by unauthenticated clients.", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "rolebinding-in-default-namespace", + "creationTime": "", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == 
\"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "namespace-without-service-account", + "creationTime": "", + "rule": "package armo_builtins\n\n\n# Fails if namespace does not have service accounts (not incluiding default)\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tserviceAccounts := [serviceaccount | serviceaccount= input[_]; is_good_sa(serviceaccount, namespace.metadata.name)]\n\tcount(serviceAccounts) < 1\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not have any service accounts besides 'default'\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\t\n\t\nis_good_sa(sa, namespace) { \n\tsa.kind == \"ServiceAccount\"\n\tsa.metadata.namespace == namespace\n\tsa.metadata.name != \"default\"\n}", + "resourceEnumerator": "package armo_builtins\n\n\n# Fails if namespace does not have service accounts (not incluiding default)\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not have any service accounts besides 'default'\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "*" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Namespace", + "ServiceAccount" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "fails if namespace does not have service accounts (not incluiding default)", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure_network_policy_configured_in_labels", + "creationTime": "", + "rule": "package armo_builtins\n\n\ndeny[msga] {\n\tworkload := input[_]\n\tworkload_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\", \"Pod\", \"CronJob\"}\n\tworkload_kinds[workload.kind]\n\n\tnetworkpolicies := [networkpolicy | networkpolicy = input[_]; is_network_policy(networkpolicy)]\n\tnot connected_to_any_network_policy(workload, networkpolicies)\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: no networkpolicy configured in labels\", [workload.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [workload]\n\t\t}\n\t}\n}\n\n# Helper function to identify both standard NetworkPolicy and CiliumNetworkPolicy\nis_network_policy(policy) {\n\tpolicy.kind == 
\"NetworkPolicy\"\n}\n\nis_network_policy(policy) {\n\tpolicy.kind == \"CiliumNetworkPolicy\"\n}\n\n\nconnected_to_any_network_policy(workload, networkpolicies){\n\tconnected_to_network_policy(workload, networkpolicies[_])\n}\n\n# connected_to_network_policy returns true if the workload is connected to the networkpolicy\nconnected_to_network_policy(wl, networkpolicy){\n\tworkload_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tworkload_kinds[wl.kind]\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n\t\n\t# Handle standard NetworkPolicy\n\tnetworkpolicy.kind == \"NetworkPolicy\"\n\tcount(networkpolicy.spec.podSelector) > 0\n count({x | networkpolicy.spec.podSelector.matchLabels[x] == wl.spec.template.metadata.labels[x]}) == count(networkpolicy.spec.podSelector.matchLabels)\n}\n\n# connected_to_network_policy returns true if the workload is connected to the CiliumNetworkPolicy\nconnected_to_network_policy(wl, networkpolicy){\n\tworkload_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tworkload_kinds[wl.kind]\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n\t\n\t# Handle CiliumNetworkPolicy\n\tnetworkpolicy.kind == \"CiliumNetworkPolicy\"\n\tcount(networkpolicy.spec.endpointSelector.matchLabels) > 0\n count({x | networkpolicy.spec.endpointSelector.matchLabels[x] == wl.spec.template.metadata.labels[x]}) == count(networkpolicy.spec.endpointSelector.matchLabels)\n}\n\n# connected_to_network_policy returns true if the workload is connected to the networkpolicy\nconnected_to_network_policy(wl, networkpolicy){\n\twl.kind == \"Pod\"\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n\t\n\t# Handle standard NetworkPolicy\n\tnetworkpolicy.kind == \"NetworkPolicy\"\n count(networkpolicy.spec.podSelector) > 0\n count({x | networkpolicy.spec.podSelector.matchLabels[x] == wl.metadata.labels[x]}) == count(networkpolicy.spec.podSelector.matchLabels)\n}\n\n# connected_to_network_policy returns true if the Pod is connected to the CiliumNetworkPolicy\nconnected_to_network_policy(wl, networkpolicy){\n\twl.kind == \"Pod\"\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n\t\n\t# Handle CiliumNetworkPolicy\n\tnetworkpolicy.kind == \"CiliumNetworkPolicy\"\n count(networkpolicy.spec.endpointSelector) > 0\n count({x | networkpolicy.spec.endpointSelector.matchLabels[x] == wl.metadata.labels[x]}) == count(networkpolicy.spec.endpointSelector.matchLabels)\n}\n\n# connected_to_network_policy returns true if the workload is connected to the networkpolicy\nconnected_to_network_policy(wl, networkpolicy){\n\twl.kind == \"CronJob\"\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n\t\n\t# Handle standard NetworkPolicy\n\tnetworkpolicy.kind == \"NetworkPolicy\"\n\tcount(networkpolicy.spec.podSelector) > 0\n count({x | networkpolicy.spec.podSelector.matchLabels[x] == wl.spec.jobTemplate.spec.template.metadata.labels[x]}) == count(networkpolicy.spec.podSelector.matchLabels)\n}\n\n# connected_to_network_policy returns true if the CronJob is connected to the CiliumNetworkPolicy\nconnected_to_network_policy(wl, networkpolicy){\n\twl.kind == \"CronJob\"\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n\t\n\t# Handle CiliumNetworkPolicy\n\tnetworkpolicy.kind == \"CiliumNetworkPolicy\"\n\tcount(networkpolicy.spec.endpointSelector.matchLabels) > 0\n count({x | networkpolicy.spec.endpointSelector.matchLabels[x] == wl.spec.jobTemplate.spec.template.metadata.labels[x]}) == 
count(networkpolicy.spec.endpointSelector.matchLabels)\n}\n\n# connected_to_network_policy returns true if the NetworkPolicy has no podSelector.\n# if the NetworkPolicy has no podSelector, it is applied to all workloads in the namespace of the NetworkPolicy\nconnected_to_network_policy(wl, networkpolicy){\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n\tnetworkpolicy.kind == \"NetworkPolicy\"\n count(networkpolicy.spec.podSelector) == 0\n}\n\n# connected_to_network_policy returns true if the CiliumNetworkPolicy has no endpointSelector.\n# if the CiliumNetworkPolicy has no endpointSelector, it is applied to all workloads in the namespace of the CiliumNetworkPolicy\nconnected_to_network_policy(wl, networkpolicy){\n\tis_same_namespace(networkpolicy.metadata, wl.metadata)\n\tnetworkpolicy.kind == \"CiliumNetworkPolicy\"\n count(networkpolicy.spec.endpointSelector.matchLabels) == 0\n}\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "ConfigMap" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + }, + { + "apiGroups": [ + "networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "NetworkPolicy" + ] + }, + { + "apiGroups": [ + "cilium.io" + ], + "apiVersions": [ + "v2" + ], + "resources": [ + "CiliumNetworkPolicy" + ] + } + ], + "ruleDependencies": null, + "controlConfigInputs": null, + "description": "fails if no networkpolicy configured in workload labels", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-that-the-controller-manager-use-service-account-credentials-argument-is-set-to-true", + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"--use-service-account-credentials is set to false in the controller manager\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcmd[i] == \"--use-service-account-credentials=false\"\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": 
\"--use-service-account-credentials=true\"}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--use-service-account-credentials\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--use-service-account-credentials=true\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Use individual service account credentials for each controller.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node to set the below parameter.\n\n \n```\n--use-service-account-credentials=true\n\n```\n\n#### Impact Statement\nWhatever authorizer is configured for the cluster, it must grant sufficient permissions to the service accounts to perform their intended tasks. When using the RBAC authorizer, those roles are created and bound to the appropriate service accounts in the `kube-system` namespace automatically with default roles and rolebindings that are auto-reconciled on startup.\n\n If using other authorization methods (ABAC, Webhook, etc), the cluster deployer is responsible for granting appropriate permissions to the service accounts (the required permissions can be seen by inspecting the `controller-roles.yaml` and `controller-role-bindings.yaml` files for the RBAC roles.\n\n#### Default Value\nBy default, `--use-service-account-credentials` is set to false.", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "linux-hardening", + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Fails if pod does not define linux security hardening \ndeny[msga] {\n\tobj := input[_]\n\tfix_paths := is_unsafe_obj(obj)\n\tcount(fix_paths) > 0\n\n\t# final_fix_pathes := array.concat(fix_paths) # -> produce only one failed result\n\tfinal_fix_pathes := fix_paths[_] # -> produce failed result for each container\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%s: %s does not define any linux security hardening\", [obj.kind, obj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": final_fix_pathes,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_unsafe_obj(obj) := fix_paths {\n\tobj.kind == \"Pod\"\n\tfix_paths := are_unsafe_specs(obj, [\"spec\"], [\"metadata\", \"annotations\"])\n} else := fix_paths {\n\tobj.kind == \"CronJob\"\n\tfix_paths := are_unsafe_specs(obj, [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\"], [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"metadata\", \"annotations\"])\n} else := fix_paths {\n\tobj.kind in [\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"]\n\tfix_paths := are_unsafe_specs(obj, [\"spec\", 
\"template\", \"spec\"], [\"spec\", \"template\", \"metadata\", \"annotations\"])\n}\n\nare_unsafe_specs(obj, specs_path, anotation_path) := paths {\n\t# spec\n\tspecs := object.get(obj, specs_path, null)\n\tspecs != null\n\tare_seccomp_and_selinux_disabled(specs)\n\n\t# annotation\n\tannotations := object.get(obj, anotation_path, [])\n\tapp_armor_annotations := [annotations[i] | annotation = i; startswith(i, \"container.apparmor.security.beta.kubernetes.io\")]\n\tcount(app_armor_annotations) == 0\n\n\t# container\n\tcontainers_path := array.concat(specs_path, [\"containers\"])\n\tcontainers := object.get(obj, containers_path, [])\n\n\t# Psuedo code explanation:\n\t# for i, container in containers\n\t# \t\tif is_unsafe_container:\n\t# \t\t\tfix_paths += [(containers_path[i] + field) for j, field in fix_fields]\n\t# \n\t# At the end we get [[, , ...], ...]\n\tcontainers_fix_path := concat(\".\", containers_path)\n\tfix_fields := [\"seccompProfile\", \"seLinuxOptions\", \"capabilities.drop[0]\"]\n\tpaths := [[{\n\t\t\"path\": sprintf(\"%s[%d].securityContext.%s\", [containers_fix_path, i, field]),\n\t\t\"value\": \"YOUR_VALUE\",\n\t} |\n\t\tfield := fix_fields[j]\n\t] |\n\t\tcontainer = containers[i]\n\t\tis_unsafe_container(container)\n\t]\n\n\tcount(paths) > 0\n}\n\nare_seccomp_and_selinux_disabled(obj) {\n\tnot obj.securityContext.seccompProfile\n\tnot obj.securityContext.seLinuxOptions\n}\n\nis_unsafe_container(container) {\n\tare_seccomp_and_selinux_disabled(container)\n\tnot container.securityContext.capabilities.drop\n}\n", "resourceEnumerator": "", "ruleLanguage": "Rego", "match": [ @@ -15968,36 +26565,53 @@ } ], "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.sensitiveKeyNames", - "settings.postureControlInputs.sensitiveValuesAllowed" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.sensitiveKeyNames", - "name": "Keys", - "description": "Secrets are stored as a key/value pair. The names of the keys/values may change from one company to the other. 
Here you can find some examples of popular key phrases that Kubescape is searching for" - }, - { - "path": "settings.postureControlInputs.sensitiveValuesAllowed", - "name": "AllowedValues", - "description": "Allowed values" - } - ], - "description": "fails if Pods have sensitive information in configuration", - "remediation": "", + "controlConfigInputs": null, + "description": "fails if container does not define any linux security hardening", + "remediation": "Make sure you define at least one linux security hardening property out of Seccomp, SELinux or Capabilities.", "ruleQuery": "armo_builtins", "relevantCloudProviders": null }, { "guid": "", - "name": "pod-security-admission-applied", + "name": "ensure-that-the-Container-Network-Interface-file-ownership-is-set-to-root-root", "attributes": { - "armoBuiltin": true + "hostSensorRule": "true" }, "creationTime": "", - "rule": "package armo_builtins\nimport future.keywords.every\n\n# Fails if no 3rd party security admission exists and namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot admission_policy_enabled(namespace)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"YOUR_VALUE\"}\n \n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\nadmission_policy_enabled(namespace){\n\tsome label, _ in namespace.metadata.labels \n startswith(label, \"pod-security.kubernetes.io/enforce\")\n}\n\nhas_external_policy_control(inp){\n admissionwebhook := inp[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", - "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_CNIInfo(obj)\n\n\tfile_obj_path := [\"data\", \"CNIConfigFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]), \"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, 
file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_CNIInfo(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"CNIInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "CNIInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "controlConfigInputs": null, + "description": "Ensure that the Container Network Interface files have ownership set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root \n\n```", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "pod-security-admission-restricted-applied-1", + "creationTime": "", + "rule": "package armo_builtins\nimport future.keywords.every\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"restricted\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", + "resourceEnumerator": "package armo_builtins\nimport future.keywords.every\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels \ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": 
sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", "ruleLanguage": "Rego", "match": [ { @@ -16019,28 +26633,908 @@ "*" ], "resources": [ - "ValidatingWebhookConfiguration", "MutatingWebhookConfiguration" ] } ], "ruleDependencies": [], - "configInputs": null, "controlConfigInputs": null, - "description": "Checks that every namespace enabled pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks)", + "description": "Checks that every namespace enabled restricted pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.\n\n#### Impact Statement\nWhere policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. 
Care is required when implementing admission control policies to ensure that this does not occur.\n\n#### Default Value\nBy default, Pod Security Admission is enabled but no policies are in place.", "ruleQuery": "armo_builtins", "relevantCloudProviders": null }, { "guid": "", - "name": "ensure-that-the-scheduler-bind-address-argument-is-set-to-127.0.0.1", + "name": "rule-credentials-in-env-var", "attributes": { - "armoBuiltin": true + "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files" }, "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_scheduler(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsg := {\n\t\t\"alertMessage\": \"the kube scheduler is not bound to a localhost interface only\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_scheduler(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-scheduler\")\n}\n\nget_flag_value(cmd) = value {\n\tre := \" ?--bind-address=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, 1)\n\tcount(matchs) == 1\n\tvalue = matchs[0][1]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tval = get_flag_value(cmd[i])\n\tval != \"127.0.0.1\"\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--bind-address=127.0.0.1\"}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--bind-address\")\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--bind-address=127.0.0.1\"}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_scheduler(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_scheduler(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-scheduler\")\n}\n", + "rule": "\tpackage armo_builtins\n\n\tdeny[msga] {\n\t\tpod := input[_]\n\t\tpod.kind == \"Pod\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := pod.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value != \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Pod: %v has sensitive information in environment variables\", [pod.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": 
paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [pod]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\t\tspec_template_spec_patterns[wl.kind]\n\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value != \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\tsprintf(\"spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"%v: %v has sensitive information in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\twl.kind == \"CronJob\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), lower(key_name))\n\t\tenv.value != \"\"\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Cronjob: %v has sensitive information in environment variables\", [wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n# check sensitive values\ndeny[msga] {\n\t\tpod := input[_]\n\t\tpod.kind == \"Pod\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := pod.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Pod: %v has sensitive information in environment variables\", [pod.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [pod]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := 
input[_]\n\t\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\t\tspec_template_spec_patterns[wl.kind]\n\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := wl.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\tsprintf(\"spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"%v: %v has sensitive information in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\twl.kind == \"CronJob\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_values := data.postureControlInputs.sensitiveValues\n \tvalue := sensitive_values[_]\n\t\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.value), lower(value))\n\t\t# check that value or key weren't allowed by user\n \tnot is_allowed_value(env.value)\n \tnot is_allowed_key_name(env.name)\n\n\t\tis_not_reference(env)\n\n\t\tpaths := [sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [i, j]),\n\t\t\t\t sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].value\", [i, j])]\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Cronjob: %v has sensitive information in environment variables\", [wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"deletePaths\": paths,\n\t\t\t\"failedPaths\": paths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\nis_not_reference(env)\n{\n\tnot env.valueFrom.secretKeyRef\n\tnot env.valueFrom.configMapKeyRef\n}\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n regex.match(allow_val , value)\n}\n\nis_allowed_key_name(key_name) {\n allow_key := data.postureControlInputs.sensitiveKeyNamesAllowed[_]\n contains(lower(key_name), lower(allow_key))\n}", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.sensitiveValues", + "name": "Sensitive Values", + "description": "Strings that identify a value that Kubescape believes should be stored in a Secret, and not in a ConfigMap or an environment variable." + }, + { + "path": "settings.postureControlInputs.sensitiveValuesAllowed", + "name": "Allowed Values", + "description": "Reduce false positives with known values." 
+ }, + { + "path": "settings.postureControlInputs.sensitiveKeyNames", + "name": "Sensitive Keys", + "description": "Key names that identify a potential value that should be stored in a Secret, and not in a ConfigMap or an environment variable." + }, + { + "path": "settings.postureControlInputs.sensitiveKeyNamesAllowed", + "name": "Allowed Keys", + "description": "Reduce false positives with known key names." + } + ], + "description": "fails if Pods have sensitive information in configuration", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "rule-deny-cronjobs", + "attributes": { + "m$K8sThreatMatrix": "Persistence::Kubernetes Cronjob" + }, + "creationTime": "", + "rule": "package armo_builtins\n\n# alert cronjobs\n\n# handles cronjob\ndeny[msga] {\n\n\twl := input[_]\n\twl.kind == \"CronJob\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined: %v\", [wl.metadata.name]),\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "rego", + "match": [ + { + "apiGroups": [ + "*" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "CronJob" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "determines if it's cronjob", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "automount-default-service-account", + "creationTime": "", + "rule": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n\tservice_account.metadata.name == \"default\"\n result := is_auto_mount(service_account)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"deletePaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n\n # -- ---- For SAs -- ---- \nis_auto_mount(service_account) = [failed_path, fix_path] {\n\tservice_account.automountServiceAccountToken == true\n\tfailed_path = \"automountServiceAccountToken\"\n\tfix_path = \"\"\n}\n\nis_auto_mount(service_account)= [failed_path, fix_path] {\n\tnot service_account.automountServiceAccountToken == false\n\tnot service_account.automountServiceAccountToken == true\n\tfix_path = {\"path\": \"automountServiceAccountToken\", \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n", + "resourceEnumerator": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n\tservice_account.metadata.name == \"default\"\n\n msga := {\n\t 
\"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ServiceAccount" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "fails if default service account mounts service account token by default", + "remediation": "Make sure that the automountServiceAccountToken field on the default service account spec is set to false", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "drop-capability-netraw", + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Fails if pod does not drop the capability NET_RAW \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"Pod\"\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %s does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": failedPaths,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if workload does not drop the capability NET_RAW\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": failedPaths,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if CronJob does not drop the capability NET_RAW\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": failedPaths,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Checks if workload does not drop the capability NET_RAW\ncontainer_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search) = [failed_path, fix_path] {\n\tpath_to_drop := array.concat(path_to_search, [\"drop\"])\n\tdrop_list := object.get(container, path_to_drop, [])\n\tnot \"NET_RAW\" in drop_list\n\tnot \"ALL\" in drop_list\n\tnot \"all\" in drop_list\n\tfixpath := sprintf(\"%s[%d].%s[%d]\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_drop), count(drop_list)])\n\tfix_path := [{\"path\": fixpath, \"value\": \"NET_RAW\"}]\n\tfailed_path := \"\"\n}\n\n# Checks if workload drops all capabilities but adds NET_RAW capability\ncontainer_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search) = [failed_path, fix_path] {\n\tpath_to_drop := array.concat(path_to_search, [\"drop\"])\n\tdrop_list := object.get(container, path_to_drop, [])\n\tall_in_list(drop_list)\n\tpath_to_add := array.concat(path_to_search, [\"add\"])\n\tadd_list := object.get(container, path_to_add, [])\n\t\"NET_RAW\" in add_list\n\tfailed_path := [sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_add)])]\n\tfix_path := \"\"\n}\n\nall_in_list(list) {\n\t\"all\" in list\n}\n\nall_in_list(list) {\n\t\"ALL\" in list\n}\n\n\nget_failed_path(paths) = paths[0] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = paths[1] {\n\tpaths[1] != \"\"\n} else = []\n\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "fails if container does not drop the capability NET_RAW", + "remediation": "Define the drop list in security context capabilities to include NET_RAW.", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-that-the-Kubernetes-PKI-certificate-file-permissions-are-set-to-600-or-more-restrictive", + "attributes": { + "hostSensorRule": "true" + }, + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"PKIFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\tendswith(file.path, \".crt\")\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]), \"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. 
actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "controlConfigInputs": null, + "description": "Ensure that Kubernetes PKI certificate files have permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod -R 600 /etc/kubernetes/pki/*.crt\n\n```", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "resources-cpu-limit-and-request", + "creationTime": "", + "rule": "package armo_builtins\n\n# ==================================== no CPU requests =============================================\n# Fails if pod does not have container with CPU request\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tnot container.resources.requests.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.containers[%v].resources.requests.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have CPU-limit or request\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload does not have container with CPU requests\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n not container.resources.requests.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.requests.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob does not have container with CPU requests\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n not container.resources.requests.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.requests.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": 
[],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# ==================================== no CPU limits =============================================\n# Fails if pod does not have container with CPU-limits\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tnot container.resources.limits.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have CPU-limit or request\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload does not have container with CPU-limits\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n not container.resources.limits.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob does not have container with CPU-limits\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n not container.resources.limits.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n# ============================================= cpu limits exceed min/max =============================================\n\n# Fails if pod exceeds CPU-limit or request\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tpath := \"resources.limits.cpu\" \n\tcpu_limit := container.resources.limits.cpu\n\tis_limit_exceeded_cpu(cpu_limit)\n\n\tfailed_paths := sprintf(\"spec.containers[%v].%v\", [format_int(i, 10), path])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v exceeds CPU-limit or request\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [failed_paths],\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload exceeds CPU-limit or request\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container 
:= wl.spec.template.spec.containers[i]\n\n\tpath := \"resources.limits.cpu\" \n\tcpu_limit := container.resources.limits.cpu\n\tis_limit_exceeded_cpu(cpu_limit)\n\n\tfailed_paths := sprintf(\"spec.template.spec.containers[%v].%v\", [format_int(i, 10), path])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v exceeds CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [failed_paths],\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob doas exceeds CPU-limit or request\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n \tpath := \"resources.limits.cpu\" \n\tcpu_limit := container.resources.limits.cpu\n\tis_limit_exceeded_cpu(cpu_limit)\n\n\tfailed_paths := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].%v\", [format_int(i, 10), path])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v exceeds CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [failed_paths],\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# ============================================= cpu requests exceed min/max =============================================\n\n# Fails if pod exceeds CPU-limit or request\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tpath := \"resources.requests.cpu\" \n\tcpu_req := container.resources.requests.cpu\n\tis_req_exceeded_cpu(cpu_req)\n\n\tfailed_paths := sprintf(\"spec.containers[%v].%v\", [format_int(i, 10), path])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v exceeds CPU-limit or request\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [failed_paths],\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload exceeds CPU-limit or request\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\tpath := \"resources.requests.cpu\" \n\tcpu_req := container.resources.requests.cpu\n\tis_req_exceeded_cpu(cpu_req)\n\n\tfailed_paths := sprintf(\"spec.template.spec.containers[%v].%v\", [format_int(i, 10), path])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v exceeds CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [failed_paths],\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob doas exceeds CPU-limit or request\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\tpath := \"resources.requests.cpu\" \n\tcpu_req := container.resources.requests.cpu\n\tis_req_exceeded_cpu(cpu_req)\n\n\tfailed_paths := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].%v\", [format_int(i, 
10), path])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v exceeds CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [failed_paths],\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n#################################################################################################################\n\n\nis_min_max_exceeded_cpu(container) = \"resources.limits.cpu\" {\n\tcpu_limit := container.resources.limits.cpu\n\tis_limit_exceeded_cpu(cpu_limit)\n} else = \"resources.requests.cpu\" {\n\tcpu_req := container.resources.requests.cpu\n\tis_req_exceeded_cpu(cpu_req)\n} else = \"\"\n\n\nis_limit_exceeded_cpu(cpu_limit) {\n\tis_min_limit_exceeded_cpu(cpu_limit)\n}\n\nis_limit_exceeded_cpu(cpu_limit) {\n\tis_max_limit_exceeded_cpu(cpu_limit)\n}\n\nis_req_exceeded_cpu(cpu_req) {\n\tis_max_request_exceeded_cpu(cpu_req)\n}\n\nis_req_exceeded_cpu(cpu_req) {\n\tis_min_request_exceeded_cpu(cpu_req)\n}\n\nis_max_limit_exceeded_cpu(cpu_limit) {\n\tcpu_limit_max := data.postureControlInputs.cpu_limit_max[_]\n\tcompare_max(cpu_limit_max, cpu_limit)\n}\n\nis_min_limit_exceeded_cpu(cpu_limit) {\n\tcpu_limit_min := data.postureControlInputs.cpu_limit_min[_]\n\tcompare_min(cpu_limit_min, cpu_limit)\n}\n\nis_max_request_exceeded_cpu(cpu_req) {\n\tcpu_req_max := data.postureControlInputs.cpu_request_max[_]\n\tcompare_max(cpu_req_max, cpu_req)\n}\n\nis_min_request_exceeded_cpu(cpu_req) {\n\tcpu_req_min := data.postureControlInputs.cpu_request_min[_]\n\tcompare_min(cpu_req_min, cpu_req)\n}\n\n##############\n# helpers\n\n# Compare according to unit - max\ncompare_max(max, given) {\n\tendswith(max, \"Mi\")\n\tendswith(given, \"Mi\")\n\tsplit_max := split(max, \"Mi\")[0]\n\tsplit_given := split(given, \"Mi\")[0]\n\tto_number(split_given) > to_number(split_max)\n}\n\ncompare_max(max, given) {\n\tendswith(max, \"M\")\n\tendswith(given, \"M\")\n\tsplit_max := split(max, \"M\")[0]\n\tsplit_given := split(given, \"M\")[0]\n\tto_number(split_given) > to_number(split_max)\n}\n\ncompare_max(max, given) {\n\tendswith(max, \"m\")\n\tendswith(given, \"m\")\n\tsplit_max := split(max, \"m\")[0]\n\tsplit_given := split(given, \"m\")[0]\n\tto_number(split_given) > to_number(split_max)\n}\n\ncompare_max(max, given) {\n\tnot is_special_measure(max)\n\tnot is_special_measure(given)\n\tto_number(given) > to_number(max)\n}\n\n\n\n################\n# Compare according to unit - min\ncompare_min(min, given) {\n\tendswith(min, \"Mi\")\n\tendswith(given, \"Mi\")\n\tsplit_min := split(min, \"Mi\")[0]\n\tsplit_given := split(given, \"Mi\")[0]\n\tto_number(split_given) < to_number(split_min)\n}\n\ncompare_min(min, given) {\n\tendswith(min, \"M\")\n\tendswith(given, \"M\")\n\tsplit_min := split(min, \"M\")[0]\n\tsplit_given := split(given, \"M\")[0]\n\tto_number(split_given) < to_number(split_min)\n}\n\ncompare_min(min, given) {\n\tendswith(min, \"m\")\n\tendswith(given, \"m\")\n\tsplit_min := split(min, \"m\")[0]\n\tsplit_given := split(given, \"m\")[0]\n\tto_number(split_given) < to_number(split_min)\n\n}\n\ncompare_min(min, given) {\n\tnot is_special_measure(min)\n\tnot is_special_measure(given)\n\tto_number(given) < to_number(min)\n\n}\n\n\n# Check that is same unit\nis_special_measure(unit) {\n\tendswith(unit, \"m\")\n}\n\nis_special_measure(unit) {\n\tendswith(unit, \"M\")\n}\n\nis_special_measure(unit) {\n\tendswith(unit, 
\"Mi\")\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.cpu_request_max", + "name": "cpu_request_max", + "description": "Ensure a CPU resource request is set and is under this defined maximum value." + }, + { + "path": "settings.postureControlInputs.cpu_request_min", + "name": "cpu_request_min", + "description": "Ensure a CPU resource request is set and is above this defined minimum value." + }, + { + "path": "settings.postureControlInputs.cpu_limit_max", + "name": "cpu_limit_max", + "description": "Ensure a CPU resource limit is set and is under this defined maximum value." + }, + { + "path": "settings.postureControlInputs.cpu_limit_min", + "name": "cpu_limit_min", + "description": "Ensure a CPU resource limit is set and is above this defined minimum value." + } + ], + "description": "CPU limits and requests are not set.", + "remediation": "Ensure CPU limits and requests are set.", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "sudo-in-container-entrypoint", + "creationTime": "", + "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n result := is_sudo_entrypoint(container, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v have sudo in entrypoint\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n result := is_sudo_entrypoint(container, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v have sudo in entrypoint\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_sudo_entrypoint(container, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in cronjob: %v have sudo in entrypoint\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_sudo_entrypoint(container, start_of_path, i) = path {\n\tpath = [sprintf(\"%vcontainers[%v].command[%v]\", [start_of_path, 
format_int(i, 10), format_int(k, 10)]) | command = container.command[k]; contains(command, \"sudo\")]\n\tcount(path) > 0\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "encrypt-traffic-to-https-load-balancers-with-tls-certificates", + "attributes": { + "hostSensorRule": "false", + "imageScanRelated": false + }, + "creationTime": "", + "rule": "package armo_builtins\n\n# fails in case of 'Services' of type 'LoadBalancer' are not found.\ndeny[msga] {\n\tsvc := input[_]\n\tsvc.kind == \"Service\"\n\tsvc.spec.type != \"LoadBalancer\"\n\n\tmsga := {\n\t\t\"alertMessage\": \"No LoadBalancer service found.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [svc]\n\t\t}\n\t}\n}\n\n# fails in case 'Service' object has not 'service.beta.kubernetes.io/azure-load-balancer-internal' annotation.\ndeny[msga] {\n\tsvc := input[_]\n\tsvc.kind == \"Service\"\n\tsvc.spec.type == \"LoadBalancer\"\n\tnot svc.metadata.annotations[\"service.beta.kubernetes.io/azure-load-balancer-internal\"]\n\tpath := \"metadata.annotations[service.beta.kubernetes.io/azure-load-balancer-internal]\"\n\n\tmsga := {\n \t\"alertMessage\": \"Service object LoadBalancer has not 'service.beta.kubernetes.io/azure-load-balancer-internal' annotation.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[{\"path\": path, \"value\": \"true\"}],\n \t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [svc]\n }\n }\n}\n\n# fails in case 'Service' object has annotation 'service.beta.kubernetes.io/azure-load-balancer-internal' != 'true'.\ndeny[msga] {\n\tsvc := input[_]\n\tsvc.kind == \"Service\"\n\tsvc.spec.type == \"LoadBalancer\"\n\tsvc.metadata.annotations[\"service.beta.kubernetes.io/azure-load-balancer-internal\"] != \"true\"\n\tpath := \"metadata.annotations[service.beta.kubernetes.io/azure-load-balancer-internal]\"\n\n\tmsga := {\n \t\"alertMessage\": \"Service object LoadBalancer has annotation 'service.beta.kubernetes.io/azure-load-balancer-internal' != 'true'.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[{\"path\": path, \"value\": \"true\"}],\n \t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [svc]\n }\n }\n}\n\n# fails in case 'Ingress' object has spec.tls value not set.\ndeny[msga] {\n\tsvc := input[_]\n\tsvc.kind == \"Service\"\n\tsvc.spec.type == \"LoadBalancer\"\n\tsvc.metadata.annotations[\"service.beta.kubernetes.io/azure-load-balancer-internal\"] == \"true\"\n\n\tingress := input[_]\n\tingress.kind == \"Ingress\"\n\tnot isTLSSet(ingress.spec)\n\n\tmsga := {\n \t\"alertMessage\": \"Ingress object has 'spec.tls' value not set.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n\t\t\"reviewPaths\": [\"spec.tls\"],\n \t\"failedPaths\": [\"spec.tls\"],\n \t\"fixPaths\":[],\n \t\"alertObject\": 
{\n\t\t\t\"k8sApiObjects\": [ingress]\n }\n }\n}\n\n# fails in case 'Ingress' object has annotation 'kubernetes.io/ingress.class' != 'azure/application-gateway'.\ndeny[msga] {\n\tsvc := input[_]\n\tsvc.kind == \"Service\"\n\tsvc.spec.type == \"LoadBalancer\"\n\tsvc.metadata.annotations[\"service.beta.kubernetes.io/azure-load-balancer-internal\"] == \"true\"\n\n\tingress := input[_]\n\tingress.kind == \"Ingress\"\n\tisTLSSet(ingress.spec)\n\tingress.metadata.annotations[\"kubernetes.io/ingress.class\"] != \"azure/application-gateway\"\n\n\tpath := \"metadata.annotations[kubernetes.io/ingress.class]\"\n\n\tmsga := {\n \t\"alertMessage\": \"Ingress object has annotation 'kubernetes.io/ingress.class' != 'azure/application-gateway'.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[{\"path\": path, \"value\": \"azure/application-gateway\"}],\n \"fixCommand\": \"\",\n \t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [ingress]\n }\n }\n}\n\nisTLSSet(spec) {\n\tcount(spec.tls) > 0\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Service" + ] + }, + { + "apiGroups": [ + "networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Ingress" + ] + } + ], + "ruleDependencies": null, + "controlConfigInputs": null, + "description": "Encrypt traffic to HTTPS load balancers using TLS certificates.", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "kubelet-ip-tables", + "attributes": { + "hostSensorRule": "true" + }, + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.7 https://workbench.cisecurity.org/sections/1126668/recommendations/1838651\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--make-iptables-util-chains\")\n\tnot contains(command, \"--make-iptables-util-chains=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --make-iptables-util-chains is not set to true.\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--make-iptables-util-chains\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.makeIPTablesUtilChains == true\n\n\tmsga := {\n\t\t\"alertMessage\": \"Property makeIPTablesUtilChains is not set to true\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": [\"makeIPTablesUtilChains\"],\n\t\t\"failedPaths\": [\"makeIPTablesUtilChains\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, 
\"--make-iptables-util-chains\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Ensures that the --make-iptables-util-chains argument is set to true.", + "remediation": "Set --make-iptables-util-chains to true or if using a config file set the makeIPTablesUtilChains as true", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "resources-memory-requests", + "creationTime": "", + "rule": "package armo_builtins\n\n# ================================== no memory requests ==================================\n# Fails if pod does not have container with memory requests\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tnot container.resources.requests.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.containers[%v].resources.requests.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have memory-limit or request\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n# Fails if workload does not have container with memory requests\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tnot container.resources.requests.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.requests.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if cronjob does not have container with memory requests\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tnot container.resources.requests.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.requests.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": 
fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "memory requests are not set.", + "remediation": "Ensure memory requests are set.", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "kubelet-set-pod-limit", + "attributes": { + "hostSensorRule": "true" + }, + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.13 https://workbench.cisecurity.org/sections/2633393/recommendations/4262020\n\ndeny[msga] {\n\tkubelet_info := input[_]\n\tkubelet_info.kind == \"KubeletInfo\"\n\tkubelet_info.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tcommand := kubelet_info.data.cmdLine\n\n\tnot contains(command, \"--pod-max-pids\")\n\n\tdecodedConfigContent := base64.decode(kubelet_info.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.podPidsLimit\n\n\texternal_obj := json.filter(kubelet_info, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Neither argument --pod-max-pids nor podPidsLimit is set.\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Ensure that the --pod-max-pids argument is set.", + "remediation": "Set the --pod-max-pids argument.", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "container-hostPort", + "creationTime": "", + "rule": "package armo_builtins\n\n\n# Fails if pod has container with hostPort\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n\tpath := is_host_port(container, i, start_of_path)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v has Host-port\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with hostPort\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n path := is_host_port(container, i, start_of_path)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has container with hostPort\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n path := is_host_port(container, i, start_of_path)\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_host_port(container, i, start_of_path) = path {\n\tpath = [sprintf(\"%vcontainers[%v].ports[%v].hostPort\", [start_of_path, format_int(i, 10), format_int(j, 10)]) | port = container.ports[j]; port.hostPort]\n\tcount(path) > 0\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "fails if container has hostPort", + "remediation": "Make sure you do not configure hostPort for the container, if necessary use NodePort / ClusterIP", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "rule-list-all-cluster-admins-v1", + "attributes": { + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133", + "m$K8sThreatMatrix": "Privilege Escalation::Cluster-admin binding" + }, + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin permissions\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s have high privileges, such as cluster-admin\", [subjectVector.kind, 
subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "determines which users have cluster admin permissions", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "rule-can-impersonate-users-groups-v1", + "attributes": { + "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"impersonate\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"users\", \"serviceaccounts\", \"groups\", \"uids\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can impersonate users\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == 
subject.apiGroup\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "determines which users can impersonate users/groups", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "host-ipc-privileges", + "creationTime": "", + "rule": "package armo_builtins\n\n\n# Fails if pod has hostIPC enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tis_host_ipc(pod.spec)\n\tpath := \"spec.hostIPC\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has hostIPC enabled\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has hostIPC enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tis_host_ipc(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostIPC\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod with hostIPC enabled\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has hostIPC enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_ipc(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostIPC\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod with hostIPC enabled\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Check that hostIPC is set to false. Default is false. Only in pod spec\n\n\nis_host_ipc(podspec){\n podspec.hostIPC == true\n}", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Containers should be as isolated as possible from the host machine. 
The hostIPC field in Kubernetes may excessively expose the host to potentially malicious actions.", + "remediation": "Make sure that the field hostIPC in the pod spec is not set to true (set to false or not present)", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-that-the-Kubernetes-PKI-key-file-permissions-are-set-to-600", + "attributes": { + "hostSensorRule": "true" + }, + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"PKIFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\tendswith(file.path, \".key\")\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]), \"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "controlConfigInputs": null, + "description": "Ensure that Kubernetes PKI key files have permissions of `600`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod -R 600 /etc/kubernetes/pki/*.key\n\n```", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "Symlink-Exchange-Can-Allow-Host-Filesystem-Access", + "creationTime": "", + "rule": "package armo_builtins\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n final_path := is_sub_path_container(container, i, start_of_path)\n\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. 
You have a Node with a vulnerable version and the following container : %v in pod : %v with subPath/subPathExpr\", [container.name, pod.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [pod]},\n\t\t\t\"deletePaths\": final_path,\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n final_path := is_sub_path_container(container, i, start_of_path)\n \n\tmsga := {\n\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. You have a Node with a vulnerable version and the following container : %v in %v : %v with subPath/subPathExpr\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"deletePaths\": final_path,\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n final_path := is_sub_path_container(container, i, start_of_path)\n \n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. You have a Node with a vulnerable version and the following container : %v in %v : %v with subPath/subPathExpr\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"deletePaths\": final_path,\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\n\nis_sub_path_container(container, i, start_of_path) = path {\n\tpath = [sprintf(\"%vcontainers[%v].volumeMounts[%v].subPath\" ,[start_of_path, format_int(i, 10), format_int(j, 10)]) | volume_mount = container.volumeMounts[j]; volume_mount.subPath]\n\tcount(path) > 0\n}\n\nis_vulnerable_version(version) {\n version <= \"v1.19.14\"\n}\n\nis_vulnerable_version(version){\n version >= \"v1.22.0\"\n version <= \"v1.22.1\"\n}\n\n\nis_vulnerable_version(version){\n version >= \"v1.21.0\"\n version <= \"v1.21.4\"\n}\n\n\nis_vulnerable_version(version){\n version >= \"v1.20.0\"\n version <= \"v1.20.9\"\n}\n\nis_vulnerable_version(version){\n\tversion == \"v1.20.10\"\n}\n\n\n", + "resourceEnumerator": "package armo_builtins\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n pod := input[_]\n pod.kind == \"Pod\"\n\n\tmsga := {\n\t\t\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [pod]},\n\t\t\t\"failedPaths\": [],\n\t}\n}\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n \n\tmsga := {\n\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": 
[],\n\t}\n}\n\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n wl := input[_]\n\twl.kind == \"CronJob\"\n \n\tmsga := {\n\t\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": [],\n\t}\n}\n\n\nisVulnerableVersion(version) {\n version <= \"v1.19.14\"\n}\n\nisVulnerableVersion(version){\n version >= \"v1.22.0\"\n version <= \"v1.22.1\"\n}\n\n\nisVulnerableVersion(version){\n version >= \"v1.21.0\"\n version <= \"v1.21.4\"\n}\n\n\nisVulnerableVersion(version){\n version >= \"v1.20.0\"\n version <= \"v1.20.9\"\n}\n\nisVulnerableVersion(version){\n\tversion == \"v1.20.10\"\n}", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "Node" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "A user may be able to create a container with subPath volume mounts to access files & directories outside of the volume, including on the host filesystem. This was affected at the following versions: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. ", + "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, and remove any existing Pods making use of the feature.", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "k8s-audit-logs-enabled-cloud", + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.every\nimport future.keywords.in\n\n# =============================== GKE ===============================\n# Check if audit logs is enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"gke\"\n\tconfig := cluster_config.data\n\n\t# If enableComponents is empty, it will disable logging\n\t# https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters#loggingcomponentconfig\n\tis_logging_disabled(config)\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": cluster_config,\n\t\t},\n\t}\n}\n\nis_logging_disabled(cluster_config) {\n\tnot cluster_config.logging_config.component_config.enable_components\n}\n\nis_logging_disabled(cluster_config) {\n\tcluster_config.logging_config.component_config.enable_components\n\tcount(cluster_config.logging_config.component_config.enable_components) == 0\n}\n\n# =============================== EKS ===============================\n# Check if audit logs is enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"eks\"\n\tconfig := cluster_config.data\n\n\t# 
logSetup is an object representing the enabled or disabled Kubernetes control plane logs for your cluster.\n\t# types - available cluster control plane log types\n\t# https://docs.aws.amazon.com/eks/latest/APIReference/API_LogSetup.html\n\tlogging_types := {\"api\", \"audit\", \"authenticator\", \"controllerManager\", \"scheduler\"}\n\tlogSetups = config.Cluster.Logging.ClusterLogging\n\tnot all_auditlogs_enabled(logSetups, logging_types)\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixCommand\": \"aws eks update-cluster-config --region '${REGION_CODE}' --name '${CLUSTER_NAME}' --logging '{'clusterLogging':[{'types':['api','audit','authenticator','controllerManager','scheduler'],'enabled':true}]}'\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": cluster_config,\n\t\t},\n\t}\n}\n\nall_auditlogs_enabled(logSetups, types) {\n\tevery type in types {\n\t\tauditlogs_enabled(logSetups, type)\n\t}\n}\n\nauditlogs_enabled(logSetups, type) {\n\tlogSetup := logSetups[_]\n\tlogSetup.Enabled == true\n\ttype in logSetup.Types\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "container.googleapis.com", + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": [ + "EKS", + "GKE" + ] + }, + { + "guid": "", + "name": "ingress-no-tls", + "creationTime": "", + "rule": "package armo_builtins\n\n# Checks if Ingress is connected to a service and a workload to expose something\ndeny[msga] {\n\tingress := input[_]\n\tingress.kind == \"Ingress\"\n\n\t# Check if ingress has TLS enabled\n\tnot ingress.spec.tls\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Ingress '%v' has not TLS definition\", [ingress.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\n \"path\": \"spec.tls\",\n \"value\": \"\"\n }],\n\t\t\"alertScore\": 7,\n\t\t\"alertObject\": {\"k8sApiObjects\": [ingress]}\n\t}\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Ingress" + ] + } + ], + "ruleDependencies": null, + "controlConfigInputs": null, + "description": "Ingress should not be configured without TLS", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "kubelet-streaming-connection-idle-timeout", + "attributes": { + "hostSensorRule": "true" + }, + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.5 https://workbench.cisecurity.org/sections/1126668/recommendations/1838646\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--streaming-connection-idle-timeout\")\n\tcontains(command, \"--streaming-connection-idle-timeout=0\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Timeouts on streaming connections are enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": 
[],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": external_obj\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--streaming-connection-idle-timeout\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.streamingConnectionIdleTimeout == 0\n\n\tmsga := {\n\t\t\"alertMessage\": \"Timeouts on streaming connections are enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": [\"streamingConnectionIdleTimeout\"],\n\t\t\"failedPaths\": [\"streamingConnectionIdleTimeout\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}}\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--streaming-connection-idle-timeout\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data\n\t\t}}\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Determines if a kubelet has not disabled timeouts on streaming connections", + "remediation": "Change value of a --streaming-connection-idle-timeout argument or if using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a value other than 0.", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-that-the-api-server-audit-log-path-argument-is-set", + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"kubernetes API Server is not audited\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--audit-log-path\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": 
[{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--audit-log-path=/var/log/apiserver/audit.log\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", "ruleLanguage": "Rego", "match": [ { @@ -16056,21 +27550,224 @@ } ], "ruleDependencies": [], - "configInputs": null, "controlConfigInputs": null, - "description": "Do not bind the scheduler service to non-loopback insecure addresses.", - "remediation": "Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` on the Control Plane node and ensure the correct value for the `--bind-address` parameter\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, the `--bind-address` parameter is set to 0.0.0.0", + "description": "Enable auditing on the Kubernetes API Server and set the desired audit log path.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-path` parameter to a suitable path and file where you would like audit logs to be written, for example:\n\n \n```\n--audit-log-path=/var/log/apiserver/audit.log\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, auditing is not enabled.", "ruleQuery": "", "relevantCloudProviders": null }, { "guid": "", - "name": "automount-service-account", + "name": "ensure-that-the-admission-control-plugin-AlwaysPullImages-is-set", + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"Admission control policy is not set to AlwaysPullImages\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--enable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tnot \"AlwaysPullImages\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, [\"AlwaysPullImages\"])\n\tfixed_flag = sprintf(\"%s=%s\", [\"--enable-admission-plugins\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": 
path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--enable-admission-plugins\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--enable-admission-plugins=AlwaysPullImages\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Always pull images.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--enable-admission-plugins` parameter to include `AlwaysPullImages`.\n\n \n```\n--enable-admission-plugins=...,AlwaysPullImages,...\n\n```\n\n#### Impact Statement\nCredentials would be required to pull the private images every time. Also, in trusted environments, this might increases load on network, registry, and decreases speed.\n\n This setting could impact offline or isolated clusters, which have images pre-loaded and do not have access to a registry to pull in-use images. This setting is not appropriate for clusters which use this configuration.\n\n#### Default Value\nBy default, `AlwaysPullImages` is not set.", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "host-pid-privileges", + "creationTime": "", + "rule": "package armo_builtins\n\n\n# Fails if pod has hostPID enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tis_host_pid(pod.spec)\n\tpath := \"spec.hostPID\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has hostPID enabled\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has hostPID enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tis_host_pid(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostPID\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod with hostPID enabled\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has hostPID enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_pid(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostPID\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod with hostPID enabled\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": 
[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n# Check that hostPID and are set to false. Default is false. Only in pod spec\n\n\nis_host_pid(podspec){\n podspec.hostPID == true\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Containers should be as isolated as possible from the host machine. The hostPID field in Kubernetes may excessively expose the host to potentially malicious actions.", + "remediation": "Make sure that the field hostPID in the pod spec is not set to true (set to false or not present)", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "list-all-namespaces", + "creationTime": "", + "rule": "package armo_builtins\n\n# returns all namespace objects in cluster\ndeny[msga] {\n\tnamespace = input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"review the following namespace: %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "lists all namespaces for users to review", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-that-the-api-server-client-ca-file-argument-is-set-as-appropriate", + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"API server communication is not encrypted properly\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--client-ca-file\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--client-ca-file=\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace 
== \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Setup TLS connection on the API server.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection on the apiserver. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the client certificate authority file.\n\n \n```\n--client-ca-file=\n\n```\n\n#### Impact Statement\nTLS and client certificate authentication must be configured for your Kubernetes cluster deployment.\n\n#### Default Value\nBy default, `--client-ca-file` argument is not set.", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-that-the-etcd-data-directory-permissions-are-set-to-700-or-more-restrictive", "attributes": { - "armoBuiltin": true + "hostSensorRule": "true" }, "creationTime": "", - "rule": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n result := is_auto_mount(service_account)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n\n\n # -- ---- For workloads -- ---- \n# Fails if pod mount tokens by default (either by its config or by its SA config)\n\n # POD \ndeny [msga]{\n pod := input[_]\n\tpod.kind == \"Pod\"\n\n\tbeggining_of_path := \"spec.\"\n\twl_namespace := pod.metadata.namespace\n\tresult := is_sa_auto_mounted(pod.spec, beggining_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"Pod: %v in the following namespace: %v mounts service account tokens by default\", [pod.metadata.name, pod.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n} \n\n# WORKLOADS\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tbeggining_of_path := \"spec.template.spec.\"\n\n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.template.spec, beggining_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": 
fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# CRONJOB\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n \n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.jobTemplate.spec.template.spec, beggining_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n # -- ---- For workloads -- ---- \nis_sa_auto_mounted(spec, beggining_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken not in pod spec\n\tnot spec.automountServiceAccountToken == false\n\tnot spec.automountServiceAccountToken == true\n\n\t# check if SA automount by default\n\tsa := input[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata.namespace , wl_namespace)\n\tnot sa.automountServiceAccountToken == false\n\n\t# path is pod spec\n\tfix_path = { \"path\": sprintf(\"%vautomountServiceAccountToken\", [beggining_of_path]), \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\nis_sa_auto_mounted(spec, beggining_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# SA automount by default\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) > 0\n\tsa := service_accounts[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata.namespace , wl_namespace)\n\tnot sa.automountServiceAccountToken == false\n\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [beggining_of_path])\n\tfix_path = \"\"\n}\n\nis_sa_auto_mounted(spec, beggining_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# No SA (yaml scan)\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) == 0\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [beggining_of_path])\n\tfix_path = \"\"\n}\n\n\n\n # -- ---- For SAs -- ---- \nis_auto_mount(service_account) = [failed_path, fix_path] {\n\tservice_account.automountServiceAccountToken == true\n\tfailed_path = \"automountServiceAccountToken\"\n\tfix_path = \"\"\n}\n\nis_auto_mount(service_account)= [failed_path, fix_path] {\n\tnot service_account.automountServiceAccountToken == false\n\tnot service_account.automountServiceAccountToken == true\n\tfix_path = {\"path\": \"automountServiceAccountToken\", \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tspec.serviceAccountName == serviceAccountName\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tnot spec.serviceAccountName \n\tserviceAccountName == 
\"default\"\n}\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"etcdDataDir\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 448 # == 0o700\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "controlConfigInputs": null, + "description": "Ensure that the etcd data directory has permissions of `700` or more restrictive.", + "remediation": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). 
For example,\n\n \n```\nchmod 700 /var/lib/etcd\n\n```", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "pod-security-admission-restricted-applied-2", + "creationTime": "", + "rule": "package armo_builtins\nimport future.keywords.every\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"restricted\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", + "resourceEnumerator": "package armo_builtins\nimport future.keywords.every\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels \ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": 
{\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "ValidatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Checks that every namespace enabled restricted pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", + "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.\n\n#### Impact Statement\nWhere policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. Care is required when implementing admission control policies to ensure that this does not occur.\n\n#### Default Value\nBy default, Pod Security Admission is enabled but no policies are in place.", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "unauthenticated-service", + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.contains\nimport future.keywords.if\n\ndeny contains msga if {\n\tservice := input[_]\n\tservice.kind == \"Service\"\n\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Pod\", \"Job\", \"CronJob\"}\n\tspec_template_spec_patterns[wl.kind]\n\tis_same_namespace(wl, service)\n\twl_connected_to_service(wl, service)\n\n\tservice_scan_result := input[_]\n\tservice_scan_result.kind == \"ServiceScanResult\"\n\tservice_name := service.metadata.name\n\thas_unauthenticated_service(service_name, service.metadata.namespace, service_scan_result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Unauthenticated service %v exposes %v\", [service_name, wl.metadata.name]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\nhas_unauthenticated_service(service_name, namespace, service_scan_result) if {\n\tservice_scan_result.metadata.name == service_name\n\tservice_scan_result.metadata.namespace == namespace\n\tservice_scan_result.spec.ports[_].authenticated == false\n}\n\n\n\nwl_connected_to_service(wl, svc) if {\n\tcount({x | svc.spec.selector[x] == wl.metadata.labels[x]}) == count(svc.spec.selector)\n}\n\nwl_connected_to_service(wl, svc) if {\n\twl.spec.selector.matchLabels == svc.spec.selector\n}\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) 
{\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}", "resourceEnumerator": "", "ruleLanguage": "Rego", "match": [ @@ -16083,7 +27780,7 @@ ], "resources": [ "Pod", - "ServiceAccount" + "Service" ] }, { @@ -16111,561 +27808,37 @@ "Job", "CronJob" ] + }, + { + "apiGroups": [ + "kubescape.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "servicesscanresults" + ] } ], "ruleDependencies": [], - "configInputs": null, "controlConfigInputs": null, - "description": "fails if service account and workloads mount service account token by default", - "remediation": "Make sure that the automountServiceAccountToken field on the service account spec if set to false", + "description": "Verifies that the service is authenticated", + "remediation": "Add authentication to the service", "ruleQuery": "armo_builtins", "relevantCloudProviders": null }, { "guid": "", - "name": "kubelet-event-qps", + "name": "exposed-rce-pods", "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.9 https://workbench.cisecurity.org/sections/1126668/recommendations/1838656\n\n# if --event-qps is present rule should pass\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--event-qps\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.eventRecordQPS == 0\n\n\tmsga := {\n\t\t\"alertMessage\": \"Value of the eventRecordQPS argument is set to 0\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [\"eventRecordQPS\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--event-qps\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture.", - "remediation": "Set --event-qps argument to appropiate level or if using a config file set the eventRecordQPS property to the value other than 0", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "rule-can-update-configmap", - "attributes": { - "microsoftK8sThreatMatrix": 
"Lateral Movement::CoreDNS poisoning", - "armoBuiltin": true, - "useUntilKubescapeVersion": "v1.0.133" - }, - "creationTime": "", - "rule": "package armo_builtins\nimport data.cautils as cautils\n\n\n# Fails if user can modify all configmaps, or if he can modify the 'coredns' configmap (default for coredns)\n#RoleBinding to Role\ndeny [msga] {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n configmap := configmaps[_]\n configmap.metadata.name == \"coredns\"\n\n roles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\n canModifyConfigMapResource(rule)\n canModifyConfigMapVerb(rule)\n\n rolebinding.roleRef.kind == \"Role\"\n rolebinding.roleRef.name == role.metadata.name\n rolebinding.metadata.namespace == \"kube-system\"\n\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n \tmsga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can modify 'coredns' configmap\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 6,\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\n# Fails if user can modify all configmaps, or if he can modify the 'coredns' configmap (default for coredns)\n# RoleBinding to ClusterRole\ndeny[msga] {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n configmap := configmaps[_]\n configmap.metadata.name == \"coredns\"\n\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n canModifyConfigMapResource(rule)\n canModifyConfigMapVerb(rule)\n\n rolebinding.roleRef.kind == \"ClusterRole\"\n rolebinding.roleRef.name == role.metadata.name\n rolebinding.metadata.namespace == \"kube-system\"\n\n\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n \tmsga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can modify 'coredns' configmap\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 6,\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n\n}\n\n\n# Fails if user can modify all configmaps, or if he can modify the 'coredns' configmap (default for coredns)\n# ClusterRoleBinding to ClusterRole\ndeny[msga] {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n configmap := configmaps[_]\n configmap.metadata.name == \"coredns\"\n\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n clusterrolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n clusterrolebinding := clusterrolebindings[_]\n\n rule:= role.rules[_]\n canModifyConfigMapResource(rule)\n canModifyConfigMapVerb(rule)\n\n\n clusterrolebinding.roleRef.kind == \"ClusterRole\"\n clusterrolebinding.roleRef.name == role.metadata.name\n\n\n\n subject := clusterrolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n \tmsga := {\n\t 
\"alertMessage\": sprintf(\"The following %v: %v can modify 'coredns' configmap\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 6,\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,clusterrolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\n\n\n\n canModifyConfigMapResource(rule) {\n not rule.resourceNames\n cautils.list_contains(rule.resources,\"configmaps\")\n }\n\n canModifyConfigMapResource(rule) {\n not rule.resourceNames\n is_api_group(rule)\n cautils.list_contains(rule.resources,\"*\")\n }\n\n canModifyConfigMapResource(rule) {\n cautils.list_contains(rule.resources,\"configmaps\")\n cautils.list_contains(rule.resourceNames,\"coredns\")\n }\n\n canModifyConfigMapVerb(rule) {\n cautils.list_contains(rule.verbs,\"update\")\n }\n\n\n canModifyConfigMapVerb(rule) {\n cautils.list_contains(rule.verbs,\"patch\")\n }\n\n canModifyConfigMapVerb(rule) {\n cautils.list_contains(rule.verbs,\"*\")\n }\n\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding", - "ConfigMap" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "configInputs": null, - "controlConfigInputs": null, - "description": "determines which users can update/patch the 'coredns' configmap", - "remediation": "", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "ensure-that-the-api-server-tls-cert-file-and-tls-private-key-file-arguments-are-set-as-appropriate", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"API server is not configured to serve only HTTPS traffic\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\twanted = [\n\t\t[\"--tls-cert-file\", \"\"],\n\t\t[\"--tls-private-key-file\", \"\"],\n\t]\n\n\tfix_paths = [{\n\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd) + i]),\n\t\t\"value\": sprintf(\"%s=%s\", wanted[i]),\n\t} |\n\t\tnot contains(full_cmd, wanted[i][0])\n\t]\n\n\tcount(fix_paths) > 0\n\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": fix_paths,\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 
0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "Setup TLS connection on the API server.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection on the apiserver. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the TLS certificate and private key file parameters.\n\n \n```\n--tls-cert-file= \n--tls-private-key-file=\n\n```\n\n#### Impact Statement\nTLS and client certificate authentication must be configured for your Kubernetes cluster deployment.\n\n#### Default Value\nBy default, `--tls-cert-file` and `--tls-private-key-file` arguments are not set.", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "ensure-that-the-API-server-pod-specification-file-permissions-are-set-to-600-or-more-restrictive", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport data.cautils as cautils\nimport future.keywords.in\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\t\n\tfile_obj_path := [\"data\", \"APIServerInfo\", \"specsFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test \n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t]) \n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "configInputs": null, - "controlConfigInputs": null, - "description": "Ensure that the API server pod specification file has permissions of `600` or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "ensure-that-the-admission-control-plugin-AlwaysPullImages-is-set", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"Admission control policy is not set to AlwaysPullImages\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--enable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tnot \"AlwaysPullImages\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, [\"AlwaysPullImages\"])\n\tfixed_flag = sprintf(\"%s=%s\", [\"--enable-admission-plugins\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--enable-admission-plugins\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--enable-admission-plugins=AlwaysPullImages\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "Always pull images.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--enable-admission-plugins` parameter to include `AlwaysPullImages`.\n\n \n```\n--enable-admission-plugins=...,AlwaysPullImages,...\n\n```\n\n#### Impact Statement\nCredentials would be required to pull the private images every time. 
Also, in trusted environments, this might increases load on network, registry, and decreases speed.\n\n This setting could impact offline or isolated clusters, which have images pre-loaded and do not have access to a registry to pull in-use images. This setting is not appropriate for clusters which use this configuration.\n\n#### Default Value\nBy default, `AlwaysPullImages` is not set.", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "ensure-that-the-api-server-authorization-mode-argument-is-not-set-to-AlwaysAllow", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"AlwaysAllow authorization mode is enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--authorization-mode=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# Check if include AlwaysAllow\n\t\"AlwaysAllow\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := [val | val = flag.values[_]; val != \"AlwaysAllow\"]\n\tfixed_flag = get_fixed_flag(fixed_values)\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\n\nget_fixed_flag(values) = fixed {\n\tcount(values) == 0\n\tfixed = \"--authorization-mode=RBAC\" # If no authorization-mode, set it to RBAC, as recommended by CIS\n}\nget_fixed_flag(values) = fixed {\n\tcount(values) > 0\n\tfixed = sprintf(\"--authorization-mode=%s\", [concat(\",\", values)])\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "Do not always authorize all requests.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to values other than `AlwaysAllow`. 
One such example could be as below.\n\n \n```\n--authorization-mode=RBAC\n\n```\n\n#### Impact Statement\nOnly authorized requests will be served.\n\n#### Default Value\nBy default, `AlwaysAllow` is not enabled.", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "strict-file-owners-root", - "attributes": { - "hostSensorRule": "true", - "useFromKubescapeVersion": "v2.0.170", - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport data.cautils as cautils\nimport future.keywords.in\n\n# Fail for every file in data.postureControlInputs.fileObjPath\n# if the owners of the file are not `root:root`.\n# Expect (supposed to be fixed per control, not user configurable): \n# \t(required) data.postureControlInputs.fileObjPath - list of paths strings. The item delimiter is `.`.\n# \t(optional) data.postureControlInputs.kindFilter \n# \t(optional) data.postureControlInputs.pathGlob \ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tfilter_kind(obj.kind)\n\n\t# Get the file info using the input object-paths\n\trawObjPath = data.postureControlInputs.fileObjPath[_]\n\tobjPath := split(rawObjPath, \"/\")\n\tsubject := object.get(obj, objPath, false)\n\tsubject != false\n\n\t# Run the test for every file\n\tfiles := get_files(subject)\n\tfile = files[file_index]\n\tfile_path_glob(file.path)\n\n\t# Actual ownership test \n\tcautils.is_not_strict_conf_ownership(file.ownership)\n\n\t# Filter out irrelevant data from the alert object\n\tfile_filtered := filter_file(obj, objPath, file_index)\n\tobj_filtered := json.filter(obj, [\"apiVersion\", \"kind\", \"metadata\"])\n\toutput := object.union(file_filtered, obj_filtered)\n\n\tmsg := {\n\t\t\"alertMessage\": sprintf(\"%s is not owned by `root:root`\", [file.path]),\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown root:root %s\", [file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": output},\n\t}\n}\n\n# Always return a list\nget_files(obj) = files {\n\tis_array(obj)\n\tfiles = obj\n}\n\nget_files(obj) = files {\n\tnot is_array(obj)\n\tfiles = [obj]\n}\n\n# Filter only kinds that are in data.postureControlInputs.kindFilter.\n# If no kindFilter - match everything\nfilter_kind(kind) {\n\tkind in data.postureControlInputs.kindFilter\n}\n\nfilter_kind(kind) {\n\tnot data.postureControlInputs.kindFilter\n}\n\n# Filter file path globs from data.postureControlInputs.pathGlob\nfile_path_glob(path) {\n\tpatterns = data.postureControlInputs.pathGlob\n\tcount({true | patterns[i]; glob.match(patterns[i], null, path)}) > 0\n}\n\nfile_path_glob(path) {\n\tnot data.postureControlInputs.pathGlob\n}\n\n# Filter only the current file\nfilter_file(obj, objPath, file_index) = ret {\n\tis_array(object.get(obj, objPath, false))\n\tfull_path := array.concat(objPath, [format_int(file_index, 10)])\n\tfinal_path := concat(\"/\", full_path)\n\tret := json.filter(obj, [final_path])\n}\n\nfilter_file(obj, objPath, file_index) = ret {\n\tnot is_array(object.get(obj, objPath, false))\n\tret = object.filter(obj, objPath)\n}\n", - "resourceEnumerator": "package armo_builtins\n\nimport future.keywords.in\n\n# Filter only kinds that are in data.postureControlInputs.kindFilter.\n# Filter out non-host-sensor as well.\n# If no kindFilter - match every kind\ndeny[msg] {\n\tobj = input[_]\n\tobj.apiVersion == 
\"hostdata.kubescape.cloud/v1beta0\"\n\tfilter_kind(obj.kind)\n\tmsg := {\"alertObject\": {\"externalObjects\": obj}}\n}\n\n# Filter only kinds that are in data.postureControlInputs.kindFilter.\n# If no kindFilter - match everything\nfilter_kind(kind) {\n\tkind in data.postureControlInputs.kindFilter\n}\n\nfilter_kind(kind) {\n\tnot data.postureControlInputs.kindFilter\n}\n", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo", - "KubeProxyInfo", - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "configInputs": null, - "controlConfigInputs": null, - "description": "Ensure that file owners are `root:root`", - "remediation": "Set the owners of the failed file to `root:root`", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "ensure-that-the-api-server-secure-port-argument-is-not-set-to-0", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tcontains(obj.spec.containers[0].command[i], \"--secure-port=0\")\n\tmsg := {\n\t\t\"alertMessage\": \"the secure port is disabled\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "Do not disable the secure port.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and either remove the `--secure-port` parameter or set it to a different (non-zero) desired port.\n\n#### Impact Statement\nYou need to set the API Server up with the right TLS certificates.\n\n#### Default Value\nBy default, port 6443 is used as the secure port.", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "etcd-unique-ca", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 2.7 https://workbench.cisecurity.org/sections/1126654/recommendations/1838578\n\ndeny[msga] {\n\tetcdPod := [pod | pod := input[_]; filter_input(pod, \"etcd\")]\n\tetcdCheckResult := get_argument_value_with_path(etcdPod[0].spec.containers[0].command, \"--trusted-ca-file\")\n\n\tapiserverPod := [pod | pod 
:= input[_]; filter_input(pod, \"kube-apiserver\")]\n\tapiserverCheckResult := get_argument_value_with_path(apiserverPod[0].spec.containers[0].command, \"--client-ca-file\")\n\n\tetcdCheckResult.value == apiserverCheckResult.value\n\tmsga := {\n\t\t\"alertMessage\": \"Cert file is the same both for the api server and the etcd\",\n\t\t\"alertScore\": 8,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [etcdCheckResult.path, apiserverCheckResult.path],\n\t\t\"fixPaths\": [etcdCheckResult.fix_paths, apiserverCheckResult.fix_paths],\n\t\t\"alertObject\": {\"k8sApiObjects\": [etcdPod[0], apiserverPod[0]]},\n\t}\n}\n\ncommand_api_server_or_etcd(cmd) {\n\tendswith(cmd, \"kube-apiserver\")\n}\n\ncommand_api_server_or_etcd(cmd) {\n\tendswith(cmd, \"etcd\")\n}\n\nfilter_input(obj, res) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], res)\n}\n\nget_argument_value(command, argument) = value {\n\targs := regex.split(\"=\", command)\n\tsome i, sprintf(\"%v\", [argument]) in args\n\tvalue := args[i + 1]\n}\n\nget_argument_value_with_path(cmd, argument) = result {\n\tcontains(cmd[i], argument)\n\targumentValue := get_argument_value(cmd[i], argument)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"path\": path,\n\t\t\"value\": argumentValue,\n\t\t\"fix_paths\": {\"path\": path, \"value\": \"\"},\n\t}\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "Use a different certificate authority for etcd from the one used for Kubernetes.", - "remediation": "Follow the etcd documentation and create a dedicated certificate authority setup for the etcd service.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--trusted-ca-file=\n\n```\n\n#### Impact Statement\nAdditional management of the certificates and keys for the dedicated certificate authority will be required.\n\n#### Default Value\nBy default, no etcd certificate is created and used.", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "ensure-that-the-certificate-authorities-file-permissions-are-set-to-600-or-more-restrictive", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport data.cautils as cautils\nimport future.keywords.in\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"clientCAFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test \n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. 
actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "configInputs": null, - "controlConfigInputs": null, - "description": "Ensure that the certificate authorities file has permissions of `600` or more restrictive.", - "remediation": "Run the following command to modify the file permissions of the `--client-ca-file`\n\n \n```\nchmod 600 \n\n```", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "read-only-port-enabled-updated", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.4 https://workbench.cisecurity.org/sections/1126668/recommendations/1838645\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--read-only-port\")\n\tnot contains(command, \"--read-only-port=0\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet read-only port is not disabled\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": external_obj,\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(obj, \"--read-only-port\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\n\tyamlConfig.readOnlyPort\n\tnot yamlConfig.readOnlyPort == 0\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet read-only port is not disabled\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [\"readOnlyPort\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(obj, \"--read-only-port\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - 
"match": [ - { - "apiGroups": [], - "apiVersions": [], - "resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "Determines if kubelet has read-only port enabled.", - "remediation": "Start the kubelet with the --read-only-port flag set to 0.", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "anonymous-requests-to-kubelet-service-updated", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "creationTime": "", - "rule": "package armo_builtins\n\n#CIS 4.2.1 https://workbench.cisecurity.org/sections/1126668/recommendations/1838638\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--anonymous-auth\")\n\tcontains(command, \"--anonymous-auth=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.anonymous.enabled == false\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [\"authentication.anonymous.enabled\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [], - "apiVersions": [], - 
"resources": [] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "Determines if anonymous requests to the kubelet service are allowed.", - "remediation": "Disable anonymous requests by setting the anonymous-auth flag to false, or using the kubelet configuration file.", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "ensure-that-the-controller-manager-RotateKubeletServerCertificate-argument-is-set-to-true", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"`RotateKubeletServerCertificate` is set to false on the controller manager\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"RotateKubeletServerCertificate=false\")\n\tfixed = replace(cmd[i], \"RotateKubeletServerCertificate=false\", \"RotateKubeletServerCertificate=true\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "Enable kubelet server certificate rotation on controller-manager.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--feature-gates` parameter to include `RotateKubeletServerCertificate=true`.\n\n \n```\n--feature-gates=RotateKubeletServerCertificate=true\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `RotateKubeletServerCertificate` is set to \"true\" this recommendation verifies that it has not been disabled.", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "CVE-2022-23648", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tnode := input[_]\n node.kind == \"Node\"\n \n startswith(node.status.nodeInfo.containerRuntimeVersion,\"containerd://\")\n containerd_version := 
substring(node.status.nodeInfo.containerRuntimeVersion,13,-1)\n containerd_version_arr := split(containerd_version, \".\")\n major_version := to_number(containerd_version_arr[0]) \n minor_version := to_number(containerd_version_arr[1]) \n subVersion := to_number(containerd_version_arr[2]) \n \n is_vulnerable_version(major_version,minor_version,subVersion)\n\n path := \"status.nodeInfo.containerRuntimeVersion\"\n\n \tmsga := {\n\t\t\t\"alertMessage\": \"You are vulnerable to CVE-2022-23648\",\n \t\t\"alertObject\": {\n \"k8SApiObjects\": [node]\n },\n\t\t\t\"failedPaths\": [path],\n \"fixPaths\":[],\n\t}\n}\n\n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 0\n} \n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 1\n\tminor_version < 4\n}\n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 1\n\tminor_version == 4\n\tsubVersion < 12\n}\n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 1\n\tminor_version == 5\n\tsubVersion < 10\n}\t\n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 1\n\tminor_version == 6\n\tsubVersion < 1\n}\t\n\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Node" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "rule-can-portforward", - "attributes": { - "armoBuiltin": true, - "useUntilKubescapeVersion": "v1.0.133" - }, - "creationTime": "", - "rule": "\npackage armo_builtins\nimport data.cautils as cautils\n\n\ndeny[msga] {\n\t roles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcanForwardToPodResource(rule)\n\tcanForwardToPodVerb(rule)\n\n\trolebinding.roleRef.kind == \"Role\"\n\trolebinding.roleRef.name == role.metadata.name\n\t\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can do port forwarding\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcanForwardToPodResource(rule)\n\tcanForwardToPodVerb(rule)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\t\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can do port forwarding\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": 
{\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcanForwardToPodResource(rule)\n\tcanForwardToPodVerb(rule)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\t\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can do port forwarding\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\ncanForwardToPodVerb(rule) {\n\tcautils.list_contains(rule.verbs, \"create\")\n}\n\ncanForwardToPodVerb(rule) {\n\tcautils.list_contains(rule.verbs, \"get\")\n}\ncanForwardToPodVerb(rule) {\n\tcautils.list_contains(rule.verbs, \"*\")\n}\n\ncanForwardToPodResource(rule) {\n\tcautils.list_contains(rule.resources,\"pods/portforward\")\n}\ncanForwardToPodResource(rule) {\n\tcautils.list_contains(rule.resources,\"pods/*\")\n}\ncanForwardToPodResource(rule) {\n\tis_api_group(rule)\n\tcautils.list_contains(rule.resources,\"*\")\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "configInputs": null, - "controlConfigInputs": null, - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "has-critical-vulnerability", - "attributes": { - "armoBuiltin": true, "imageScanRelated": true, - "m$K8sThreatMatrix": "has-critical-vulnerability" + "m$K8sThreatMatrix": "exposed-rce-pods", + "useFromKubescapeVersion": "v2.0.150" }, "creationTime": "", - "rule": "package armo_builtins\nimport data.cautils as cautils\n \ndeny[msga] {\n \n\t# get pod and imageVulnerabilities\n\tpods := [pod | pod= input[_]; pod.kind == \"Pod\"]\n vulns := [vuln | vuln = input[_]; vuln.kind == \"ImageVulnerabilities\"]\n pod := pods[_]\n vuln := vulns[_]\n \n\t# get container image name\n\tcontainer := pod.spec.containers[i]\n\n\t# image has vulnerabilities\n\tcontainer.image == vuln.metadata.name\n\n\t# has a \"crirtical\" vulnerability\n\thas_crirtical_vulnerability(vuln)\n\n\tpath := sprintf(\"spec.containers[%v].image\", [format_int(i, 10)])\n\n\tmetadata = {\n\t\t\"name\": pod.metadata.name,\n\t\t\"namespace\": pod.metadata.namespace\n\t}\n\tattackvector = {\n\t\t\"apiVersion\": \"result.vulnscan.com/v1\",\n\t\t\"kind\": pod.kind,\n\t\t\"metadata\": metadata,\n\t\t\"relatedObjects\": [pod, vuln]\n\t}\n\n msga := {\n \"alertMessage\": sprintf(\"image %v has critical vulnerabilities\", [container.image]),\n \"alertScore\": 5,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n \"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"externalObjects\": attackvector\n\t\t}\n }\n}\n\n\n#handles 
majority of workload resources\ndeny[msga] {\n \n # get pod and imageVulnerabilities\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\n\twls := [wl | wl= input[_]; spec_template_spec_patterns[wl.kind]]\n vulns := [vuln | vuln = input[_]; vuln.kind == \"ImageVulnerabilities\"]\n wl := wls[_]\n vuln := vulns[_]\n \n\t# get container image name\n\tcontainer := wl.spec.template.spec.containers[i]\n\n\t# image has vulnerabilities\n\tcontainer.image == vuln.metadata.name\n\n\t# has a \"crirtical\" vulnerability\n\thas_crirtical_vulnerability(vuln)\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\n\tmetadata = {\n\t\t\"name\": wl.metadata.name,\n\t\t\"namespace\": wl.metadata.namespace\n\t}\n\tattackvector = {\n\t\t\"apiVersion\": \"result.vulnscan.com/v1\",\n\t\t\"kind\": wl.kind,\n\t\t\"metadata\": metadata,\n\t\t\"relatedObjects\": [wl, vuln]\n\t}\n\n msga := {\n \"alertMessage\": sprintf(\"image %v has critical vulnerabilities\", [container.image]),\n \"alertScore\": 5,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n \"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"externalObjects\": attackvector\n\t\t}\n }\n}\n\n\n# handles cronjob\ndeny[msga] {\n \n\twls := [wl | wl= input[_]; wl.kind == \"CronJob\"]\n vulns := [vuln | vuln = input[_]; vuln.kind == \"ImageVulnerabilities\"]\n wl := wls[_]\n vuln := vulns[_]\n \n\t# get container image name\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\t# image has vulnerabilities\n\tcontainer.image == vuln.metadata.name\n\n\t# has a \"crirtical\" vulnerability\n\thas_crirtical_vulnerability(vuln)\n\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\n\tmetadata = {\n\t\t\"name\": wl.metadata.name,\n\t\t\"namespace\": wl.metadata.namespace\n\t}\n\tattackvector = {\n\t\t\"apiVersion\": \"result.vulnscan.com/v1\",\n\t\t\"kind\": wl.kind,\n\t\t\"metadata\": metadata,\n\t\t\"relatedObjects\": [wl, vuln]\n\t}\n\n msga := {\n \"alertMessage\": sprintf(\"image %v has critical vulnerabilities\", [container.image]),\n \"alertScore\": 5,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n \"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"externalObjects\": attackvector\n\t\t}\n }\n}\n \n\nhas_crirtical_vulnerability(vuln){\n\tcount(vuln.data) > 0\n\tdata := vuln.data[_]\n\tdata.severity == \"Critical\"\n}", - "resourceEnumerator": "package armo_builtins\nimport data.cautils as cautils\n \ndeny[msga] {\n \n\t# get pod and imageVulnerabilities\n\tpods := [pod | pod= input[_]; pod.kind == \"Pod\"]\n vulns := [vuln | vuln = input[_]; vuln.kind == \"ImageVulnerabilities\"]\n pod := pods[_]\n vuln := vulns[_]\n \n\t# get container image name\n\tcontainer := pod.spec.containers[i]\n\n\t# image has vulnerabilities\n\tcontainer.image == vuln.metadata.name\n\n\tpath := sprintf(\"spec.containers[%v].image\", [format_int(i, 10)])\n\n\tmetadata = {\n\t\t\"name\": pod.metadata.name,\n\t\t\"namespace\": pod.metadata.namespace\n\t}\n\tattackvector = {\n\t\t\"apiVersion\": \"result.vulnscan.com/v1\",\n\t\t\"kind\": pod.kind,\n\t\t\"metadata\": metadata,\n\t\t\"relatedObjects\": [pod, vuln]\n\t}\n\n msga := {\n \"alertMessage\": sprintf(\"image %v has critical vulnerabilities\", [container.image]),\n \"alertScore\": 5,\n\t\t\"failedPaths\": [path],\n \"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"externalObjects\": attackvector\n\t\t}\n 
}\n}\n\n\n#handles majority of workload resources\ndeny[msga] {\n \n # get pod and imageVulnerabilities\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\n\twls := [wl | wl= input[_]; spec_template_spec_patterns[wl.kind]]\n vulns := [vuln | vuln = input[_]; vuln.kind == \"ImageVulnerabilities\"]\n wl := wls[_]\n vuln := vulns[_]\n \n\t# get container image name\n\tcontainer := wl.spec.template.spec.containers[i]\n\n\t# image has vulnerabilities\n\tcontainer.image == vuln.metadata.name\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\n\tmetadata = {\n\t\t\"name\": wl.metadata.name,\n\t\t\"namespace\": wl.metadata.namespace\n\t}\n\tattackvector = {\n\t\t\"apiVersion\": \"result.vulnscan.com/v1\",\n\t\t\"kind\": wl.kind,\n\t\t\"metadata\": metadata,\n\t\t\"relatedObjects\": [wl, vuln]\n\t}\n\n msga := {\n \"alertMessage\": sprintf(\"image %v has critical vulnerabilities\", [container.image]),\n \"alertScore\": 5,\n\t\t\"failedPaths\": [path],\n \"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"externalObjects\": attackvector\n\t\t}\n }\n}\n\n# handles cronjob\ndeny[msga] {\n \n\twls := [wl | wl= input[_]; wl.kind == \"CronJob\"]\n vulns := [vuln | vuln = input[_]; vuln.kind == \"ImageVulnerabilities\"]\n wl := wls[_]\n vuln := vulns[_]\n \n\t# get container image name\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\t# image has vulnerabilities\n\tcontainer.image == vuln.metadata.name\n\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\n\tmetadata = {\n\t\t\"name\": wl.metadata.name,\n\t\t\"namespace\": wl.metadata.namespace\n\t}\n\tattackvector = {\n\t\t\"apiVersion\": \"result.vulnscan.com/v1\",\n\t\t\"kind\": wl.kind,\n\t\t\"metadata\": metadata,\n\t\t\"relatedObjects\": [wl, vuln]\n\t}\n\n msga := {\n \"alertMessage\": sprintf(\"image %v has critical vulnerabilities\", [container.image]),\n \"alertScore\": 5,\n\t\t\"failedPaths\": [path],\n \"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"externalObjects\": attackvector\n\t\t}\n }\n}", + "rule": "package armo_builtins\n\n# regal ignore:rule-length\ndeny[msga] {\n services := [ x | x = input[_]; x.kind == \"Service\" ]\n pods := [ x | x = input[_]; x.kind == \"Pod\" ]\n vulns := [ x | x = input[_]; x.kind == \"ImageVulnerabilities\" ]\n\n pod := pods[_]\n service := services[_]\n vuln := vulns[_]\n\n # vuln data is relevant\n count(vuln.data) > 0\n\n # service is external-facing\n filter_external_access(service)\n\n # pod has the current service\n service_to_pod(service, pod) > 0\n\n # get container image name\n container := pod.spec.containers[i]\n\n # image has vulnerabilities\n container.image == vuln.metadata.name\n\n # At least one rce vulnerability\n filter_rce_vulnerabilities(vuln)\n\n related_objects := [pod, vuln]\n\n path := sprintf(\"status.containerStatuses[%v].imageID\", [format_int(i, 10)])\n\n metadata = {\n \"name\": pod.metadata.name,\n \"namespace\": pod.metadata.namespace\n }\n\n external_objects = {\n \"apiVersion\": \"result.vulnscan.com/v1\",\n \"kind\": pod.kind,\n \"metadata\": metadata,\n \"relatedObjects\": related_objects\n }\n\n msga := {\n \"alertMessage\": sprintf(\"pod '%v' exposed with rce vulnerability\", [pod.metadata.name]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 8,\n\t\t\"reviewPaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n \"alertObject\": {\n \"externalObjects\": 
external_objects\n }\n }\n}\n\nfilter_rce_vulnerabilities(vuln) {\n data := vuln.data[_]\n data.categories.isRce == true\n}\n\nfilter_external_access(service) {\n service.spec.type != \"ClusterIP\"\n}\n\nservice_to_pod(service, pod) = res {\n # Make sure we're looking on the same namespace\n service.metadata.namespace == pod.metadata.namespace\n\n service_selectors := [ x | x = service.spec.selector[_] ]\n\n res := count([ x | x = pod.metadata.labels[_]; x == service_selectors[_] ])\n}", + "resourceEnumerator": "package armo_builtins\n\n# regal ignore:rule-length\ndeny[msga] {\n services := [ x | x = input[_]; x.kind == \"Service\" ; x.apiVersion == \"v1\"]\n pods := [ x | x = input[_]; x.kind == \"Pod\" ; x.apiVersion == \"v1\"]\n vulns := [ x | x = input[_]; x.kind == \"ImageVulnerabilities\"] # TODO: x.apiVersion == \"--input--\" || x.apiVersion == \"--input--\" ]\n\n pod := pods[_]\n service := services[_]\n vuln := vulns[_]\n\n # vuln data is relevant\n count(vuln.data) > 0\n\n # service is external-facing\n filter_external_access(service)\n\n # pod has the current service\n service_to_pod(service, pod) > 0\n\n # get container image name\n container := pod.spec.containers[i]\n\n # image has vulnerabilities\n container.image == vuln.metadata.name\n\n related_objects := [pod, vuln]\n\n path := sprintf(\"status.containerStatuses[%v].imageID\", [format_int(i, 10)])\n\n metadata = {\n \"name\": pod.metadata.name,\n \"namespace\": pod.metadata.namespace\n }\n\n external_objects = {\n \"apiVersion\": \"result.vulnscan.com/v1\",\n \"kind\": pod.kind,\n \"metadata\": metadata,\n \"relatedObjects\": related_objects\n }\n\n msga := {\n \"alertMessage\": sprintf(\"pod '%v' exposed with rce vulnerability\", [pod.metadata.name]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 8,\n \"failedPaths\": [path],\n \"fixPaths\": [],\n \"alertObject\": {\n \"externalObjects\": external_objects\n }\n }\n}\n\nfilter_external_access(service) {\n service.spec.type != \"ClusterIP\"\n}\n\nservice_to_pod(service, pod) = res {\n # Make sure we're looking on the same namespace\n service.metadata.namespace == pod.metadata.namespace\n\n service_selectors := [ x | x = service.spec.selector[_] ]\n\n res := count([ x | x = pod.metadata.labels[_]; x == service_selectors[_] ])\n}", "ruleLanguage": "Rego", "match": [ { @@ -16676,34 +27849,9 @@ "v1" ], "resources": [ + "Service", "Pod" ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] } ], "dynamicMatch": [ @@ -16720,26 +27868,434 @@ ] } ], + "ruleDependencies": null, + "controlConfigInputs": null, + "description": "fails if known pods have exposed services and known vulnerabilities with remote code execution", + "remediation": "The image of the listed pods might have a fix in a newer version. Alternatively, the pod service might not need to be external facing", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-that-the-admission-control-plugin-EventRateLimit-is-set", + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsg := {\n\t\t\"alertMessage\": \"The API server is not configured to limit the rate at which it accepts requests. 
This could lead to a denial of service attack\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--enable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tnot \"EventRateLimit\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, [\"EventRateLimit\"])\n\tfixed_flag = sprintf(\"%s=%s\", [\"--enable-admission-plugins\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--enable-admission-plugins\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--enable-admission-plugins=EventRateLimit\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Limit the rate at which the API server accepts requests.", + "remediation": "Follow the Kubernetes documentation and set the desired limits in a configuration file.\n\n Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` and set the below parameters.\n\n \n```\n--enable-admission-plugins=...,EventRateLimit,...\n--admission-control-config-file=\n\n```\n\n#### Impact Statement\nYou need to carefully tune in limits as per your environment.\n\n#### Default Value\nBy default, `EventRateLimit` is not set.", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-that-the-controller-manager-profiling-argument-is-set-to-false", + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"profiling is enabled for the kube-controller-manager\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": 
result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcmd[i] == \"--profiling=true\"\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--profiling=false\"}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--profiling\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--profiling=false\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Disable profiling, if not needed.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```\n\n#### Impact Statement\nProfiling information would not be available.\n\n#### Default Value\nBy default, profiling is enabled.", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "rule-can-bind-escalate", + "attributes": { + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# ================= bind ===============================\n\n# fails if user has access to bind clusterroles/roles\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"bind\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"rbac.authorization.k8s.io\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"clusterroles\", \"roles\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath 
:= array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can bind roles/clusterroles\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# ================= escalate ===============================\n\n# fails if user has access to escalate roles/clusterroles\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\tis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"escalate\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"rbac.authorization.k8s.io\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"clusterroles\", \"roles\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can escalate roles/clusterroles\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "determines which users can or bind escalate roles/clusterroles", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "if-the-kubelet-config.yaml-configuration-file-is-being-used-validate-permissions-set-to-600-or-more-restrictive", + "attributes": { + "hostSensorRule": "true" + }, + "creationTime": "", + 
"rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], "ruleDependencies": [ { "packageName": "cautils" } ], - "configInputs": null, "controlConfigInputs": null, - "description": "image has-critical-vulnerability", - "remediation": "", - "ruleQuery": "package armo_builtins", + "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file has permissions of 600 or more restrictive.", + "remediation": "Run the following command (using the config file location identied in the Audit step)\n\n \n```\nchmod 600 /var/lib/kubelet/config.yaml\n\n```", + "ruleQuery": "", "relevantCloudProviders": null }, { "guid": "", - "name": "host-network-access", + "name": "read-only-port-enabled-updated", "attributes": { - "armoBuiltin": true + "hostSensorRule": "true" }, "creationTime": "", - "rule": "package armo_builtins\n\n# Fails if pod has hostNetwork enabled\ndeny[msga] {\n pods := [ pod | pod = input[_] ; pod.kind == \"Pod\"]\n pod := pods[_]\n\n\tis_host_network(pod.spec)\n\tpath := \"spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"Pod: %v is connected to the host network\", [pod.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has hostNetwork enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_network(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod connected to the host network\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has hostNetwork enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_network(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": 
sprintf(\"CronJob: %v has a pod connected to the host network\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_host_network(podspec) {\n podspec.hostNetwork == true\n}", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.4 https://workbench.cisecurity.org/sections/1126668/recommendations/1838645\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--read-only-port\")\n\tnot contains(command, \"--read-only-port=0\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet read-only port is not disabled\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": external_obj,\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(obj, \"--read-only-port\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\n\tyamlConfig.readOnlyPort\n\tnot yamlConfig.readOnlyPort == 0\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet read-only port is not disabled\",\n\t\t\"alertScore\": 4,\n\t\t\"reviewPaths\": [\"readOnlyPort\"],\n\t\t\"failedPaths\": [\"readOnlyPort\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(obj, \"--read-only-port\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Determines if kubelet has read-only port enabled.", + "remediation": "Start the kubelet with the --read-only-port flag set to 0.", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "rule-can-modify-admission-webhooks", + "attributes": { + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can modify admission webhooks\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := 
subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"update\", \"delete\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"admissionregistration.k8s.io\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"validatingwebhookconfigurations\", \"mutatingwebhookconfigurations\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can modify admission webhooks\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "determines which users can modify admission webhooks", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "set-sysctls-params", + "creationTime": "", + "rule": "package armo_builtins\n\n_builtin_safe_sysctl(name) {\n\t# NOTE: This set mirrors Kubernetes' safe sysctls. 
During each\n\t# Armo/Kubescape release, compare it with the upstream list in\n\t# pkg/kubelet/sysctl/safe_sysctls.go and update any changes.\n\tbuiltin_safe_sysctls := {\n\t\t\"kernel.shm_rmid_forced\",\n\t\t\"net.ipv4.ip_local_port_range\",\n\t\t\"net.ipv4.tcp_syncookies\",\n\t\t\"net.ipv4.ping_group_range\",\n\t\t\"net.ipv4.ip_unprivileged_port_start\",\n\t\t\"net.ipv4.ip_local_reserved_ports\",\n\t\t\"net.ipv4.tcp_keepalive_time\",\n\t\t\"net.ipv4.tcp_fin_timeout\",\n\t\t\"net.ipv4.tcp_keepalive_intvl\",\n\t\t\"net.ipv4.tcp_keepalive_probes\",\n\t\t\"net.ipv4.tcp_rmem\",\n\t\t\"net.ipv4.tcp_wmem\",\n\t}\n\tbuiltin_safe_sysctls[name]\n}\n\nsafe_sysctl(name) {\n\t_builtin_safe_sysctl(name)\n}\n\n_deny_sysctls_msg(kind_label, obj, sysctls, path) = msga {\n\tcount(sysctls) > 0\n\tunsafe_sysctls := [sysctl.name |\n\t\tsysctl := sysctls[_]\n\t\tname := sysctl.name\n\t\tnot safe_sysctl(name)\n\t]\n\tcount(unsafe_sysctls) > 0\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%s: %v sets unsafe sysctl(s): %v\", [kind_label, obj.metadata.name, unsafe_sysctls]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [{\"path\": path, \"value\": \"REMOVE_UNSAFE_SYSCTLS\"}],\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\n### POD ###\n\n# Fails if securityContext.sysctls contains values outside the safe list\ndeny[msga] {\n\t# verify the object kind\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\n\tsysctls := pod.spec.securityContext.sysctls\n\tpath := \"spec.securityContext.sysctls\"\n\tmsga := _deny_sysctls_msg(\"Pod\", pod, sysctls, path)\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.sysctls contains values outside the safe list\ndeny[msga] {\n\t# verify the object kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tmanifest_kind[wl.kind]\n\n\tsysctls := wl.spec.template.spec.securityContext.sysctls\n\tpath := \"spec.template.spec.securityContext.sysctls\"\n\tmsga := _deny_sysctls_msg(\"Workload\", wl, sysctls, path)\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.sysctls contains values outside the safe list\ndeny[msga] {\n\t# verify the object kind\n\tcj := input[_]\n\tcj.kind == \"CronJob\"\n\n\tsysctls := cj.spec.jobTemplate.spec.template.spec.securityContext.sysctls\n\tpath := \"spec.jobTemplate.spec.template.spec.securityContext.sysctls\"\n\tmsga := _deny_sysctls_msg(\"CronJob\", cj, sysctls, path)\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Fails if securityContext.sysctls is not set.", + "remediation": "Set securityContext.sysctls params", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-that-the-scheduler-profiling-argument-is-set-to-false", + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_scheduler(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"profiling is enabled for the 
kube-scheduler\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_scheduler(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-scheduler\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcmd[i] == \"--profiling=true\"\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--profiling=false\"}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--profiling\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--profiling=false\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_scheduler(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_scheduler(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-scheduler\")\n}\n", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Disable profiling, if not needed.", + "remediation": "Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` file on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```\n\n#### Impact Statement\nProfiling information would not be available.\n\n#### Default Value\nBy default, profiling is enabled.", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "csistoragecapacity-in-default-namespace", + "creationTime": "", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "storage.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + 
"CSIStorageCapacity" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-that-the-scheduler-bind-address-argument-is-set-to-127.0.0.1", + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_scheduler(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsg := {\n\t\t\"alertMessage\": \"the kube scheduler is not bound to a localhost interface only\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_scheduler(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-scheduler\")\n}\n\nget_flag_value(cmd) = value {\n\tre := \" ?--bind-address=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, 1)\n\tcount(matchs) == 1\n\tvalue = matchs[0][1]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tval = get_flag_value(cmd[i])\n\tval != \"127.0.0.1\"\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--bind-address=127.0.0.1\"}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--bind-address\")\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--bind-address=127.0.0.1\"}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_scheduler(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_scheduler(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-scheduler\")\n}\n", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Do not bind the scheduler service to non-loopback insecure addresses.", + "remediation": "Edit the Scheduler pod specification file `/etc/kubernetes/manifests/kube-scheduler.yaml` on the Control Plane node and ensure the correct value for the `--bind-address` parameter\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, the `--bind-address` parameter is set to 0.0.0.0", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-that-the-api-server-service-account-key-file-argument-is-set-as-appropriate", + "attributes": { + "hostSensorRule": "true", + "useFromKubescapeVersion": "v2.0.159" + }, + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"TLS certificate authority\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": 
result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--service-account-key-file\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--service-account-key-file=\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "controlConfigInputs": null, + "description": "Explicitly set a service account public key file for service accounts on the apiserver.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--service-account-key-file` parameter to the public key file for service accounts:\n\n \n```\n--service-account-key-file=\n\n```\n\n#### Impact Statement\nThe corresponding private key must be provided to the controller manager. 
You would need to securely maintain the key file and rotate the keys based on your organization's key rotation policy.\n\n#### Default Value\nBy default, `--service-account-key-file` argument is not set.", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "rule-secrets-in-env-var", + "creationTime": "", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\n\tcontainer := pod.spec.containers[i]\n\tenv := container.env[j]\n\tenv.valueFrom.secretKeyRef\n\n\tpath := sprintf(\"spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has secrets in environment variables\", [pod.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tenv := container.env[j]\n\tenv.valueFrom.secretKeyRef\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has secrets in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tenv := container.env[j]\n\tenv.valueFrom.secretKeyRef\n\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v has secrets in environment variables\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "fails if Pods have secrets in environment variables", + "remediation": "If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables.", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "host-network-access", + "creationTime": "", + "rule": "package armo_builtins\n\n# Fails if pod has hostNetwork enabled\ndeny[msga] {\n pods := [ pod | pod = input[_] ; pod.kind == \"Pod\"]\n pod := pods[_]\n\n\tis_host_network(pod.spec)\n\tpath := \"spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"Pod: %v is connected to the host network\", 
[pod.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has hostNetwork enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tis_host_network(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod connected to the host network\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has hostNetwork enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_network(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod connected to the host network\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_host_network(podspec) {\n podspec.hostNetwork == true\n}", "resourceEnumerator": "", "ruleLanguage": "Rego", "match": [ @@ -16782,7 +28338,6 @@ } ], "ruleDependencies": [], - "configInputs": null, "controlConfigInputs": null, "description": "fails if pod has hostNetwork enabled", "remediation": "Make sure that the hostNetwork field of the pod spec is not set to true (set to false or not present)", @@ -16791,49 +28346,573 @@ }, { "guid": "", - "name": "rule-identify-blocklisted-image-registries", - "attributes": { - "m$K8sThreatMatrix": "Initial Access::Compromised images in registry", - "armoBuiltin": true - }, + "name": "ensure-that-the-API-Server-only-makes-use-of-Strong-Cryptographic-Ciphers-cis1-10", "creationTime": "", - "rule": "package armo_builtins\nimport data\n# Check for images from blocklisted repos\n\nuntrustedImageRepo[msga] {\n\tpod := input[_]\n\tk := pod.kind\n\tk == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tpath := sprintf(\"spec.containers[%v].image\", [format_int(i, 10)])\n\timage := container.image\n untrusted_or_public_registries(image)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\nuntrustedImageRepo[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\timage := container.image\n untrusted_or_public_registries(image)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n \"alertObject\": 
{\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\nuntrustedImageRepo[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\timage := container.image\n untrusted_or_public_registries(image)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\nuntrusted_or_public_registries(image){\n\t# see default-config-inputs.json for list values\n\tuntrusted_registries := data.postureControlInputs.untrustedRegistries\n\trepo_prefix := untrusted_registries[_]\n\tstartswith(image, repo_prefix)\n}\n\nuntrusted_or_public_registries(image){\n\t# see default-config-inputs.json for list values\n\tpublic_registries := data.postureControlInputs.publicRegistries\n\trepo_prefix := public_registries[_]\n\tstartswith(image, repo_prefix)\n}", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tdontwanted = [\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\",\n\t\t\"TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\",\n\t\t\"TLS_ECDHE_RSA_WITH_RC4_128_SHA\",\n\t\t\"TLS_RSA_WITH_3DES_EDE_CBC_SHA\",\n\t\t\"TLS_RSA_WITH_AES_128_CBC_SHA\",\n\t\t\"TLS_RSA_WITH_AES_128_CBC_SHA256\",\n\t\t\"TLS_RSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_RSA_WITH_AES_256_CBC_SHA\",\n\t\t\"TLS_RSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_RSA_WITH_RC4_128_SHA\"\n\t]\n\n\tresult = invalid_flag(obj.spec.containers[0].command, dontwanted)\n\tmsg := {\n\t\t\"alertMessage\": \"The API server is not configured to use strong cryptographic ciphers\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--tls-cipher-suites=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n\n# Assume flag set only once\ninvalid_flag(cmd, dontwanted) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tdontuse = [x | x = dontwanted[_]; x in flag.values]\n\tcount(dontuse) > 0\n\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, dontuse)\n\tfixed_flag = sprintf(\"%s=%s\", [\"--tls-cipher-suites\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd, wanted) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, 
\"--tls-cipher-suites\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": sprintf(\"--tls-cipher-suites=%s\", [concat(\",\", wanted)]),\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Ensure that the API server is configured to only use strong cryptographic ciphers.", + "remediation": "Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml on the Control Plane node and set the below parameter.\n\n \n```\n--tls-cipher-suites=TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_GCM_SHA384.\n\n```\n\n#### Impact Statement\nAPI server clients that cannot support modern cryptographic ciphers will not be able to make connections to the API server.\n\n#### Default Value\nBy default the Kubernetes API server supports a wide range of TLS ciphers", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-image-vulnerability-scanning-using-azure-defender-image-scanning-or-a-third-party-provider", + "creationTime": "", + "rule": "package armo_builtins\n\n# fails in case Azure Defender image scanning is not enabled.\ndeny[msga] {\n cluster_describe := input[_]\n\tcluster_describe.apiVersion == \"management.azure.com/v1\"\n\tcluster_describe.kind == \"ClusterDescribe\"\n\tcluster_describe.metadata.provider == \"aks\"\n\tproperties := cluster_describe.data.properties \n\n not isAzureImageScanningEnabled(properties)\n\n msga := {\n\t\t\"alertMessage\": \"Azure Defender image scanning is not enabled.\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"az aks update --enable-defender --resource-group --name \",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n \"externalObjects\": cluster_describe\n },\n\n\t}\n}\n\n# isAzureImageScanningEnabled check if Azure Defender is enabled into the ClusterDescribe object.\nisAzureImageScanningEnabled(properties) {\n properties.securityProfile.defender.securityMonitoring.enabled == true\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + 
"apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Scan images being deployed to Azure (AKS) for vulnerabilities. Vulnerability scanning for images stored in Azure Container Registry is generally available in Azure Security Center. This capability is powered by Qualys, a leading provider of information security. When you push an image to Container Registry, Security Center automatically scans it, then checks for known vulnerabilities in packages or dependencies defined in the file. When the scan completes (after about 10 minutes), Security Center provides details and a security classification for each vulnerability detected, along with guidance on how to remediate issues and protect vulnerable attack surfaces.", + "remediation": "Enable Azure Defender image scanning. Command: az aks update --enable-defender --resource-group --name ", + "ruleQuery": "armo_builtin", + "relevantCloudProviders": [ + "AKS" + ] + }, + { + "guid": "", + "name": "resources-secret-in-default-namespace", + "creationTime": "", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n", "resourceEnumerator": "", "ruleLanguage": "Rego", "match": [ { "apiGroups": [ - "*" + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Secret" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "image-pull-policy-is-not-set-to-always", + "creationTime": "", + "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n is_bad_container(container)\n\tpaths = [sprintf(\"spec.containers[%v].image\", [format_int(i, 10)]), sprintf(\"spec.containers[%v].imagePullPolicy\", [format_int(i, 10)])]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v has 'latest' tag on image but imagePullPolicy is not set to 'Always'\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": paths,\n\t\t\"failedPaths\": paths,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := 
wl.spec.template.spec.containers[i]\n\tpaths = [sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)]), sprintf(\"spec.template.spec.containers[%v].imagePullPolicy\", [format_int(i, 10)])]\n is_bad_container(container)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v has 'latest' tag on image but imagePullPolicy is not set to 'Always'\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": paths,\n\t\t\"failedPaths\": paths,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tpaths = [sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)]), sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].imagePullPolicy\", [format_int(i, 10)])]\n is_bad_container(container)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in cronjob: %v has 'latest' tag on image but imagePullPolicy is not set to 'Always'\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": paths,\n\t\t\"failedPaths\": paths,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# image tag is latest\nis_bad_container(container){\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, container.image, -1)\n v := version[_]\n img := v[_]\n img == \":latest\"\n not_image_pull_policy(container)\n}\n\n# No image tag or digest (== latest)\nis_bad_container(container){\n not is_tag_image(container.image)\n not_image_pull_policy(container)\n}\n\n# image tag is only letters (== latest)\nis_bad_container(container){\n is_tag_image_only_letters(container.image)\n not_image_pull_policy(container)\n}\n\nnot_image_pull_policy(container) {\n container.imagePullPolicy == \"Never\"\n}\n\n\nnot_image_pull_policy(container) {\n container.imagePullPolicy == \"IfNotPresent\"\n}\n\nis_tag_image(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n not endswith(img, \"/\")\n}\n\n# The image has a tag, and contains only letters\nis_tag_image_only_letters(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n\treg1 := \"^:[a-zA-Z]{1,127}$\"\n\tre_match(reg1, img)\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" ], "apiVersions": [ "*" ], "resources": [ - "Pod", - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet", "Job", "CronJob" ] } ], "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.publicRegistries", - "settings.postureControlInputs.untrustedRegistries" + "controlConfigInputs": null, + "description": "check imagePullPolicy filed, if imagePullPolicy = always pass, else fail.", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "rule-access-dashboard-wl-v1", + "attributes": { + 
"m$K8sThreatMatrix": "Lateral Movement::Access Kubernetes dashboard, Discovery::Access Kubernetes dashboard", + "useFromKubescapeVersion": "v1.0.133" + }, + "creationTime": "", + "rule": "package armo_builtins\n\n# input: \n# apiversion: \n# fails if pod that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n pod := input[_]\n pod.spec.serviceAccountName == \"kubernetes-dashboard\"\n not startswith(pod.metadata.name, \"kubernetes-dashboard\")\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following pods: %s are associated with dashboard service account\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": [\"spec.serviceAccountName\"],\n\t\t\"failedPaths\": [\"spec.serviceAccountName\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# input: \n# apiversion: \n# fails if workload that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n wl.spec.template.spec.serviceAccountName == \"kubernetes-dashboard\"\n not startswith(wl.metadata.name, \"kubernetes-dashboard\")\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is associated with dashboard service account\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [\"spec.template.spec.serviceAccountName\"],\n\t\t\"failedPaths\": [\"spec.template.spec.serviceAccountName\"],\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# input: \n# apiversion: \n# fails if CronJob that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n wl.spec.jobTemplate.spec.template.spec.serviceAccountName == \"kubernetes-dashboard\"\n not startswith(wl.metadata.name, \"kubernetes-dashboard\")\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjob: %s is associated with dashboard service account\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": [\"spec.jobTemplate.spec.template.spec.serviceAccountName\"],\n\t\t\"failedPaths\": [\"spec.jobTemplate.spec.template.spec.serviceAccountName\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "fails if subject that is not dashboard service account is bound to dashboard role/clusterrole, or- if anyone that is not dashboard pod is associated with its service account.", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-that-the-api-server-audit-log-maxage-argument-is-set-to-30-or-as-appropriate", + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = 
input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": result.alert,\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_value(cmd) = {\"origin\": origin, \"value\": value} {\n\tre := \" ?--audit-log-maxage=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalue = to_number(matchs[0][1])\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag = get_flag_value(cmd[i])\n\tflag.value < 30\n\tfixed = replace(cmd[i], flag.origin, \"--audit-log-maxage=30\")\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [i])\n\tresult = {\n\t\t\"alert\": sprintf(\"Audit log retention period is %v days, which is too small (should be at least 30 days)\", [flag.value]),\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--audit-log-maxage\")\n\tresult = {\n\t\t\"alert\": \"Audit log retention period is not set\",\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%v]\", [count(cmd)]),\n\t\t\t\"value\": \"--audit-log-maxage=30\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Retain the logs for at least 30 days or as appropriate.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxage` parameter to 30 or as an appropriate number of days:\n\n \n```\n--audit-log-maxage=30\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, auditing is not enabled.", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "secret-etcd-encryption-cloud", + "creationTime": "", + "rule": "package armo_builtins\n\n# Check if encryption in etcd in enabled for AKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"management.azure.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"aks\"\t\n\tconfig = cluster_config.data\n\n\tnot isEncryptedAKS(config)\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"az aks nodepool 
add --name hostencrypt --cluster-name --resource-group -s Standard_DS2_v2 -l --enable-encryption-at-host\",\n\t\t\"alertObject\": {\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n# Check if encryption in etcd is enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"eks\"\n\tconfig := cluster_config.data\n\n\tnot is_encrypted_EKS(config)\n\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"eksctl utils enable-secrets-encryption --cluster= --key-arn=arn:aws:kms:::key/ --region=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n\n# Check if encryption in etcd in enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n\n\tnot is_encrypted_GKE(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [\"data.database_encryption.state\"],\n\t\t\"failedPaths\": [\"data.database_encryption.state\"],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"gcloud container clusters update --region= --database-encryption-key=/locations//keyRings//cryptoKeys/ --project=\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == \"1\"\n}\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == \"ENCRYPTED\"\n}\n\n\nis_encrypted_EKS(config) {\n\tencryption := config.Cluster.EncryptionConfig[_]\n\tencryption.provider.keyArn != \"\"\n\tcount(encryption.resources) > 0\n}\n\nisEncryptedAKS(cluster_config) {\n\tcluster_config.properties.agentPoolProfiles.enableEncryptionAtHost == true\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + }, + { + "apiGroups": [ + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + }, + { + "apiGroups": [ + "container.googleapis.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": [ + "AKS", + "EKS", + "GKE" + ] + }, + { + "guid": "", + "name": "ensure-endpointprivateaccess-is-enabled-and-endpointpublicaccess-is-disabled-eks", + "creationTime": "", + "rule": "package armo_builtins\n\n\n# Check if EndpointPrivateAccess in disabled or EndpointPublicAccess is enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\t\n\tconfig = cluster_config.data\n\n\t\t\n\tis_endpointaccess_misconfigured(config)\n\n\tmsga := {\n\t\t\"alertMessage\": 
\"endpointPrivateAccess is not enabled, or EndpointPublicAccess is enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true,endpointPublicAccess=true,publicAccessCidrs='203.0.113.5/32'\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n# check if EndpointPrivateAccess is disabled\nis_endpointaccess_misconfigured(config) {\n\tconfig.Cluster.ResourcesVpcConfig.EndpointPrivateAccess == false\n}\n\n# check if EndpointPublicAccess is enabled\nis_endpointaccess_misconfigured(config) {\n\tconfig.Cluster.ResourcesVpcConfig.EndpointPublicAccess == true\n}\n\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": [ + "EKS" + ] + }, + { + "guid": "", + "name": "Ensure-that-the-kubeconfig-file-permissions-are-set-to-644-or-more-restrictive", + "attributes": { + "hostSensorRule": "true" + }, + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"kubeConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test. num. configured from Octal (644) to Decimal num.\n\tallowed_perms := 420\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-scanner data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\"\n\t])\n\n\talert := sprintf(\"The permissions of %s are too permissive. maximum allowed: %o. 
actual: %o\",\n\t[file.path, allowed_perms, file.permissions])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": null, + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "controlConfigInputs": null, + "description": "Ensure that the kubeconfig file permissions are set to 644 or more restrictive", + "remediation": "Run the below command (based on the file location on your system) on the each worker node.\n\n \n```\nchmod 644 \n\n```", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "resources-memory-limits", + "creationTime": "", + "rule": "package armo_builtins\n\n# ================================== no memory limits ==================================\n# Fails if pod does not have container with memory-limits\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tnot container.resources.limits.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have memory-limit or request\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n# Fails if workload does not have container with memory-limits\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tnot container.resources.limits.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if cronjob does not have container with memory-limits\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tnot container.resources.limits.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + 
"apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "memory limits are not set.", + "remediation": "Ensure memory limits are set.", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "etcd-auto-tls-disabled", + "creationTime": "", + "rule": "package armo_builtins\n\n# Check if --auto-tls is not set to true\ndeny[msga] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\n\tcommands := obj.spec.containers[0].command\n\tresult := invalid_flag(commands)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Auto tls is enabled. Clients are able to use self-signed certificates for TLS.\",\n\t\t\"alertScore\": 6,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--auto-tls=true\")\n\tfixed = replace(cmd[i], \"--auto-tls=true\", \"--auto-tls=false\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Do not use self-signed certificates for TLS.", + "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and either remove the `--auto-tls` parameter or set it to `false`.\n\n \n```\n--auto-tls=false\n\n```\n\n#### Impact Statement\nClients will not be able to use self-signed certificates for TLS.\n\n#### Default Value\nBy default, `--auto-tls` is set to `false`.", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-that-the-kubeconfig-kubelet.conf-file-permissions-are-set-to-600-or-more-restrictive", + "attributes": { + "hostSensorRule": "true" + }, + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"kubeConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", 
file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "controlConfigInputs": null, + "description": "Ensure that the `kubelet.conf` file has permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/kubelet.conf\n\n```", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-service-principle-has-read-only-permissions", + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\n# deny if servicePrincipal has permissions that are not read-only\ndeny[msga] {\n\tresources := input[_]\n\tresources.kind == \"ListEntitiesForPolicies\"\n\tresources.metadata.provider == \"aks\"\n\n\troleAssignment := resources.data.roleAssignments[_]\n\troleAssignment.properties.principalType == \"ServicePrincipal\"\n\n\tpolicies := input[_]\n\tpolicies.kind == \"PolicyVersion\"\n\tpolicies.metadata.provider == \"aks\"\n\n\tpolicy := policies.data.roleDefinitions[_]\n\tpolicy.id == roleAssignment.properties.roleDefinitionId\n\n\t# check if policy has at least one action that is not read\n\tsome action in policy.properties.permissions[_].actions\n\t\tnot endswith(action, \"read\")\n\n\tmsga := {\n\t\t\"alertMessage\": \"ServicePrincipal has permissions that are not read-only to ACR.\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": resources\n\t\t}\n\t}\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": null, + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ListEntitiesForPolicies" + ] + }, + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "PolicyVersion" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": [ + "AKS" + ] + }, + { + "guid": "", + "name": "rule-identify-blocklisted-image-registries-v1", + "attributes": { + "m$K8sThreatMatrix": "Initial Access::Compromised images in registry", + "useFromKubescapeVersion": "v2.9.0" + }, + "creationTime": "", + "rule": "package armo_builtins\n\nuntrustedImageRepo[msga] {\n\twl := input[_]\n\tcontainers_path := get_containers_path(wl)\n\tcontainers := object.get(wl, containers_path, [])\n\tcontainer := containers[i]\n\tname := image.parse_normalized_name(container.image)\n\tuntrusted_or_public_registries(name)\n\tpath := 
sprintf(\"%s[%d].image\", [concat(\".\", containers_path), i])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [name, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\nuntrusted_or_public_registries(image){\n\t# see default-config-inputs.json for list values\n\tuntrusted_registries := data.postureControlInputs.untrustedRegistries\n\tregistry := untrusted_registries[_]\n\tstartswith(image, registry)\n\n}\n\nuntrusted_or_public_registries(image){\n\t# see default-config-inputs.json for list values\n\tpublic_registries := data.postureControlInputs.publicRegistries\n\tregistry := public_registries[_]\n\tstartswith(image, registry)\n}\n\n# get_containers_path - get resource containers paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_containers_path(resource) := result {\n\tresource_kinds := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"Pod\"\nget_containers_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"CronJob\"\nget_containers_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n}", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], "controlConfigInputs": [ { "path": "settings.postureControlInputs.publicRegistries", "name": "Public registries", - "description": "Kubescape checks none of these public registries are in use." + "description": "Kubescape checks none of these public container registries are in use." }, { "path": "settings.postureControlInputs.untrustedRegistries", "name": "Registries block list", - "description": "Kubescape checks none of the following registries are in use." + "description": "Kubescape checks none of these user-provided container registries are in use." 
} ], "description": "Identifying if pod container images are from unallowed registries", @@ -16843,45 +28922,12 @@ }, { "guid": "", - "name": "CVE-2022-47633", + "name": "alert-any-hostpath", "attributes": { - "armoBuiltin": true + "m$K8sThreatMatrix": "Privilege Escalation::hostPath mount" }, "creationTime": "", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"kyverno:\")\n\tis_vulnerable_image(image)\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-47633\",\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n\nis_vulnerable_image(image) {\n\tversion := split(image, \":v\")[1]\n\tversionTriplet := split(version, \".\")\n\tcount(versionTriplet) == 3\n\tmajor_version := to_number(versionTriplet[0])\n\tminorVersion := to_number(versionTriplet[1])\n\tsubVersion := to_number(versionTriplet[2]) \n\tisVulnerableVersion(major_version,minorVersion,subVersion)\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 1\n\tminorVersion == 8\n\t3 <= subVersion\n\tsubVersion < 5\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"kyverno:\")\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-47633\",\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "a", - "remediation": "a", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "psp-enabled-native", - "attributes": { - "armoBuiltin": true, - "resourcesAggregator": "apiserver-pod", - "useFromKubescapeVersion": "v1.0.133" - }, - "creationTime": "", - "rule": "package armo_builtins\n\n\n# Check if psp is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command[j]\n contains(cmd, \"--enable-admission-plugins=\")\n output := split(cmd, \"=\")\n not contains(output[1], \"PodSecurityPolicy\")\n\tpath := sprintf(\"spec.containers[0].command[%v]\", [format_int(j, 10)])\t\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"PodSecurityPolicy is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\t\t\n\t\t}\n\t}\n}", + "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volumes := pod.spec.volumes\n volume := volumes[i]\n\tstart_of_path := \"spec.\"\n\tresult := is_dangerous_volume(volume, start_of_path, i)\n podname := pod.metadata.name\n\tvolumeMounts := pod.spec.containers[j].volumeMounts\n\tpathMounts = volume_mounts(volume.name, volumeMounts, sprintf(\"spec.containers[%v]\", [j]))\n\tfinalPath := array.concat([result], pathMounts)\n\n\n\tmsga := 
{\n\t\t\"alertMessage\": sprintf(\"pod: %v has: %v as hostPath volume\", [podname, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": finalPath,\n\t\t\"failedPaths\": finalPath,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volumes := wl.spec.template.spec.volumes\n volume := volumes[i]\n\tstart_of_path := \"spec.template.spec.\"\n result := is_dangerous_volume(volume, start_of_path, i)\n\tvolumeMounts := wl.spec.template.spec.containers[j].volumeMounts\n\tpathMounts = volume_mounts(volume.name,volumeMounts, sprintf(\"spec.template.spec.containers[%v]\", [j]))\n\tfinalPath := array.concat([result], pathMounts)\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": finalPath,\n\t\t\"failedPaths\": finalPath,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# handles CronJobs\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n volumes := wl.spec.jobTemplate.spec.template.spec.volumes\n volume := volumes[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n result := is_dangerous_volume(volume, start_of_path, i)\n\tvolumeMounts := wl.spec.jobTemplate.spec.template.spec.containers[j].volumeMounts\n\tpathMounts = volume_mounts(volume.name,volumeMounts, sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v]\", [j]))\n\tfinalPath := array.concat([result], pathMounts)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": finalPath,\n\t\t\"failedPaths\": finalPath,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_dangerous_volume(volume, start_of_path, i) = path {\n volume.hostPath.path\n path = sprintf(\"%vvolumes[%v]\", [start_of_path, format_int(i, 10)])\n}\n\nvolume_mounts(name, volume_mounts, str) = [path] {\n\tname == volume_mounts[j].name\n\tpath := sprintf(\"%s.volumeMounts[%v]\", [str, j])\n} else = []", "resourceEnumerator": "", "ruleLanguage": "Rego", "match": [ @@ -16895,87 +28941,174 @@ "resources": [ "Pod" ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] } ], "ruleDependencies": [], - "configInputs": null, "controlConfigInputs": null, - "description": "", - "remediation": "", + "description": "determines if any workload contains a hostPath volume", + "remediation": "Try to refrain from using hostPath mounts", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "kubelet-rotate-certificates", + "attributes": { + "hostSensorRule": "true" + }, + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.11 https://workbench.cisecurity.org/sections/1126668/recommendations/1838658\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := 
obj.data.cmdLine\n\n\tcontains(command, \"--rotate-certificates\")\n\tnot contains(command, \"--rotate-certificates=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Kubelet client certificates rotation is disabled\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--rotate-certificates\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.rotateCertificates == false\n\n\tmsga := {\n\t\t\"alertMessage\": \"Kubelet client certificates rotation is disabled\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [\"rotateCertificates\"],\n\t\t\"failedPaths\": [\"rotateCertificates\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--rotate-certificates\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Ensure that the --rotate-certificates argument is not set to false.", + "remediation": "Verify that the --rotate-certificates argument is not present, or is set to true. 
If the --rotate-certificates argument is not present, verify that if there is a Kubelet config file specified by --config, that file does not contain rotateCertificates: false.", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-that-the-kubelet-service-file-ownership-is-set-to-root-root", + "attributes": { + "hostSensorRule": "true" + }, + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"serviceFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]), \"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "controlConfigInputs": null, + "description": "Ensure that the `kubelet` service file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. 
For example,\n\n \n```\nchown root:root /etc/systemd/system/kubelet.service.d/kubeadm.conf\n\n```", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "set-fsgroup-value", + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.if\n\n### POD ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n\t# verify the object kind\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# check securityContext has fsGroup set properly\n\tnot fsGroupSetProperly(pod.spec.securityContext)\n\n\tsecurityContextPath := \"spec.securityContext\"\n\n\tfixPaths = [{\"path\": sprintf(\"%v.fsGroup\", [securityContextPath]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.fsGroup' with allowed value\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n\t# verify the object kind\n\tcj := input[_]\n\tcj.kind == \"CronJob\"\n\n\t# check securityContext has fsGroup set properly\n\tnot fsGroupSetProperly(cj.spec.jobTemplate.spec.template.spec.securityContext)\n\n\tsecurityContextPath := \"spec.jobTemplate.spec.template.spec.securityContext\"\n\n\tfixPaths = [{\"path\": sprintf(\"%v.fsGroup\", [securityContextPath]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.fsGroup' with allowed value\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [cj]},\n\t}\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.fsGroup does not have a values >= 0\ndeny[msga] {\n\t# verify the object kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# check securityContext has fsGroup set properly\n\tnot fsGroupSetProperly(wl.spec.template.spec.securityContext)\n\n\tsecurityContextPath := \"spec.template.spec.securityContext\"\n\tfixPaths = [{\"path\": sprintf(\"%v.fsGroup\", [securityContextPath]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.fsGroup' with allowed value\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# fsGroupSetProperly checks if fsGroup has a value >= 0.\nfsGroupSetProperly(securityContext) if {\n\tsecurityContext.fsGroup >= 0\n} else := false\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Fails if securityContext.fsGroup is not set.", + "remediation": "Set securityContext.fsGroup value", "ruleQuery": "armo_builtins", "relevantCloudProviders": null }, { "guid": "", - "name": 
"ensure-that-the-api-server-service-account-lookup-argument-is-set-to-true", + "name": "ensure-that-the-etcd-pod-specification-file-permissions-are-set-to-600-or-more-restrictive", "attributes": { - "armoBuiltin": true + "hostSensorRule": "true" }, "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"anonymous requests is enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tresult = get_result(cmd[i], i)\n}\n\nget_result(cmd, i) = result {\n\tcmd == \"--service-account-lookup=false\"\n\tresult = {\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\nget_result(cmd, i) = result {\n\tcmd != \"--service-account-lookup=false\"\n\tcontains(cmd, \"--service-account-lookup=false\")\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": replace(cmd, \"--service-account-lookup=false\", \"--service-account-lookup=true\"),\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "Validate service account before validating token.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--service-account-lookup=true\n\n```\n Alternatively, you can delete the `--service-account-lookup` parameter from this file so that the default takes effect.\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `--service-account-lookup` argument is set to `true`.", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "ensure-that-the-api-server-audit-log-maxbackup-argument-is-set-to-10-or-as-appropriate", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": result.alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": 
[obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--audit-log-maxbackup\")\n\tresult = {\n\t\t\"alert\": \"Please validate that the audit log max backup is set to an appropriate value\",\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--audit-log-maxbackup\")\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [count(cmd)])\n\tresult = {\n\t\t\"alert\": \"Audit log max backup is not set\",\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--audit-log-maxbackup=YOUR_VALUE\"}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "Retain 10 or an appropriate number of old log files.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxbackup` parameter to 10 or to an appropriate value.\n\n \n```\n--audit-log-maxbackup=10\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, auditing is not enabled.", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "ensure-that-the-scheduler.conf-file-permissions-are-set-to-600-or-more-restrictive", - "attributes": { - "hostSensorRule": "true", - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport data.cautils as cautils\nimport future.keywords.in\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"schedulerInfo\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test \n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. 
actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"etcdConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n", "resourceEnumerator": "", "ruleLanguage": "Rego", "match": [], @@ -16997,31 +29130,91 @@ "packageName": "cautils" } ], - "configInputs": null, "controlConfigInputs": null, - "description": "Ensure that the `scheduler.conf` file has permissions of `600` or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/scheduler.conf\n\n```", + "description": "Ensure that the `/etc/kubernetes/manifests/etcd.yaml` file has permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/etcd.yaml\n\n```", "ruleQuery": "", "relevantCloudProviders": null }, { "guid": "", - "name": "read-only-port-enabled", + "name": "ensure-that-the-controller-manager-service-account-private-key-file-argument-is-set-as-appropriate", + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"service account token can not be rotated as needed\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--service-account-private-key-file\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--service-account-private-key-file=\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Explicitly set a service account private key file for service accounts on the controller manager.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--service-account-private-key-file` parameter to the private key file for service accounts.\n\n \n```\n--service-account-private-key-file=\n\n```\n\n#### Impact Statement\nYou would need to securely maintain the key file and rotate the keys based on your organization's key rotation policy.\n\n#### Default Value\nBy default, `--service-account-private-key-file` it not set.", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "pv-without-encryption", "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" + "useFromKubescapeVersion": "v3.0.3" }, "creationTime": "", - "rule": "package armo_builtins\nimport data.kubernetes.api.client as client\n\n# Both config and cli present\ndeny[msga] {\n\t\tkubelet_config := input[_]\n\t\tkubelet_config.kind == \"KubeletConfiguration\"\n\t\tkubelet_config.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\n\t\tkubelet_cli := input[_] \n\t\tkubelet_cli.kind == \"KubeletCommandLine\"\n\t\tkubelet_cli.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\t\tnot is_read_only_port_disabled_both(kubelet_config, 
kubelet_cli)\n\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": \"kubelet read-only port is not disabled\",\n\t\t\t\"alertScore\": 2,\n\t\t\t\"failedPaths\": [],\n\t\t\t\"fixPaths\": [],\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n \"k8sApiObjects\": [kubelet_config, kubelet_cli]\n\t\t\t},\n\t\t}\n\t}\n\n\n# Only one of them present\ndeny[msga] {\n\t\texternal_obj := is_read_only_port_enabled_single(input)\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": \"kubelet read-only port is not disabled\",\n\t\t\t\"alertScore\": 2,\n\t\t\t\"failedPaths\": [],\n\t\t\t\"fixPaths\": [],\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n \"k8sApiObjects\": [external_obj]\n\t\t\t},\n\t\t}\n\t}\n\n\nis_read_only_port_disabled_both(kubelet_config, kubelet_cli) {\n kubelet_config.data.readOnlyPort == 0\n}\n\nis_read_only_port_disabled_both(kubelet_config, kubelet_cli) {\n is_read_only_port_disabled_cli(kubelet_cli)\n not is_read_only_port_enabled_config(kubelet_config)\n}\n\nis_read_only_port_enabled_single(resources) = obj {\n\tkubelet_cli := resources[_] \n\tkubelet_cli.kind == \"KubeletCommandLine\"\n\tkubelet_cli.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\n\tkubelet_config := [config | config = resources[_]; config.kind == \"KubeletConfiguration\"]\n\tcount(kubelet_config) == 0\n\n\tnot is_read_only_port_disabled_cli(kubelet_cli)\n\t\n\tobj = kubelet_cli\n}\n\n\nis_read_only_port_enabled_single(resources) = obj {\n\tkubelet_config := resources[_]\n\tkubelet_config.kind == \"KubeletConfiguration\"\n\tkubelet_config.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\n\tkubelet_cli := [cli | cli = resources[_]; cli.kind == \"KubeletCommandLine\"]\n\tcount(kubelet_cli) == 0\n\n\tis_read_only_port_enabled_config(kubelet_config) \n\t\n\tobj = kubelet_config\n}\n\n\n# 0 or not present -> disabled\nis_read_only_port_disabled_cli(kubelet_cli) {\n kubelet_cli_data := kubelet_cli.data\n contains(kubelet_cli_data[\"fullCommand\"], \"--read-only-port=0\")\n}\n\nis_read_only_port_disabled_cli(kubelet_cli) {\n kubelet_cli_data := kubelet_cli.data\n not contains(kubelet_cli_data[\"fullCommand\"], \"--read-only-port\")\n}\n\nis_read_only_port_disabled_config(kubelet_config) {\n not kubelet_config.data.readOnlyPort\n}\n\nis_read_only_port_disabled_config(kubelet_config) {\n kubelet_config.data.readOnlyPort == 0\n}\n\nis_read_only_port_enabled_config(kubelet_config) {\n kubelet_config.data.readOnlyPort\n kubelet_config.data.readOnlyPort != 0\n}\n", + "rule": "package armo_builtins\n\n# Checks if Ingress is connected to a service and a workload to expose something\ndeny[msga] {\n\tpv := input[_]\n\tpv.kind == \"PersistentVolume\"\n\n\t# Find the related storage class\n\tstorageclass := input[_]\n\tstorageclass.kind == \"StorageClass\"\n\tpv.spec.storageClassName == storageclass.metadata.name\n\n\t# Check if storage class is encrypted\n\tnot is_storage_class_encrypted(storageclass)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Volume '%v' has is using a storage class that does not use encryption\", [pv.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\n\t\t\t\"path\": \"spec.storageClassName\",\n\t\t\t\"value\": \"\"\n }],\n\t\t\"alertScore\": 7,\n\t\t\"alertObject\": {\"k8sApiObjects\": [pv]}\n\t}\n}\n\n# Storage class is encrypted - AWS\nis_storage_class_encrypted(storageclass) {\n\tstorageclass.parameters.encrypted == \"true\"\n}\n\n# Storage class is encrypted - Azure\nis_storage_class_encrypted(storageclass) 
{\n\tstorageclass.provisioner\n\tcontains(storageclass.provisioner,\"azure\")\n}\n\n# Storage class is encrypted - GCP\nis_storage_class_encrypted(storageclass) {\n\t# GKE encryption is enabled by default https://cloud.google.com/blog/products/containers-kubernetes/exploring-container-security-use-your-own-keys-to-protect-your-data-on-gke\n\tstorageclass.provisioner\n\tcontains(storageclass.provisioner,\"csi.storage.gke.io\")\n}\n\n", "resourceEnumerator": "", "ruleLanguage": "Rego", "match": [ { - "apiGroups": [], - "apiVersions": [], - "resources": [] + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "PersistentVolume" + ] + }, + { + "apiGroups": [ + "storage.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "StorageClass" + ] } ], + "ruleDependencies": null, + "controlConfigInputs": null, + "description": "PersistentVolume without encryption", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-that-the-etcd-data-directory-ownership-is-set-to-etcd-etcd", + "attributes": { + "hostSensorRule": "true" + }, + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"etcdDataDir\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [], "dynamicMatch": [ { "apiGroups": [ @@ -17031,36 +29224,107 @@ "v1beta0" ], "resources": [ - "KubeletConfiguration", - "KubeletCommandLine" + "ControlPlaneInfo" ] } ], "ruleDependencies": [ { "packageName": "cautils" - }, - { - "packageName": "kubernetes.api.client" } ], - "configInputs": null, "controlConfigInputs": null, - "description": "Determines if kubelet has read-only port enabled.", - "remediation": "Start the kubelet with the --read-only-port flag set to 0.", + "description": "Ensure that the etcd data directory ownership is set to `etcd:etcd`.", + "remediation": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). 
For example,\n\n \n```\nchown etcd:etcd /var/lib/etcd\n\n```", "ruleQuery": "", "relevantCloudProviders": null }, { "guid": "", - "name": "rule-can-bash-cmd-inside-container", - "attributes": { - "microsoftK8sThreatMatrix": "Execution::Bash/cmd inside container", - "armoBuiltin": true, - "armoOpa": "true" - }, + "name": "lease-in-default-namespace", "creationTime": "", - "rule": "package armo_builtins\nimport data.cautils as cautils\nimport data\n\n\n# Fails if container has bash/cmd inside it \n# Pods\ndeny [msga] {\n pod := input[_]\n container := pod.spec.containers[i]\n res := armo.get_image_scan_summary({\"type\":\"imageTag\",\"value\":container.image,\"size\":1})\n\tscan := res[_]\n is_bash_container(scan)\n\tpath := sprintf(\"spec.containers[%v].image\", [format_int(i, 10)])\n \n msga := {\n\t\t\"alertMessage\": sprintf(\"the following container: %v has bash/cmd inside it.\", [container.name]),\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"container\" : [{container.name}]\n\t\t\t}\n\t\t},\n\t}\n}\n\n\n# Workloads\ndeny [msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n res := armo.get_image_scan_summary({\"type\":\"imageTag\",\"value\":container.image,\"size\":1})\n\tscan := res[_]\n is_bash_container(scan)\n\n \n msga := {\n\t\t\"alertMessage\": sprintf(\"the following container: %v has bash/cmd inside it.\", [container.name]),\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"container\" : [{container.name}]\n\t\t\t}\n\t\t},\n\t}\n}\n\n# Cronjobs\ndeny [msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n res := armo.get_image_scan_summary({\"type\":\"imageTag\",\"value\":container.image,\"size\":1})\n\tscan := res[_]\n is_bash_container(scan)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following container: %v has bash/cmd inside it.\", [container.name]),\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"container\" : [{container.name}]\n\t\t\t}\n\t\t},\n\t}\n}\n\n\nis_bash_container(scan) {\n\t# see default-config-inputs.json for list values\n\tshells := data.postureControlInputs.listOfDangerousArtifacts\n\tshell := shells[_]\n\tcautils.list_contains(scan.listOfDangerousArtifacts, shell)\n}\n", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": 
failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "coordination.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Lease" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-that-the-api-server-authorization-mode-argument-includes-Node", + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"kubelet nodes can read objects that are not associated with them\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--authorization-mode=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tnot \"Node\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, [\"Node\"])\n\tfixed_flag = sprintf(\"%s=%s\", [\"--authorization-mode\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--authorization-mode\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--authorization-mode=Node\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], 
\"kube-apiserver\")\n}\n", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Restrict kubelet nodes to reading only objects associated with them.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to a value that includes `Node`.\n\n \n```\n--authorization-mode=Node,RBAC\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `Node` authorization is not enabled.", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-that-the-admission-control-plugin-ServiceAccount-is-set", + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"admission control plugin AlwaysAdmit is enabled. This is equal to turning off all admission controllers\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--disable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\t\"ServiceAccount\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := [val | val := flag.values[j]; val != \"ServiceAccount\"]\n\tresult = get_retsult(fixed_values, i)\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) == 0\n\tresult = {\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) > 0\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": sprintf(\"--disable-admission-plugins=%v\", [concat(\",\", fixed_values)]),\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Automate service accounts management.", + "remediation": "Follow the 
documentation and create `ServiceAccount` objects as per your environment. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and ensure that the `--disable-admission-plugins` parameter is set to a value that does not include `ServiceAccount`.\n\n#### Impact Statement\nNone.\n\n#### Default Value\nBy default, `ServiceAccount` is set.", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "containers-mounting-docker-socket", + "creationTime": "", + "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volume := pod.spec.volumes[i]\n\thost_path := volume.hostPath\n is_runtime_socket_mounting(host_path)\n\tpath := sprintf(\"spec.volumes[%v]\", [format_int(i, 10)])\n\tvolumeMounts := pod.spec.containers[j].volumeMounts\n\tpathMounts = volume_mounts(volume.name, volumeMounts, sprintf(\"spec.containers[%v]\", [j]))\n\tfinalPath := array.concat([path], pathMounts)\n msga := {\n\t\t\"alertMessage\": sprintf(\"volume: %v in pod: %v has mounting to Docker internals.\", [volume.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\":finalPath,\n\t\t\"failedPaths\": finalPath,\n\t\t\"fixPaths\":[],\n\t\t\"alertScore\": 5,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\t\n}\n\n\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volume := wl.spec.template.spec.volumes[i]\n\thost_path := volume.hostPath\n is_runtime_socket_mounting(host_path)\n\tpath := sprintf(\"spec.template.spec.volumes[%v]\", [format_int(i, 10)])\n\tvolumeMounts := wl.spec.template.spec.containers[j].volumeMounts\n\tpathMounts = volume_mounts(volume.name,volumeMounts, sprintf(\"spec.template.spec.containers[%v]\", [j]))\n\tfinalPath := array.concat([path], pathMounts)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"volume: %v in %v: %v has mounting to Docker internals.\", [ volume.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": finalPath,\n\t\t\"failedPaths\": finalPath,\n\t\t\"fixPaths\":[],\n\t\t\"alertScore\": 5,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tvolume = wl.spec.jobTemplate.spec.template.spec.volumes[i]\n host_path := volume.hostPath\n is_runtime_socket_mounting(host_path)\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.volumes[%v]\", [format_int(i, 10)])\n\tvolumeMounts := wl.spec.jobTemplate.spec.template.spec.containers[j].volumeMounts\n\tpathMounts = volume_mounts(volume.name,volumeMounts, sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v]\", [j]))\n\tfinalPath := array.concat([path], pathMounts)\n msga := {\n\t\t\"alertMessage\": sprintf(\"volume: %v in %v: %v has mounting to Docker internals.\", [ volume.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": finalPath,\n\t\t\"failedPaths\": finalPath,\n\t\t\"fixPaths\":[],\n\t\t\"alertScore\": 5,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nvolume_mounts(name, volume_mounts, str) = [path] {\n\tname == volume_mounts[j].name\n\tpath := sprintf(\"%s.volumeMounts[%v]\", [str, j])\n} else = []\n\nis_runtime_socket_mounting(host_path) {\n\thost_path.path == \"/var/run/docker.sock\"\n}\n\nis_runtime_socket_mounting(host_path) 
{\n\thost_path.path == \"/var/run/docker\"\n}\n\nis_runtime_socket_mounting(host_path) {\n\thost_path.path == \"/run/containerd/containerd.sock\"\n}\n\nis_runtime_socket_mounting(host_path) {\n\thost_path.path == \"/var/run/crio/crio.sock\"\n}\n", "resourceEnumerator": "", "ruleLanguage": "Rego", "match": [ @@ -17102,33 +29366,643 @@ ] } ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "configInputs": [ - "settings.postureControlInputs.listOfDangerousArtifacts" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.listOfDangerousArtifacts", - "name": "Shell executable in container", - "description": "Kubescape checks if container images have the any of the these shell executables." - } - ], - "description": "determines which containers have dangerous artifacts (based on the list of dangerous artifacts)", + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Check hostpath. If the path is set to one of the container runtime socket, the container has access to container runtime - fail.", "remediation": "", "ruleQuery": "armo_builtins", "relevantCloudProviders": null }, { "guid": "", - "name": "set-seLinuxOptions", + "name": "rule-can-list-get-secrets-v1", "attributes": { - "armoBuiltin": true + "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server", + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" }, "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can list/get secrets \ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"get\", \"list\", \"watch\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"secrets\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can read secrets\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == 
subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "determines which users can list/get secrets", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "poddisruptionbudget-in-default-namespace", + "creationTime": "", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "PodDisruptionBudget" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "exposure-to-internet", + "creationTime": "", + "rule": "package armo_builtins\n\n# Checks if NodePort or LoadBalancer is connected to a workload to expose something\ndeny[msga] {\n service := input[_]\n service.kind == \"Service\"\n is_exposed_service(service)\n\n wl := input[_]\n spec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Pod\", \"Job\", \"CronJob\"}\n spec_template_spec_patterns[wl.kind]\n is_same_namespace(wl.metadata, service.metadata)\n pod := get_pod_spec(wl)[\"spec\"]\n wl_connected_to_service(pod, service)\n failPath := [\"spec.type\"]\n msga := {\n \"alertMessage\": sprintf(\"workload '%v' is exposed through service '%v'\", [wl.metadata.name, service.metadata.name]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 7,\n \"fixPaths\": [],\n \"failedPaths\": [],\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n \"relatedObjects\": [{\n \"object\": service,\n\t\t \"reviewPaths\": failPath,\n \"failedPaths\": failPath,\n }]\n }\n}\n\n# Checks if Ingress is connected to a service and a workload to expose something\ndeny[msga] {\n ingress := input[_]\n ingress.kind == \"Ingress\"\n\n svc := input[_]\n svc.kind == \"Service\"\n\n # Make sure that they belong to the same namespace\n svc.metadata.namespace == ingress.metadata.namespace\n\n # avoid duplicate alerts\n # if service is already exposed through NodePort or LoadBalancer workload will fail on that\n not 
is_exposed_service(svc)\n\n wl := input[_]\n spec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Pod\", \"Job\", \"CronJob\"}\n spec_template_spec_patterns[wl.kind]\n is_same_namespace(wl.metadata, svc.metadata)\n wl_connected_to_service(wl, svc)\n\n result := svc_connected_to_ingress(svc, ingress)\n\n msga := {\n \"alertMessage\": sprintf(\"workload '%v' is exposed through ingress '%v'\", [wl.metadata.name, ingress.metadata.name]),\n \"packagename\": \"armo_builtins\",\n \"failedPaths\": [],\n \"fixPaths\": [],\n \"alertScore\": 7,\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n \"relatedObjects\": [\n\t\t{\n\t \"object\": ingress,\n\t\t \"reviewPaths\": result,\n\t \"failedPaths\": result,\n\t },\n\t\t{\n\t \"object\": svc,\n\t\t}\n ]\n }\n}\n\n# ====================================================================================\n\nis_exposed_service(svc) {\n svc.spec.type == \"NodePort\"\n}\n\nis_exposed_service(svc) {\n svc.spec.type == \"LoadBalancer\"\n}\n\n\nwl_connected_to_service(wl, svc) {\n count({x | svc.spec.selector[x] == wl.metadata.labels[x]}) == count(svc.spec.selector)\n}\n\nwl_connected_to_service(wl, svc) {\n wl.spec.selector.matchLabels == svc.spec.selector\n}\n\nwl_connected_to_service(wl, svc) {\n count({x | svc.spec.selector[x] == wl.spec.template.metadata.labels[x]}) == count(svc.spec.selector)\n}\n\n# check if service is connected to ingress\nsvc_connected_to_ingress(svc, ingress) = result {\n result := [path |\n rule := ingress.spec.rules[i]\n paths := rule.http.paths[j]\n svc.metadata.name == paths.backend.service.name\n path := sprintf(\"spec.rules[%d].http.paths[%d].backend.service.name\", [i,j])\n ]\n count(result) > 0\n}\n\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}\n\n\n\n# get_volume - get resource spec paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_pod_spec(resources) := result {\n\tresources_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresources_kinds[resources.kind]\n\tresult = {\"spec\": resources.spec.template, \"start_of_path\": \"spec.template.\"}\n}\n\n# get_volume - get resource spec paths for \"Pod\"\nget_pod_spec(resources) := result {\n\tresources.kind == \"Pod\"\n\tresult = {\"spec\": resources, \"start_of_path\": \"\"}\n}\n\n# get_volume - get resource spec paths for \"CronJob\"\nget_pod_spec(resources) := result {\n\tresources.kind == \"CronJob\"\n\tresult = {\"spec\": resources.spec.jobTemplate.spec.template.spec, \"start_of_path\": \"spec.jobTemplate.spec.template.spec.\"}\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "Service" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + }, + { + "apiGroups": [ + "networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Ingress" + ] + } + ], + 
"ruleDependencies": null, + "controlConfigInputs": null, + "description": "fails in case the running workload has binded Service or Ingress that are exposing it on Internet.", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "alert-mount-potential-credentials-paths", + "creationTime": "", + "rule": "package armo_builtins\nimport future.keywords.if\n\n\ndeny[msga] {\n\tprovider := data.dataControlInputs.cloudProvider\n\tprovider != \"\"\n\tresources := input[_]\n\tspec_data := get_pod_spec(resources)\n\tspec := spec_data[\"spec\"]\n volumes := spec.volumes\n volume := volumes[i]\n\tstart_of_path := spec_data[\"start_of_path\"]\n result := is_unsafe_paths(volume, start_of_path, provider, i)\n\tvolumeMounts := spec.containers[j].volumeMounts\n\tpathMounts = volume_mounts(volume.name, volumeMounts, sprintf(\"%vcontainers[%d]\", [start_of_path, j]))\n\tfinalPath := array.concat([result], pathMounts)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as volume with potential credentials access.\", [resources.kind, resources.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": finalPath,\n\t\t\"failedPaths\": finalPath,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resources]\n\t\t}\n\t}\t\n}\n\n# get_volume - get resource spec paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_pod_spec(resources) := result {\n\tresources_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresources_kinds[resources.kind]\n\tresult = {\"spec\": resources.spec.template.spec, \"start_of_path\": \"spec.template.spec.\"}\n}\n\n# get_volume - get resource spec paths for \"Pod\"\nget_pod_spec(resources) := result {\n\tresources.kind == \"Pod\"\n\tresult = {\"spec\": resources.spec, \"start_of_path\": \"spec.\"}\n}\n\n# get_volume - get resource spec paths for \"CronJob\"\nget_pod_spec(resources) := result {\n\tresources.kind == \"CronJob\"\n\tresult = {\"spec\": resources.spec.jobTemplate.spec.template.spec, \"start_of_path\": \"spec.jobTemplate.spec.template.spec.\"}\n}\n\n\n# is_unsafe_paths - looking for cloud provider (eks/gke/aks) paths that have the potential of accessing credentials\nis_unsafe_paths(volume, start_of_path, provider, i) = result {\n\tunsafe := unsafe_paths(provider)\n\tunsafe[_] == fix_path(volume.hostPath.path)\n\tresult = sprintf(\"%vvolumes[%d]\", [start_of_path, i])\n}\n\n\n# fix_path - adding \"/\" at the end of the path if doesn't exist and if not a file path.\nfix_path(path) := result if {\n\n\t# filter file path\n not regex.match(`[\\\\w-]+\\\\.`, path)\n\n\t# filter path that doesn't end with \"/\"\n not endswith(path, \"/\")\n\n\t# adding \"/\" to the end of the path\n result = sprintf(\"%v/\", [path])\n} else := path\n\n\n\n# eks unsafe paths\nunsafe_paths(x) := [\"/.aws/\", \n\t\t\t\t\t\"/.aws/config/\", \n\t\t\t\t\t\"/.aws/credentials/\"] if {x==\"eks\"}\n\n# aks unsafe paths\nunsafe_paths(x) := [\"/etc/\",\n\t\t\t\t\t\"/etc/kubernetes/\",\n\t\t\t\t\t\"/etc/kubernetes/azure.json\", \n\t\t\t\t\t\"/.azure/\",\n\t\t\t\t\t\"/.azure/credentials/\", \n\t\t\t\t\t\"/etc/kubernetes/azure.json\"] if {x==\"aks\"}\n\n# gke unsafe paths\nunsafe_paths(x) := [\"/.config/gcloud/\", \n\t\t\t\t\t\"/.config/\", \n\t\t\t\t\t\"/gcloud/\", \n\t\t\t\t\t\"/.config/gcloud/application_default_credentials.json\",\n\t\t\t\t\t\"/gcloud/application_default_credentials.json\"] if 
{x==\"gke\"}\n\nvolume_mounts(name, volume_mounts, str) = [path] {\n\tname == volume_mounts[j].name\n\tpath := sprintf(\"%s.volumeMounts[%v]\", [str, j])\n} else = []", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "determines if any workload contains a hostPath volume", + "remediation": "Try to refrain from using hostPath mounts", + "ruleQuery": "", + "relevantCloudProviders": [ + "EKS", + "GKE", + "AKS" + ] + }, + { + "guid": "", + "name": "ensure-that-the-api-server-authorization-mode-argument-is-not-set-to-AlwaysAllow", + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"AlwaysAllow authorization mode is enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--authorization-mode=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# Check if include AlwaysAllow\n\t\"AlwaysAllow\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := [val | val = flag.values[_]; val != \"AlwaysAllow\"]\n\tfixed_flag = get_fixed_flag(fixed_values)\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\n\nget_fixed_flag(values) = fixed {\n\tcount(values) == 0\n\tfixed = \"--authorization-mode=RBAC\" # If no authorization-mode, set it to RBAC, as recommended by CIS\n}\nget_fixed_flag(values) = fixed {\n\tcount(values) > 0\n\tfixed = sprintf(\"--authorization-mode=%s\", [concat(\",\", values)])\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + 
"ruleDependencies": [], + "controlConfigInputs": null, + "description": "Do not always authorize all requests.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to values other than `AlwaysAllow`. One such example could be as below.\n\n \n```\n--authorization-mode=RBAC\n\n```\n\n#### Impact Statement\nOnly authorized requests will be served.\n\n#### Default Value\nBy default, `AlwaysAllow` is not enabled.", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-that-the-client-certificate-authorities-file-ownership-is-set-to-root-root", + "attributes": { + "hostSensorRule": "true" + }, + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"clientCAFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "controlConfigInputs": null, + "description": "Ensure that the certificate authorities file ownership is set to `root:root`.", + "remediation": "Run the following command to modify the ownership of the `--client-ca-file`.\n\n \n```\nchown root:root \n\n```", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "etcd-peer-client-auth-cert", + "creationTime": "", + "rule": "package armo_builtins\n\n# Check if --client-cert-auth is set to true\ndeny[msga] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Etcd server is not requiring a valid client certificate.\",\n\t\t\"alertScore\": 7,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == 
\"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--peer-client-cert-auth\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--peer-client-cert-auth=true\",\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--peer-client-cert-auth=false\")\n\tfixed = replace(cmd[i], \"--peer-client-cert-auth=false\", \"--peer-client-cert-auth=true\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "etcd should be configured for peer authentication.", + "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--peer-client-cert-auth=true\n\n```\n\n#### Impact Statement\nAll peers attempting to communicate with the etcd server will require a valid client certificate for authentication.\n\n#### Default Value\n**Note:** This recommendation is applicable only for etcd clusters. 
If you are using only one etcd server in your environment then this recommendation is not applicable.\n\n By default, `--peer-client-cert-auth` argument is set to `false`.", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "access-container-service-account-v1", + "attributes": { + "useFromKubescapeVersion": "v1.0.133", + "m$K8sThreatMatrix": "Credential Access::Access container service account, Lateral Movement::Container service account", + "resourcesAggregator": "subject-role-rolebinding" + }, + "creationTime": "", + "rule": "package armo_builtins\n\n\n# Returns the rbac permission of each service account\ndeny[msga] {\n subjectVector := input[_]\n subjectVector.kind == \"ServiceAccount\"\n \n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n subject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service account: %v has the following permissions in the cluster\", [subjectVector.name]),\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [],\n \"fixPaths\":[],\n\t\t\"alertScore\": 7,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": subjectVector\n\t\t}\n\t}\n}\n\n# ===============================================================\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}", + "resourceEnumerator": "package armo_builtins\n\n\n# Returns the rbac permission of each service account\ndeny[msga] {\n subjectVector := input[_]\n subjectVector.kind == \"ServiceAccount\"\n \n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n subject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service account: %v has the following permissions in the cluster\", [subjectVector.name]),\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [],\n \"fixPaths\":[],\n\t\t\"alertScore\": 7,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": subjectVector\n\t\t}\n\t}\n}\n\n# ===============================================================\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding", + "ClusterRoleBinding", + "Role", + "ClusterRole" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "determines which service accounts can be used to access other resources in the cluster", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-that-the-cni-in-use-supports-network-policies", + "attributes": { + "hostSensorRule": "true" + }, + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Deny CNIs that don't support Network Policies.\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\n 
is_CNIInfo(obj)\n\n\tnetwork_policy_not_supported(obj.data.CNINames)\n\n\t# filter out irrelevant host-sensor data\n obj_filtered := json.filter(obj, [\"apiVersion\", \"kind\", \"metadata\", \"data/CNINames\"])\n\n msg := {\n\t\t\"alertMessage\": \"CNI doesn't support Network Policies.\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\n\t}\n}\n\nis_CNIInfo(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"CNIInfo\"\n}\n\n\n# deny if Flannel is running without calico\nnetwork_policy_not_supported(CNIs) {\n\t\"Flannel\" in CNIs\n\tnot \"Calico\" in CNIs\n}\n\n# deny if aws is running without any other CNI\nnetwork_policy_not_supported(CNIs) {\n\t\"aws\" in CNIs\n\tcount(CNIs) < 2\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "CNIInfo" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "There are a variety of CNI plugins available for Kubernetes. If the CNI in use does not support Network Policies it may not be possible to effectively restrict traffic in the cluster.", + "remediation": "If the CNI plugin in use does not support network policies, consideration should be given to making use of a different plugin, or finding an alternate mechanism for restricting traffic in the Kubernetes cluster.", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-that-the-kubelet-configuration-file-has-permissions-set-to-644-or-more-restrictive", + "attributes": { + "hostSensorRule": "true" + }, + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 420 # == 0o644\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. 
actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "controlConfigInputs": null, + "description": "", + "remediation": "", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "enforce-kubelet-client-tls-authentication-updated", + "attributes": { + "hostSensorRule": "true" + }, + "creationTime": "", + "rule": "package armo_builtins\n\n# CIS 4.2.3 https://workbench.cisecurity.org/sections/1126668/recommendations/1838643\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.x509.clientCAFile\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [\"authentication.x509.clientCAFile\"],\n\t\t\"failedPaths\": [\"authentication.x509.clientCAFile\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tnot contains(command, \"--config\")\n\n\tmsga := {\n\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": {\"cmdLine\": command},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--client-ca-file\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + 
"dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Determines if kubelet client tls authentication is enabled.", + "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-that-the-scheduler-pod-specification-file-ownership-is-set-to-root-root", + "attributes": { + "hostSensorRule": "true" + }, + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"schedulerInfo\", \"specsFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "controlConfigInputs": null, + "description": "Ensure that the scheduler pod specification file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-that-the-Container-Network-Interface-file-permissions-are-set-to-600-or-more-restrictive", + "attributes": { + "hostSensorRule": "true" + }, + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_CNIInfo(obj)\n\n\tfile_obj_path := [\"data\", \"CNIConfigFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_CNIInfo(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"CNIInfo\"\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "CNIInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "controlConfigInputs": null, + "description": "Ensure that the Container Network Interface files have permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchmod 600 \n\n```", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "psp-deny-privileged-container", + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if there all PSPs have privileged set to true\n\t# if even one PSP has privileged set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.privileged == true\n\t}\n\n\t# return al the PSPs that have privileged set to true\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tpsp.spec.privileged == true\n\n\tpath := \"spec.privileged\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has privileged set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "v1beta1" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "immutable-container-filesystem", + "creationTime": "", + "rule": "package armo_builtins\n\n\n# Fails if pods has container with mutable filesystem\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n is_mutable_filesystem(container)\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v has mutable filesystem\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with mutable filesystem \ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n is_mutable_filesystem(container)\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has container with mutable filesystem \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tis_mutable_filesystem(container)\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%d].securityContext.readOnlyRootFilesystem\", [start_of_path, i]), \"value\": \"true\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, 
wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixPath],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Default of readOnlyRootFilesystem is false. This field is only in container spec and not pod spec\nis_mutable_filesystem(container) {\n\tcontainer.securityContext.readOnlyRootFilesystem == false\n}\n\nis_mutable_filesystem(container) {\n\tnot container.securityContext.readOnlyRootFilesystem == false\n not container.securityContext.readOnlyRootFilesystem == true\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "fails if container has mutable filesystem", + "remediation": "Make sure that the securityContext.readOnlyRootFilesystem field in the container/pod spec is set to true", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "rule-can-update-configmap-v1", + "attributes": { + "microsoftK8sThreatMatrix": "Lateral Movement::CoreDNS poisoning", + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Fails if user can modify all configmaps\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"update\", \"patch\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"configmaps\", \"*\"]\n\tnot rule.resourceNames\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can modify 'coredns' configmap\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# Fails if user can modify the 'coredns' configmap (default for coredns)\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := 
subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"update\", \"patch\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"configmaps\", \"*\"]\n\t\"coredns\" in rule.resourceNames\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can modify 'coredns' configmap\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "determines which users can update/patch the 'coredns' configmap", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "list-all-validating-webhooks", + "attributes": { + "m$K8sThreatMatrix": "Credential Access::Validate admission controller" + }, + "creationTime": "", + "rule": "package armo_builtins\n\n\ndeny [msga] {\n admissionwebhooks := [admissionwebhook | admissionwebhook = input[_]; admissionwebhook.kind == \"ValidatingWebhookConfiguration\"]\n admissionwebhook := admissionwebhooks[_]\n\n \tmsga := {\n\t\t\"alertMessage\": sprintf(\"The following validating webhook configuration should be checked %v.\", [admissionwebhook.metadata.name]),\n\t\t\"alertScore\": 6,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "ValidatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Returns validating webhook configurations to be verified", + "remediation": "Analyze webhook for malicious 
behavior", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "set-seLinuxOptions", + "creationTime": "", "rule": "package armo_builtins\n\n\n# Fails if pod does not define seLinuxOptions \ndeny[msga] {\n wl := input[_]\n wl.kind == \"Pod\"\n spec := wl.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if workload does not define seLinuxOptions\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n spec := wl.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if CronJob does not define seLinuxOptions \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tspec := wl.spec.jobTemplate.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seLinuxOptions\"]\n\tno_seLinuxOptions_in_securityContext(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n no_seLinuxOptions_in_securityContext(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not define any seLinuxOptions\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nno_seLinuxOptions_in_securityContext(spec, path_to_search){\n object.get(spec, path_to_search, \"\") == \"\"\n}", "resourceEnumerator": "", "ruleLanguage": "Rego", @@ -17172,7 +30046,6 @@ } ], "ruleDependencies": [], - "configInputs": null, "controlConfigInputs": null, "description": "fails if 
workload and container do not define any seLinuxOptions", "remediation": "Make sure you set seLinuxOptions in the workload/container security context.", @@ -17181,14 +30054,12 @@ }, { "guid": "", - "name": "deny-RCE-vuln-image-pods", + "name": "exposure-to-internet-via-istio-ingress", "attributes": { - "m$K8sThreatMatrix": "Execution::Application Exploit (RCE)", - "armoBuiltin": true, - "armoOpa": "true" + "useFromKubescapeVersion": "v3.0.9" }, "creationTime": "", - "rule": "package armo_builtins\nimport data.cautils as cautils\n \n# ========= RCE : no service score 5 ================\ndeny[msga] {\n\tpod := input[_]\n\tcontainer := pod.spec.containers[i]\n\tpath := sprintf(\"spec.containers[%v].image\", [format_int(i, 10)])\n res := armo.get_image_scan_summary({\"type\":\"imageTag\",\"value\":container.image,\"size\":1})\n\tscan := res[_]\n is_unsafe_image(scan)\n\tscan.containersScanID\n\tvulnerabilities := armo.get_image_scan_details({\"containersScanID\":scan.containersScanID, \"fieldCreteria\":{\"description\":\"RCE|like,Remote Code Execution|like,remote code execution|like,remote command execution|like,Remote Command Execution|like,arbitrary code|like,code execution|like,Arbitrary Code|like,Code Execution|like,code injection|like,Code Injection|like,execute code|like,Execute Code|like,arbitrary command|like,Arbitrary Command|like,arbitrary commands|like,Arbitrary Commands|like,command injection|like,Command Injection|like,command execution|like,Command Execution|like,inject arbitrary commands|like,Inject Arbitrary Commands|like\"} })\n\tcount(vulnerabilities) > 0\n\tt := { \"containersScanID\": scan.containersScanID,\"count\":count(vulnerabilities),\"vulnerabilities\":vulnerabilities}\n\n msga := {\n \"alertMessage\": sprintf(\"image %v has %v RCE vulnerabilities\", [container.image,count(vulnerabilities)]),\n \"alertScore\": 5,\n\t\t\"fixPaths\":[],\n\t\t\"failedPaths\": [path],\n \"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"vulnerabilities\" : [vulnerabilities]\n\t\t\t}\n\t\t},\n }\n}\n\n# workloads\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n res := armo.get_image_scan_summary({\"type\":\"imageTag\",\"value\":container.image,\"size\":1})\n\tscan := res[_]\n\n is_unsafe_image(scan)\n\tscan.containersScanID\n\tvulnerabilities := armo.get_image_scan_details({\"containersScanID\":scan.containersScanID, \"fieldCreteria\":{\"description\":\"RCE|like,Remote Code Execution|like,remote code execution|like,remote command execution|like,Remote Command Execution|like,arbitrary code|like,code execution|like,Arbitrary Code|like,Code Execution|like,code injection|like,Code Injection|like,execute code|like,Execute Code|like,arbitrary command|like,Arbitrary Command|like,arbitrary commands|like,Arbitrary Commands|like,command injection|like,Command Injection|like,command execution|like,Command Execution|like,inject arbitrary commands|like,Inject Arbitrary Commands|like\"} })\n\tcount(vulnerabilities) > 0\n\tt := { \"containersScanID\": scan.containersScanID,\"count\":count(vulnerabilities),\"vulnerabilities\":vulnerabilities}\n\n msga := {\n \"alertMessage\": sprintf(\"image %v has %v RCE vulnerabilities\", 
[container.image,count(vulnerabilities)]),\n \"alertScore\": 5,\n\t\t\"fixPaths\":[],\n\t\t\"failedPaths\": [path],\n \"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"vulnerabilities\" : [vulnerabilities]\n\t\t\t}\n\t\t},\n }\n}\n\n# cronjobs\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v]\", [format_int(i, 10)])\n res := armo.get_image_scan_summary({\"type\":\"imageTag\",\"value\":container.image,\"size\":1})\n\tscan := res[_]\n\n is_unsafe_image(scan)\n\tscan.containersScanID\n\tvulnerabilities := armo.get_image_scan_details({\"containersScanID\":scan.containersScanID, \"fieldCreteria\":{\"description\":\"RCE|like,Remote Code Execution|like,remote code execution|like,remote command execution|like,Remote Command Execution|like,arbitrary code|like,code execution|like,Arbitrary Code|like,Code Execution|like,code injection|like,Code Injection|like,execute code|like,Execute Code|like,arbitrary command|like,Arbitrary Command|like,arbitrary commands|like,Arbitrary Commands|like,command injection|like,Command Injection|like,command execution|like,Command Execution|like,inject arbitrary commands|like,Inject Arbitrary Commands|like\"} })\n\tcount(vulnerabilities) > 0\n\tt := { \"containersScanID\": scan.containersScanID,\"count\":count(vulnerabilities),\"vulnerabilities\":vulnerabilities}\n\n msga := {\n \"alertMessage\": sprintf(\"image %v has %v RCE vulnerabilities\", [container.image,count(vulnerabilities)]),\n \"alertScore\": 5,\n\t\t\"fixPaths\":[],\n\t\t\"failedPaths\": [path],\n \"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"vulnerabilities\" : [vulnerabilities]\n\t\t\t}\n\t\t},\n }\n}\n\n# ======== RCE + service (not nodeport and not loadbalancer) 7 =====================\ndeny[msga] {\n\tpod := input[_]\n\tcontainer := pod.spec.containers[i]\n\tpath := sprintf(\"spec.containers[%v]\", [format_int(i, 10)])\n res := armo.get_image_scan_summary({\"type\":\"imageTag\",\"value\":container.image,\"size\":1})\n\tscan := res[_]\n\n is_unsafe_image(scan)\n\tscan.containersScanID\n\tvulnerabilities := armo.get_image_scan_details({\"containersScanID\":scan.containersScanID, \"fieldCreteria\":{\"description\":\"RCE|like,Remote Code Execution|like,remote code execution|like,remote command execution|like,Remote Command Execution|like,arbitrary code|like,code execution|like,Arbitrary Code|like,Code Execution|like,code injection|like,Code Injection|like,execute code|like,Execute Code|like,arbitrary command|like,Arbitrary Command|like,arbitrary commands|like,Arbitrary Commands|like,command injection|like,Command Injection|like,command execution|like,Command Execution|like,inject arbitrary commands|like,Inject Arbitrary Commands|like\"} })\n\tcount(vulnerabilities) > 0\n\tt := { \"containersScanID\": scan.containersScanID,\"count\":count(vulnerabilities),\"vulnerabilities\":vulnerabilities}\n\n\tservice := input[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == pod.metadata.namespace\n\tlabels := pod.metadata.labels\n\tfiltered_labels := json.remove(labels, [\"pod-template-hash\"])\n\tnp_or_lb := {\"NodePort\", \"LoadBalancer\"}\n\tnot np_or_lb[service.spec.type]\n\tcautils.is_subobject(service.spec.selector, filtered_labels)\n\n msga := {\n \"alertMessage\": sprintf(\"image %v has 
%v RCE vulnerabilities\", [container.image,count(vulnerabilities)]),\n \"alertScore\": 7,\n\t\t\"fixPaths\":[],\n\t\t\"failedPaths\": [path],\n \"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"vulnerabilities\" : [vulnerabilities]\n\t\t\t}\n\t\t},\n }\n}\n\n# workloads\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tpath := sprintf(\"spec.template.spec.containers[%v]\", [format_int(i, 10)])\n res := armo.get_image_scan_summary({\"type\":\"imageTag\",\"value\":container.image,\"size\":1})\n\tscan := res[_]\n\n is_unsafe_image(scan)\n\tscan.containersScanID\n\tvulnerabilities := armo.get_image_scan_details({\"containersScanID\":scan.containersScanID, \"fieldCreteria\":{\"description\":\"RCE|like,Remote Code Execution|like,remote code execution|like,remote command execution|like,Remote Command Execution|like,arbitrary code|like,code execution|like,Arbitrary Code|like,Code Execution|like,code injection|like,Code Injection|like,execute code|like,Execute Code|like,arbitrary command|like,Arbitrary Command|like,arbitrary commands|like,Arbitrary Commands|like,command injection|like,Command Injection|like,command execution|like,Command Execution|like,inject arbitrary commands|like,Inject Arbitrary Commands|like\"} })\n\tcount(vulnerabilities) > 0\n\tt := { \"containersScanID\": scan.containersScanID,\"count\":count(vulnerabilities),\"vulnerabilities\":vulnerabilities}\n\n\tservice := input[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tlabels := wl.spec.template.metadata.labels\n\tnp_or_lb := {\"NodePort\", \"LoadBalancer\"}\n\tnot np_or_lb[service.spec.type]\n\tcautils.is_subobject(service.spec.selector,labels)\n\n msga := {\n \"alertMessage\": sprintf(\"image %v has %v RCE vulnerabilities\", [container.image,count(vulnerabilities)]),\n \"alertScore\": 7,\n\t\t\"fixPaths\":[],\n\t\t\"failedPaths\": [path],\n \"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"vulnerabilities\" : [vulnerabilities]\n\t\t\t}\n\t\t},\n \n }\n}\n# cronjobs\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v]\", [format_int(i, 10)])\n res := armo.get_image_scan_summary({\"type\":\"imageTag\",\"value\":container.image,\"size\":1})\n\tscan := res[_]\n\n is_unsafe_image(scan)\n\tscan.containersScanID\n\tvulnerabilities := armo.get_image_scan_details({\"containersScanID\":scan.containersScanID, \"fieldCreteria\":{\"description\":\"RCE|like,Remote Code Execution|like,remote code execution|like,remote command execution|like,Remote Command Execution|like,arbitrary code|like,code execution|like,Arbitrary Code|like,Code Execution|like,code injection|like,Code Injection|like,execute code|like,Execute Code|like,arbitrary command|like,Arbitrary Command|like,arbitrary commands|like,Arbitrary Commands|like,command injection|like,Command Injection|like,command execution|like,Command Execution|like,inject arbitrary commands|like,Inject Arbitrary Commands|like\"} })\n\tcount(vulnerabilities) > 0\n\tt := { \"containersScanID\": 
scan.containersScanID,\"count\":count(vulnerabilities),\"vulnerabilities\":vulnerabilities}\n\n\tservice := input[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tlabels := wl.spec.jobTemplate.spec.template.metadata.labels\n\tnp_or_lb := {\"NodePort\", \"LoadBalancer\"}\n\tnot np_or_lb[service.spec.type]\n\tcautils.is_subobject(service.spec.selector,labels)\n\n msga := {\n \"alertMessage\": sprintf(\"image %v has %v RCE vulnerabilities\", [container.image,count(vulnerabilities)]),\n \"alertScore\": 7,\n\t\t\"fixPaths\":[],\n\t\t\"failedPaths\": [path],\n \"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"vulnerabilities\" : [vulnerabilities]\n\t\t\t}\n\t\t},\n }\n}\n\n# ======= RCE + service nodeport/loadbalancer 10 ===========================\ndeny[msga] {\n\tpod := input[_]\n\tcontainer := pod.spec.containers[i]\n\tpath := sprintf(\"spec.containers[%v]\", [format_int(i, 10)])\n res := armo.get_image_scan_summary({\"type\":\"imageTag\",\"value\":container.image,\"size\":1})\n\tscan := res[_]\n\n is_unsafe_image(scan)\n\tscan.containersScanID\n\tvulnerabilities := armo.get_image_scan_details({\"containersScanID\":scan.containersScanID, \"fieldCreteria\":{\"description\":\"RCE|like,Remote Code Execution|like,remote code execution|like,remote command execution|like,Remote Command Execution|like,arbitrary code|like,code execution|like,Arbitrary Code|like,Code Execution|like,code injection|like,Code Injection|like,execute code|like,Execute Code|like,arbitrary command|like,Arbitrary Command|like,arbitrary commands|like,Arbitrary Commands|like,command injection|like,Command Injection|like,command execution|like,Command Execution|like,inject arbitrary commands|like,Inject Arbitrary Commands|like\"} })\n\tcount(vulnerabilities) > 0\n\tt := { \"containersScanID\": scan.containersScanID,\"count\":count(vulnerabilities),\"vulnerabilities\":vulnerabilities}\n\n\tservice := input[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == pod.metadata.namespace\n\tlabels := pod.metadata.labels\n\tfiltered_labels := json.remove(labels, [\"pod-template-hash\"])\n\tnp_or_lb := {\"NodePort\", \"LoadBalancer\"}\n\tnp_or_lb[service.spec.type]\n\tcautils.is_subobject(service.spec.selector, filtered_labels)\n\n msga := {\n \"alertMessage\": sprintf(\"image %v has %v RCE vulnerabilities\", [container.image,count(vulnerabilities)]),\n \"alertScore\": 10,\n\t\t\"fixPaths\":[],\n\t\t\"failedPaths\": [path],\n \"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"vulnerabilities\" : [vulnerabilities]\n\t\t\t}\n\t\t},\n }\n}\n\n# workloads\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tpath := sprintf(\"spec.template.spec.containers[%v]\", [format_int(i, 10)])\n res := armo.get_image_scan_summary({\"type\":\"imageTag\",\"value\":container.image,\"size\":1})\n\tscan := res[_]\n\n is_unsafe_image(scan)\n\tscan.containersScanID\n\tvulnerabilities := armo.get_image_scan_details({\"containersScanID\":scan.containersScanID, \"fieldCreteria\":{\"description\":\"RCE|like,Remote Code Execution|like,remote code execution|like,remote command execution|like,Remote Command Execution|like,arbitrary code|like,code execution|like,Arbitrary 
Code|like,Code Execution|like,code injection|like,Code Injection|like,execute code|like,Execute Code|like,arbitrary command|like,Arbitrary Command|like,arbitrary commands|like,Arbitrary Commands|like,command injection|like,Command Injection|like,command execution|like,Command Execution|like,inject arbitrary commands|like,Inject Arbitrary Commands|like\"} })\n\tcount(vulnerabilities) > 0\n\tt := { \"containersScanID\": scan.containersScanID,\"count\":count(vulnerabilities),\"vulnerabilities\":vulnerabilities}\n\n\tservice := input[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tlabels := wl.spec.template.metadata.labels\n\tnp_or_lb := {\"NodePort\", \"LoadBalancer\"}\n\tnp_or_lb[service.spec.type]\n\tcautils.is_subobject(service.spec.selector,labels)\n\n msga := {\n \"alertMessage\": sprintf(\"image %v has %v RCE vulnerabilities\", [container.image,count(vulnerabilities)]),\n \"alertScore\": 10,\n\t\t\"fixPaths\":[],\n\t\t\"failedPaths\": [path],\n \"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"vulnerabilities\" : [vulnerabilities]\n\t\t\t}\n\t\t},\n }\n}\n\n# cronjobs\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v]\", [format_int(i, 10)])\n res := armo.get_image_scan_summary({\"type\":\"imageTag\",\"value\":container.image,\"size\":1})\n\tscan := res[_]\n\n is_unsafe_image(scan)\n\tscan.containersScanID\n\tvulnerabilities := armo.get_image_scan_details({\"containersScanID\":scan.containersScanID, \"fieldCreteria\":{\"description\":\"RCE|like,Remote Code Execution|like,remote code execution|like,remote command execution|like,Remote Command Execution|like,arbitrary code|like,code execution|like,Arbitrary Code|like,Code Execution|like,code injection|like,Code Injection|like,execute code|like,Execute Code|like,arbitrary command|like,Arbitrary Command|like,arbitrary commands|like,Arbitrary Commands|like,command injection|like,Command Injection|like,command execution|like,Command Execution|like,inject arbitrary commands|like,Inject Arbitrary Commands|like\"} })\n\tcount(vulnerabilities) > 0\n\tt := { \"containersScanID\": scan.containersScanID,\"count\":count(vulnerabilities),\"vulnerabilities\":vulnerabilities}\n\n\tservice := input[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tlabels := wl.spec.jobTemplate.spec.template.metadata.labels\n\tnp_or_lb := {\"NodePort\", \"LoadBalancer\"}\n\tnp_or_lb[service.spec.type]\n\tcautils.is_subobject(service.spec.selector,labels)\n\n msga := {\n \"alertMessage\": sprintf(\"image %v has %v RCE vulnerabilities\", [container.image,count(vulnerabilities)]),\n \"alertScore\": 10,\n\t\t\"fixPaths\":[],\n\t\t\"failedPaths\": [path],\n \"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"vulnerabilities\" : [vulnerabilities]\n\t\t\t}\n\t\t},\n }\n}\n\n#treat as potentially critical\nis_unsafe_image(scanresult) {\n\tscanresult.numOfUnknownSeverity > 0\n}\nis_unsafe_image(scanresult) {\n\tscanresult.numOfNegligibleSeverity > 0\n}\n\nis_unsafe_image(scanresult) {\n\tscanresult.numOfLowSeverity > 0\n}\n\nis_unsafe_image(scanresult) {\n\tscanresult.numOfMeduiumSeverity > 0\n}\n\nis_unsafe_image(scanresult) {\n\tscanresult.numOfHighSeverity > 
0\n}\n\nis_unsafe_image(scanresult) {\n\tscanresult.numOfCriticalSeverity > 0\n}", + "rule": "package armo_builtins\nimport future.keywords.in\n\n\ndeny[msga] {\n virtualservice := input[_]\n virtualservice.kind == \"VirtualService\"\n\n # Get the namescape of the VirtualService\n vs_ns := get_namespace(virtualservice)\n # Looping over the gateways of the VirtualService\n vs_gw_name := virtualservice.spec.gateways[_]\n # Get the namespace of the Gateway\n vs_gw = get_vs_gw_ns(vs_ns, vs_gw_name)\n\n # Check if the VirtualService is connected to a Gateway\n gateway := input[_]\n gateway.kind == \"Gateway\"\n gateway.metadata.name == vs_gw.name\n get_namespace(gateway) == vs_gw.namespace\n\n # print(\"Found the gateway that the virtualservice is connected to\", gateway)\n\n # Either the gateway is exposed via LoadBalancer service OR has \"public\" suffix\n gateway_service := is_gateway_public(gateway, input)\n\n # print(\"Gateway is public\", gateway)\n\n # Check if the VirtualService is connected to an workload\n # First, find the service that the VirtualService is connected to\n connected_service := input[_]\n connected_service.kind == \"Service\"\n fqsn := get_fqsn(get_namespace(virtualservice), virtualservice.spec.http[i].route[j].destination.host)\n target_ns := split(fqsn,\".\")[1]\n target_name := split(fqsn,\".\")[0]\n # Check if the service is in the same namespace as the VirtualService\n get_namespace(connected_service) == target_ns\n # Check if the service is the target of the VirtualService\n connected_service.metadata.name == target_name\n\n # print(\"Found the service that the virtualservice is connected to\", connected_service)\n\n # Check if the service is connected to a workload\n wl := input[_]\n is_same_namespace(connected_service, wl)\n spec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Pod\", \"Job\", \"CronJob\"}\n spec_template_spec_patterns[wl.kind]\n pod := get_pod_spec(wl)[\"spec\"]\n wl_connected_to_service(pod, connected_service)\n\n # print(\"Found the workload that the service is connected to\", wl)\n\n failedPaths := [sprintf(\"spec.http[%d].routes[%d].destination.host\", [i,j])]\n\n # print(\"Found the failed paths\", failedPaths)\n\n msga := {\n \"alertMessage\": sprintf(\"workload '%v' is exposed through virtualservice '%v'\", [wl.metadata.name, virtualservice.metadata.name]),\n \"packagename\": \"armo_builtins\",\n \"failedPaths\": [],\n \"fixPaths\": [],\n \"alertScore\": 7,\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n \"relatedObjects\": [\n\t {\n\t \"object\": virtualservice,\n\t \"reviewPaths\": failedPaths,\n\t \"failedPaths\": failedPaths,\n\t },\n\t {\n\t \"object\": gateway_service,\n\t },\n\t {\n\t \"object\": gateway,\n\t },\n {\n \"object\": connected_service,\n }\n ]\n }\n}\n\n# ====================================================================================\n\nis_gateway_public(gateway, inputs) = svc {\n endswith(gateway.metadata.name, \"public\")\n inputs[i].kind == \"Service\"\n inputs[i].metadata.namespace == \"istio-system\"\n gateway.spec.selector[_] == inputs[i].metadata.labels[_]\n svc := inputs[i]\n}\n\nis_gateway_public(gateway, inputs) = svc {\n inputs[i].kind == \"Service\"\n inputs[i].metadata.namespace == \"istio-system\"\n gateway.spec.selector[_] == inputs[i].metadata.labels[_]\n is_exposed_service(inputs[i])\n svc := inputs[i]\n}\n\nget_namespace(obj) = namespace {\n obj.metadata\n obj.metadata.namespace\n namespace := obj.metadata.namespace\n}\n\nget_namespace(obj) = 
namespace {\n not obj.metadata.namespace\n namespace := \"default\"\n}\n\nget_vs_gw_ns(vs_ns, vs_gw_name) = {\"name\": name, \"namespace\": ns} {\n # Check if there is a / in the gateway name\n count(split(vs_gw_name, \"/\")) == 2\n ns := split(vs_gw_name, \"/\")[0]\n name := split(vs_gw_name, \"/\")[1]\n}\n\nget_vs_gw_ns(vs_ns, vs_gw_name) = {\"name\": name, \"namespace\": ns} {\n # Check if there is no / in the gateway name\n count(split(vs_gw_name, \"/\")) == 1\n ns := vs_ns\n name := vs_gw_name\n}\n\nis_same_namespace(obj1, obj2) {\n obj1.metadata.namespace == obj2.metadata.namespace\n}\n\nis_same_namespace(obj1, obj2) {\n not obj1.metadata.namespace\n obj2.metadata.namespace == \"default\"\n}\n\nis_same_namespace(obj1, obj2) {\n not obj2.metadata.namespace\n obj1.metadata.namespace == \"default\"\n}\n\nis_same_namespace(obj1, obj2) {\n not obj1.metadata.namespace\n not obj2.metadata.namespace\n}\n\nis_exposed_service(svc) {\n svc.spec.type == \"NodePort\"\n}\n\nis_exposed_service(svc) {\n svc.spec.type == \"LoadBalancer\"\n}\n\n\nwl_connected_to_service(wl, svc) {\n count({x | svc.spec.selector[x] == wl.metadata.labels[x]}) == count(svc.spec.selector)\n}\n\nwl_connected_to_service(wl, svc) {\n wl.spec.selector.matchLabels == svc.spec.selector\n}\n\nwl_connected_to_service(wl, svc) {\n count({x | svc.spec.selector[x] == wl.spec.template.metadata.labels[x]}) == count(svc.spec.selector)\n}\n\nsvc_connected_to_virtualservice(svc, virtualservice) = result {\n host := virtualservice.spec.http[i].route[j].destination.host\n svc.metadata.name == host\n result := [sprintf(\"spec.http[%d].routes[%d].destination.host\", [i,j])]\n}\n\nget_fqsn(ns, dest_host) = fqsn {\n # verify that this name is without the namespace\n count(split(\".\", dest_host)) == 1\n fqsn := sprintf(\"%v.%v.svc.cluster.local\", [dest_host, ns])\n}\n\nget_fqsn(ns, dest_host) = fqsn {\n count(split(\".\", dest_host)) == 2\n fqsn := sprintf(\"%v.svc.cluster.local\", [dest_host])\n}\n\nget_fqsn(ns, dest_host) = fqsn {\n count(split(\".\", dest_host)) == 4\n fqsn := dest_host\n}\n\n\n\n# get_volume - get resource spec paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_pod_spec(resources) := result {\n\tresources_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresources_kinds[resources.kind]\n\tresult = {\"spec\": resources.spec.template, \"start_of_path\": \"spec.template.\"}\n}\n\n# get_volume - get resource spec paths for \"Pod\"\nget_pod_spec(resources) := result {\n\tresources.kind == \"Pod\"\n\tresult = {\"spec\": resources, \"start_of_path\": \"\"}\n}\n\n# get_volume - get resource spec paths for \"CronJob\"\nget_pod_spec(resources) := result {\n\tresources.kind == \"CronJob\"\n\tresult = {\"spec\": resources.spec.jobTemplate.spec.template.spec, \"start_of_path\": \"spec.jobTemplate.spec.template.spec.\"}\n}\n", "resourceEnumerator": "", "ruleLanguage": "Rego", "match": [ @@ -17229,29 +30100,34 @@ "Job", "CronJob" ] - } - ], - "ruleDependencies": [ + }, { - "packageName": "cautils" + "apiGroups": [ + "networking.istio.io" + ], + "apiVersions": [ + "v1", + "v1beta1" + ], + "resources": [ + "VirtualService", + "Gateways" + ] } ], - "configInputs": null, + "ruleDependencies": null, "controlConfigInputs": null, - "description": "determines if pods has vulnerable image with remote code execution", + "description": "fails if the running workload is bound to a Service that is exposed to the Internet through Istio Gateway.", "remediation": "", - "ruleQuery": 
"package armo_builtins", + "ruleQuery": "armo_builtins", "relevantCloudProviders": null }, { "guid": "", - "name": "ensure-that-the-api-server-anonymous-auth-argument-is-set-to-false", - "attributes": { - "armoBuiltin": true - }, + "name": "outdated-k8s-version", "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"anonymous requests is enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--anonymous-auth=true\")\n\tfixed = replace(cmd[i], \"--anonymous-auth=true\", \"--anonymous-auth=false\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--anonymous-auth\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--anonymous-auth=false\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\tnode := input[_]\n\tnode.kind == \"Node\"\n\tcurrent_version := node.status.nodeInfo.kubeletVersion\n has_outdated_version(current_version)\n\tpath := \"status.nodeInfo.kubeletVersion\"\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Your kubelet version: %s, in node: %s is outdated\", [current_version, node.metadata.name]),\n\t\t\t\"reviewPaths\": [path],\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [node]},\n\t}\n}\n\n\nhas_outdated_version(version) {\n\t# the `supported_k8s_versions` is validated in the validations script against \"https://api.github.com/repos/kubernetes/kubernetes/releases\"\n supported_k8s_versions := [\"v1.35\", \"v1.34\", \"v1.33\"]\n\tevery v in supported_k8s_versions{\n\t\tnot startswith(version, v)\n\t}\n}\n", + "resourceEnumerator": "", "ruleLanguage": "Rego", "match": [ { @@ -17262,44 +30138,11 @@ "v1" ], "resources": [ - "Pod" + "Node" ] } ], "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "Disable anonymous requests to the API server.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--anonymous-auth=false\n\n```\n\n#### Impact Statement\nAnonymous requests will be rejected.\n\n#### Default Value\nBy default, 
anonymous access is enabled.", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "nginx-ingress-snippet-annotation-vulnerability", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tis_nginx_image(image)\n\tis_tag_image(image)\n\n\t# Extracting version from image tag\n\ttag_version_match := regex.find_all_string_submatch_n(\"[0-9]+\\\\.[0-9]+\\\\.[0-9]+\", image, -1)[0][0]\n image_version_str_arr := split(tag_version_match,\".\")\n\timage_version_arr := [to_number(image_version_str_arr[0]),to_number(image_version_str_arr[1]),to_number(image_version_str_arr[2])]\n\n\t# Check if vulnerable \n\tis_vulnerable(image_version_arr, deployment.metadata.namespace)\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25742. Deployment %v\", [deployment.metadata.name]),\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [deployment]},\n\t\t}\n}\n\n\t\nis_nginx_image(image) {\n\tcontains(image, \"nginx-controller\")\n}\n\nis_nginx_image(image) {\n\tcontains(image, \"ingress-controller\")\n}\n\nis_nginx_image(image) {\n\tcontains(image, \"ingress-nginx\")\n}\n\nis_allow_snippet_annotation_on(namespace) {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n\tconfigmap_on_ingress_namespace := [configmap | configmap= configmaps[_]; configmap.metadata.namespace == namespace]\n\tconfig_maps_with_snippet := [configmap | configmap= configmap_on_ingress_namespace[_]; configmap.data[\"allow-snippet-annotations\"] == \"false\"]\n\tcount(config_maps_with_snippet) < 1\n}\n\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 0\n\timage_version[1] < 49\n\tis_allow_snippet_annotation_on(namespace)\n}\n\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 0\n\timage_version[1] == 49\n\timage_version[2] == 0\n\tis_allow_snippet_annotation_on(namespace)\n}\n\t\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 1\n\timage_version[1] == 0\n\timage_version[2] == 0\n\tis_allow_snippet_annotation_on(namespace)\n}\n\nis_tag_image(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n not endswith(img, \"/\")\n}", - "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tisNginxImage(image)\n\tis_tag_image(image)\n\tisVulnerable(image, deployment.metadata.namespace)\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25742. 
%v\", [deployment]),\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [deployment]},\n\t\t}\n}\n\n\t\nisNginxImage(image) {\n\tcontains(image, \"nginx-controller\")\n}\n\nisNginxImage(image) {\n\tcontains(image, \"ingress-controller\")\n}\n\nisNginxImage(image) {\n\tcontains(image, \"ingress-nginx\")\n}\n\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n startswith(tag, \"v\")\n tag <= \"v0.49\"\n}\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n startswith(tag, \"v\")\n tag == \"v1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n startswith(tag, \"v\")\n\ttag <= \"v0.49\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n startswith(tag, \"v\")\n\ttag == \"v1.0.0\"\n}\n\n###### without 'v'\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n not startswith(tag, \"v\")\n tag <= \"0.49\"\n}\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n not startswith(tag, \"v\")\n tag == \"1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n not startswith(tag, \"v\")\n\ttag <= \"0.49\"\n}\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n not startswith(tag, \"v\")\n\ttag == \"1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n\tconfigmapOnIngressNamespace := [configmap | configmap= configmaps[_]; configmap.metadata.namespace == namespace]\n\tconfigMapsWithSnippet := [configmap | configmap= configmapOnIngressNamespace[_]; configmap.data[\"allow-snippet-annotations\"] == \"false\"]\n\tcount(configMapsWithSnippet) < 1\n}\n\n\nis_tag_image(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n not endswith(img, \"/\")\n}", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Deployment", - "ConfigMap" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, "controlConfigInputs": null, "description": "", "remediation": "", @@ -17308,181 +30151,10 @@ }, { "guid": "", - "name": "resources-cpu-limit-and-request", - "attributes": { - "armoBuiltin": true - }, + "name": "pod-security-admission-applied-2", "creationTime": "", - "rule": "package armo_builtins\nimport data\n\n# Fails if pod does not have container with CPU-limit or request\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tnot request_or_limit_cpu(container)\n\n\tfixPaths := [{\"path\": sprintf(\"spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}, \n\t\t\t\t{\"path\": sprintf(\"spec.containers[%v].resources.requests.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v 
does not have CPU-limit or request\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload does not have container with CPU-limit or request\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n not request_or_limit_cpu(container)\n\n\tfixPaths := [{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}, \n\t\t\t\t{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.requests.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob does not have container with CPU-limit or request\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n not request_or_limit_cpu(container)\n\n\tfixPaths := [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}, \n\t\t\t\t{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.requests.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\t\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n\n###################################################################################################################\n\n# Fails if pod exceeds CPU-limit or request\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\trequest_or_limit_cpu(container)\n\tresource := is_min_max_exceeded_cpu(container)\n\tresource != \"\"\n\n\tfailed_paths := sprintf(\"spec.containers[%v].%v\", [format_int(i, 10), resource])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v exceeds CPU-limit or request\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload exceeds CPU-limit or request\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\trequest_or_limit_cpu(container)\n\tresource := is_min_max_exceeded_cpu(container)\n\tresource != \"\"\n\n\tfailed_paths := sprintf(\"spec.template.spec.containers[%v].%v\", [format_int(i, 10), resource])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v exceeds CPU-limit or request\", [ container.name, wl.kind, 
wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob doas exceeds CPU-limit or request\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\trequest_or_limit_cpu(container)\n \tresource := is_min_max_exceeded_cpu(container)\n\tresource != \"\"\n\n\tfailed_paths := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].%v\", [format_int(i, 10), resource])\n\t\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v exceeds CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n\n#################################################################################################################3\n\nrequest_or_limit_cpu(container) {\n\tcontainer.resources.limits.cpu\n\tcontainer.resources.requests.cpu\n}\n\n\nis_min_max_exceeded_cpu(container) = \"resources.limits.cpu\" {\n\tcpu_limit := container.resources.limits.cpu\n\tis_limit_exceeded_cpu(cpu_limit)\n} else = \"resouces.requests.cpu\" {\n\tcpu_req := container.resources.requests.cpu\n\tis_req_exceeded_cpu(cpu_req)\n} else = \"\"\n\n\nis_limit_exceeded_cpu(cpu_limit) {\n\tis_min_limit_exceeded_cpu(cpu_limit)\n}\n\nis_limit_exceeded_cpu(cpu_limit) {\n\tis_max_limit_exceeded_cpu(cpu_limit)\n}\n\nis_req_exceeded_cpu(cpu_req) {\n\tis_max_request_exceeded_cpu(cpu_req)\n}\n\nis_req_exceeded_cpu(cpu_req) {\n\tis_min_request_exceeded_cpu(cpu_req)\n}\n\nis_max_limit_exceeded_cpu(cpu_limit) {\n\tcpu_limit_max := data.postureControlInputs.cpu_limit_max[_]\n\tcompare_max(cpu_limit_max, cpu_limit)\n}\n\nis_min_limit_exceeded_cpu(cpu_limit) {\n\tcpu_limit_min := data.postureControlInputs.cpu_limit_min[_]\n\tcompare_min(cpu_limit_min, cpu_limit) \n}\n\nis_max_request_exceeded_cpu(cpu_req) {\n\tcpu_req_max := data.postureControlInputs.cpu_request_max[_]\n\tcompare_max(cpu_req_max, cpu_req)\n}\n\nis_min_request_exceeded_cpu(cpu_req) {\n\tcpu_req_min := data.postureControlInputs.cpu_request_min[_]\n\tcompare_min(cpu_req_min, cpu_req) \n}\n\n##############\n# helpers\n\n# Compare according to unit - max\ncompare_max(max, given) {\n\tendswith(max, \"Mi\")\n\tendswith(given, \"Mi\")\n\tsplit_max := split(max, \"Mi\")[0]\n\tsplit_given := split(given, \"Mi\")[0]\n\tsplit_given > split_max\n}\n\ncompare_max(max, given) {\n\tendswith(max, \"M\")\n\tendswith(given, \"M\")\n\tsplit_max := split(max, \"M\")[0]\n\tsplit_given := split(given, \"M\")[0]\n\tsplit_given > split_max\n}\n\ncompare_max(max, given) {\n\tendswith(max, \"m\")\n\tendswith(given, \"m\")\n\tsplit_max := split(max, \"m\")[0]\n\tsplit_given := split(given, \"m\")[0]\n\tsplit_given > split_max\n}\n\ncompare_max(max, given) {\n\tnot is_special_measure(max)\n\tnot is_special_measure(given)\n\tgiven > max\n}\n\n\n\n################\n# Compare according to unit - min\ncompare_min(min, given) {\n\tendswith(min, \"Mi\")\n\tendswith(given, \"Mi\")\n\tsplit_min := split(min, \"Mi\")[0]\n\tsplit_given := split(given, \"Mi\")[0]\n\tsplit_given < split_min\n}\n\ncompare_min(min, given) {\n\tendswith(min, \"M\")\n\tendswith(given, \"M\")\n\tsplit_min := split(min, \"M\")[0]\n\tsplit_given := split(given, 
\"M\")[0]\n\tsplit_given < split_min\n}\n\ncompare_min(min, given) {\n\tendswith(min, \"m\")\n\tendswith(given, \"m\")\n\tsplit_min := split(min, \"m\")[0]\n\tsplit_given := split(given, \"m\")[0]\n\tsplit_given < split_min\n}\n\ncompare_min(min, given) {\n\tnot is_special_measure(min)\n\tnot is_special_measure(given)\n\tgiven < min\n}\n\n\n# Check that is same unit\nis_special_measure(unit) {\n\tendswith(unit, \"m\")\n}\n\nis_special_measure(unit) {\n\tendswith(unit, \"M\")\n}\n\nis_special_measure(unit) {\n\tendswith(unit, \"Mi\")\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.cpu_request_max", - "settings.postureControlInputs.cpu_request_min", - "settings.postureControlInputs.cpu_limit_min", - "settings.postureControlInputs.cpu_limit_max" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.cpu_request_max", - "name": "cpu_request_max", - "description": "Ensure CPU max requests are set" - }, - { - "path": "settings.postureControlInputs.cpu_request_min", - "name": "cpu_request_min", - "description": "Ensure CPU min requests are set" - }, - { - "path": "settings.postureControlInputs.cpu_limit_max", - "name": "cpu_limit_max", - "description": "Ensure CPU max limits are set" - }, - { - "path": "settings.postureControlInputs.cpu_limit_min", - "name": "cpu_limit_min", - "description": "Ensure CPU min limits are set" - } - ], - "description": "CPU limits and requests are not set.", - "remediation": "Ensure CPU limits and requests are set.", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "ensure-that-the-kubelet-service-file-permissions-are-set-to-600-or-more-restrictive", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport data.cautils as cautils\nimport future.keywords.in\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"serviceFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\n\t# Actual permissions test \n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. 
actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "configInputs": null, - "controlConfigInputs": null, - "description": "Ensure that the `kubelet` service file has permissions of `600` or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchmod 600 /etc/systemd/system/kubelet.service.d/kubeadm.conf\n\n```", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "exposed-critical-pods", - "attributes": { - "m$K8sThreatMatrix": "exposed-critical-pods", - "armoBuiltin": true, - "imageScanRelated": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\ndeny[msga] {\n services := [ x | x = input[_]; x.kind == \"Service\" ]\n pods := [ x | x = input[_]; x.kind == \"Pod\" ]\n vulns := [ x | x = input[_]; x.kind == \"ImageVulnerabilities\"]\n\n pod := pods[_]\n service := services[_]\n vuln := vulns[_]\n\n # vuln data is relevant \n count(vuln.data) > 0 \n\n # service is external-facing\n filter_external_access(service)\n\n # pod has the current service\n service_to_pod(service, pod) > 0\n\n # get container image name\n container := pod.spec.containers[i]\n\n # image has vulnerabilities\n \n container.image == vuln.metadata.name\n\n # At least one critical vulnerabilities\n filter_critical_vulnerabilities(vuln)\n\n related_objects := [pod, vuln]\n\n path := sprintf(\"status.containerStatuses[%v].imageID\", [format_int(i, 10)])\n\n metadata = {\n \"name\": pod.metadata.name,\n \"namespace\": pod.metadata.namespace\n }\n\n external_objects = { \n \"apiVersion\": \"result.vulnscan.com/v1\",\n \"kind\": pod.kind,\n \"metadata\": metadata,\n \"relatedObjects\": related_objects\n }\n\n msga := {\n \"alertMessage\": sprintf(\"pod '%v' exposed with critical vulnerabilities\", [pod.metadata.name]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 7,\n \"failedPaths\": [path],\n \"fixPaths\": [],\n \"alertObject\": {\n \"externalObjects\": external_objects\n }\n }\n}\n\nfilter_critical_vulnerabilities(vuln) {\n data := vuln.data[_]\n data.severity == \"Critical\"\n}\n\nfilter_external_access(service) {\n service.spec.type != \"ClusterIP\"\n}\n\nservice_to_pod(service, pod) = res {\n # Make sure we're looking on the same namespace\n service.metadata.namespace == pod.metadata.namespace\n\n service_selectors := [ x | x = service.spec.selector[_] ]\n\n res := count([ x | x = pod.metadata.labels[_]; x == service_selectors[_] ])\n}", - "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n services := [ x | x = input[_]; x.kind == \"Service\" ]\n pods := [ x | x = input[_]; x.kind == \"Pod\" ]\n vulns := [ x | x = input[_]; x.kind == \"ImageVulnerabilities\"]\n\n pod := pods[_]\n service := services[_]\n vuln := vulns[_]\n\n # vuln data is relevant 
\n count(vuln.data) > 0 \n\n # service is external-facing\n filter_external_access(service)\n\n # pod has the current service\n service_to_pod(service, pod) > 0\n\n # get container image name\n container := pod.spec.containers[i]\n\n # image has vulnerabilities\n container.image == vuln.metadata.name\n\n related_objects := [pod, vuln]\n\n path := sprintf(\"status.containerStatuses[%v].imageID\", [format_int(i, 10)])\n\n metadata = {\n \"name\": pod.metadata.name,\n \"namespace\": pod.metadata.namespace\n }\n\n external_objects = { \n \"apiVersion\": \"result.vulnscan.com/v1\",\n \"kind\": pod.kind,\n \"metadata\": metadata,\n \"relatedObjects\": related_objects\n }\n\n msga := {\n \"alertMessage\": sprintf(\"pod '%v' exposed with critical vulnerabilities\", [pod.metadata.name]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 7,\n \"failedPaths\": [path],\n \"fixPaths\": [],\n \"alertObject\": {\n \"externalObjects\": external_objects\n }\n }\n}\n\nfilter_external_access(service) {\n service.spec.type != \"ClusterIP\"\n}\n\nservice_to_pod(service, pod) = res {\n # Make sure we're looking on the same namespace\n service.metadata.namespace == pod.metadata.namespace\n\n service_selectors := [ x | x = service.spec.selector[_] ]\n\n res := count([ x | x = pod.metadata.labels[_]; x == service_selectors[_] ])\n}", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Service", - "Pod" - ] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "armo.vuln.images", - "image.vulnscan.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ImageVulnerabilities" - ] - } - ], - "ruleDependencies": null, - "configInputs": null, - "controlConfigInputs": null, - "description": "Fails if pods have exposed services as well as critical vulnerabilities", - "remediation": "The image of the listed pods might have a fix in a newer version. 
Alternatively, the pod service might not need to be external facing", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "pod-security-admission-restricted-applied", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\nimport future.keywords.every\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"restricted\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", - "resourceEnumerator": "package armo_builtins\nimport future.keywords.every\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels \ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable restricted pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot restricted_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 
7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\nrestricted_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue == \"restricted\"\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", + "rule": "package armo_builtins\nimport future.keywords.every\n\n# Fails if no 3rd party security admission exists and namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot admission_policy_enabled(namespace)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"YOUR_VALUE\"}\n \n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\nadmission_policy_enabled(namespace){\n\tsome label, _ in namespace.metadata.labels \n startswith(label, \"pod-security.kubernetes.io/enforce\")\n}\n\nhas_external_policy_control(inp){\n admissionwebhook := inp[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", + "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}", "ruleLanguage": "Rego", "match": [ { @@ -17504,425 +30176,25 @@ "*" ], "resources": [ - "ValidatingWebhookConfiguration", "MutatingWebhookConfiguration" ] } ], "ruleDependencies": [], - "configInputs": null, "controlConfigInputs": null, - "description": "Checks that every namespace enabled restricted pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", + "description": "Checks that every namespace enabled pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks)", "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.\n\n#### Impact Statement\nWhere policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. 
Care is required when implementing admission control policies to ensure that this does not occur.\n\n#### Default Value\nBy default, Pod Security Admission is enabled but no policies are in place.", "ruleQuery": "armo_builtins", "relevantCloudProviders": null }, { "guid": "", - "name": "ensure-that-the-Kubernetes-PKI-directory-and-file-ownership-is-set-to-root-root", + "name": "list-all-mutating-webhooks", "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" + "m$K8sThreatMatrix": "Persistence::Validate admission controller" }, "creationTime": "", - "rule": "package armo_builtins\n\nimport data.cautils as cautils\nimport future.keywords.in\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"PKIFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]), \"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"PKIDir\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "configInputs": null, - 
"controlConfigInputs": null, - "description": "Ensure that the Kubernetes PKI directory and file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown -R root:root /etc/kubernetes/pki/\n\n```", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "rule-list-all-cluster-admins-v1", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::Cluster-admin binding", - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin permissions\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s have high privileges, such as cluster-admin\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "determines which users have cluster admin permissions", - "remediation": "", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "ensure-that-the-controller-manager-terminated-pod-gc-threshold-argument-is-set-as-appropriate", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = 
input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": result.alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--terminated-pod-gc-threshold\")\n\tresult = {\n\t\t\"alert\": \"Please validate that --terminated-pod-gc-threshold is set to an appropriate value\",\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--terminated-pod-gc-threshold\")\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [count(cmd)])\n\tresult = {\n\t\t\"alert\": \"--terminated-pod-gc-threshold flag not set to an appropriate value\",\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--terminated-pod-gc-threshold=YOUR_VALUE\"}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "Activate garbage collector on pod termination, as appropriate.", - "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--terminated-pod-gc-threshold` to an appropriate threshold, for example:\n\n \n```\n--terminated-pod-gc-threshold=10\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `--terminated-pod-gc-threshold` is set to `12500`.", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "rule-can-create-modify-pod", - "attributes": { - "m$K8sThreatMatrix": "Execution::New container, Persistence::Backdoor container", - "armoBuiltin": true, - "useUntilKubescapeVersion": "v1.0.133" - }, - "creationTime": "", - "rule": "\npackage armo_builtins\nimport data.cautils as cautils\n\n\n\n# fails if user has create/modify access to pods \n# RoleBinding to Role\ndeny [msga] {\n roles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n can_create_modify_to_pod_resource(rule)\n can_create_modify_to_pod_verb(rule)\n\n rolebinding.roleRef.kind == \"Role\"\n rolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n 
\tmsga := {\n\t\"alertMessage\": sprintf(\"The following %v: %v can create/modify workloads\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n# fails if user has create/modify access to pods \n# RoleBinding to ClusterRole\ndeny [msga]{\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n can_create_modify_to_pod_resource(rule)\n can_create_modify_to_pod_verb(rule)\n\n rolebinding.roleRef.kind == \"ClusterRole\"\n rolebinding.roleRef.name == role.metadata.name\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n \tmsga := {\n\t\"alertMessage\": sprintf(\"The following %v: %v can create/modify workloads\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n# fails if user has create/modify access to pods \n# ClusterRoleBinding to ClusterRole\ndeny [msga]{\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n clusterrolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n clusterrolebinding := clusterrolebindings[_]\n\n rule:= role.rules[_]\n can_create_modify_to_pod_resource(rule)\n can_create_modify_to_pod_verb(rule)\n\n clusterrolebinding.roleRef.kind == \"ClusterRole\"\n clusterrolebinding.roleRef.name == role.metadata.name\n\n subject := clusterrolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n \tmsga := {\n\t\t\"alertMessage\": sprintf(\"The following %v: %v can create/modify workloads\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,clusterrolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\n\n\ncan_create_modify_to_pod_resource(rule){\n cautils.list_contains(rule.resources,\"pods\")\n}\n\ncan_create_modify_to_pod_resource(rule){\n cautils.list_contains(rule.resources,\"deployments\")\n}\n\ncan_create_modify_to_pod_resource(rule){\n cautils.list_contains(rule.resources,\"daemonsets\")\n}\n\ncan_create_modify_to_pod_resource(rule){\n cautils.list_contains(rule.resources,\"replicasets\")\n}\ncan_create_modify_to_pod_resource(rule){\n cautils.list_contains(rule.resources,\"statefulsets\")\n}\ncan_create_modify_to_pod_resource(rule){\n cautils.list_contains(rule.resources,\"jobs\")\n}\ncan_create_modify_to_pod_resource(rule){\n cautils.list_contains(rule.resources,\"cronjobs\")\n}\ncan_create_modify_to_pod_resource(rule){\n is_api_group(rule)\n cautils.list_contains(rule.resources,\"*\")\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}\n\ncan_create_modify_to_pod_verb(rule) {\n cautils.list_contains(rule.verbs, \"create\")\n}\n\ncan_create_modify_to_pod_verb(rule) {\n cautils.list_contains(rule.verbs, 
\"patch\")\n}\n\ncan_create_modify_to_pod_verb(rule) {\n cautils.list_contains(rule.verbs, \"update\")\n}\n\ncan_create_modify_to_pod_verb(rule) {\n cautils.list_contains(rule.verbs, \"*\")\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "*" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "configInputs": null, - "controlConfigInputs": null, - "description": "determines which users have create/modify permissions on pods", - "remediation": "", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "ensure-that-the-api-server-profiling-argument-is-set-to-false", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"profiling is enabled. This could potentially be exploited to uncover system and program details.\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--profiling=true\")\n\tfixed = replace(cmd[i], \"--profiling=true\", \"--profiling=false\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--profiling\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--profiling=false\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "Disable profiling, if not needed.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```\n\n#### Impact Statement\nProfiling information would not be available.\n\n#### Default Value\nBy default, profiling is enabled.", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "ensure-that-the-admission-control-plugin-NodeRestriction-is-set", - "attributes": { - 
"armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"NodeRestriction is not enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--enable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tnot \"NodeRestriction\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, [\"NodeRestriction\"])\n\tfixed_flag = sprintf(\"%s=%s\", [\"--enable-admission-plugins\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--enable-admission-plugins\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--enable-admission-plugins=NodeRestriction\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "Limit the `Node` and `Pod` objects that a kubelet could modify.", - "remediation": "Follow the Kubernetes documentation and configure `NodeRestriction` plug-in on kubelets. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the `--enable-admission-plugins` parameter to a value that includes `NodeRestriction`.\n\n \n```\n--enable-admission-plugins=...,NodeRestriction,...\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `NodeRestriction` is not set.", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "image-pull-policy-is-not-set-to-always", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n is_bad_container(container)\n\tpaths = [sprintf(\"spec.containers[%v].image\", [format_int(i, 10)]), sprintf(\"spec.containers[%v].imagePullPolicy\", [format_int(i, 10)])]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v has 'latest' tag on image but imagePullPolicy is not set to 'Always'\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": paths,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tpaths = [sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)]), sprintf(\"spec.template.spec.containers[%v].imagePullPolicy\", [format_int(i, 10)])]\n is_bad_container(container)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v has 'latest' tag on image but imagePullPolicy is not set to 'Always'\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": paths,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tpaths = [sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)]), sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].imagePullPolicy\", [format_int(i, 10)])]\n is_bad_container(container)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in cronjob: %v has 'latest' tag on image but imagePullPolicy is not set to 'Always'\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": paths,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# image tag is latest\nis_bad_container(container){\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, container.image, -1)\n v := version[_]\n img := v[_]\n img == \":latest\"\n not_image_pull_policy(container)\n}\n\n# No image tag or digest (== latest)\nis_bad_container(container){\n not is_tag_image(container.image)\n not_image_pull_policy(container)\n}\n\n# image tag is only letters (== latest)\nis_bad_container(container){\n is_tag_image_only_letters(container.image)\n not_image_pull_policy(container)\n}\n\nnot_image_pull_policy(container) {\n container.imagePullPolicy == \"Never\"\n}\n\n\nnot_image_pull_policy(container) {\n container.imagePullPolicy == \"IfNotPresent\"\n}\n\nis_tag_image(image) {\n reg := 
\":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n not endswith(img, \"/\")\n}\n\n# The image has a tag, and contains only letters\nis_tag_image_only_letters(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n\treg1 := \"^:[a-zA-Z]{1,127}$\"\n\tre_match(reg1, img)\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "check imagePullPolicy filed, if imagePullPolicy = always pass, else fail.", - "remediation": "", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "ensure-that-the-Kubernetes-PKI-certificate-file-permissions-are-set-to-600-or-more-restrictive", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport data.cautils as cautils\nimport future.keywords.in\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"PKIFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\tendswith(file.path, \".crt\")\n\n\t# Actual permissions test \n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]), \"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "ControlPlaneInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "configInputs": null, - "controlConfigInputs": null, - "description": "Ensure that Kubernetes PKI certificate files have permissions of `600` or more restrictive.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchmod -R 600 /etc/kubernetes/pki/*.crt\n\n```", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "ensure-that-the-admission-control-plugin-SecurityContextDeny-is-set-if-PodSecurityPolicy-is-not-used", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\":\"The SecurityContextDeny addmission controller is not enabled. This could allow for privilege escalation in the cluster\", \n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--enable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tnot \"SecurityContextDeny\" in flag.values\n\tnot \"PodSecurityPolicy\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, [\"SecurityContextDeny\"])\n\tfixed_flag = sprintf(\"%s=%s\", [\"--enable-admission-plugins\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--enable-admission-plugins\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--enable-admission-plugins=SecurityContextDeny\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "The SecurityContextDeny admission controller can be used to deny pods which make use of some SecurityContext fields which could allow for privilege escalation in the cluster. 
This should be used where PodSecurityPolicy is not in place within the cluster.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--enable-admission-plugins` parameter to include `SecurityContextDeny`, unless `PodSecurityPolicy` is already in place.\n\n \n```\n--enable-admission-plugins=...,SecurityContextDeny,...\n\n```\n\n#### Impact Statement\nThis admission controller should only be used where Pod Security Policies cannot be used on the cluster, as it can interact poorly with certain Pod Security Policies\n\n#### Default Value\nBy default, `SecurityContextDeny` is not set.", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "K8s common labels usage", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tfixPath := no_K8s_label_or_no_K8s_label_usage(pod, \"\")\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"in the following pod the kubernetes common labels are not defined: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 1,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tpodSpec := wl.spec.template\n\tbeggining_of_pod_path := \"spec.template.\"\n\tfixPath := no_K8s_label_usage(wl, podSpec, beggining_of_pod_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v the kubernetes common labels are is not defined:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 1,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n#handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tpodSpec := wl.spec.jobTemplate.spec.template\n\tbeggining_of_pod_path := \"spec.jobTemplate.spec.template.\"\n\tfixPath := no_K8s_label_usage(wl, podSpec, beggining_of_pod_path)\n\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs the kubernetes common labels are not defined: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 1,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n\n\n# There is no label-usage in WL and also for his Pod\nno_K8s_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tpath1 := no_K8s_label_or_no_K8s_label_usage(wl, \"\")\n\tpath2 := no_K8s_label_or_no_K8s_label_usage(podSpec, beggining_of_pod_path)\n\tpath = array.concat(path1, path2)\n}\n\n# There is label-usage for WL but not for his Pod\nno_K8s_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tnot no_K8s_label_or_no_K8s_label_usage(wl, \"\")\n\tpath := no_K8s_label_or_no_K8s_label_usage(podSpec, beggining_of_pod_path)\n}\n\n# There is no label-usage for WL but there is for his Pod\nno_K8s_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tnot no_K8s_label_or_no_K8s_label_usage(podSpec, beggining_of_pod_path)\n\tpath := no_K8s_label_or_no_K8s_label_usage(wl, \"\")\n}\n\nno_K8s_label_or_no_K8s_label_usage(wl, beggining_of_path) = path{\n\tnot 
wl.metadata.labels\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels\", [beggining_of_path]), \"value\": \"YOUR_VALUE\"}]\n}\n\nno_K8s_label_or_no_K8s_label_usage(wl, beggining_of_path) = path{\n\tmetadata := wl.metadata\n\tnot metadata.labels\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels\", [beggining_of_path]), \"value\": \"YOUR_VALUE\"}]\n}\n\nno_K8s_label_or_no_K8s_label_usage(wl, beggining_of_path) = path{\n\tlabels := wl.metadata.labels\n\tnot all_kubernetes_labels(labels)\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels\", [beggining_of_path]), \"value\": \"YOUR_VALUE\"}]\n}\n\nall_kubernetes_labels(labels){\n\trecommended_labels := data.postureControlInputs.k8sRecommendedLabels\n\trecommended_label := recommended_labels[_]\n\tlabels[recommended_label]\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.k8sRecommendedLabels" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.k8sRecommendedLabels", - "name": "Kubernetes Recommended Labels", - "description": "Kubescape checks that workloads have at least one of the following kubernetes recommended labels." - } - ], - "description": "Check if the list of label that start with app.kubernetes.io/ are defined.", - "remediation": "", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "list-all-validating-webhooks", - "attributes": { - "m$K8sThreatMatrix": "Credential Access::Malicious admission controller", - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\n\ndeny [msga] {\n admissionwebhooks := [admissionwebhook | admissionwebhook = input[_]; admissionwebhook.kind == \"ValidatingWebhookConfiguration\"]\n admissionwebhook := admissionwebhooks[_]\n\n \tmsga := {\n\t\t\"alertMessage\": sprintf(\"The following validating webhook configuration should be checked %v.\", [admissionwebhook.metadata.name]),\n\t\t\"alertScore\": 6,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [\"\"],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}", + "rule": "package armo_builtins\n\n\ndeny [msga] {\n mutatingwebhooks := [mutatingwebhook | mutatingwebhook = input[_]; mutatingwebhook.kind == \"MutatingWebhookConfiguration\"]\n mutatingwebhook := mutatingwebhooks[_]\n\n \tmsga := {\n\t\t\"alertMessage\": sprintf(\"The following mutating webhook configuration should be checked %v.\", [mutatingwebhook.metadata.name]),\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [mutatingwebhook]\n\t\t}\n\t}\n}", "resourceEnumerator": "", "ruleLanguage": "Rego", "match": [ @@ -17934,26 +30206,49 @@ "*" ], "resources": [ - "ValidatingWebhookConfiguration" + "MutatingWebhookConfiguration" ] } ], "ruleDependencies": [], - "configInputs": null, "controlConfigInputs": null, - "description": "Returns validating webhook configurations to be verified", + "description": "Returns mutating webhook configurations to be verified", 
"remediation": "Analyze webhook for malicious behavior", "ruleQuery": "armo_builtins", "relevantCloudProviders": null }, { "guid": "", - "name": "ensure-that-the-admission-control-plugin-ServiceAccount-is-set", - "attributes": { - "armoBuiltin": true - }, + "name": "psp-deny-allowed-capabilities", "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"admission control plugin AlwaysAdmit is enabled. This is equal to turning off all admission controllers\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--disable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\t\"ServiceAccount\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := [val | val := flag.values[j]; val != \"ServiceAccount\"]\n\tresult = get_retsult(fixed_values, i)\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) == 0\n\tresult = {\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) > 0\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": sprintf(\"--disable-admission-plugins=%v\", [concat(\",\", fixed_values)]),\n\t\t}],\n\t}\n}\n", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if all PSPs have allowedCapabilities\n\t# if even one PSP has allowedCapabilities as an empty list, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tcount(psp.spec.allowedCapabilities) > 0\n\t}\n\n\t# return al the PSPs that have allowedCapabilities\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tcount(psp.spec.allowedCapabilities) > 0\n\n\tpath := \"spec.allowedCapabilities\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has allowedCapabilities.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "v1beta1" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "k8s-audit-logs-enabled-native-cis", + "creationTime": "", + 
"rule": "package armo_builtins\n\n# CIS 3.2.1 https://workbench.cisecurity.org/sections/1126657/recommendations/1838582\ndeny[msga] {\n\tobj := input[_]\n\tis_api_server(obj)\n\tcmd := obj.spec.containers[0].command\n\taudit_policy := [command | command := cmd[_]; contains(command, \"--audit-policy-file=\")]\n\tcount(audit_policy) < 1\n\tpath := sprintf(\"spec.containers[0].command[%v]\", [count(cmd)])\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs are not enabled\",\n\t\t\"alertScore\": 5,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", "ruleLanguage": "Rego", "match": [ @@ -17970,21 +30265,109 @@ } ], "ruleDependencies": [], - "configInputs": null, "controlConfigInputs": null, - "description": "Automate service accounts management.", - "remediation": "Follow the documentation and create `ServiceAccount` objects as per your environment. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and ensure that the `--disable-admission-plugins` parameter is set to a value that does not include `ServiceAccount`.\n\n#### Impact Statement\nNone.\n\n#### Default Value\nBy default, `ServiceAccount` is set.", + "description": "Kubernetes can audit the details of requests made to the API server. 
The `--audit-policy-file` flag must be set for this logging to be enabled.", + "remediation": "Create an audit policy file for your cluster.", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "non-root-containers", + "creationTime": "", + "rule": "package armo_builtins\n\n\n################################################################################\n# Rules\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n\tstart_of_path := \"spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, pod, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, pod, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v may run as root\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\tstart_of_path := \"spec.template.spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, wl.spec.template, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, wl.spec.template, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has a container configured to run as root\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec\"\n\trun_as_user_fixpath := evaluate_workload_run_as_user(container, wl.spec.jobTemplate.spec.template, start_of_path)\n\trun_as_group_fixpath := evaluate_workload_run_as_group(container, wl.spec.jobTemplate.spec.template, start_of_path)\n\tall_fixpaths := array.concat(run_as_user_fixpath, run_as_group_fixpath)\n\tcount(all_fixpaths) > 0\n\tfixPaths := get_fixed_paths(all_fixpaths, i)\n\t\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n \"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\nget_fixed_paths(all_fixpaths, i) = [{\"path\":replace(all_fixpaths[0].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[0].value}, {\"path\":replace(all_fixpaths[1].path,\"container_ndx\",format_int(i,10)), \"value\":all_fixpaths[1].value}]{\n\tcount(all_fixpaths) == 2\n} else = [{\"path\":replace(all_fixpaths[0].path,\"container_ndx\",format_int(i,10)), 
\"value\":all_fixpaths[0].value}] \n\n#################################################################################\n# Workload evaluation \n\n# if runAsUser is set to 0 and runAsNonRoot is set to false/ not set - suggest to set runAsUser to 1000\n# if runAsUser is not set and runAsNonRoot is set to false/ not set - suggest to set runAsNonRoot to true\n# all checks are both on the pod and the container level\nevaluate_workload_run_as_user(container, pod, start_of_path) = fixPath {\n\trunAsNonRootValue := get_run_as_non_root_value(container, pod, start_of_path)\n\trunAsNonRootValue.value == false\n\t\n\trunAsUserValue := get_run_as_user_value(container, pod, start_of_path)\n\trunAsUserValue.value == 0\n\n\talertInfo := choose_first_if_defined(runAsUserValue, runAsNonRootValue)\n\tfixPath := alertInfo.fixPath\n} else = [] \n\n\n# if runAsGroup is set to 0/ not set - suggest to set runAsGroup to 1000\n# all checks are both on the pod and the container level\nevaluate_workload_run_as_group(container, pod, start_of_path) = fixPath {\t\n\trunAsGroupValue := get_run_as_group_value(container, pod, start_of_path)\n\trunAsGroupValue.value == 0\n\n\tfixPath := runAsGroupValue.fixPath\n} else = []\n\n\n#################################################################################\n# Value resolution functions\n\n\nget_run_as_non_root_value(container, pod, start_of_path) = runAsNonRoot {\n runAsNonRoot := {\"value\" : container.securityContext.runAsNonRoot, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}], \"defined\" : true}\n} else = runAsNonRoot {\n runAsNonRoot := {\"value\" : pod.spec.securityContext.runAsNonRoot, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}], \"defined\" : true}\n} else = {\"value\" : false, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]) , \"value\":\"true\"}], \"defined\" : false}\n\nget_run_as_user_value(container, pod, start_of_path) = runAsUser {\n\tpath := sprintf(\"%v.containers[container_ndx].securityContext.runAsUser\", [start_of_path]) \n runAsUser := {\"value\" : container.securityContext.runAsUser, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}], \"defined\" : true}\n} else = runAsUser {\n\tpath := sprintf(\"%v.securityContext.runAsUser\", [start_of_path]) \n runAsUser := {\"value\" : pod.spec.securityContext.runAsUser, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}],\"defined\" : true}\n} else = {\"value\" : 0, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [start_of_path]), \"value\":\"true\"}],\n\t\"defined\" : false}\n\nget_run_as_group_value(container, pod, start_of_path) = runAsGroup {\n\tpath := sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [start_of_path])\n runAsGroup := {\"value\" : container.securityContext.runAsGroup, \"fixPath\": [{\"path\": path, \"value\": \"1000\"}],\"defined\" : true}\n} else = runAsGroup {\n\tpath := sprintf(\"%v.securityContext.runAsGroup\", [start_of_path])\n runAsGroup := {\"value\" : pod.spec.securityContext.runAsGroup, \"fixPath\":[{\"path\": path, \"value\": \"1000\"}], \"defined\" : true}\n} else = {\"value\" : 0, \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [start_of_path]), \"value\":\"1000\"}],\n \t\"defined\" : false\n}\n\nchoose_first_if_defined(l1, 
l2) = c {\n l1.defined\n c := l1\n} else = l2\n\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "fails if container can run as root", + "remediation": "Make sure that the user/group in the securityContext of pod/container is set to an id over 0, or the runAsNonRoot flag is set to true.", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-that-the-controller-manager-pod-specification-file-permissions-are-set-to-600-or-more-restrictive", + "attributes": { + "hostSensorRule": "true" + }, + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"controllerManagerInfo\", \"specsFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "controlConfigInputs": null, + "description": "Ensure that the controller manager pod specification file has permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```", "ruleQuery": "", "relevantCloudProviders": null }, { "guid": "", - "name": "more-than-one-replicas", + "name": "cluster-admin-role", "attributes": { - "armoBuiltin": true + "useFromKubescapeVersion": "v1.0.133", + "resourcesAggregator": "subject-role-rolebinding" }, "creationTime": "", - "rule": "package armo_builtins\n\n\n# Fails if workload has only one replica\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"StatefulSet\"}\n\tspec_template_spec_patterns[wl.kind]\n spec := wl.spec\n result := replicas_one_or_less(spec)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v: %v has only one replica\", [ wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\nreplicas_one_or_less(spec) = [failed_path, fixPath] {\n\tnot spec.replicas\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"spec.replicas\", \"value\": \"YOUR_VALUE\"}\n}\n\nreplicas_one_or_less(spec) = [failed_path, fixPath] {\n\tspec.replicas == 1\n\tfailed_path = \"spec.replicas\"\n\tfixPath = \"\"\n}\n\n get_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin role\n# regal ignore:rule-length\ndeny[msga] {\n\tsubjectVector := input[_]\n\n\trole := subjectVector.relatedObjects[i]\n\tendswith(role.kind, \"Role\")\n\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\t# check only cluster-admin role and only clusterrolebinding\n\trole.metadata.name == \"cluster-admin\"\n\trolebinding.kind == \"ClusterRoleBinding\"\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s is bound to cluster-admin role\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == 
subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n", "resourceEnumerator": "", "ruleLanguage": "Rego", "match": [ @@ -17996,102 +30379,117 @@ "*" ], "resources": [ - "Deployment", - "ReplicaSet", - "StatefulSet" + "Role", + "ClusterRole", + "ClusterRoleBinding" ] } ], - "ruleDependencies": [], - "configInputs": null, + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], "controlConfigInputs": null, - "description": "Replicas are set to one.", - "remediation": "Ensure replicas field is set and value is bigger than one.", + "description": "determines which users have cluster admin permissions", + "remediation": "", "ruleQuery": "armo_builtins", "relevantCloudProviders": null }, { "guid": "", - "name": "psp-enabled-cloud", - "attributes": { - "armoBuiltin": true - }, + "name": "resource-policies", "creationTime": "", - "rule": "package armo_builtins\n\n\n# Check if PSP is enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n not config.pod_security_policy_config.enabled == true\n\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"pod security policy configuration is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"gcloud beta container clusters update --enable-pod-security-policy\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}", + "rule": "package armo_builtins\n\n\n# Check if container has limits\ndeny[msga] {\n \tpods := [pod | pod = input[_]; pod.kind == \"Pod\"]\n pod := pods[_]\n\tcontainer := pod.spec.containers[i]\n\t\n\t\n\tstart_of_path := \"spec.\"\n\tfixPath := is_no_cpu_and_memory_limits_defined(container, start_of_path, i)\n\t\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"there are no cpu and memory limits defined for container : %v\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPath,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Check if container has limits - for workloads\n# If there is no limits specified in the workload, we check the namespace, since if limits are only specified for namespace\n# and not in workload, it won't be on the yaml\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\t\n\tstart_of_path\t:= \"spec.template.spec.\"\n\tfixPath := is_no_cpu_and_memory_limits_defined(container, start_of_path, i)\n\t\n\t\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"there are no cpu and memory limits defined for container : %v\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPath,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n\t\n}\n\n# Check if container has limits - for cronjobs\n# If there is no limits specified in the cronjob, we check the namespace, since if limits are only specified for namespace\n# and 
not in cronjob, it won't be on the yaml\ndeny [msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tfixPath := is_no_cpu_and_memory_limits_defined(container, start_of_path, i)\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"there are no cpu and memory limits defined for container : %v\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPath,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# no limits at all\nis_no_cpu_and_memory_limits_defined(container, start_of_path, i) = fixPath {\n\tnot container.resources.limits\n\tfixPath = [{\"path\": sprintf(\"%vcontainers[%v].resources.limits.cpu\", [start_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}, {\"path\": sprintf(\"%vcontainers[%v].resources.limits.memory\", [start_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}]\n}\n\n# only memory limit\nis_no_cpu_and_memory_limits_defined(container, start_of_path, i) = fixPath {\n\tcontainer.resources.limits\n\tnot container.resources.limits.cpu\n\tcontainer.resources.limits.memory\n\tfixPath = [{\"path\": sprintf(\"%vcontainers[%v].resources.limits.cpu\", [start_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}]\n}\n\n# only cpu limit\nis_no_cpu_and_memory_limits_defined(container, start_of_path, i) =fixPath {\n\tcontainer.resources.limits\n\tnot container.resources.limits.memory\n\tcontainer.resources.limits.cpu\n\tfixPath = [{\"path\": sprintf(\"%vcontainers[%v].resources.limits.memory\", [start_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}]\n\tfailed_path = \"\"\n}\n# limits but without capu and memory \nis_no_cpu_and_memory_limits_defined(container, start_of_path, i) = fixPath {\n\tcontainer.resources.limits\n\tnot container.resources.limits.memory\n\tnot container.resources.limits.cpu\n\tfixPath = [{\"path\": sprintf(\"%vcontainers[%v].resources.limits.cpu\", [start_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}, {\"path\": sprintf(\"%vcontainers[%v].resources.limits.memory\", [start_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}]\n}", "resourceEnumerator": "", "ruleLanguage": "Rego", "match": [ { - "apiGroups": [], - "apiVersions": [], - "resources": [] + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] } ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "fails if namespace has no resource policies defined", + "remediation": "Make sure that you definy resource policies (LimitRange or ResourceQuota) which limit the usage of resources for all the namespaces", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-that-the-scheduler.conf-file-permissions-are-set-to-600-or-more-restrictive", + "attributes": { + "hostSensorRule": "true" + }, + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"schedulerInfo\", \"configFile\"]\n\tfile := 
object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [], "dynamicMatch": [ { "apiGroups": [ - "container.googleapis.com", - "eks.amazonaws.com" + "hostdata.kubescape.cloud" ], "apiVersions": [ - "v1" + "v1beta0" ], "resources": [ - "ClusterDescribe" + "ControlPlaneInfo" ] } ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "", - "remediation": "", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": [ - "EKS", - "GKE" - ] - }, - { - "guid": "", - "name": "etcd-client-auth-cert", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\n# Check if --client-cert-auth is set to true\ndeny[msga] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Etcd server is not requiring a valid client certificate\",\n\t\t\"alertScore\": 8,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--client-cert-auth\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--client-cert-auth=true\",\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--client-cert-auth=false\")\n\tfixed = replace(cmd[i], \"--client-cert-auth=false\", \"--client-cert-auth=true\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n", - "ruleLanguage": "Rego", - "match": [ + "ruleDependencies": [ { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] + "packageName": "cautils" } ], - "ruleDependencies": [], - "configInputs": null, "controlConfigInputs": 
null, - "description": "Enable client authentication on etcd service.", - "remediation": "Edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--client-cert-auth=\"true\"\n\n```\n\n#### Impact Statement\nAll clients attempting to access the etcd server will require a valid client certificate.\n\n#### Default Value\nBy default, the etcd service can be queried by unauthenticated clients.", - "ruleQuery": "armo_builtins", + "description": "Ensure that the `scheduler.conf` file has permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/scheduler.conf\n\n```", + "ruleQuery": "", "relevantCloudProviders": null }, { "guid": "", - "name": "drop-capability-netraw", - "attributes": { - "armoBuiltin": true - }, + "name": "serviceaccount-token-mount", "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Fails if pod does not drop the capability NET_RAW \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"Pod\"\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %s does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if workload does not drop the capability NET_RAW\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if CronJob does not drop the capability NET_RAW\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\n\tpath_to_search := [\"securityContext\", \"capabilities\"]\n\tresult := container_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search)\n\tfailedPaths := get_failed_path(result)\n fixPaths := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not drop the capability NET_RAW\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": 
failedPaths,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Checks if workload does not drop the capability NET_RAW\ncontainer_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search) = [failed_path, fix_path] {\n\tpath_to_drop := array.concat(path_to_search, [\"drop\"])\n\tdrop_list := object.get(container, path_to_drop, [])\n\tnot \"NET_RAW\" in drop_list\n\tnot \"ALL\" in drop_list\n\tnot \"all\" in drop_list\n\tfixpath := sprintf(\"%s[%d].%s[%d]\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_drop), count(drop_list)])\n\tfix_path := [{\"path\": fixpath, \"value\": \"NET_RAW\"}]\n\tfailed_path := \"\"\n}\n\n# Checks if workload drops all capabilities but adds NET_RAW capability\ncontainer_doesnt_drop_NET_RAW(container, i, path_to_containers, path_to_search) = [failed_path, fix_path] {\n\tpath_to_drop := array.concat(path_to_search, [\"drop\"])\n\tdrop_list := object.get(container, path_to_drop, [])\n\tall_in_list(drop_list)\n\tpath_to_add := array.concat(path_to_search, [\"add\"])\n\tadd_list := object.get(container, path_to_add, [])\n\t\"NET_RAW\" in add_list\n\tfailed_path := [sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_add)])]\n\tfix_path := \"\"\n}\n\nall_in_list(list) {\n\t\"all\" in list\n}\n\nall_in_list(list) {\n\t\"ALL\" in list\n}\n\n\nget_failed_path(paths) = paths[0] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = paths[1] {\n\tpaths[1] != \"\"\n} else = []\n\n", - "resourceEnumerator": "", + "rule": "package armo_builtins\n\ndeny[msga] {\n wl := input[_]\n start_of_path := get_beginning_of_path(wl)\n spec := object.get(wl, start_of_path, [])\n\n sa := input[_]\n sa.kind == \"ServiceAccount\"\n is_same_sa(spec, sa.metadata.name)\n is_same_namespace(sa.metadata , wl.metadata)\n has_service_account_binding(sa)\n result := is_sa_auto_mounted_and_bound(spec, start_of_path, sa)\n\n failed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n \"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 9,\n \"fixPaths\": fixed_path,\n \"reviewPaths\": failed_path,\n \"failedPaths\": failed_path,\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n \"relatedObjects\": [{\n \"object\": sa\n }]\n }\n}\n\n\nget_beginning_of_path(workload) = start_of_path {\n spec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n spec_template_spec_patterns[workload.kind]\n start_of_path := [\"spec\", \"template\", \"spec\"]\n}\n\nget_beginning_of_path(workload) = start_of_path {\n workload.kind == \"Pod\"\n start_of_path := [\"spec\"]\n}\n\nget_beginning_of_path(workload) = start_of_path {\n workload.kind == \"CronJob\"\n start_of_path := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\"]\n}\n\n\n # -- ---- For workloads -- ---- \nis_sa_auto_mounted_and_bound(spec, start_of_path, sa) = [failed_path, fix_path] {\n # automountServiceAccountToken not in pod spec\n not spec.automountServiceAccountToken == false\n not spec.automountServiceAccountToken == true\n\n not sa.automountServiceAccountToken == false\n\n fix_path = { \"path\": sprintf(\"%v.automountServiceAccountToken\", [concat(\".\", start_of_path)]), \"value\": \"false\"}\n failed_path = \"\"\n}\n\nis_sa_auto_mounted_and_bound(spec, start_of_path, sa) = [failed_path, 
fix_path] {\n # automountServiceAccountToken set to true in pod spec\n spec.automountServiceAccountToken == true\n\n failed_path = sprintf(\"%v.automountServiceAccountToken\", [concat(\".\", start_of_path)])\n fix_path = \"\"\n}\n\nget_failed_path(paths) = [paths[0]] {\n paths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n paths[1] != \"\"\n} else = []\n\n\nis_same_sa(spec, serviceAccountName) {\n spec.serviceAccountName == serviceAccountName\n}\n\nis_same_sa(spec, serviceAccountName) {\n not spec.serviceAccountName \n serviceAccountName == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n metadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n not metadata1.namespace\n not metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n not metadata2.namespace\n metadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n not metadata1.namespace\n metadata2.namespace == \"default\"\n}\n\n# checks if RoleBinding/ClusterRoleBinding has a bind with the given ServiceAccount\nhas_service_account_binding(service_account) {\n role_bindings := [role_binding | role_binding = input[_]; endswith(role_binding.kind, \"Binding\")]\n role_binding := role_bindings[_]\n role_binding.subjects[_].name == service_account.metadata.name\n role_binding.subjects[_].namespace == service_account.metadata.namespace\n role_binding.subjects[_].kind == \"ServiceAccount\"\n}\n\n# checks if RoleBinding/ClusterRoleBinding has a bind with the system:authenticated group\n# which gives access to all authenticated users, including service accounts\nhas_service_account_binding(service_account) {\n role_bindings := [role_binding | role_binding = input[_]; endswith(role_binding.kind, \"Binding\")]\n role_binding := role_bindings[_]\n role_binding.subjects[_].name == \"system:authenticated\"\n}\n\n# checks if RoleBinding/ClusterRoleBinding has a bind with the \"system:serviceaccounts\" group\n# which gives access to all service accounts\nhas_service_account_binding(service_account) {\n role_bindings := [role_binding | role_binding = input[_]; endswith(role_binding.kind, \"Binding\")]\n role_binding := role_bindings[_]\n role_binding.subjects[_].name == \"system:serviceaccounts\"\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n wl := input[_]\n start_of_path := get_beginning_of_path(wl)\n\n msga := {\n \"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 9,\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n }\n}\n\n\nget_beginning_of_path(workload) = start_of_path {\n spec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n spec_template_spec_patterns[workload.kind]\n start_of_path := [\"spec\", \"template\", \"spec\"]\n}\n\nget_beginning_of_path(workload) = start_of_path {\n workload.kind == \"Pod\"\n start_of_path := [\"spec\"]\n}\n\nget_beginning_of_path(workload) = start_of_path {\n workload.kind == \"CronJob\"\n start_of_path := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\"]\n}", "ruleLanguage": "Rego", "match": [ { @@ -18102,7 +30500,20 @@ "v1" ], "resources": [ - "Pod" + "Pod", + "ServiceAccount" + ] + }, + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding", + "ClusterRoleBinding" ] }, { @@ -18133,67 +30544,9 @@ 
} ], "ruleDependencies": [], - "configInputs": null, "controlConfigInputs": null, - "description": "fails if container does not drop the capability NET_RAW", - "remediation": "Define the drop list in security context capabilities to include NET_RAW.", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "configured-liveness-probe", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\n\n# Fails if container does not have livenessProbe - for pod\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tnot container.livenessProbe\n\tfix_path := {\"path\": sprintf(\"spec.containers[%v].livenessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have livenessProbe\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if container does not have livenessProbe - for wl\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n not container.livenessProbe\n\tfix_path := {\"path\": sprintf(\"spec.template.spec.containers[%v].livenessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have livenessProbe\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if container does not have livenessProbe - for cronjob\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n not container.livenessProbe\n\tfix_path := {\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].livenessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have livenessProbe\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "Liveness probe is not configured", - "remediation": "Ensure Liveness probe is configured", + "description": "fails if service account and workloads mount service account token by default", + "remediation": "Make sure that the automountServiceAccountToken field on the service account spec if set to false", "ruleQuery": "armo_builtins", "relevantCloudProviders": null }, @@ -18201,11 +30554,10 @@ "guid": "", "name": 
"kubelet-protect-kernel-defaults", "attributes": { - "armoBuiltin": true, "hostSensorRule": "true" }, "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n#CIS 4.2.6 https://workbench.cisecurity.org/sections/1126668/recommendations/1838648\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--protect-kernel-defaults\")\n\tnot contains(command, \"--protect-kernel-defaults=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --protect-kernel-defaults is not set to true.\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--protect-kernel-defaults\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.protectKernelDefaults == true\n\n\tmsga := {\n\t\t\"alertMessage\": \"Property protectKernelDefaults is not set to true\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [\"protectKernelDefaults\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--protect-kernel-defaults\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --protect-kernel-defaults is not set to true.\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--protect-kernel-defaults\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.6 https://workbench.cisecurity.org/sections/1126668/recommendations/1838648\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--protect-kernel-defaults\")\n\tnot contains(command, \"--protect-kernel-defaults=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --protect-kernel-defaults is not set to 
true.\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--protect-kernel-defaults\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.protectKernelDefaults == true\n\n\tmsga := {\n\t\t\"alertMessage\": \"Property protectKernelDefaults is not set to true\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": [\"protectKernelDefaults\"],\n\t\t\"failedPaths\": [\"protectKernelDefaults\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--protect-kernel-defaults\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --protect-kernel-defaults is not set to true.\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--protect-kernel-defaults\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n", "resourceEnumerator": "", "ruleLanguage": "Rego", "match": [ @@ -18229,7 +30581,6 @@ } ], "ruleDependencies": [], - "configInputs": null, "controlConfigInputs": null, "description": "Determines if the --protect-kernel-defaults argument is set to true.", "remediation": "Set --protect-kernel-defaults to true or if using a config file set the protectKernelDefaults as true", @@ -18238,118 +30589,42 @@ }, { "guid": "", - "name": "insecure-capabilities", - "attributes": { - "armoBuiltin": true - }, + "name": "ensure-azure-rbac-is-set", "creationTime": "", - "rule": "package armo_builtins\nimport data\nimport data.cautils as cautils\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n result := is_dangerous_capabilities(container, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v have dangerous capabilities\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": 
[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n result := is_dangerous_capabilities(container, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in workload: %v have dangerous capabilities\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n result := is_dangerous_capabilities(container, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in cronjob: %v have dangerous capabilities\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_dangerous_capabilities(container, beggining_of_path, i) = path {\n\t# see default-config-inputs.json for list values\n insecureCapabilities := data.postureControlInputs.insecureCapabilities\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capability = container.securityContext.capabilities.add[k]; cautils.list_contains(insecureCapabilities, capability)]\n\tcount(path) > 0\n}", + "rule": "package armo_builtins\n\n# fails in case Azure RBAC is not set on AKS instance.\ndeny[msga] {\n \tcluster_describe := input[_]\n\tcluster_describe.apiVersion == \"management.azure.com/v1\"\n\tcluster_describe.kind == \"ClusterDescribe\"\n\tcluster_describe.metadata.provider == \"aks\"\n\tproperties := cluster_describe.data.properties\n\n\tnot isAzureRBACEnabled(properties)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Azure RBAC is not set. 
Enable it using the command: az aks update -g -n --enable-azure-rbac\",\n\t\t\"alertScore\": 7,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixCommand\": \"az aks update -g -n --enable-azure-rbac\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": cluster_describe\n\t\t},\n\t} \n}\n\n# isAzureRBACEnabled check if Azure RBAC is enabled into ClusterDescribe object\n# retrieved from azure cli.\nisAzureRBACEnabled(properties) {\n properties.aadProfile.enableAzureRBAC == true\n}\n", "resourceEnumerator": "", "ruleLanguage": "Rego", - "match": [ + "match": null, + "dynamicMatch": [ { "apiGroups": [ - "" + "management.azure.com" ], "apiVersions": [ "v1" ], "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" + "ClusterDescribe" ] } ], "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.insecureCapabilities" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.insecureCapabilities", - "name": "Insecure capabilities", - "description": "You can see the list of capabilities in https://man7.org/linux/man-pages/man7/capabilities.7.html. Kubescape looks for the following capabilities in containers which might lead to attackers getting high privileges in your system." - } - ], - "description": "fails if container has insecure capabilities", - "remediation": "Remove all insecure capabilities which aren’t necessary for the container.", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "exec-into-container", - "attributes": { - "m$K8sThreatMatrix": "Privilege Escalation::Exec into container", - "armoBuiltin": true, - "useUntilKubescapeVersion": "v1.0.133" - }, - "creationTime": "", - "rule": "\npackage armo_builtins\nimport data.cautils as cautils\n\n# input: clusterrolebindings + rolebindings\n# apiversion: rbac.authorization.k8s.io/v1 \n# returns subjects that can exec into container\n\ndeny[msga] {\n\t roles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcan_exec_to_pod_resource(rule)\n\tcan_exec_to_pod_verb(rule)\n\n\trolebinding.roleRef.kind == \"Role\"\n\trolebinding.roleRef.name == role.metadata.name\n\t\n \tsubject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can exec into containers\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\n# input: clusterrolebindings + rolebindings\n# apiversion: rbac.authorization.k8s.io/v1 \n# returns subjects that can exec into container\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= 
role.rules[_]\n\tcan_exec_to_pod_resource(rule)\n\tcan_exec_to_pod_verb(rule)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\t\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can exec into containers\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n# input: clusterrolebindings + rolebindings\n# apiversion: rbac.authorization.k8s.io/v1 \n# returns subjects that can exec into container\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcan_exec_to_pod_resource(rule)\n\tcan_exec_to_pod_verb(rule)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\t\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can exec into containers\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\ncan_exec_to_pod_verb(rule) {\n\tcautils.list_contains(rule.verbs, \"create\")\n}\ncan_exec_to_pod_verb(rule) {\n\tcautils.list_contains(rule.verbs, \"*\")\n}\n\ncan_exec_to_pod_resource(rule) {\n\tcautils.list_contains(rule.resources, \"pods/exec\")\n\t\n}\ncan_exec_to_pod_resource(rule) {\n\tcautils.list_contains(rule.resources, \"pods/*\")\n}\ncan_exec_to_pod_resource(rule) {\n\tis_api_group(rule)\n\tcautils.list_contains(rule.resources, \"*\")\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "rbac.authorization.k8s.io" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "RoleBinding", - "ClusterRoleBinding", - "Role", - "ClusterRole" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "configInputs": null, "controlConfigInputs": null, - "description": "determines which users have permissions to exec into pods", - "remediation": "", + "description": "Azure role-based access control (RBAC) is an authorization system built on Azure Resource Manager that provides fine-grained access management of Azure resources.", + "remediation": "Enable Azure RBAC on AKS by using the following command: az aks update -g -n --enable-azure-rbac", "ruleQuery": "armo_builtins", - "relevantCloudProviders": null + "relevantCloudProviders": [ + "AKS" + ] }, { "guid": "", - "name": "ensure-that-the-etcd-data-directory-ownership-is-set-to-etcd-etcd", + "name": "ensure-that-the-scheduler.conf-file-ownership-is-set-to-root-root", "attributes": { - "armoBuiltin": true, "hostSensorRule": "true" }, "creationTime": "", - "rule": "package armo_builtins\n\nimport data.cautils as cautils\nimport 
future.keywords.in\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"etcdDataDir\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"schedulerInfo\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n", "resourceEnumerator": "", "ruleLanguage": "Rego", "match": [], @@ -18371,136 +30646,60 @@ "packageName": "cautils" } ], - "configInputs": null, "controlConfigInputs": null, - "description": "Ensure that the etcd data directory ownership is set to `etcd:etcd`.", - "remediation": "On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command:\n\n \n```\nps -ef | grep etcd\n\n```\n Run the below command (based on the etcd data directory found above). 
For example,\n\n \n```\nchown etcd:etcd /var/lib/etcd\n\n```", + "description": "Ensure that the `scheduler.conf` file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/scheduler.conf\n\n```", "ruleQuery": "", "relevantCloudProviders": null }, { "guid": "", - "name": "list-all-mutating-webhooks", - "attributes": { - "armoBuiltin": true, - "m$K8sThreatMatrix": "Persistence::Malicious admission controller" - }, + "name": "psp-required-drop-capabilities", "creationTime": "", - "rule": "package armo_builtins\n\n\ndeny [msga] {\n mutatingwebhooks := [mutatingwebhook | mutatingwebhook = input[_]; mutatingwebhook.kind == \"MutatingWebhookConfiguration\"]\n mutatingwebhook := mutatingwebhooks[_]\n\n \tmsga := {\n\t\t\"alertMessage\": sprintf(\"The following mutating webhook configuration should be checked %v.\", [mutatingwebhook.metadata.name]),\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [\"\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [mutatingwebhook]\n\t\t}\n\t}\n}", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if all PSPs don't have requiredDropCapabilities\n\t# if even one PSP has requiredDropCapabilities, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tnot has_requiredDropCapabilities(psp.spec)\n\t}\n\n\t# return al the PSPs that don't have requiredDropCapabilities\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tnot has_requiredDropCapabilities(psp.spec)\n\n\tfixpath := {\"path\":\"spec.requiredDropCapabilities[0]\", \"value\":\"ALL\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' doesn't have requiredDropCapabilities.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixpath],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n\nhas_requiredDropCapabilities(spec) {\n\tcount(spec.requiredDropCapabilities) > 0\n}\n", "resourceEnumerator": "", "ruleLanguage": "Rego", "match": [ { "apiGroups": [ - "admissionregistration.k8s.io" + "policy" ], "apiVersions": [ - "*" + "v1beta1" ], "resources": [ - "MutatingWebhookConfiguration" + "PodSecurityPolicy" ] } ], "ruleDependencies": [], - "configInputs": null, "controlConfigInputs": null, - "description": "Returns mutating webhook configurations to be verified", - "remediation": "Analyze webhook for malicious behavior", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "excessive_amount_of_vulnerabilities_pods", - "attributes": { - "microsoftK8sThreatMatrix": "Initial access::Exposed critical vulnerable pods", - "armoBuiltin": true, - "useFromKubescapeVersion": "v1.0.133", - "imageScanRelated": true - }, - "creationTime": "", - "rule": "package armo_builtins\nimport data\n\ndeny[msga] {\n pods := [ x | x = input[_]; x.kind == \"Pod\" ]\n vulns := [ x | x = input[_]; x.kind == \"ImageVulnerabilities\"]\n\n pod := pods[_]\n vuln := vulns[_]\n\n # vuln data is relevant \n count(vuln.data) > 0 \n \n # get container image name\n container := pod.spec.containers[i]\n\n # image has vulnerabilities\n container.image == vuln.metadata.name\n\n # Has ^ amount of vulnerabilities\n check_num_vulnerabilities(vuln)\n\n related_objects := [pod, vuln]\n\n path := 
sprintf(\"status.containerStatuses[%v].imageID\", [format_int(i, 10)])\n\n metadata = {\n \t\"name\": pod.metadata.name,\n \t\"namespace\": pod.metadata.namespace\n }\n\n external_objects = {\n \t\"apiVersion\": \"result.vulnscan.com/v1\",\n \t\"kind\": pod.kind,\n \t\"metadata\": metadata,\n \t\"relatedObjects\": related_objects\n }\n\n msga := {\n \t\"alertMessage\": sprintf(\"pod '%v' exposed with critical vulnerabilities\", [pod.metadata.name]),\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [path],\n \t\"fixPaths\": [],\n \t\"alertObject\": {\n \"externalObjects\": external_objects\n \t}\n }\n}\n\ncheck_num_vulnerabilities(vuln) {\n exists := count([ x | x = vuln.data[_]; x.severity == \"Critical\" ])\n\n str_max := data.postureControlInputs.max_critical_vulnerabilities[_]\n exists > to_number(str_max)\n}\n\ncheck_num_vulnerabilities(vuln) {\n exists := count([ x | x = vuln.data[_]; x.severity == \"High\" ])\n \n str_max := data.postureControlInputs.max_high_vulnerabilities[_]\n exists > to_number(str_max)\n}", - "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n pods := [ x | x = input[_]; x.kind == \"Pod\" ]\n vulns := [ x | x = input[_]; x.kind == \"ImageVulnerabilities\"]\n\n pod := pods[_]\n vuln := vulns[_]\n\n # vuln data is relevant \n count(vuln.data) > 0 \n \n # get container image name\n container := pod.spec.containers[i]\n\n # image has vulnerabilities\n container.image == vuln.metadata.name\n\n related_objects := [pod, vuln]\n\n path := sprintf(\"status.containerStatuses[%v].imageID\", [format_int(i, 10)])\n\n metadata = {\n \t\"name\": pod.metadata.name,\n \t\"namespace\": pod.metadata.namespace\n }\n\n external_objects = {\n \t\"apiVersion\": \"result.vulnscan.com/v1\",\n \t\"kind\": pod.kind,\n \t\"metadata\": metadata,\n \t\"relatedObjects\": related_objects\n }\n\n msga := {\n \t\"alertMessage\": sprintf(\"pod '%v' exposed with critical vulnerabilities\", [pod.metadata.name]),\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [path],\n \t\"fixPaths\": [],\n \t\"alertObject\": {\n \"externalObjects\": external_objects\n \t}\n }\n}", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "dynamicMatch": [ - { - "apiGroups": [ - "armo.vuln.images", - "image.vulnscan.com" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "ImageVulnerabilities" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "kubernetes.api.client" - } - ], - "configInputs": [ - "settings.postureControlInputs.max_critical_vulnerabilities", - "settings.postureControlInputs.max_high_vulnerabilities" - ], - "controlConfigInputs": [ - { - "path": "settings.postureControlInputs.max_critical_vulnerabilities", - "name": "Max critical vulnerabilities", - "description": "Maximum amount of allowed critical risk vulnerabilities" - }, - { - "path": "settings.postureControlInputs.max_high_vulnerabilities", - "name": "Max high vulnerabilities", - "description": "Maximum amount of allowed high risk vulnerabilities" - } - ], - "description": "determines which users have permissions to exec into pods", + "description": "", "remediation": "", "ruleQuery": "armo_builtins", "relevantCloudProviders": null }, { "guid": "", - "name": "rule-can-delete-logs", + "name": "ensure-that-the-controller-manager.conf-file-ownership-is-set-to-root-root", "attributes": { - "microsoftK8sThreatMatrix": "Defense Evasion::Clear container logs", - "armoBuiltin": 
true, - "useUntilKubescapeVersion": "v1.0.133" + "hostSensorRule": "true" }, "creationTime": "", - "rule": "package armo_builtins\nimport data.cautils as cautils\n\n\n\n# fails if user can delete logs of pod \n#RoleBinding to Role\ndeny [msga] {\n roles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n canDeleteLogs(rule)\n\n rolebinding.roleRef.kind == \"Role\"\n rolebinding.roleRef.name == role.metadata.name\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n \"alertMessage\": sprintf(\"The following %v: %v can delete logs\", [subject.kind, subject.name]),\n \"alertScore\": 6,\n \"failedPaths\": [path],\n \"packagename\": \"armo_builtins\",\n \"alertObject\": {\n \"k8sApiObjects\": [role,rolebinding],\n\t\t\t \"externalObjects\": {\n\t\t\t\t \"subject\" : [subject]\n\t\t\t }\n }\n }\n\n}\n\n\n# fails if user can delete logs of pod \n# RoleBinding to ClusterRole\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n canDeleteLogs(rule)\n\n rolebinding.roleRef.kind == \"ClusterRole\"\n rolebinding.roleRef.name == role.metadata.name\n\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n \"alertMessage\": sprintf(\"The following %v: %v can delete logs\", [subject.kind, subject.name]),\n \"alertScore\": 6,\n \"failedPaths\": [path],\n \"packagename\": \"armo_builtins\",\n \"alertObject\": {\n \"k8sApiObjects\": [role,rolebinding],\n \"externalObjects\": {\n \"subject\" : [subject]\n }\n }\n }\n}\n\n# fails if user can delete logs of pod \n# ClusterRoleBinding to ClusterRole\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n clusterrolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n clusterrolebinding := clusterrolebindings[_]\n\n rule:= role.rules[_]\n canDeleteLogs(rule)\n\n clusterrolebinding.roleRef.kind == \"ClusterRole\"\n clusterrolebinding.roleRef.name == role.metadata.name\n\n\n subject := clusterrolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n \"alertMessage\": sprintf(\"The following %v: %v can delete logs\", [subject.kind, subject.name]),\n \"alertScore\": 6,\n \"failedPaths\": [path],\n \"packagename\": \"armo_builtins\",\n \"alertObject\": {\n \"k8sApiObjects\": [role,clusterrolebinding],\n \"externalObjects\": {\n \"subject\" : [subject]\n }\n }\n }\n}\n\n\n\n\ncanDeleteLogs(rule) {\n cautils.list_contains(rule.resources,\"*\")\n is_api_group(rule)\n cautils.list_contains(rule.verbs,\"*\")\n}\n\ncanDeleteLogs(rule) {\n cautils.list_contains(rule.resources,\"pods/log\")\n cautils.list_contains(rule.verbs,\"delete\")\n}\ncanDeleteLogs(rule) {\n cautils.list_contains(rule.resources,\"pods/log\")\n cautils.list_contains(rule.verbs,\"*\")\n}\n\ncanDeleteLogs(rule) {\n cautils.list_contains(rule.resources,\"*\")\n is_api_group(rule)\n cautils.list_contains(rule.verbs,\"delete\")\n}\n\ncanDeleteLogs(rule) {\n cautils.list_contains(rule.resources,\"pods/*\")\n cautils.list_contains(rule.verbs,\"delete\")\n}\ncanDeleteLogs(rule) {\n 
cautils.list_contains(rule.resources,\"pods/*\")\n cautils.list_contains(rule.verbs,\"*\")\n}\n\ncanDeleteLogs(rule) {\n cautils.list_contains(rule.resources,\"*\")\n is_api_group(rule)\n cautils.list_contains(rule.verbs,\"deletecollection\")\n}\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"controllerManagerInfo\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n", "resourceEnumerator": "", "ruleLanguage": "Rego", - "match": [ + "match": [], + "dynamicMatch": [ { "apiGroups": [ - "*" + "hostdata.kubescape.cloud" ], "apiVersions": [ - "*" + "v1beta0" ], "resources": [ - "Role", - "ClusterRole", - "ClusterRoleBinding", - "RoleBinding" + "ControlPlaneInfo" ] } ], @@ -18509,135 +30708,17 @@ "packageName": "cautils" } ], - "configInputs": null, "controlConfigInputs": null, - "description": "determines which users can delete logs inside a container", - "remediation": "", - "ruleQuery": "armo_builtins", + "description": "Ensure that the `controller-manager.conf` file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchown root:root /etc/kubernetes/controller-manager.conf\n\n```", + "ruleQuery": "", "relevantCloudProviders": null }, { "guid": "", - "name": "immutable-container-filesystem", - "attributes": { - "armoBuiltin": true - }, + "name": "ensure-that-the-api-server-profiling-argument-is-set-to-false", "creationTime": "", - "rule": "package armo_builtins\n\n\n# Fails if pods has container with mutable filesystem\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n result := is_mutable_filesystem(container, beggining_of_path, i)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v has mutable filesystem\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with mutable filesystem \ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n result := is_mutable_filesystem(container, beggining_of_path, i)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has container with mutable filesystem \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_mutable_filesystem(container, beggining_of_path, i)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Default of readOnlyRootFilesystem is false. 
This field is only in container spec and not pod spec\nis_mutable_filesystem(container, beggining_of_path, i) = [failed_path, fixPath] {\n\tcontainer.securityContext.readOnlyRootFilesystem == false\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.readOnlyRootFilesystem\", [beggining_of_path, format_int(i, 10)])\n\tfixPath = \"\"\n }\n\n is_mutable_filesystem(container, beggining_of_path, i) = [failed_path, fixPath] {\n\tnot container.securityContext.readOnlyRootFilesystem == false\n not container.securityContext.readOnlyRootFilesystem == true\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.readOnlyRootFilesystem\", [beggining_of_path, format_int(i, 10)]), \"value\": \"true\"}\n\tfailed_path = \"\"\n }\n\n\n get_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "fails if container has mutable filesystem", - "remediation": "Make sure that the securityContext.readOnlyRootFilesystem field in the container/pod spec is set to true", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "set-seccomp-profile-RuntimeDefault", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\n# Fails if pod does not define seccompProfile as RuntimeDefault\ndeny[msga] {\n wl := input[_]\n wl.kind == \"Pod\"\n wl_spec := wl.spec\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\t\n\tpath_to_search := [\"securityContext\", \"seccompProfile\", \"type\"]\n\n\tseccompProfile_result := get_seccompProfile_definition(wl_spec, container, i, path_to_containers, path_to_search)\n\tseccompProfile_result.failed == true\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not define seccompProfile as RuntimeDefault\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": seccompProfile_result.failed_path,\n\t\t\"fixPaths\": seccompProfile_result.fix_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if workload does not define seccompProfile as RuntimeDefault\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n wl_spec := wl.spec.template.spec\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\t\n\tpath_to_search := [\"securityContext\", \"seccompProfile\", \"type\"]\n\n\tseccompProfile_result := get_seccompProfile_definition(wl_spec, container, i, path_to_containers, path_to_search)\n\tseccompProfile_result.failed == true\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not define seccompProfile as RuntimeDefault\", 
[wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": seccompProfile_result.failed_path,\n\t\t\"fixPaths\": seccompProfile_result.fix_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if CronJob does not define seccompProfile as RuntimeDefault\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n wl_spec := wl.spec.jobTemplate.spec.template.spec\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\t\n\tpath_to_search := [\"securityContext\", \"seccompProfile\", \"type\"]\n\n\tseccompProfile_result := get_seccompProfile_definition(wl_spec, container, i, path_to_containers, path_to_search)\n\tseccompProfile_result.failed == true\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not define seccompProfile as RuntimeDefault\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": seccompProfile_result.failed_path,\n\t\t\"fixPaths\": seccompProfile_result.fix_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# container definition takes precedence\nget_seccompProfile_definition(wl, container, i, path_to_containers, path_to_search) = seccompProfile_result {\n\tcontainer.securityContext.seccompProfile.type == \"RuntimeDefault\"\n seccompProfile_result := {\"failed\": false, \"failed_path\": [], \"fix_path\": []}\n\n} else = seccompProfile_result {\n\tcontainer.securityContext.seccompProfile.type != \"RuntimeDefault\"\n failed_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)])\n seccompProfile_result := {\"failed\": true, \"failed_path\": [failed_path], \"fix_path\": []}\n\n} else = seccompProfile_result {\n\twl.securityContext.seccompProfile.type == \"RuntimeDefault\" \n seccompProfile_result := {\"failed\": false, \"failed_path\": [], \"fix_path\": []}\n\n} else = seccompProfile_result {\n\twl.securityContext.seccompProfile.type != \"RuntimeDefault\" \n\tfailed_path := sprintf(\"%s.%s\", [trim_suffix(concat(\".\", path_to_containers), \".containers\"), concat(\".\", path_to_search)])\n seccompProfile_result := {\"failed\": true, \"failed_path\": [failed_path], \"fix_path\": []}\n\n} else = seccompProfile_result{\n\tfix_path := [{\"path\": sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]), \"value\":\"RuntimeDefault\"}]\n\tseccompProfile_result := {\"failed\": true, \"failed_path\": [], \"fix_path\": fix_path}\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "fails if container does not define seccompProfile as RuntimeDefault", - "remediation": "Make sure you define seccompProfile as RuntimeDefault at workload or container lever.", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": 
"ensure-that-the-api-server-kubelet-client-certificate-and-kubelet-client-key-arguments-are-set-as-appropriate", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"certificate based kubelet authentication is not enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\twanted = [\n\t\t\"--kubelet-client-certificate\",\n\t\t\"--kubelet-client-key\",\n\t]\n\n\tfix_paths = [{\n\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd) + i]),\n\t\t\"value\": sprintf(\"%s=\", [wanted[i]]),\n\t} |\n\t\twanted[i]\n\t\tnot contains(full_cmd, wanted[i])\n\t]\n\n\tcount(fix_paths) > 0\n\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": fix_paths,\n\t}\n}\n", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"profiling is enabled. This could potentially be exploited to uncover system and program details.\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--profiling=true\")\n\tfixed = replace(cmd[i], \"--profiling=true\", \"--profiling=false\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--profiling\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--profiling=false\",\n\t\t}],\n\t}\n}\n", "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", "ruleLanguage": "Rego", "match": [ @@ -18654,20 +30735,249 @@ } ], "ruleDependencies": [], - "configInputs": null, "controlConfigInputs": null, - "description": "Enable certificate based 
kubelet authentication.", - "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and kubelets. Then, edit API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the kubelet client certificate and key parameters as below.\n\n \n```\n--kubelet-client-certificate=\n--kubelet-client-key=\n\n```\n\n#### Impact Statement\nYou require TLS to be configured on apiserver as well as kubelets.\n\n#### Default Value\nBy default, certificate-based kubelet authentication is not set.", + "description": "Disable profiling, if not needed.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--profiling=false\n\n```\n\n#### Impact Statement\nProfiling information would not be available.\n\n#### Default Value\nBy default, profiling is enabled.", "ruleQuery": "", "relevantCloudProviders": null }, { "guid": "", - "name": "configured-readiness-probe", + "name": "ingress-and-egress-blocked", + "creationTime": "", + "rule": "package armo_builtins\n\n\n# For pods\ndeny[msga] {\n\tpods := [pod | pod= input[_]; pod.kind == \"Pod\"]\n\tnetworkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tpod := pods[_]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; pod_connected_to_network_policy(pod, networkpolicie)]\n\tcount(network_policies_connected_to_pod) > 0\n\tgoodPolicies := [goodpolicie | goodpolicie= network_policies_connected_to_pod[_]; is_ingerss_egress_policy(goodpolicie)]\n\tcount(goodPolicies) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not have ingress/egress defined\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n\n}\n\n# For pods\ndeny[msga] {\n \t\tpods := [pod | pod= input[_]; pod.kind == \"Pod\"]\n\t\tnetworkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\t\tpod := pods[_]\n\t\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; pod_connected_to_network_policy(pod, networkpolicie)]\n\t\tcount(network_policies_connected_to_pod) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not have ingress/egress defined\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n\n}\n\n# For workloads\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; wlConnectedToNetworkPolicy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) > 0\n goodPolicies := [goodpolicie | goodpolicie= network_policies_connected_to_pod[_]; is_ingerss_egress_policy(goodpolicie)]\n\tcount(goodPolicies) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# For workloads\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; wlConnectedToNetworkPolicy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# For Cronjobs\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; cronjob_connected_to_network_policy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) > 0\n goodPolicies := [goodpolicie | goodpolicie= network_policies_connected_to_pod[_]; is_ingerss_egress_policy(goodpolicie)]\n\tcount(goodPolicies) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# For Cronjobs\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; cronjob_connected_to_network_policy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) < 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}\n\npod_connected_to_network_policy(pod, networkpolicie){\n\tis_same_namespace(networkpolicie.metadata, pod.metadata)\n count(networkpolicie.spec.podSelector) > 0\n count({x | networkpolicie.spec.podSelector.matchLabels[x] == pod.metadata.labels[x]}) == count(networkpolicie.spec.podSelector.matchLabels)\n}\n\npod_connected_to_network_policy(pod, networkpolicie){\n\tis_same_namespace(networkpolicie.metadata ,pod.metadata)\n count(networkpolicie.spec.podSelector) == 0\n}\n\nwlConnectedToNetworkPolicy(wl, networkpolicie){\n\tis_same_namespace(wl.metadata , networkpolicie.metadata)\n 
count(networkpolicie.spec.podSelector) == 0\n}\n\n\nwlConnectedToNetworkPolicy(wl, networkpolicie){\n\tis_same_namespace(wl.metadata, networkpolicie.metadata)\n\tcount(networkpolicie.spec.podSelector) > 0\n count({x | networkpolicie.spec.podSelector.matchLabels[x] == wl.spec.template.metadata.labels[x]}) == count(networkpolicie.spec.podSelector.matchLabels)\n}\n\n\ncronjob_connected_to_network_policy(cj, networkpolicie){\n\tis_same_namespace(cj.metadata , networkpolicie.metadata)\n count(networkpolicie.spec.podSelector) == 0\n}\n\ncronjob_connected_to_network_policy(cj, networkpolicie){\n\tis_same_namespace(cj.metadata , networkpolicie.metadata)\n\tcount(networkpolicie.spec.podSelector) > 0\n count({x | networkpolicie.spec.podSelector.matchLabels[x] == cj.spec.jobTemplate.spec.template.metadata.labels[x]}) == count(networkpolicie.spec.podSelector.matchLabels)\n}\n\nis_ingerss_egress_policy(networkpolicie) {\n list_contains(networkpolicie.spec.policyTypes, \"Ingress\")\n}\n\nis_ingerss_egress_policy(networkpolicie) {\n list_contains(networkpolicie.spec.policyTypes, \"Egress\")\n}\n\nlist_contains(list, element) {\n some i\n list[i] == element\n}", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + }, + { + "apiGroups": [ + "networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "NetworkPolicy" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "fails if there are no ingress and egress defined for pod", + "remediation": "Make sure you define ingress and egress policies for all your Pods", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "psp-deny-allowprivilegeescalation", + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if there all PSPs have allowPrivilegeEscalation set to true\n\t# if even one PSP has allowPrivilegeEscalation set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.allowPrivilegeEscalation == true\n\t}\n\n\t# return al the PSPs that have allowPrivilegeEscalation set to true\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tpsp.spec.allowPrivilegeEscalation == true\n\n\tpath := \"spec.allowPrivilegeEscalation\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has allowPrivilegeEscalation set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "v1beta1" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "CVE-2022-39328", + "creationTime": "", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == 
\"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"grafana:\")\n\tis_vulnerable_image(image)\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-39328\",\n\t\t\t\"reviewPaths\": [path],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n\nis_vulnerable_image(image) {\n\tclean_image := replace(image,\"-ubuntu\",\"\")\n\tversion := split(clean_image, \":\")[1]\n\tversionTriplet := split(version, \".\")\n\tcount(versionTriplet) == 3\n\tmajor_version := to_number(versionTriplet[0])\n\tminorVersion := to_number(versionTriplet[1])\n\tsubVersion := to_number(versionTriplet[2]) \n\tisVulnerableVersion(major_version,minorVersion,subVersion)\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 9\n\tminorVersion == 2\n\tsubVersion < 4\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"grafana:\")\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-39328\",\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "a", + "remediation": "a", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-that-the-controller-manager-pod-specification-file-ownership-is-set-to-root-root", "attributes": { - "armoBuiltin": true + "hostSensorRule": "true" }, "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"controllerManagerInfo\", \"specsFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == 
user\n\townership.groupname == group\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "controlConfigInputs": null, + "description": "Ensure that the controller manager pod specification file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-controller-manager.yaml\n\n```", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-that-the-API-Server-only-makes-use-of-Strong-Cryptographic-Ciphers", + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\twanted = [\n\t\t\"TLS_AES_128_GCM_SHA256\",\n\t\t\"TLS_AES_256_GCM_SHA384\",\n\t\t\"TLS_CHACHA20_POLY1305_SHA256\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\",\n\t\t\"TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\",\n\t\t\"TLS_RSA_WITH_3DES_EDE_CBC_SHA\",\n\t\t\"TLS_RSA_WITH_AES_128_CBC_SHA\",\n\t\t\"TLS_RSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_RSA_WITH_AES_256_CBC_SHA\",\n\t\t\"TLS_RSA_WITH_AES_256_GCM_SHA384\",\n\t]\n\tresult = invalid_flag(obj.spec.containers[0].command, wanted)\n\tmsg := {\n\t\t\"alertMessage\": \"The API server is not configured to use strong cryptographic ciphers\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--tls-cipher-suites=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n\n# Assume flag set only once\ninvalid_flag(cmd, wanted) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tmissing = [x | x = wanted[_]; not x in flag.values]\n\tcount(missing) > 0\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, missing)\n\tfixed_flag = sprintf(\"%s=%s\", [\"--tls-cipher-suites\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": 
fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd, wanted) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--tls-cipher-suites\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": sprintf(\"--tls-cipher-suites=%s\", [concat(\",\", wanted)]),\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Ensure that the API server is configured to only use strong cryptographic ciphers.", + "remediation": "Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml on the Control Plane node and set the below parameter.\n\n \n```\n--tls-cipher-suites=TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_GCM_SHA384.\n\n```\n\n#### Impact Statement\nAPI server clients that cannot support modern cryptographic ciphers will not be able to make connections to the API server.\n\n#### Default Value\nBy default the Kubernetes API server supports a wide range of TLS ciphers", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "set-supplementalgroups-values", + "creationTime": "", + "rule": "package armo_builtins\n\n_deny_supplemental_groups_msg(kind_label, obj, groups, path) = msga {\n\t# regal ignore: use-in-operator\n\tgroups[_] == 0\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%s: %v uses disallowed supplemental group '0'\", [kind_label, obj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [{\"path\": path, \"value\": \"REMOVE_GROUP_0\"}],\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\n### POD ###\n\n# Fails if securityContext.supplementalGroups contains the root group (0)\ndeny[msga] {\n\t# verify the object kind\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\n\tgroups := pod.spec.securityContext.supplementalGroups\n\tpath := \"spec.securityContext.supplementalGroups\"\n\tmsga := _deny_supplemental_groups_msg(\"Pod\", pod, groups, path)\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.supplementalGroups contains the root group (0)\ndeny[msga] {\n\t# verify the object kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\", 
\"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tmanifest_kind[wl.kind]\n\n\tgroups := wl.spec.template.spec.securityContext.supplementalGroups\n\tpath := \"spec.template.spec.securityContext.supplementalGroups\"\n\tmsga := _deny_supplemental_groups_msg(\"Workload\", wl, groups, path)\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.supplementalGroups contains the root group (0)\ndeny[msga] {\n\t# verify the object kind\n\tcj := input[_]\n\tcj.kind == \"CronJob\"\n\n\tgroups := cj.spec.jobTemplate.spec.template.spec.securityContext.supplementalGroups\n\tpath := \"spec.jobTemplate.spec.template.spec.securityContext.supplementalGroups\"\n\tmsga := _deny_supplemental_groups_msg(\"CronJob\", cj, groups, path)\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Fails if securityContext.supplementalgroups is not set.", + "remediation": "Set securityContext.supplementalgroups values", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "configured-readiness-probe", + "creationTime": "", "rule": "package armo_builtins\n\n\n# Fails if pod does not have container with readinessProbe\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tnot container.readinessProbe\n\tfix_path := {\"path\": sprintf(\"spec.containers[%v].readinessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have readinessProbe\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload does not have container with readinessProbe\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n not container.readinessProbe\n\tfix_path := {\"path\": sprintf(\"spec.template.spec.containers[%v].readinessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have readinessProbe\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob does not have container with readinessProbe\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n not container.readinessProbe\n\tfix_path := {\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].readinessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have readinessProbe\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n", "resourceEnumerator": "", "ruleLanguage": "Rego", @@ -18711,7 +31021,6 @@ } ], "ruleDependencies": [], - "configInputs": null, "controlConfigInputs": null, "description": "Readiness probe is not configured", "remediation": "Ensure Readiness probe is configured", @@ -18720,15 +31029,54 @@ }, { "guid": "", - "name": "CVE-2022-39328", - "attributes": { - "armoBuiltin": true - }, + "name": "endpoints-in-default-namespace", "creationTime": "", - "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"grafana:\")\n\tis_vulnerable_image(image)\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-39328\",\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n\nis_vulnerable_image(image) {\n\tclean_image := replace(image,\"-ubuntu\",\"\")\n\tversion := split(clean_image, \":\")[1]\n\tversionTriplet := split(version, \".\")\n\tcount(versionTriplet) == 3\n\tmajor_version := to_number(versionTriplet[0])\n\tminorVersion := to_number(versionTriplet[1])\n\tsubVersion := to_number(versionTriplet[2]) \n\tisVulnerableVersion(major_version,minorVersion,subVersion)\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 9\n\tminorVersion == 2\n\tsubVersion < 4\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"grafana:\")\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-39328\",\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n", + "resourceEnumerator": "", "ruleLanguage": "Rego", "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Endpoints" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "", + "remediation": "", + 
"ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "rule-identify-blocklisted-image-registries", + "attributes": { + "m$K8sThreatMatrix": "Initial Access::Compromised images in registry", + "useUntilKubescapeVersion": "v2.3.8" + }, + "creationTime": "", + "rule": "package armo_builtins\n\n# Check for images from blocklisted repos\n\nuntrustedImageRepo[msga] {\n\tpod := input[_]\n\tk := pod.kind\n\tk == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tpath := sprintf(\"spec.containers[%v].image\", [format_int(i, 10)])\n\timage := container.image\n untrusted_or_public_registries(image)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\nuntrustedImageRepo[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\timage := container.image\n untrusted_or_public_registries(image)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\nuntrustedImageRepo[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\timage := container.image\n untrusted_or_public_registries(image)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\nuntrusted_or_public_registries(image){\n\t# see default-config-inputs.json for list values\n\tuntrusted_registries := data.postureControlInputs.untrustedRegistries\n\tregistry := untrusted_registries[_]\n\tregex.match(regexify(registry), docker_host_wrapper(image))\n}\n\nuntrusted_or_public_registries(image){\n\t# see default-config-inputs.json for list values\n\tpublic_registries := data.postureControlInputs.publicRegistries\n\tregistry := public_registries[_]\n\tregex.match(regexify(registry), docker_host_wrapper(image))\n}\n\n\n# docker_host_wrapper - wrap an image without a host with a docker hub host 'docker.io'.\n# An image that doesn't contain '/' is assumed to not having a host and therefore associated with docker hub.\ndocker_host_wrapper(image) = result {\n not contains(image, \"/\")\n result := sprintf(\"docker.io/%s\", [image])\n} else := image\n\n\n\n# regexify - returns a registry regex to be searched only for the image host.\nregexify(registry) := result {\n\tendswith(registry, \"/\")\n\tresult = sprintf(\"^%s.*$\", [registry])\n} else := sprintf(\"^%s\\/.*$\", [registry])\n", + "resourceEnumerator": "", + 
"ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, { "apiGroups": [ "apps" @@ -18737,27 +31085,304 @@ "v1" ], "resources": [ - "Deployment" + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.publicRegistries", + "name": "Public registries", + "description": "Kubescape checks none of these public container registries are in use." + }, + { + "path": "settings.postureControlInputs.untrustedRegistries", + "name": "Registries block list", + "description": "Kubescape checks none of these user-provided container registries are in use." + } + ], + "description": "Identifying if pod container images are from unallowed registries", + "remediation": "Use images from safe registry", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "rule-can-ssh-to-pod-v1", + "attributes": { + "microsoftK8sThreatMatrix": "Execution::SSH server running inside container", + "useFromKubescapeVersion": "v1.0.133" + }, + "creationTime": "", + "rule": "package armo_builtins\n\n# input: pod\n# apiversion: v1\n# does:\treturns the external facing services of that pod\n\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tpodns := pod.metadata.namespace\n\tpodname := pod.metadata.name\n\tlabels := pod.metadata.labels\n\tfiltered_labels := json.remove(labels, [\"pod-template-hash\"])\n path := \"metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == podns\n\tservice.spec.selector == filtered_labels\n \n\thasSSHPorts(service)\n\n\twlvector = {\"name\": pod.metadata.name,\n\t\t\t\t\"namespace\": pod.metadata.namespace,\n\t\t\t\t\"kind\": pod.kind,\n\t\t\t\t\"relatedObjects\": service}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod %v/%v exposed by SSH services: %v\", [podns, podname, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tlabels := wl.spec.template.metadata.labels\n path := \"spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\thasSSHPorts(service)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": service}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tlabels := wl.spec.jobTemplate.spec.template.metadata.labels\n path := \"spec.jobTemplate.spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == 
\"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\thasSSHPorts(service)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": service}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.port == 22\n}\n\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.port == 2222\n}\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.targetPort == 22\n}\n\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.targetPort == 2222\n}\n", + "resourceEnumerator": "package armo_builtins\n\n# input: pod\n# apiversion: v1\n# does:\treturns the external facing services of that pod\n\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tpodns := pod.metadata.namespace\n\tpodname := pod.metadata.name\n\tlabels := pod.metadata.labels\n\tfiltered_labels := json.remove(labels, [\"pod-template-hash\"])\n path := \"metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == podns\n\tservice.spec.selector == filtered_labels\n\n\n\twlvector = {\"name\": pod.metadata.name,\n\t\t\t\t\"namespace\": pod.metadata.namespace,\n\t\t\t\t\"kind\": pod.kind,\n\t\t\t\t\"relatedObjects\": service}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod %v/%v exposed by SSH services: %v\", [podns, podname, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tlabels := wl.spec.template.metadata.labels\n path := \"spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": service}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tlabels := wl.spec.jobTemplate.spec.template.metadata.labels\n path := \"spec.jobTemplate.spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": service}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "Service" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" ] } ], "ruleDependencies": [], - "configInputs": null, "controlConfigInputs": null, - "description": "a", - "remediation": "a", + "description": "denies pods with SSH ports opened(22/222)", + "remediation": "", "ruleQuery": "armo_builtins", "relevantCloudProviders": null }, { "guid": "", - "name": "ensure-that-the-scheduler-pod-specification-file-ownership-is-set-to-root-root", + "name": "insecure-capabilities", + "creationTime": "", + "rule": "package armo_builtins\n\nimport data.cautils\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n result := is_dangerous_capabilities(container, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v have dangerous capabilities\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n result := is_dangerous_capabilities(container, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in workload: %v have dangerous capabilities\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n result := is_dangerous_capabilities(container, start_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in cronjob: %v have dangerous capabilities\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": result,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_dangerous_capabilities(container, start_of_path, i) = path {\n\t# see default-config-inputs.json for list values\n insecureCapabilities := data.postureControlInputs.insecureCapabilities\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capability = container.securityContext.capabilities.add[k]; cautils.list_contains(insecureCapabilities, capability)]\n\tcount(path) > 0\n}", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + 
"resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.insecureCapabilities", + "name": "Insecure capabilities", + "description": "Kubescape looks for these capabilities in containers, which might lead to attackers getting elevated privileges in your cluster. You can see the full list of possible capabilities at https://man7.org/linux/man-pages/man7/capabilities.7.html." + } + ], + "description": "fails if container has insecure capabilities", + "remediation": "Remove all insecure capabilities which aren’t necessary for the container.", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "pod-security-admission-applied-1", + "creationTime": "", + "rule": "package armo_builtins\nimport future.keywords.every\n\n# Fails if no 3rd party security admission exists and namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot admission_policy_enabled(namespace)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"YOUR_VALUE\"}\n \n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\nadmission_policy_enabled(namespace){\n\tsome label, _ in namespace.metadata.labels \n startswith(label, \"pod-security.kubernetes.io/enforce\")\n}\n\nhas_external_policy_control(inp){\n admissionwebhook := inp[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", + "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "ValidatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Checks that every namespace enabled pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks)", + "remediation": "Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads.\n\n#### Impact Statement\nWhere policy control systems are in place, there is a risk that workloads required for the operation of the cluster may be stopped from running. 
Care is required when implementing admission control policies to ensure that this does not occur.\n\n#### Default Value\nBy default, Pod Security Admission is enabled but no policies are in place.", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "kubelet-event-qps", "attributes": { - "armoBuiltin": true, "hostSensorRule": "true" }, "creationTime": "", - "rule": "package armo_builtins\n\nimport data.cautils as cautils\nimport future.keywords.in\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"schedulerInfo\", \"specsFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.9 https://workbench.cisecurity.org/sections/1126668/recommendations/1838656\n\n# if --event-qps is present rule should pass\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\t# \"--event-qps\" is DEPRECATED\n\t# not contains(command, \"--event-qps\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.eventRecordQPS == 0\n\n\tmsga := {\n\t\t\"alertMessage\": \"Value of the eventRecordQPS argument is set to 0\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": [\"eventRecordQPS\"],\n\t\t\"failedPaths\": [\"eventRecordQPS\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\t# \"--event-qps\" is DEPRECATED\n\t# not contains(command, \"--event-qps\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": 
{\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture.", + "remediation": "Set --event-qps argument to appropiate level or if using a config file set the eventRecordQPS property to the value other than 0", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "rule-can-create-pv", + "attributes": { + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user has create access to persistent volumes\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"persistentvolumes\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can create persistent volumes\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": 
[], + "controlConfigInputs": null, + "description": "determines which users can create persistent volumes", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ingress-in-default-namespace", + "creationTime": "", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Ingress" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-that-the-Kubernetes-PKI-directory-and-file-ownership-is-set-to-root-root", + "attributes": { + "hostSensorRule": "true" + }, + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"PKIFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]), \"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"PKIDir\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor 
data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n", "resourceEnumerator": "", "ruleLanguage": "Rego", "match": [], @@ -18779,23 +31404,458 @@ "packageName": "cautils" } ], - "configInputs": null, "controlConfigInputs": null, - "description": "Ensure that the scheduler pod specification file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```", + "description": "Ensure that the Kubernetes PKI directory and file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown -R root:root /etc/kubernetes/pki/\n\n```", "ruleQuery": "", "relevantCloudProviders": null }, { "guid": "", - "name": "list-all-namespaces", + "name": "psp-deny-hostpid", + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if there all PSPs have hostPID set to true\n\t# if even one PSP has hostPID set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.hostPID == true\n\t}\n\n\t# return al the PSPs that have hostPID set to true\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tpsp.spec.hostPID == true\n\n\tpath := \"spec.hostPID\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has hostPID set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "v1beta1" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "serviceaccount-in-default-namespace", + "creationTime": "", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ServiceAccount" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-that-the-kube-proxy-metrics-service-is-bound-to-localhost", "attributes": { "armoBuiltin": true }, "creationTime": "", - "rule": "package armo_builtins\n\n# returns all namespace objects in cluster\ndeny[msga] {\n\tnamespace = input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"review the following namespace: %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.3.1 - Ensure that the kube-proxy metrics service is bound to localhost\n\n# Deny if metricsBindAddress is exposed on all interfaces (0.0.0.0)\ndeny[msga] {\n\tconfigmap := input[_]\n\tis_kube_proxy_configmap(configmap)\n\n\tconfig_data := get_config_data(configmap)\n\tconfig := yaml.unmarshal(config_data)\n\n\tmetrics_address := config.metricsBindAddress\n\n\t# Fail if bound to all interfaces\n\tstartswith(metrics_address, \"0.0.0.0\")\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"kube-proxy metrics service is bound to all interfaces (%s) instead of localhost\", [metrics_address]),\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [\"data\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [configmap]},\n\t}\n}\n\n# Deny if metricsBindAddress is missing (may default to 0.0.0.0)\ndeny[msga] {\n\tconfigmap := input[_]\n\tis_kube_proxy_configmap(configmap)\n\n\tconfig_data := get_config_data(configmap)\n\tconfig := yaml.unmarshal(config_data)\n\n\tnot config.metricsBindAddress\n\n\tmsga := {\n\t\t\"alertMessage\": \"kube-proxy metrics service binding address is not configured (may default to all interfaces)\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [\"data\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [configmap]},\n\t}\n}\n\n# Deny if metricsBindAddress is empty string\ndeny[msga] {\n\tconfigmap := input[_]\n\tis_kube_proxy_configmap(configmap)\n\n\tconfig_data := get_config_data(configmap)\n\tconfig := yaml.unmarshal(config_data)\n\n\tconfig.metricsBindAddress == \"\"\n\n\tmsga := {\n\t\t\"alertMessage\": \"kube-proxy metrics service binding address is empty (may default to all interfaces)\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": 
[\"data\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [configmap]},\n\t}\n}\n\n# Helper: Check if this is the kube-proxy ConfigMap\nis_kube_proxy_configmap(configmap) {\n\tconfigmap.kind == \"ConfigMap\"\n\tconfigmap.metadata.name == \"kube-proxy\"\n\tconfigmap.metadata.namespace == \"kube-system\"\n}\n\n# Helper: Get config data from ConfigMap (try different field names)\nget_config_data(configmap) := data {\n\tdata := configmap.data[\"config.conf\"]\n}\n\nget_config_data(configmap) := data {\n\tnot configmap.data[\"config.conf\"]\n\tdata := configmap.data[\"kubeconfig.conf\"]\n}\n\nget_config_data(configmap) := data {\n\tnot configmap.data[\"config.conf\"]\n\tnot configmap.data[\"kubeconfig.conf\"]\n\tdata := configmap.data[\"config\"]\n}\n", + "resourceEnumerator": "package armo_builtins\n\n# Filter to identify kube-proxy ConfigMap in kube-system namespace\ndeny[msga] {\n\tconfigmap := input[_]\n\tconfigmap.kind == \"ConfigMap\"\n\tconfigmap.metadata.name == \"kube-proxy\"\n\tconfigmap.metadata.namespace == \"kube-system\"\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [configmap]},\n\t}\n}\n", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ConfigMap" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Do not bind the kube-proxy metrics port to non-loopback addresses. The kube-proxy metrics service exposes internal cluster metrics that could provide information useful to an attacker.", + "remediation": "If running kube-proxy with a configuration file, edit the kube-proxy configuration file and set the metricsBindAddress to `127.0.0.1:10249`.\n\nIf running kube-proxy with command line arguments, set `--metrics-bind-address=127.0.0.1:10249`.\n\nRestart kube-proxy for changes to take effect.", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "alert-rw-hostpath", + "attributes": { + "m$K8sThreatMatrix": "Persistence::Writable hostPath mount, Lateral Movement::Writable volume mounts on the host" + }, + "creationTime": "", + "rule": "package armo_builtins\n\n# Fails if container has a hostPath volume which is not readOnly\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volumes := pod.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := pod.spec.containers[i]\n\tvolume_mount := container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tstart_of_path := \"spec.\"\n\tfix_path := is_rw_mount(volume_mount, start_of_path, i, k)\n\n podname := pod.metadata.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod: %v has: %v as hostPath volume\", [podname, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volumes := wl.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := wl.spec.template.spec.containers[i]\n\tvolume_mount := container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tstart_of_path := \"spec.template.spec.\"\n\tfix_path := is_rw_mount(volume_mount, start_of_path, i, 
k)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\n\t}\n}\n\n# handles CronJobs\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n volumes := wl.spec.jobTemplate.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tvolume_mount := container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tfix_path := is_rw_mount(volume_mount, start_of_path, i, k) \n\n\n\tmsga := {\n\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\"packagename\": \"armo_builtins\",\n\t\"alertScore\": 7,\n\t\"fixPaths\": [fix_path],\n\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\nis_rw_mount(mount, start_of_path, i, k) = fix_path {\n\tnot mount.readOnly == true\n fix_path = {\"path\": sprintf(\"%vcontainers[%v].volumeMounts[%v].readOnly\", [start_of_path, i, k]), \"value\":\"true\"}\n}\n", "resourceEnumerator": "", "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + }, + { + "packageName": "kubernetes.api.client" + } + ], + "controlConfigInputs": null, + "description": "determines if any workload contains a hostPath volume with rw permissions", + "remediation": "Set the readOnly field of the mount to true", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "excessive_amount_of_vulnerabilities_pods", + "attributes": { + "microsoftK8sThreatMatrix": "Initial access::Exposed critical vulnerable pods", + "useFromKubescapeVersion": "v1.0.133", + "imageScanRelated": true + }, + "creationTime": "", + "rule": "package armo_builtins\n\ndeny[msga] {\n pods := [ x | x = input[_]; x.kind == \"Pod\" ]\n vulns := [ x | x = input[_]; x.kind == \"ImageVulnerabilities\"]\n\n pod := pods[_]\n vuln := vulns[_]\n\n # vuln data is relevant\n count(vuln.data) > 0\n\n # get container image name\n container := pod.spec.containers[i]\n\n # image has vulnerabilities\n container.image == vuln.metadata.name\n\n # Has ^ amount of vulnerabilities\n check_num_vulnerabilities(vuln)\n\n related_objects := [pod, vuln]\n\n path := sprintf(\"status.containerStatuses[%v].imageID\", [format_int(i, 10)])\n\n metadata = {\n \t\"name\": pod.metadata.name,\n \t\"namespace\": pod.metadata.namespace\n }\n\n external_objects = {\n \t\"apiVersion\": \"result.vulnscan.com/v1\",\n \t\"kind\": pod.kind,\n \t\"metadata\": metadata,\n \t\"relatedObjects\": related_objects\n }\n\n msga := {\n \t\"alertMessage\": sprintf(\"pod '%v' exposed with critical vulnerabilities\", [pod.metadata.name]),\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \"reviewPaths\": [path],\n \t\"failedPaths\": [path],\n \t\"fixPaths\": [],\n \t\"alertObject\": {\n \"externalObjects\": external_objects\n \t}\n }\n}\n\ncheck_num_vulnerabilities(vuln) {\n exists := 
count([ x | x = vuln.data[_]; x.severity == \"Critical\" ])\n\n str_max := data.postureControlInputs.max_critical_vulnerabilities[_]\n exists > to_number(str_max)\n}\n\ncheck_num_vulnerabilities(vuln) {\n exists := count([ x | x = vuln.data[_]; x.severity == \"High\" ])\n\n str_max := data.postureControlInputs.max_high_vulnerabilities[_]\n exists > to_number(str_max)\n}", + "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n pods := [ x | x = input[_]; x.kind == \"Pod\" ]\n vulns := [ x | x = input[_]; x.kind == \"ImageVulnerabilities\"]\n\n pod := pods[_]\n vuln := vulns[_]\n\n # vuln data is relevant \n count(vuln.data) > 0 \n \n # get container image name\n container := pod.spec.containers[i]\n\n # image has vulnerabilities\n container.image == vuln.metadata.name\n\n related_objects := [pod, vuln]\n\n path := sprintf(\"status.containerStatuses[%v].imageID\", [format_int(i, 10)])\n\n metadata = {\n \t\"name\": pod.metadata.name,\n \t\"namespace\": pod.metadata.namespace\n }\n\n external_objects = {\n \t\"apiVersion\": \"result.vulnscan.com/v1\",\n \t\"kind\": pod.kind,\n \t\"metadata\": metadata,\n \t\"relatedObjects\": related_objects\n }\n\n msga := {\n \t\"alertMessage\": sprintf(\"pod '%v' exposed with critical vulnerabilities\", [pod.metadata.name]),\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [path],\n \t\"fixPaths\": [],\n \t\"alertObject\": {\n \"externalObjects\": external_objects\n \t}\n }\n}", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "armo.vuln.images", + "image.vulnscan.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ImageVulnerabilities" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "kubernetes.api.client" + } + ], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.max_critical_vulnerabilities", + "name": "Max Critical vulnerabilities", + "description": "The maximum number of Critical severity vulnerabilities permitted." + }, + { + "path": "settings.postureControlInputs.max_high_vulnerabilities", + "name": "Max High vulnerabilities", + "description": "The maximum number of High severity vulnerabilities permitted." 
+ } + ], + "description": "determines which users have permissions to exec into pods", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "configured-liveness-probe", + "creationTime": "", + "rule": "package armo_builtins\n\n\n# Fails if container does not have livenessProbe - for pod\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tnot container.livenessProbe\n\tfix_path := {\"path\": sprintf(\"spec.containers[%v].livenessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have livenessProbe\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if container does not have livenessProbe - for wl\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n not container.livenessProbe\n\tfix_path := {\"path\": sprintf(\"spec.template.spec.containers[%v].livenessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have livenessProbe\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if container does not have livenessProbe - for cronjob\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n not container.livenessProbe\n\tfix_path := {\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].livenessProbe\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have livenessProbe\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"fixPaths\": [fix_path],\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Liveness probe is not configured", + "remediation": "Ensure Liveness probe is configured", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "anonymous-requests-to-kubelet-service-updated", + "attributes": { + "hostSensorRule": "true" + }, + "creationTime": "", + "rule": "package armo_builtins\n\n# CIS 4.2.1 https://workbench.cisecurity.org/sections/1126668/recommendations/1838638\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--anonymous-auth\")\n\tcontains(command, 
\"--anonymous-auth=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.authentication.anonymous.enabled == true\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [\"authentication.anonymous.enabled\"],\n\t\t\"failedPaths\": [\"authentication.anonymous.enabled\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Determines if anonymous requests to the kubelet service are allowed.", + "remediation": "Disable anonymous requests by setting the anonymous-auth flag to false, or using the kubelet configuration file.", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-that-the-api-server-encryption-provider-config-argument-is-set-as-appropriate", + "attributes": { + "hostSensorRule": "true" + }, + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Encryption config is not set at all\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\n\tcmd := obj.spec.containers[0].command\n\tnot contains(concat(\" \", cmd), \"--encryption-provider-config\")\n\n\tmsg := {\n\t\t\"alertMessage\": \"Encryption provider config file not set\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--encryption-provider-config=\",\n\t\t}],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\n# Encryption config is set but not covering secrets\ndeny[msg] {\n\tobj = 
input[_]\n\tis_control_plane_info(obj)\n\tconfig_file := obj.data.APIServerInfo.encryptionProviderConfigFile\n\tconfig_file_content = decode_config_file(base64.decode(config_file.content))\n\n\t# Check if the config conver secrets\n\tcount({true | \"secrets\" in config_file_content.resources[_].resources}) == 0\n\n\t# Add name to the failed object so that\n\t# it fit the format of the alert object\n\tfailed_obj := json.patch(config_file_content, [{\n\t\t\"op\": \"add\",\n\t\t\"path\": \"name\",\n\t\t\"value\": \"encryption-provider-config\",\n\t}])\n\n\tmsg := {\n\t\t\"alertMessage\": \"Encryption provider config is not covering secrets\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": failed_obj},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\ndecode_config_file(content) := parsed {\n\tparsed := yaml.unmarshal(content)\n} else := json.unmarshal(content)\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tfilter_input(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nfilter_input(obj){\n\tis_api_server(obj)\n}\nfilter_input(obj){\n\tis_control_plane_info(obj)\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": null, + "controlConfigInputs": null, + "description": "Encrypt etcd key-value store.", + "remediation": "Follow the Kubernetes documentation and configure a `EncryptionConfig` file. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the `--encryption-provider-config` parameter to the path of that file:\n\n \n```\n--encryption-provider-config=\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `--encryption-provider-config` is not set.", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "cluster-access-manager-api-eks", + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# check if the EKS cluster is configured with the Cluster Access Manager API \n# by checking in the ClusterDescribe resource if accessConfig.AuthenticationMode is set to 'CONFIG_MAP'\n# If \"authenticationmode\": \"API\" or \"authenticationmode\": \"API_AND_CONFIG_MAP\", it means the Cluster Access Manager API is enabled.\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\n\tconfig := cluster_config.data\n\n\tconfig.Cluster.AccessConfig.AuthenticationMode == \"CONFIG_MAP\"\n\n\tmsga := {\n\t\t\"alertMessage\": \"Cluster Access Manager API isn't enabled on the EKS cluster\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "check if the EKS cluster is configured with the Cluster Access Manager API by checking in the ClusterDescribe resource if accessConfig.AuthenticationMode is not set to 'CONFIG_MAP'", + "remediation": "If AuthenticationMode is set to 'API' or 'API_AND_CONFIG_MAP', it means the Cluster Access Manager API is enabled", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": [ + "EKS" + ] + }, + { + "guid": "", + "name": "ensure-that-the-etcd-pod-specification-file-ownership-is-set-to-root-root", + "attributes": { + "hostSensorRule": "true" + }, + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"etcdConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "controlConfigInputs": null, + "description": "Ensure that the `/etc/kubernetes/manifests/etcd.yaml` file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/etcd.yaml\n\n```", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-that-the-controller-manager-RotateKubeletServerCertificate-argument-is-set-to-true", + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"`RotateKubeletServerCertificate` is set to false on the controller manager\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"RotateKubeletServerCertificate=false\")\n\tfixed = replace(cmd[i], \"RotateKubeletServerCertificate=false\", \"RotateKubeletServerCertificate=true\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Enable kubelet server certificate rotation on controller-manager.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--feature-gates` parameter to include `RotateKubeletServerCertificate=true`.\n\n 
\n```\n--feature-gates=RotateKubeletServerCertificate=true\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `RotateKubeletServerCertificate` is set to \"true\" this recommendation verifies that it has not been disabled.", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "pod-security-admission-baseline-applied-2", + "creationTime": "", + "rule": "package armo_builtins\nimport future.keywords.in\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"baseline\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", + "resourceEnumerator": "package armo_builtins\nimport future.keywords.in\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that 
it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", + "ruleLanguage": "Rego", "match": [ { "apiGroups": [ @@ -18807,24 +31867,446 @@ "resources": [ "Namespace" ] + }, + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "MutatingWebhookConfiguration" + ] } ], "ruleDependencies": [], - "configInputs": null, "controlConfigInputs": null, - "description": "lists all namespaces for users to review", + "description": "Checks that every namespace enabled baseline pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", "remediation": "", "ruleQuery": "armo_builtins", "relevantCloudProviders": null }, { "guid": "", - "name": "k8s-audit-logs-enabled-cloud", + "name": "workload-mounted-secrets", + "creationTime": "", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tresource := input[_]\n\tvolumes_path := get_volumes_path(resource)\n\tvolumes := object.get(resource, volumes_path, [])\n\tvolume := volumes[i]\n\tvolume.secret\n\n\tsecret := input[_]\n\tsecret.kind == \"Secret\"\n\tsecret.metadata.name == volume.secret.secretName\n\tis_same_namespace(secret.metadata, resource.metadata)\n\n\tcontainers_path := get_containers_path(resource)\n\tcontainers := object.get(resource, containers_path, [])\n\tcontainer := containers[j]\n\tcontainer.volumeMounts\n\n \t# check if volume is mounted\n\tcontainer.volumeMounts[k].name == volume.name\n\n\tfailedPaths := sprintf(\"%s[%d].volumeMounts[%d]\", [concat(\".\", containers_path), j, k])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has mounted secret\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [failedPaths],\n\t\t\"failedPaths\": [failedPaths],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t},\n \"relatedObjects\": [{\n \"object\": secret\n }]\n\t}\n}\n\n# get_volume_path - get resource volumes paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_volumes_path(resource) := result {\n\tresource_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n# get_containers_path - get resource containers paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_containers_path(resource) := result {\n\tresource_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"Pod\"\nget_containers_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"containers\"]\n}\n\n# get_containers_path - get 
resource containers paths for \"CronJob\"\nget_containers_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"Pod\"\nget_volumes_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"CronJob\"\nget_volumes_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "Secret" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": null, + "controlConfigInputs": null, + "description": "fails if workload mounts secrets", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-that-the-admission-control-plugin-SecurityContextDeny-is-set-if-PodSecurityPolicy-is-not-used", + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\":\"The SecurityContextDeny addmission controller is not enabled. 
This could allow for privilege escalation in the cluster\", \n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--enable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tnot \"SecurityContextDeny\" in flag.values\n\tnot \"PodSecurityPolicy\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, [\"SecurityContextDeny\"])\n\tfixed_flag = sprintf(\"%s=%s\", [\"--enable-admission-plugins\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--enable-admission-plugins\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--enable-admission-plugins=SecurityContextDeny\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "The SecurityContextDeny admission controller can be used to deny pods which make use of some SecurityContext fields which could allow for privilege escalation in the cluster. 
This should be used where PodSecurityPolicy is not in place within the cluster.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--enable-admission-plugins` parameter to include `SecurityContextDeny`, unless `PodSecurityPolicy` is already in place.\n\n \n```\n--enable-admission-plugins=...,SecurityContextDeny,...\n\n```\n\n#### Impact Statement\nThis admission controller should only be used where Pod Security Policies cannot be used on the cluster, as it can interact poorly with certain Pod Security Policies\n\n#### Default Value\nBy default, `SecurityContextDeny` is not set.", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "insecure-port-flag", + "creationTime": "", + "rule": "package armo_builtins\n\nimport data.cautils\n\n# Fails if pod has insecure-port flag enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontains(pod.metadata.name, \"kube-apiserver\")\n container := pod.spec.containers[i]\n\tpath = is_insecure_port_flag(container, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"The API server container: %v has insecure-port flag enabled\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\nis_insecure_port_flag(container, i) = path {\n\tcommand := container.command[j]\n\tcontains(command, \"--insecure-port=1\")\n\tpath := sprintf(\"spec.containers[%v].command[%v]\", [format_int(i, 10), format_int(j, 10)])\n}", + "resourceEnumerator": "package armo_builtins\n\nimport data.cautils\n\n# Fails if pod has insecure-port flag enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontains(pod.metadata.name, \"kube-apiserver\")\n container := pod.spec.containers[_]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"The API server container: %v has insecure-port flag enabled\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "fails if the api server has insecure-port enabled", + "remediation": "Make sure that the insecure-port flag of the api server is set to 0", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "system-authenticated-allowed-to-take-over-cluster", "attributes": { - "armoBuiltin": true + "resourcesAggregator": "subject-role-rolebinding" }, "creationTime": "", - "rule": "package armo_builtins\nimport data.cautils as cautils\n\n# Check if audit logs is enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n\t\n # If enableComponents is empty, it will disable logging\n # https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters#loggingcomponentconfig\n\tis_logging_disabled(config)\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\":\"\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n# Check if audit logs is enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\t\n\tconfig := cluster_config.data\n # logSetup is an object representing the enabled or disabled Kubernetes control plane logs for your cluster.\n # types - available cluster control plane log types\n # https://docs.aws.amazon.com/eks/latest/APIReference/API_LogSetup.html\n goodTypes := [logSetup | logSetup = config.Cluster.Logging.ClusterLogging[_]; isAuditLogs(logSetup)]\n count(goodTypes) == 0\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixCommand\":\"aws eks update-cluster-config --region --name --logging '{'clusterLogging':[{'types':[''],'enabled':true}]}'\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\nis_logging_disabled(cluster_config) {\n\tnot cluster_config.logging_config.component_config.enable_components\n}\nis_logging_disabled(cluster_config) {\n\tcluster_config.logging_config.component_config.enable_components\n\tcount(cluster_config.logging_config.component_config.enable_components) == 0\n}\n\nisAuditLogs(logSetup) {\n logSetup.Enabled == true\n cautils.list_contains(logSetup.Types, \"api\")\n}\n\nisAuditLogs(logSetup) {\n logSetup.Enabled == true\n cautils.list_contains(logSetup.Types, \"audit\")\n}\n\nisAuditLogs(logSetup) {\n logSetup.enabled == true\n cautils.list_contains(logSetup.Types, \"authenticator\")\n}", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n subjectVector := input[_]\n\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(rolebinding.kind, \"Binding\")\n\n\n subject := rolebinding.subjects[k]\n # Check if the subject is gourp\n subject.kind == \"Group\"\n # Check if the subject is system:authenticated\n subject.name == \"system:authenticated\"\n\n\n # Find the bound roles\n\trole := subjectVector.relatedObjects[i]\n\tendswith(role.kind, \"Role\")\n\n # Check if the role and rolebinding bound\n is_same_role_and_binding(role, rolebinding)\n\n\n # Check if the role has access to workloads, exec, attach, portforward\n\trule := role.rules[p]\n rule.resources[l] in [\"*\",\"pods\", \"pods/exec\", \"pods/attach\", \"pods/portforward\",\"deployments\",\"statefulset\",\"daemonset\",\"jobs\",\"cronjobs\",\"nodes\",\"secrets\"]\n\n\tfinalpath := array.concat([\"\"], [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [i]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": \"system:authenticated has sensitive roles\",\n\t\t\"alertScore\": 5,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\" : subjectVector\n\t\t},\n\t}\n}\n\nis_same_role_and_binding(role, rolebinding) {\n rolebinding.kind == \"RoleBinding\"\n role.kind == \"Role\"\n rolebinding.metadata.namespace == role.metadata.namespace\n rolebinding.roleRef.name == role.metadata.name\n rolebinding.roleRef.kind == 
role.kind\n startswith(role.apiVersion, rolebinding.roleRef.apiGroup)\n}\n\nis_same_role_and_binding(role, rolebinding) {\n rolebinding.kind == \"ClusterRoleBinding\"\n role.kind == \"ClusterRole\"\n rolebinding.roleRef.name == role.metadata.name\n rolebinding.roleRef.kind == role.kind\n startswith(role.apiVersion, rolebinding.roleRef.apiGroup)\n}", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding", + "ClusterRoleBinding", + "Role", + "ClusterRole" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Fails in system:authenticated user has cluster takeover rbac permissions (is bound by a RoleBinding/ClusterRoleBinding)", + "remediation": "Remove any RBAC rules which allow system:authenticated users to perform actions", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "workload-with-administrative-roles", + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n wl := input[_]\n start_of_path := get_start_of_path(wl)\n wl_spec := object.get(wl, start_of_path, [])\n\n # get service account wl is using\n sa := input[_]\n sa.kind == \"ServiceAccount\"\n is_same_sa(wl_spec, sa.metadata, wl.metadata)\n\n # check service account token is mounted\n is_sa_auto_mounted(wl_spec, sa)\n\n # check if sa has administrative roles\n role := input[_]\n role.kind in [\"Role\", \"ClusterRole\"]\n is_administrative_role(role)\n\n rolebinding := input[_]\n rolebinding.kind in [\"RoleBinding\", \"ClusterRoleBinding\"]\n rolebinding.roleRef.name == role.metadata.name\n rolebinding.subjects[j].kind == \"ServiceAccount\"\n rolebinding.subjects[j].name == sa.metadata.name\n rolebinding.subjects[j].namespace == sa.metadata.namespace\n\n reviewPath := \"roleRef\"\n deletePath := sprintf(\"subjects[%d]\", [j])\n\n msga := {\n \"alertMessage\": sprintf(\"%v: %v in the following namespace: %v has administrative roles\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 9,\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n \"relatedObjects\": [{\n \"object\": sa,\n },\n {\n \"object\": rolebinding,\n \"reviewPaths\": [reviewPath],\n \"deletePaths\": [deletePath],\n },\n {\n \"object\": role,\n },]\n }\n}\n\n\nget_start_of_path(workload) = start_of_path {\n spec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n spec_template_spec_patterns[workload.kind]\n start_of_path := [\"spec\", \"template\", \"spec\"]\n}\n\nget_start_of_path(workload) = start_of_path {\n workload.kind == \"Pod\"\n start_of_path := [\"spec\"]\n}\n\nget_start_of_path(workload) = start_of_path {\n workload.kind == \"CronJob\"\n start_of_path := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\"]\n}\n\n\nis_sa_auto_mounted(wl_spec, sa) {\n # automountServiceAccountToken not in pod spec\n not wl_spec.automountServiceAccountToken == false\n not wl_spec.automountServiceAccountToken == true\n\n not sa.automountServiceAccountToken == false\n}\n\nis_sa_auto_mounted(wl_spec, sa) {\n # automountServiceAccountToken set to true in pod spec\n wl_spec.automountServiceAccountToken == true\n}\n\n\nis_same_sa(wl_spec, sa_metadata, wl_metadata) {\n wl_spec.serviceAccountName == sa_metadata.name\n is_same_namespace(sa_metadata , wl_metadata)\n}\n\nis_same_sa(wl_spec, sa_metadata, wl_metadata) 
{\n not wl_spec.serviceAccountName \n sa_metadata.name == \"default\"\n is_same_namespace(sa_metadata , wl_metadata)\n}\n\n# is_same_namespace supports cases where ns is not configured in the metadata\n# for yaml scans\nis_same_namespace(metadata1, metadata2) {\n metadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n not metadata1.namespace\n not metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n not metadata2.namespace\n metadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n not metadata1.namespace\n metadata2.namespace == \"default\"\n}\n\n\nis_administrative_role(role){\n administrative_resources := [\"*\"]\n administrative_verbs := [\"*\"]\n administrative_api_groups := [\"\", \"*\"]\n \n administrative_rule := [rule | rule = role.rules[i] ; \n rule.resources[a] in administrative_resources ; \n rule.verbs[b] in administrative_verbs ; \n rule.apiGroups[c] in administrative_api_groups]\n count(administrative_rule) > 0\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n wl := input[_]\n start_of_path := get_beginning_of_path(wl)\n\n msga := {\n \"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 9,\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n }\n}\n\n\nget_beginning_of_path(workload) = start_of_path {\n spec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n spec_template_spec_patterns[workload.kind]\n start_of_path := [\"spec\", \"template\", \"spec\"]\n}\n\nget_beginning_of_path(workload) = start_of_path {\n workload.kind == \"Pod\"\n start_of_path := [\"spec\"]\n}\n\nget_beginning_of_path(workload) = start_of_path {\n workload.kind == \"CronJob\"\n start_of_path := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\"]\n}", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "ServiceAccount" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + }, + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding", + "ClusterRoleBinding", + "Role", + "ClusterRole" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-that-the-api-server-anonymous-auth-argument-is-set-to-false", + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"anonymous requests is enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 
1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--anonymous-auth=true\")\n\tfixed = replace(cmd[i], \"--anonymous-auth=true\", \"--anonymous-auth=false\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--anonymous-auth\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--anonymous-auth=false\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Disable anonymous requests to the API server.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n \n```\n--anonymous-auth=false\n\n```\n\n#### Impact Statement\nAnonymous requests will be rejected.\n\n#### Default Value\nBy default, anonymous access is enabled.", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "container-image-repository-v1", + "attributes": { + "m$K8sThreatMatrix": "Collection::Images from private registry", + "useFromKubescapeVersion": "v2.9.0" + }, + "creationTime": "", + "rule": "package armo_builtins\n\nuntrustedImageRepo[msga] {\n\twl := input[_]\n\tcontainers_path := get_containers_path(wl)\n\tcontainers := object.get(wl, containers_path, [])\n\tcontainer := containers[i]\n\tname := image.parse_normalized_name(container.image)\n\tnot image_in_allowed_list(name)\n\tpath := sprintf(\"%s[%d].image\", [concat(\".\", containers_path), i])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [name, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# image_in_allowed_list - rule to check if an image complies with imageRepositoryAllowList.\nimage_in_allowed_list(image){\n\t# see default-config-inputs.json for list values\n\tallowedlist := data.postureControlInputs.imageRepositoryAllowList\n\tregistry := allowedlist[_]\n\tstartswith(image, registry)\n}\n\n# get_containers_path - get resource containers paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_containers_path(resource) := result {\n\tresource_kinds := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource 
containers paths for \"Pod\"\nget_containers_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"CronJob\"\nget_containers_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.imageRepositoryAllowList", + "name": "Allowed image repositories", + "description": "Kubescape checks that all container images are from repositories explicitly allowed in this list." + } + ], + "description": "Fails if image is not from allowed repository", + "remediation": "", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-endpointpublicaccess-is-disabled-on-private-nodes-eks", + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Check if EndpointPublicAccess in enabled on a private node for EKS. A private node is a node with no public ips access.\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\n\tconfig := cluster_config.data\n\n\tconfig.Cluster.ResourcesVpcConfig.EndpointPublicAccess == true\n\n\t# filter out private nodes\n\t\"0.0.0.0/0\" in config.Cluster.ResourcesVpcConfig.PublicAccessCidrs\n\n\tmsga := {\n\t\t\"alertMessage\": \"endpointPublicAccess is enabled on a private node\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true,endpointPublicAccess=false\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": [ + "EKS" + ] + }, + { + "guid": "", + "name": "host-pid-ipc-privileges", + "creationTime": "", + "rule": "package armo_builtins\n\n\n# Fails if pod has hostPID enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tis_host_pid(pod.spec)\n\tpath := \"spec.hostPID\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has hostPID enabled\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if pod has hostIPC enabled\ndeny[msga] {\n pod := 
input[_]\n pod.kind == \"Pod\"\n\tis_host_ipc(pod.spec)\n\tpath := \"spec.hostIPC\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has hostIPC enabled\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has hostPID enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_pid(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostPID\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod with hostPID enabled\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has hostIPC enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_ipc(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostIPC\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod with hostIPC enabled\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has hostPID enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_pid(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostPID\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod with hostPID enabled\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has hostIPC enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_ipc(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostIPC\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod with hostIPC enabled\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Check that hostPID and hostIPC are set to false. Default is false. Only in pod spec\n\n\nis_host_pid(podspec){\n podspec.hostPID == true\n}\n\nis_host_ipc(podspec){\n podspec.hostIPC == true\n}", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Containers should be as isolated as possible from the host machine. 
The hostPID and hostIPC fields in Kubernetes may excessively expose the host to potentially malicious actions.", + "remediation": "Make sure that the fields hostIPC and hostPID in the pod spec are not set to true (set to false or not present)", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "CVE-2022-23648", + "creationTime": "", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tnode := input[_]\n node.kind == \"Node\"\n \n startswith(node.status.nodeInfo.containerRuntimeVersion,\"containerd://\")\n containerd_version := substring(node.status.nodeInfo.containerRuntimeVersion,13,-1)\n containerd_version_arr := split(containerd_version, \".\")\n major_version := to_number(containerd_version_arr[0]) \n minor_version := to_number(containerd_version_arr[1]) \n subVersion := to_number(containerd_version_arr[2]) \n \n is_vulnerable_version(major_version,minor_version,subVersion)\n\n path := \"status.nodeInfo.containerRuntimeVersion\"\n\n \tmsga := {\n\t\t\t\"alertMessage\": \"You are vulnerable to CVE-2022-23648\",\n \t\t\"alertObject\": {\n \"k8SApiObjects\": [node]\n },\n\t\t\t\"reviewPaths\": [path],\n\t\t\t\"failedPaths\": [path],\n \"fixPaths\":[],\n\t}\n}\n\n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 0\n} \n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 1\n\tminor_version < 4\n}\n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 1\n\tminor_version == 4\n\tsubVersion < 12\n}\n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 1\n\tminor_version == 5\n\tsubVersion < 10\n}\t\n\nis_vulnerable_version(major_version, minor_version, subVersion) {\n\tmajor_version == 1\n\tminor_version == 6\n\tsubVersion < 1\n}\t\n\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Node" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "psp-enabled-cloud", + "creationTime": "", + "rule": "package armo_builtins\n\n\n# Check if PSP is enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n not config.pod_security_policy_config.enabled == true\n\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"pod security policy configuration is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"gcloud beta container clusters update --enable-pod-security-policy\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}", "resourceEnumerator": "", "ruleLanguage": "Rego", "match": [ @@ -18849,7 +32331,6 @@ } ], "ruleDependencies": [], - "configInputs": null, "controlConfigInputs": null, "description": "", "remediation": "", @@ -18861,12 +32342,600 @@ }, { "guid": "", - "name": "rbac-enabled-cloud", + "name": "ensure-that-the-admin.conf-file-ownership-is-set-to-root-root", "attributes": { - "armoBuiltin": true + "hostSensorRule": "true" }, "creationTime": "", - "rule": "package armo_builtins\n\n\ndeny[msga] {\n\tcluster_config := 
input[_]\n\tcluster_config.apiVersion == \"management.azure.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"aks\"\t\n config := cluster_config.data\n config.properties.enableRBAC == false\n\n\tmsga := {\n\t\t\"alertMessage\": \"rbac is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [\"data.properties.enableRBAC\"],\n\t\t\"fixCommand\": \"\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n\n", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"adminConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "controlConfigInputs": null, + "description": "Ensure that the `admin.conf` file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/admin.conf\n\n```", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-that-the-admission-control-plugin-NamespaceLifecycle-is-set", + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"admission control plugin AlwaysAdmit is enabled. 
This is equal to turning off all admission controllers\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--disable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\t\"NamespaceLifecycle\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := [val | val := flag.values[j]; val != \"NamespaceLifecycle\"]\n\tresult = get_retsult(fixed_values, i)\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) == 0\n\tresult = {\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) > 0\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": sprintf(\"--disable-admission-plugins=%v\", [concat(\",\", fixed_values)]),\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Reject creating objects in a namespace that is undergoing termination.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--disable-admission-plugins` parameter to ensure it does not include `NamespaceLifecycle`.\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `NamespaceLifecycle` is set.", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "resources-cpu-requests", + "creationTime": "", + "rule": "package armo_builtins\n\n# ==================================== no CPU requests =============================================\n# Fails if pod does not have container with CPU request\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tnot container.resources.requests.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.containers[%v].resources.requests.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have CPU-limit or request\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 
7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload does not have container with CPU requests\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n not container.resources.requests.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.requests.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob does not have container with CPU requests\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n not container.resources.requests.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.requests.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "CPU requests are not set.", + "remediation": "Ensure CPU requests are set.", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-that-the-api-server-secure-port-argument-is-not-set-to-0", + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tcontains(obj.spec.containers[0].command[i], \"--secure-port=0\")\n\tmsg := {\n\t\t\"alertMessage\": \"the secure port is disabled\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"failedPaths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": 
[obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Do not disable the secure port.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and either remove the `--secure-port` parameter or set it to a different (non-zero) desired port.\n\n#### Impact Statement\nYou need to set the API Server up with the right TLS certificates.\n\n#### Default Value\nBy default, port 6443 is used as the secure port.", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-that-the-api-server-authorization-mode-argument-includes-RBAC", + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"RBAC is not enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--authorization-mode=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tnot \"RBAC\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, [\"RBAC\"])\n\tfixed_flag = sprintf(\"%s=%s\", [\"--authorization-mode\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--authorization-mode\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--authorization-mode=RBAC\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", + "ruleLanguage": "Rego", + 
"match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Turn on Role Based Access Control.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to a value that includes `RBAC`, for example:\n\n \n```\n--authorization-mode=Node,RBAC\n\n```\n\n#### Impact Statement\nWhen RBAC is enabled you will need to ensure that appropriate RBAC settings (including Roles, RoleBindings and ClusterRoleBindings) are configured to allow appropriate access.\n\n#### Default Value\nBy default, `RBAC` authorization is not enabled.", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "service-in-default-namespace", + "creationTime": "", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Service" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "pods-in-default-namespace", + "creationTime": "", + "rule": "package armo_builtins\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\", \"Job\", \"CronJob\", \"Pod\"}\n\tspec_template_spec_patterns[wl.kind]\n\tresult := is_default_namespace(wl.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has pods running in the 'default' namespace\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"} \n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != 
\"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-that-the-controller-manager-root-ca-file-argument-is-set-as-appropriate", + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"the controller manager is not configured to inject the trusted ca.crt file into pods so that they can verify TLS connections to the API server\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--root-ca-file\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--root-ca-file=\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Allow pods to verify the API server's serving certificate before establishing connections.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and set the `--root-ca-file` parameter to the certificate bundle file`.\n\n \n```\n--root-ca-file=\n\n```\n\n#### Impact Statement\nYou need to setup and maintain root certificate authority file.\n\n#### Default Value\nBy default, `--root-ca-file` is not set.", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "audit-policy-content", + "attributes": { + "hostSensorRule": "true" + }, + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.if\nimport future.keywords.in\n\n# CIS 3.2.2 
https://workbench.cisecurity.org/sections/1126657/recommendations/1838583\n\ndeny[msga] {\n\tobj := input[_]\n\tis_api_server_info(obj)\n\tapi_server_info := obj.data.APIServerInfo\n\n\tnot contains(api_server_info.cmdLine, \"--audit-policy-file\")\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs are not enabled\",\n\t\t\"alertScore\": 5,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": api_server_info.cmdLine,\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_api_server_info(obj)\n\n\tapi_server_info := obj.data.APIServerInfo\n\n\tcontains(api_server_info.cmdLine, \"--audit-policy-file\")\n\n\trawPolicyFile := api_server_info.auditPolicyFile\n\tpolicyFile = yaml.unmarshal(base64.decode(rawPolicyFile.content))\n\n\tare_audit_file_rules_valid(policyFile.rules)\n\n\tfailed_obj := json.patch(policyFile, [{\n\t\t\"op\": \"add\",\n\t\t\"path\": \"metadata\",\n\t\t\"value\": {\"name\": sprintf(\"%s - Audit policy file\", [obj.metadata.name])},\n\t}])\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit policy rules do not cover key security areas or audit levels are invalid\",\n\t\t\"alertScore\": 5,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"externalObjects\": failed_obj},\n\t}\n}\n\n# Sample rules object\n# rules:\n# - level: RequestResponse\n# resources:\n# - group: \"\"\n# resources: [\"pods\"]\nare_audit_file_rules_valid(rules) if {\n\tseeked_resources_with_audit_level := {\n\t\t\"secrets\": {\n\t\t\t\"auditLevel\": \"Metadata\",\n\t\t\t\"mode\": \"equal\",\n\t\t},\n\t\t\"configmaps\": {\n\t\t\t\"auditLevel\": \"Metadata\",\n\t\t\t\"mode\": \"equal\",\n\t\t},\n\t\t\"tokenreviews\": {\n\t\t\t\"auditLevel\": \"Metadata\",\n\t\t\t\"mode\": \"equal\",\n\t\t},\n\t\t\"pods\": {\n\t\t\t\"auditLevel\": \"None\",\n\t\t\t\"mode\": \"not-equal\",\n\t\t},\n\t\t\"deployments\": {\n\t\t\t\"auditLevel\": \"None\",\n\t\t\t\"mode\": \"not-equal\",\n\t\t},\n\t\t\"pods/exec\": {\n\t\t\t\"auditLevel\": \"None\",\n\t\t\t\"mode\": \"not-equal\",\n\t\t},\n\t\t\"pods/portforward\": {\n\t\t\t\"auditLevel\": \"None\",\n\t\t\t\"mode\": \"not-equal\",\n\t\t},\n\t\t\"pods/proxy\": {\n\t\t\t\"auditLevel\": \"None\",\n\t\t\t\"mode\": \"not-equal\",\n\t\t},\n\t\t\"services/proxy\": {\n\t\t\t\"auditLevel\": \"None\",\n\t\t\t\"mode\": \"not-equal\",\n\t\t},\n\t}\n\n\t# Policy file must contain every resource\n\tsome resource, config in seeked_resources_with_audit_level\n\n\t# Every seeked resource mu have valid audit levels\n\tnot test_all_rules_against_one_seeked_resource(resource, config, rules)\n}\n\ntest_all_rules_against_one_seeked_resource(seeked_resource, value_of_seeked_resource, rules) if {\n\t# Filter down rules to only those concerning a seeked resource\n\trules_with_seeked_resource := [rule | rule := rules[_]; is_rule_concering_seeked_resource(rule, seeked_resource)]\n\trules_count := count(rules_with_seeked_resource)\n\n\t# Move forward only if there are some\n\trules_count > 0\n\n\t# Check if rules concerning seeked resource have valid audit levels\n\tvalid_rules := [rule | rule := rules_with_seeked_resource[_]; validate_rule_audit_level(rule, value_of_seeked_resource)]\n\tvalid_rules_count := count(valid_rules)\n\n\tvalid_rules_count > 0\n\n\t# Compare all rules for that specififc resource with those with valid rules, if amount of 
them differs,\n\t# it means that there are also some rules which invalid audit level\n\tvalid_rules_count == rules_count\n}\n\nis_rule_concering_seeked_resource(rule, seeked_resource) if {\n\tseeked_resource in rule.resources[_].resources\n}\n\n# Sample single rule:\n# \t level: RequestResponse\n# resources:\n# - group: \"\"\n# resources: [\"pods\"]\nvalidate_rule_audit_level(rule, value_of_seeked_resource) := result if {\n\tvalue_of_seeked_resource.mode == \"equal\"\n\tresult := rule.level == value_of_seeked_resource.auditLevel\n} else := result {\n\tresult := rule.level != value_of_seeked_resource.auditLevel\n}\n\nis_api_server_info(obj) if {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "APIServerInfo" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Kubernetes can audit the details of requests made to the API server. The `--audit-policy-file` flag must be set for this logging to be enabled.", + "remediation": "Create an audit policy file for your cluster.", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "configmap-in-default-namespace", + "creationTime": "", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ConfigMap" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "external-secret-storage", + "attributes": { + "hostSensorRule": "true" + }, + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\n# Encryption config is not using a recommended provider for KMS\ndeny[msg] {\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\tconfig_file := obj.data.APIServerInfo.encryptionProviderConfigFile\n\tconfig_file_content = decode_config_file(base64.decode(config_file.content))\n\n\tresources := config_file_content.resources\n\tevery resource in resources{\n\t\tnot has_recommended_provider(resource)\n\t}\n\n\tfix_paths := [\n\t{\"path\": sprintf(\"resources[%d].resources[%d]\", [count(resources), 0]),\t\"value\": 
\"secrets\"},\n\t{\"path\": sprintf(\"resources[%d].providers[%d].kms\", [count(resources), 0]),\t\"value\": \"YOUR_EXTERNAL_KMS\"},\n\t]\n\n\t# Add name to the failed object so that\n\t# it fit the format of the alert object\n\tfailed_obj := json.patch(config_file_content, [{\n\t\t\"op\": \"add\",\n\t\t\"path\": \"name\",\n\t\t\"value\": \"encryption-provider-config\",\n\t}])\n\n\tmsg := {\n\t\t\"alertMessage\": \"Encryption provider config is not using a recommended provider for KMS\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": failed_obj},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\ndecode_config_file(content) := parsed {\n\tparsed := yaml.unmarshal(content)\n} else := json.unmarshal(content)\n\nhas_recommended_provider(resource) {\n\trecommended_providers := {\"akeyless\", \"azurekmsprovider\", \"aws-encryption-provider\"}\n\tsome provider in resource.providers\n\trecommended_providers[provider.kms.name]\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": null, + "controlConfigInputs": null, + "description": "Consider the use of an external secrets storage and management system, instead of using Kubernetes Secrets directly, if you have more complex secret management needs. Ensure the solution requires authentication to access secrets, has auditing of access to and use of secrets, and encrypts secrets. Some solutions also make it easier to rotate secrets.", + "remediation": "Refer to the secrets management options offered by your cloud provider or a third-party secrets management solution.", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "rule-can-delete-k8s-events-v1", + "attributes": { + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133", + "microsoftK8sThreatMatrix": "Defense Evasion::Delete K8S events" + }, + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can delete events\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"delete\", \"deletecollection\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"events\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, 
k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can delete events\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "determines which users can delete events", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "rule-credentials-configmap", + "attributes": { + "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files" + }, + "creationTime": "", + "rule": "package armo_builtins\n\n# fails if config map has keys with suspicious name\ndeny[msga] {\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n # see default-config-inputs.json for list values\n sensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n key_name := sensitive_key_names[_]\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n contains(lower(map_key), lower(key_name))\n\n # check that value or key weren't allowed by user\n not is_allowed_value(map_secret)\n not is_allowed_key_name(map_key)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - not base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n regex.match(value , map_secret)\n\n # check that value or key weren't allowed by user\n not is_allowed_value(map_secret)\n not is_allowed_key_name(map_key)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n 
sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n decoded_secret := base64.decode(map_secret)\n\n regex.match(value , decoded_secret)\n\n # check that value or key weren't allowed by user\n not is_allowed_value(map_secret)\n not is_allowed_key_name(map_key)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"deletePaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n regex.match(allow_val , value)\n}\n\nis_allowed_key_name(key_name) {\n allow_key := data.postureControlInputs.sensitiveKeyNamesAllowed[_]\n contains(lower(key_name), lower(allow_key))\n}", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "*" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "ConfigMap" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.sensitiveValues", + "name": "Sensitive Values", + "description": "Strings that identify a value that Kubescape believes should be stored in a Secret, and not in a ConfigMap or an environment variable." + }, + { + "path": "settings.postureControlInputs.sensitiveValuesAllowed", + "name": "Allowed Values", + "description": "Reduce false positives with known values." + }, + { + "path": "settings.postureControlInputs.sensitiveKeyNames", + "name": "Sensitive Keys", + "description": "Key names that identify a potential value that should be stored in a Secret, and not in a ConfigMap or an environment variable." + }, + { + "path": "settings.postureControlInputs.sensitiveKeyNamesAllowed", + "name": "Allowed Keys", + "description": "Reduce false positives with known key names." 
+ } + ], + "description": "fails if ConfigMaps have sensitive information in configuration", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-clusters-are-created-with-private-endpoint-enabled-and-public-access-disabled", + "attributes": { + "hostSensorRule": "false", + "imageScanRelated": false + }, + "creationTime": "", + "rule": "\npackage armo_builtins\n\n# fails in case privateEndpoint.id parameter is not found on ClusterDescribe\ndeny[msga] {\n\tobj := input[_]\n\tobj.apiVersion == \"management.azure.com/v1\"\n\tobj.kind == \"ClusterDescribe\"\n\tobj.metadata.provider == \"aks\"\n\tconfig = obj.data\n\n\tnot isPrivateEndpointEnabled(config)\n\n\tmsga := {\n \t\"alertMessage\": \"Private endpoint not enabled.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[],\n \"fixCommand\": \"\",\n \t\"alertObject\": {\n\t\t\t\"externalObjects\": obj\n }\n }\n}\n\nisPrivateEndpointEnabled(config) {\n\tconfig.properties.privateEndpoint.id\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": null, + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "ruleDependencies": null, + "controlConfigInputs": null, + "description": "Disable access to the Kubernetes API from outside the node network if it is not required.", + "remediation": "To use a private endpoint, create a new private endpoint in your virtual network then create a link between your virtual network and a new private DNS zone", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "psp-enabled-native", + "attributes": { + "resourcesAggregator": "apiserver-pod", + "useFromKubescapeVersion": "v1.0.133" + }, + "creationTime": "", + "rule": "package armo_builtins\n\n\n# Check if psp is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command[j]\n contains(cmd, \"--enable-admission-plugins=\")\n output := split(cmd, \"=\")\n not contains(output[1], \"PodSecurityPolicy\")\n\tpath := sprintf(\"spec.containers[0].command[%v]\", [format_int(j, 10)])\t\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"PodSecurityPolicy is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\t\t\n\t\t}\n\t}\n}", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "CVE-2022-47633", + "creationTime": "", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"kyverno:\")\n\tis_vulnerable_image(image)\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-47633\",\n\t\t\t\"reviewPaths\": [path],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": 
[deployment]\n },\n\t\t}\n}\n\nis_vulnerable_image(image) {\n\tversion := split(image, \":v\")[1]\n\tversionTriplet := split(version, \".\")\n\tcount(versionTriplet) == 3\n\tmajor_version := to_number(versionTriplet[0])\n\tminorVersion := to_number(versionTriplet[1])\n\tsubVersion := to_number(versionTriplet[2]) \n\tisVulnerableVersion(major_version,minorVersion,subVersion)\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 1\n\tminorVersion == 8\n\t3 <= subVersion\n\tsubVersion < 5\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"kyverno:\")\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-47633\",\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "a", + "remediation": "a", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "role-in-default-namespace", + "creationTime": "", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "rule-hostile-multitenant-workloads", + "attributes": { + "actionRequired": "manual review" + }, + "creationTime": "", + "rule": "package armo_builtins\n\ndeny[msga] {\n\n\tmsga := {\n\t\t\"alertMessage\": \"Please check it manually.\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [],\n \"alertObject\": {}\n }\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "ruleDependencies": [], + "controlConfigInputs": [], + "description": "Currently, Kubernetes environments aren't safe for 
hostile multi-tenant usage. Extra security features, like Pod Security Policies or Kubernetes RBAC for nodes, efficiently block exploits. For true security when running hostile multi-tenant workloads, only trust a hypervisor. The security domain for Kubernetes becomes the entire cluster, not an individual node.", + "remediation": "Use physically isolated clusters", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "rbac-enabled-cloud", + "creationTime": "", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"management.azure.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n\tcluster_config.metadata.provider == \"aks\"\n\tconfig := cluster_config.data\n\tnot config.properties.enableRBAC == true\n\n\tmsga := {\n\t\t\"alertMessage\": \"rbac is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [\"data.properties.enableRBAC\"],\n\t\t\"failedPaths\": [\"data.properties.enableRBAC\"],\n\t\t\"fixCommand\": \"\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n \t\t\"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n\n", "resourceEnumerator": "", "ruleLanguage": "Rego", "match": [ @@ -18879,8 +32948,28 @@ "dynamicMatch": [ { "apiGroups": [ - "management.azure.com", - "container.googleapis.com", + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + }, + { + "apiGroups": [ + "container.googleapis.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + }, + { + "apiGroups": [ "eks.amazonaws.com" ], "apiVersions": [ @@ -18892,7 +32981,6 @@ } ], "ruleDependencies": [], - "configInputs": null, "controlConfigInputs": null, "description": "", "remediation": "", @@ -18905,63 +32993,401 @@ }, { "guid": "", - "name": "sidecar-injection", - "attributes": { - "m$K8sThreatMatrix": "Execution::Sidecar injection", - "armoBuiltin": true - }, + "name": "workload-with-cluster-takeover-roles", "creationTime": "", - "rule": "package armo_builtins\n\n# =========== looks for containers with lifecycle.type \"Sidecar\" ===========\n#pods\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tcontainer.lifecycle.type == \"Sidecar\"\n\tpath := sprintf(\"spec.containers[%v].lifecycle.type\", [format_int(i, 10)])\n msga := {\n\t\t\"alertMessage\": sprintf(\"The pod: %v has a sidecar: %v\", [pod.metadata.name, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n#handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tcontainer.lifecycle.type == \"Sidecar\"\n\tpath := sprintf(\"spec.template.spec.containers[%v].lifecycle.type\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has a sidecar: %v\", [wl.kind, wl.metadata.name, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n#handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := 
wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tcontainer.lifecycle.type == \"Sidecar\"\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].lifecycle.type\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v has a sidecar: %v\", [wl.metadata.name, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n\n# =========== looks for containers \"sidecar\" in name ===========\n#pods\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n contains(lower(container.name), \"sidecar\")\n\tpath := sprintf(\"spec.containers[%v].name\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"The pod: %v has a sidecar: %v\", [pod.metadata.name, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n#handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tcontains(lower(container.name), \"sidecar\")\n\tpath := sprintf(\"spec.template.spec.containers[%v].name\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has a sidecar: %v\", [wl.kind, wl.metadata.name, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n#handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tcontains(lower(container.name), \"sidecar\")\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].name\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v has a sidecar: %v\", [wl.metadata.name, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}", - "resourceEnumerator": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n wl := input[_]\n start_of_path := get_start_of_path(wl)\n wl_spec := object.get(wl, start_of_path, [])\n\n # get service account wl is using\n sa := input[_]\n sa.kind == \"ServiceAccount\"\n is_same_sa(wl_spec, sa.metadata, wl.metadata)\n\n # check service account token is mounted\n is_sa_auto_mounted(wl_spec, sa)\n\n # check if sa has cluster takeover roles\n role := input[_]\n role.kind in [\"Role\", \"ClusterRole\"]\n is_takeover_role(role)\n\n rolebinding := input[_]\n\trolebinding.kind in [\"RoleBinding\", \"ClusterRoleBinding\"] \n rolebinding.roleRef.name == role.metadata.name\n rolebinding.roleRef.kind == role.kind\n rolebinding.subjects[j].kind == \"ServiceAccount\"\n rolebinding.subjects[j].name == sa.metadata.name\n rolebinding.subjects[j].namespace == sa.metadata.namespace\n\n deletePath := sprintf(\"subjects[%d]\", [j])\n\n msga := {\n \"alertMessage\": sprintf(\"%v: %v in the following namespace: %v has cluster takeover roles\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 9,\n \"alertObject\": {\n 
\"k8sApiObjects\": [wl]\n },\n \"relatedObjects\": [{\n \"object\": sa,\n },\n {\n \"object\": rolebinding,\n \"deletePaths\": [deletePath],\n },\n {\n \"object\": role,\n },]\n }\n}\n\n\nget_start_of_path(workload) = start_of_path {\n spec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n spec_template_spec_patterns[workload.kind]\n start_of_path := [\"spec\", \"template\", \"spec\"]\n}\n\nget_start_of_path(workload) = start_of_path {\n workload.kind == \"Pod\"\n start_of_path := [\"spec\"]\n}\n\nget_start_of_path(workload) = start_of_path {\n workload.kind == \"CronJob\"\n start_of_path := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\"]\n}\n\n\nis_sa_auto_mounted(wl_spec, sa) {\n # automountServiceAccountToken not in pod spec\n not wl_spec.automountServiceAccountToken == false\n not wl_spec.automountServiceAccountToken == true\n\n not sa.automountServiceAccountToken == false\n}\n\nis_sa_auto_mounted(wl_spec, sa) {\n # automountServiceAccountToken set to true in pod spec\n wl_spec.automountServiceAccountToken == true\n}\n\n\nis_same_sa(wl_spec, sa_metadata, wl_metadata) {\n wl_spec.serviceAccountName == sa_metadata.name\n is_same_namespace(sa_metadata , wl_metadata)\n}\n\nis_same_sa(wl_spec, sa_metadata, wl_metadata) {\n not wl_spec.serviceAccountName \n sa_metadata.name == \"default\"\n is_same_namespace(sa_metadata , wl_metadata)\n}\n\n# is_same_namespace supports cases where ns is not configured in the metadata\n# for yaml scans\nis_same_namespace(metadata1, metadata2) {\n metadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n not metadata1.namespace\n not metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n not metadata2.namespace\n metadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n not metadata1.namespace\n metadata2.namespace == \"default\"\n}\n\n\n# look for rule allowing create/update workloads\nis_takeover_role(role){\n takeover_resources := [\"pods\", \"*\"]\n takeover_verbs := [\"create\", \"update\", \"patch\", \"*\"]\n takeover_api_groups := [\"\", \"*\"]\n \n takeover_rule := [rule | rule = role.rules[i] ; \n rule.resources[a] in takeover_resources ; \n rule.verbs[b] in takeover_verbs ; \n rule.apiGroups[c] in takeover_api_groups]\n count(takeover_rule) > 0\n}\n\n# look for rule allowing secret access\nis_takeover_role(role){\n rule := role.rules[i]\n takeover_resources := [\"secrets\", \"*\"]\n takeover_verbs := [\"get\", \"list\", \"watch\", \"*\"]\n takeover_api_groups := [\"\", \"*\"]\n \n takeover_rule := [rule | rule = role.rules[i] ; \n rule.resources[a] in takeover_resources ; \n rule.verbs[b] in takeover_verbs ; \n rule.apiGroups[c] in takeover_api_groups]\n count(takeover_rule) > 0\n}", + "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n wl := input[_]\n start_of_path := get_beginning_of_path(wl)\n\n msga := {\n \"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 9,\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n }\n}\n\n\nget_beginning_of_path(workload) = start_of_path {\n spec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n spec_template_spec_patterns[workload.kind]\n start_of_path := [\"spec\", \"template\", \"spec\"]\n}\n\nget_beginning_of_path(workload) = start_of_path {\n 
workload.kind == \"Pod\"\n start_of_path := [\"spec\"]\n}\n\nget_beginning_of_path(workload) = start_of_path {\n workload.kind == \"CronJob\"\n start_of_path := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\"]\n}", "ruleLanguage": "Rego", "match": [ { "apiGroups": [ - "*" + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "ServiceAccount" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" ], "apiVersions": [ "*" ], "resources": [ - "Pod", + "Job", + "CronJob" + ] + }, + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding", + "ClusterRoleBinding", + "Role", + "ClusterRole" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "pod-security-admission-baseline-applied-1", + "creationTime": "", + "rule": "package armo_builtins\nimport future.keywords.in\n\n# Fails if namespace does not have relevant labels and no 3rd party security admission exists\ndeny[msga] {\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n not has_external_policy_control(input)\n\tfix_path = {\"path\": \"metadata.labels[pod-security.kubernetes.io/enforce]\", \"value\": \"baseline\"}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", [namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fix_path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", + "resourceEnumerator": "package armo_builtins\nimport future.keywords.in\n\n# if no 3rd party security admission exists - Fails if namespace does not have relevant labels\ndeny[msga] {\n not has_external_policy_control(input)\n\tnamespace := input[_]\n\tnamespace.kind == \"Namespace\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Namespace: %v does not enable baseline pod security admission\", 
[namespace.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\n# Fails if at least 1 namespace does not have relevant labels and 3rd party namespaced security admission EXISTS\n# returns webhook configuration for user to review\ndeny[msga] {\n\tsome namespace in input\n\tnamespace.kind == \"Namespace\"\n\tnot baseline_admission_policy_enabled(namespace)\n\n admissionwebhook := input[_]\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Review webhook: %v ensure that it defines the required policy\", [admissionwebhook.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}\n\n\nbaseline_admission_policy_enabled(namespace){\n\tsome key, value in namespace.metadata.labels \n key == \"pod-security.kubernetes.io/enforce\"\n\tvalue in [\"baseline\", \"restricted\"]\n}\n\nhas_external_policy_control(inp){\n some admissionwebhook in inp\n admissionwebhook.kind in [\"ValidatingWebhookConfiguration\", \"MutatingWebhookConfiguration\"]\n admissionwebhook.webhooks[i].rules[j].scope != \"Cluster\"\n}", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "admissionregistration.k8s.io" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "ValidatingWebhookConfiguration" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Checks that every namespace enabled baseline pod security admission, or if there are external policies applied for namespaced resources (validating/mutating webhooks) - returns them to be reviewed", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "set-fsgroupchangepolicy-value", + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.if\n\n### POD ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n pod := input[_]\n pod.kind = \"Pod\"\n \n # check securityContext has fsGroupChangePolicy set\n not fsGroupChangePolicySetProperly(pod.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n### WORKLOAD ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n wl := input[_]\n manifest_kind := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n manifest_kind[wl.kind]\n \n # check securityContext has fsGroupChangePolicy set\n not fsGroupChangePolicySetProperly(wl.spec.template.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 
7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"spec.template.spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n### CRONJOB ###\n\n# Fails if securityContext.fsGroupChangePolicy does not have an allowed value\ndeny[msga] {\n # verify the object kind\n cj := input[_]\n cj.kind == \"CronJob\"\n\n # check securityContext has fsGroupChangePolicy set\n not fsGroupChangePolicySetProperly(cj.spec.jobTemplate.spec.template.spec.securityContext)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v does not set 'securityContext.fsGroupChangePolicy' with allowed value\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [{\"path\": \"spec.jobTemplate.spec.template.spec.securityContext.fsGroupChangePolicy\", \"value\": \"Always\"}],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [cj]\n\t\t}\n }\n}\n\n# fsGroupChangePolicySetProperly checks if applied value is set as appropriate [Always|OnRootMismatch]\nfsGroupChangePolicySetProperly(securityContext) := true if {\n regex.match(securityContext.fsGroupChangePolicy, \"Always|OnRootMismatch\")\n} else := false\n\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ "Deployment", "ReplicaSet", "DaemonSet", - "StatefulSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ "Job", "CronJob" ] } ], "ruleDependencies": [], - "configInputs": null, "controlConfigInputs": null, - "description": "fails if container lifecycle field is set to sidecar, or if container name includes 'sidecar'.", - "remediation": "", - "ruleQuery": "", + "description": "Fails if securityContext.fsGroup is not set.", + "remediation": "Set securityContext.fsGroup value", + "ruleQuery": "armo_builtins", "relevantCloudProviders": null }, { "guid": "", - "name": "rule-can-delete-create-service-v1", - "attributes": { - "m$K8sThreatMatrix": "Discovery::Access the K8s API server", - "armoBuiltin": true, - "resourcesAggregator": "subject-role-rolebinding", - "useFromKubescapeVersion": "v1.0.133" - }, + "name": "psp-deny-root-container", "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user has create/delete access to services\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"delete\", \"deletecollection\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"services\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, 
verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can create/delete services\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if all PSPs permit containers to run as the root user\n\t# if even one PSP restricts containers to run as the root user, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tnot deny_run_as_root(psp.spec.runAsUser)\n\t}\n\n\t# return al the PSPs that permit containers to run as the root user\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tnot deny_run_as_root(psp.spec.runAsUser)\n\n\tpath := \"spec.runAsUser.rule\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' permits containers to run as the root user.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n\ndeny_run_as_root(runAsUser){\n\trunAsUser.rule == \"MustRunAsNonRoot\"\n}\n\ndeny_run_as_root(runAsUser){\n\trunAsUser.rule == \"MustRunAs\"\n\trunAsUser.ranges[_].min > 0\n}", "resourceEnumerator": "", "ruleLanguage": "Rego", "match": [ { "apiGroups": [ - "*" + "policy" + ], + "apiVersions": [ + "v1beta1" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "horizontalpodautoscaler-in-default-namespace", + "creationTime": "", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] 
{\n\tpaths[1] != \"\"\n} else = []\n\n\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "autoscaling" + ], + "apiVersions": [ + "v2" + ], + "resources": [ + "HorizontalPodAutoscaler" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "etcd-unique-ca", + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 2.7 https://workbench.cisecurity.org/sections/1126654/recommendations/1838578\n\ndeny[msga] {\n\tetcdPod := [pod | pod := input[_]; filter_input(pod, \"etcd\")]\n\tetcdCheckResult := get_argument_value_with_path(etcdPod[0].spec.containers[0].command, \"--trusted-ca-file\")\n\n\tapiserverPod := [pod | pod := input[_]; filter_input(pod, \"kube-apiserver\")]\n\tapiserverCheckResult := get_argument_value_with_path(apiserverPod[0].spec.containers[0].command, \"--client-ca-file\")\n\n\tetcdCheckResult.value == apiserverCheckResult.value\n\tmsga := {\n\t\t\"alertMessage\": \"Cert file is the same both for the api server and the etcd\",\n\t\t\"alertScore\": 8,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [etcdCheckResult.path, apiserverCheckResult.path],\n\t\t\"failedPaths\": [etcdCheckResult.path, apiserverCheckResult.path],\n\t\t\"fixPaths\": [etcdCheckResult.fix_paths, apiserverCheckResult.fix_paths],\n\t\t\"alertObject\": {\"k8sApiObjects\": [etcdPod[0], apiserverPod[0]]},\n\t}\n}\n\ncommand_api_server_or_etcd(cmd) {\n\tendswith(cmd, \"kube-apiserver\")\n}\n\ncommand_api_server_or_etcd(cmd) {\n\tendswith(cmd, \"etcd\")\n}\n\nfilter_input(obj, res) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], res)\n}\n\nget_argument_value(command, argument) = value {\n\targs := split(command, \"=\")\n\tsome i, sprintf(\"%v\", [argument]) in args\n\tvalue := args[i + 1]\n}\n\nget_argument_value_with_path(cmd, argument) = result {\n\tcontains(cmd[i], argument)\n\targumentValue := get_argument_value(cmd[i], argument)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"path\": path,\n\t\t\"value\": argumentValue,\n\t\t\"fix_paths\": {\"path\": path, \"value\": \"\"},\n\t}\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Use a different certificate authority for etcd from the one used for Kubernetes.", + "remediation": "Follow the etcd documentation and create a dedicated certificate authority setup for the etcd service.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameter.\n\n \n```\n--trusted-ca-file=\n\n```\n\n#### Impact Statement\nAdditional management of the certificates and keys for the dedicated certificate authority will be required.\n\n#### Default Value\nBy default, no etcd certificate is created and used.", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "rule-identify-old-k8s-registry", + "attributes": { + "m$K8sThreatMatrix": "Initial Access::Compromised images in registry" + }, + "creationTime": "", + "rule": "package armo_builtins\n\ndeprecatedK8sRepo[msga] {\n\tpod := 
input[_]\n\tpod.metadata.namespace == \"kube-system\"\n\tk := pod.kind\n\tk == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tpath := sprintf(\"spec.containers[%v].image\", [format_int(i, 10)])\n\timage := container.image\n deprecated_registry(image)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from the deprecated k8s.gcr.io\", [image, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\ndeprecatedK8sRepo[msga] {\n\twl := input[_]\n\twl.metadata.namespace == \"kube-system\"\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\timage := container.image\n deprecated_registry(image)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from the deprecated k8s.gcr.io\", [image, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\ndeprecatedK8sRepo[msga] {\n\twl := input[_]\n\twl.metadata.namespace == \"kube-system\"\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\timage := container.image\n deprecated_registry(image)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from the deprecated k8s.gcr.io\", [image, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\ndeprecated_registry(image){\n\tstartswith(image, \"k8s.gcr.io/\")\n}\n", + "resourceEnumerator": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\t# find aggregated API APIServices\n\tobj = input[_]\n\tobj.metadata.namespace == \"kube-system\"\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\n", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" ], "apiVersions": [ "*" ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Identifying if pod container images are from deprecated K8s registry", + "remediation": "Use images new registry", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-that-the-api-server-audit-log-maxsize-argument-is-set-to-100-or-as-appropriate", + "attributes": { + "hostSensorRule": "true", + "useFromKubescapeVersion": "v2.0.159" + }, + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": result.alert,\n\t\t\"alertScore\": 
2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--audit-log-maxsize\")\n\tresult = {\n\t\t\"alert\": \"Please validate that audit-log-maxsize has an appropriate value\",\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--audit-log-maxsize\")\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [count(cmd)])\n\tresult = {\n\t\t\"alert\": \"Audit log max size not set\",\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--audit-log-maxsize=YOUR_VALUE\"}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Rotate log files on reaching 100 MB or as appropriate.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxsize` parameter to an appropriate size in MB. 
For example, to set it as 100 MB:\n\n \n```\n--audit-log-maxsize=100\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, auditing is not enabled.", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "CVE-2022-3172", + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\t# find aggregated API APIServices\n\tobj = input[_]\n\tobj.apiVersion == \"apiregistration.k8s.io/v1\"\n\tobj.kind == \"APIService\"\n\tapi_service := obj.spec.service\n\n\t# check API server version vulnerability\n\tapi_infos = [api_info |\n\t\tapi_info := input[i]\n\t\tapi_info.apiVersion == \"apiserverinfo.kubescape.cloud/v1beta0\"\n\t\tapi_info.kind == \"APIServerInfo\"\n\t\tapi_info.metadata.name == \"version\"\n\t]\n\n\tversion = get_api_server_version(api_infos)\n\tis_api_server_version_affected(version)\n\n\t# Find the service that exposes the extended API\n\tservices = [obj |\n\t\tobj := input[j]\n\t\tobj.apiVersion == \"v1\"\n\t\tobj.kind == \"Service\"\n\t\tobj.metadata.name == api_service.name\n\t]\n\n\tcount(services) == 1\n\tservice = services[0]\n\n\tmsg := {\n\t\t\"alertMessage\": \"the following pair of APIService and Service may redirect client traffic to any URL\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj, service]},\n\t}\n}\n\n# current kubescpae version (v2.0.171) still not support this resource\nget_api_server_version(api_infos) = version {\n\tcount(api_infos) == 1\n\tv = replace(split(api_infos[0].data.gitVersion, \"-\")[0], \"v\", \"\")\n\tsemver.is_valid(v)\n\tversion = v\n}\n\nget_api_server_version(api_infos) = version {\n\tcount(api_infos) == 1\n\tv = replace(split(api_infos[0].data.gitVersion, \"-\")[0], \"v\", \"\")\n\tnot semver.is_valid(v)\n\tversion := \"\"\n}\n\nget_api_server_version(api_infos) = version {\n\tcount(api_infos) != 1\n\tversion = \"\"\n}\n\nis_api_server_version_affected(version) {\n\tversion == \"\"\n}\n\nis_api_server_version_affected(version) {\n\tsemver.compare(version, \"1.25.0\") == 0\n}\n\nis_api_server_version_affected(version) {\n\tsemver.compare(version, \"1.24.0\") >= 0\n\tsemver.compare(version, \"1.24.4\") <= 0\n}\n\nis_api_server_version_affected(version) {\n\tsemver.compare(version, \"1.23.0\") >= 0\n\tsemver.compare(version, \"1.23.10\") <= 0\n}\n\nis_api_server_version_affected(version) {\n\tsemver.compare(version, \"1.22.0\") >= 0\n\tsemver.compare(version, \"1.22.13\") <= 0\n}\n\nis_api_server_version_affected(version) {\n\tsemver.compare(version, \"1.21.14\") <= 0\n}\n", + "resourceEnumerator": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\t# find aggregated API APIServices\n\tobj = input[_]\n\tobj.apiVersion == \"apiregistration.k8s.io/v1\"\n\tobj.kind == \"APIService\"\n\tapi_service := obj.spec.service\n\n\t# check API server version vulnerability\n\tapi_infos = [api_info |\n\t\tapi_info := input[i]\n\t\tapi_info.apiVersion == \"apiserverinfo.kubescape.cloud/v1beta0\"\n\t\tapi_info.kind == \"APIServerInfo\"\n\t\tapi_info.metadata.name == \"version\"\n\t]\n\n\t# Find the service that exposes the extended API\n\tservices = [ obj |\n\t\tobj := input[j]\n\t\tobj.apiVersion == \"v1\"\n\t\tobj.kind == \"Service\"\n\t\tobj.metadata.name == api_service.name\n\t]\n\n\n\tmsg := {\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\n", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + 
"apiregistration.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "APIService" + ] + }, + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Service" + ] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "apiserverinfo.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "APIServerInfo" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "List aggregated API server APIServices if kube-api-server version is vulnerable to CVE-2022-3172", + "remediation": "Upgrade the Kubernetes version to one of the fixed versions. The following versions are fixed: `v1.25.1`, `v1.24.5`, `v1.23.11`, `v1.22.14`", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "rule-can-create-service-account-token", + "attributes": { + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user has create access to service account tokens\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"serviceaccounts/token\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can create service account tokens\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], "resources": [ "Role", "ClusterRole", @@ -18971,320 +33397,79 @@ } ], "ruleDependencies": [], - "configInputs": null, "controlConfigInputs": null, - "description": "determines which users have create/delete permissions on services", + "description": "determines 
which users can create service account tokens", "remediation": "", "ruleQuery": "armo_builtins", "relevantCloudProviders": null }, - { - "guid": "", - "name": "if-the-kubelet-config.yaml-configuration-file-is-being-used-validate-permissions-set-to-600-or-more-restrictive", - "attributes": { - "armoBuiltin": true, - "hostSensorRule": "true" - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport data.cautils as cautils\nimport future.keywords.in\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test \n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "configInputs": null, - "controlConfigInputs": null, - "description": "Ensure that if the kubelet refers to a configuration file with the `--config` argument, that file has permissions of 600 or more restrictive.", - "remediation": "Run the following command (using the config file location identied in the Audit step)\n\n \n```\nchmod 600 /var/lib/kubelet/config.yaml\n\n```", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "containers-mounting-docker-socket", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volume := pod.spec.volumes[i]\n\thost_path := volume.hostPath\n is_docker_mounting(host_path)\n\tpath := sprintf(\"spec.volumes[%v].hostPath.path\", [format_int(i, 10)])\n msga := {\n\t\t\"alertMessage\": sprintf(\"volume: %v in pod: %v has mounting to Docker internals.\", [volume.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertScore\": 5,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\t\n}\n\n\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volume := wl.spec.template.spec.volumes[i]\n\thost_path := volume.hostPath\n is_docker_mounting(host_path)\n\tpath := sprintf(\"spec.template.spec.volumes[%v].hostPath.path\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"volume: %v in %v: %v has mounting to Docker internals.\", [ volume.name, wl.kind, 
wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertScore\": 5,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tvolume = wl.spec.jobTemplate.spec.template.spec.volumes[i]\n host_path := volume.hostPath\n is_docker_mounting(host_path)\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.volumes[%v].hostPath.path\", [format_int(i, 10)])\n msga := {\n\t\t\"alertMessage\": sprintf(\"volume: %v in %v: %v has mounting to Docker internals.\", [ volume.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertScore\": 5,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\nis_docker_mounting(host_path) {\n\thost_path.path == \"/var/run/docker.sock\"\n}\n\nis_docker_mounting(host_path) {\n\thost_path.path == \"/var/run/docker\"\n}\n", - "resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "Check hostpath. If the path is set to /var/run/docker.sock or /var/lib/docker , the container has access to Docker internals - fail.", - "remediation": "", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "ensure-that-the-admission-control-plugin-AlwaysAdmit-is-not-set", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"admission control plugin AlwaysAdmit is enabled. 
This is equal to turning off all admission controllers\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--enable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\t\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\t\"AlwaysAdmit\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := [val | val := flag.values[j]; val != \"AlwaysAdmit\"]\n\tresult = get_retsult(fixed_values, i)\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) == 0\n\tresult = {\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\nget_retsult(fixed_values, i) = result {\n\tcount(fixed_values) > 0\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": sprintf(\"--enable-admission-plugins=%v\", [concat(\",\", fixed_values)]),\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "Do not allow all requests.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and either remove the `--enable-admission-plugins` parameter, or set it to a value that does not include `AlwaysAdmit`.\n\n#### Impact Statement\nOnly requests explicitly allowed by the admissions control plugins would be served.\n\n#### Default Value\n`AlwaysAdmit` is not in the list of default admission plugins.", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "ensure-that-the-api-server-authorization-mode-argument-includes-RBAC", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"RBAC is not enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == 
\"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--authorization-mode=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tnot \"RBAC\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, [\"RBAC\"])\n\tfixed_flag = sprintf(\"%s=%s\", [\"--authorization-mode\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--authorization-mode\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--authorization-mode=RBAC\",\n\t\t}],\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "Turn on Role Based Access Control.", - "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--authorization-mode` parameter to a value that includes `RBAC`, for example:\n\n \n```\n--authorization-mode=Node,RBAC\n\n```\n\n#### Impact Statement\nWhen RBAC is enabled you will need to ensure that appropriate RBAC settings (including Roles, RoleBindings and ClusterRoleBindings) are configured to allow appropriate access.\n\n#### Default Value\nBy default, `RBAC` authorization is not enabled.", - "ruleQuery": "", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "Symlink-Exchange-Can-Allow-Host-Filesystem-Access", - "attributes": { - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n final_path := is_sub_path_container(container, i, beggining_of_path)\n\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. 
You have a Node with a vulnerable version and the following container : %v in pod : %v with subPath/subPathExpr\", [container.name, pod.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [pod]},\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n final_path := is_sub_path_container(container, i, beggining_of_path)\n \n\tmsga := {\n\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. You have a Node with a vulnerable version and the following container : %v in %v : %v with subPath/subPathExpr\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n final_path := is_sub_path_container(container, i, beggining_of_path)\n \n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. You have a Node with a vulnerable version and the following container : %v in %v : %v with subPath/subPathExpr\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\n\nis_sub_path_container(container, i, beggining_of_path) = path {\n\tpath = [sprintf(\"%vcontainers[%v].volumeMounts[%v].subPath\" ,[beggining_of_path, format_int(i, 10), format_int(j, 10)]) | volume_mount = container.volumeMounts[j]; volume_mount.subPath]\n\tcount(path) > 0\n}\n\nis_vulnerable_version(version) {\n version <= \"v1.19.14\"\n}\n\nis_vulnerable_version(version){\n version >= \"v1.22.0\"\n version <= \"v1.22.1\"\n}\n\n\nis_vulnerable_version(version){\n version >= \"v1.21.0\"\n version <= \"v1.21.4\"\n}\n\n\nis_vulnerable_version(version){\n version >= \"v1.20.0\"\n version <= \"v1.20.9\"\n}\n\nis_vulnerable_version(version){\n\tversion == \"v1.20.10\"\n}\n\n\n", - "resourceEnumerator": "package armo_builtins\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n pod := input[_]\n pod.kind == \"Pod\"\n\n\tmsga := {\n\t\t\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [pod]},\n\t\t\t\"failedPaths\": [\"\"],\n\t}\n}\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n \n\tmsga := {\n\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": [\"\"],\n\t}\n}\n\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := 
nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n wl := input[_]\n\twl.kind == \"CronJob\"\n \n\tmsga := {\n\t\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": [\"\"],\n\t}\n}\n\n\nisVulnerableVersion(version) {\n version <= \"v1.19.14\"\n}\n\nisVulnerableVersion(version){\n version >= \"v1.22.0\"\n version <= \"v1.22.1\"\n}\n\n\nisVulnerableVersion(version){\n version >= \"v1.21.0\"\n version <= \"v1.21.4\"\n}\n\n\nisVulnerableVersion(version){\n version >= \"v1.20.0\"\n version <= \"v1.20.9\"\n}\n\nisVulnerableVersion(version){\n\tversion == \"v1.20.10\"\n}", - "ruleLanguage": "Rego", - "match": [ - { - "apiGroups": [ - "" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Pod", - "Node" - ] - }, - { - "apiGroups": [ - "apps" - ], - "apiVersions": [ - "v1" - ], - "resources": [ - "Deployment", - "ReplicaSet", - "DaemonSet", - "StatefulSet" - ] - }, - { - "apiGroups": [ - "batch" - ], - "apiVersions": [ - "*" - ], - "resources": [ - "Job", - "CronJob" - ] - } - ], - "ruleDependencies": [], - "configInputs": null, - "controlConfigInputs": null, - "description": "A user may be able to create a container with subPath volume mounts to access files & directories outside of the volume, including on the host filesystem. This was affected at the following versions: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. ", - "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, and remove any existing Pods making use of the feature.", - "ruleQuery": "armo_builtins", - "relevantCloudProviders": null - }, - { - "guid": "", - "name": "ensure-that-the-kubelet-service-file-ownership-is-set-to-root-root", - "attributes": { - "hostSensorRule": "true", - "armoBuiltin": true - }, - "creationTime": "", - "rule": "package armo_builtins\n\nimport data.cautils as cautils\nimport future.keywords.in\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"serviceFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]), \"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n", - 
"resourceEnumerator": "", - "ruleLanguage": "Rego", - "match": [], - "dynamicMatch": [ - { - "apiGroups": [ - "hostdata.kubescape.cloud" - ], - "apiVersions": [ - "v1beta0" - ], - "resources": [ - "KubeletInfo" - ] - } - ], - "ruleDependencies": [ - { - "packageName": "cautils" - } - ], - "configInputs": null, - "controlConfigInputs": null, - "description": "Ensure that the `kubelet` service file ownership is set to `root:root`.", - "remediation": "Run the below command (based on the file location on your system) on the each worker node. For example,\n\n \n```\nchown root:root /etc/systemd/system/kubelet.service.d/kubeadm.conf\n\n```", - "ruleQuery": "", - "relevantCloudProviders": null - }, { "guid": "", "name": "resources-memory-limit-and-request", - "attributes": { - "armoBuiltin": true - }, "creationTime": "", - "rule": "package armo_builtins\n\n# Fails if pod does not have container with memory-limit or request\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tnot request_or_limit_memory(container)\n\tfixPaths := [\n\t\t{\"path\": sprintf(\"spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"},\n\t\t{\"path\": sprintf(\"spec.containers[%v].resources.requests.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"},\n\t]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have memory-limit or request\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n# Fails if workload does not have container with memory-limit or request\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tnot request_or_limit_memory(container)\n\tfixPaths := [\n\t\t{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"},\n\t\t{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.requests.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"},\n\t]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if cronjob does not have container with memory-limit or request\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tnot request_or_limit_memory(container)\n\tfixPaths := [\n\t\t{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"},\n\t\t{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.requests.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"},\n\t]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": 
[wl]},\n\t}\n}\n\nrequest_or_limit_memory(container) {\n\tcontainer.resources.limits.memory\n\tcontainer.resources.requests.memory\n}\n\n######################################################################################################\n\n# Fails if pod exceeds memory-limit or request\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\trequest_or_limit_memory(container)\n\tresource := is_min_max_exceeded_memory(container)\n\tresource != \"\"\n\n\tfailed_paths := sprintf(\"spec.containers[%v].%v\", [format_int(i, 10), resource])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v exceeds memory-limit or request\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n# Fails if workload exceeds memory-limit or request\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\n\trequest_or_limit_memory(container)\n\tresource := is_min_max_exceeded_memory(container)\n\tresource != \"\"\n\n\tfailed_paths := sprintf(\"spec.template.spec.containers[%v].%v\", [format_int(i, 10), resource])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v exceeds memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if cronjob exceeds memory-limit or request\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\trequest_or_limit_memory(container)\n\tresource := is_min_max_exceeded_memory(container)\n\tresource != \"\"\n\n\tfailed_paths := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].%v\", [format_int(i, 10), resource])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v exceeds memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n######################################################################################################\n\nis_min_max_exceeded_memory(container) = \"resources.limits.memory\" {\n\tmemory_limit := container.resources.limits.memory\n\tis_limit_exceeded_memory(memory_limit)\n} else = \"resouces.requests.memory\" {\n\tmemory_req := container.resources.requests.memory\n\tis_req_exceeded_memory(memory_req)\n} else = \"\" {\n\ttrue\n}\n\nis_limit_exceeded_memory(memory_limit) {\n\tis_min_limit_exceeded_memory(memory_limit)\n}\n\nis_limit_exceeded_memory(memory_limit) {\n\tis_max_limit_exceeded_memory(memory_limit)\n}\n\nis_req_exceeded_memory(memory_req) {\n\tis_max_request_exceeded_memory(memory_req)\n}\n\nis_req_exceeded_memory(memory_req) {\n\tis_min_request_exceeded_memory(memory_req)\n}\n\n# helpers\n\nis_max_limit_exceeded_memory(memory_limit) {\n\tmemory_limit_max :=data.postureControlInputs.memory_limit_max[_]\n\tcompare_max(memory_limit_max, memory_limit)\n}\n\nis_min_limit_exceeded_memory(memory_limit) {\n\tmemory_limit_min := 
data.postureControlInputs.memory_limit_min[_]\n\tcompare_min(memory_limit_min, memory_limit)\n}\n\nis_max_request_exceeded_memory(memory_req) {\n\tmemory_req_max := data.postureControlInputs.memory_request_max[_]\n\tcompare_max(memory_req_max, memory_req)\n}\n\nis_min_request_exceeded_memory(memory_req) {\n\tmemory_req_min := data.postureControlInputs.memory_request_min[_]\n\tcompare_min(memory_req_min, memory_req)\n}\n\n##############\n# helpers\n\n# Compare according to unit - max\ncompare_max(max, given) {\n\tendswith(max, \"Mi\")\n\tendswith(given, \"Mi\")\n\tsplit_max := split(max, \"Mi\")[0]\n\tsplit_given := split(given, \"Mi\")[0]\n\tsplit_given > split_max\n}\n\ncompare_max(max, given) {\n\tendswith(max, \"M\")\n\tendswith(given, \"M\")\n\tsplit_max := split(max, \"M\")[0]\n\tsplit_given := split(given, \"M\")[0]\n\tsplit_given > split_max\n}\n\ncompare_max(max, given) {\n\tendswith(max, \"m\")\n\tendswith(given, \"m\")\n\tsplit_max := split(max, \"m\")[0]\n\tsplit_given := split(given, \"m\")[0]\n\tsplit_given > split_max\n}\n\ncompare_max(max, given) {\n\tnot is_special_measure(max)\n\tnot is_special_measure(given)\n\tgiven > max\n}\n\n\n\n################\n# Compare according to unit - min\ncompare_min(min, given) {\n\tendswith(min, \"Mi\")\n\tendswith(given, \"Mi\")\n\tsplit_min := split(min, \"Mi\")[0]\n\tsplit_given := split(given, \"Mi\")[0]\n\tsplit_given < split_min\n}\n\ncompare_min(min, given) {\n\tendswith(min, \"M\")\n\tendswith(given, \"M\")\n\tsplit_min := split(min, \"M\")[0]\n\tsplit_given := split(given, \"M\")[0]\n\tsplit_given < split_min\n}\n\ncompare_min(min, given) {\n\tendswith(min, \"m\")\n\tendswith(given, \"m\")\n\tsplit_min := split(min, \"m\")[0]\n\tsplit_given := split(given, \"m\")[0]\n\tsplit_given < split_min\n}\n\ncompare_min(min, given) {\n\tnot is_special_measure(min)\n\tnot is_special_measure(given)\n\tgiven < min\n}\n\n\n# Check that is same unit\nis_special_measure(unit) {\n\tendswith(unit, \"m\")\n}\n\nis_special_measure(unit) {\n\tendswith(unit, \"M\")\n}\n\nis_special_measure(unit) {\n\tendswith(unit, \"Mi\")\n}\n", + "rule": "package armo_builtins\n\n# ================================== no memory limits ==================================\n# Fails if pod does not have container with memory-limits\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tnot container.resources.limits.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have memory-limit or request\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n# Fails if workload does not have container with memory-limits\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tnot container.resources.limits.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if cronjob does not have container with memory-limits\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tnot container.resources.limits.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.limits.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# ================================== no memory requests ==================================\n# Fails if pod does not have container with memory requests\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tnot container.resources.requests.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.containers[%v].resources.requests.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have memory-limit or request\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n# Fails if workload does not have container with memory requests\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tnot container.resources.requests.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.requests.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if cronjob does not have container with memory requests\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tnot container.resources.requests.memory\n\tfixPaths := [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.requests.memory\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have memory-limit or request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n\n# ============================================= memory requests exceed min/max =============================================\n\n# Fails if pod exceeds memory request\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tmemory_req := container.resources.requests.memory\n\tis_req_exceeded_memory(memory_req)\n\tpath := 
\"resources.requests.memory\"\n\n\tfailed_paths := sprintf(\"spec.containers[%v].%v\", [format_int(i, 10), path])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v exceeds memory request\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [failed_paths],\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n# Fails if workload exceeds memory request\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\n\tmemory_req := container.resources.requests.memory\n\tis_req_exceeded_memory(memory_req)\n\tpath := \"resources.requests.memory\"\n\n\tfailed_paths := sprintf(\"spec.template.spec.containers[%v].%v\", [format_int(i, 10), path])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v exceeds memory request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [failed_paths],\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if cronjob exceeds memory request\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\tmemory_req := container.resources.requests.memory\n\tis_req_exceeded_memory(memory_req)\n\tpath := \"resources.requests.memory\" \n\n\tfailed_paths := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].%v\", [format_int(i, 10), path])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v exceeds memory request\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [failed_paths],\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# ============================================= memory limits exceed min/max =============================================\n\n# Fails if pod exceeds memory-limit \ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tmemory_limit := container.resources.limits.memory\n\tis_limit_exceeded_memory(memory_limit)\n\tpath := \"resources.limits.memory\"\n\n\tfailed_paths := sprintf(\"spec.containers[%v].%v\", [format_int(i, 10), path])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v exceeds memory-limit \", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [failed_paths],\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\n# Fails if workload exceeds memory-limit \ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\n\tmemory_limit := container.resources.limits.memory\n\tis_limit_exceeded_memory(memory_limit)\n\tpath := \"resources.limits.memory\"\n\n\tfailed_paths := sprintf(\"spec.template.spec.containers[%v].%v\", [format_int(i, 10), path])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v exceeds memory-limit\", [container.name, wl.kind, 
wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [failed_paths],\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n# Fails if cronjob exceeds memory-limit \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\tmemory_limit := container.resources.limits.memory\n\tis_limit_exceeded_memory(memory_limit)\n\tpath := \"resources.limits.memory\"\n\n\tfailed_paths := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].%v\", [format_int(i, 10), path])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v exceeds memory-limit\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [failed_paths],\n\t\t\"failedPaths\": [failed_paths],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\n######################################################################################################\n\n\nis_limit_exceeded_memory(memory_limit) {\n\tis_min_limit_exceeded_memory(memory_limit)\n}\n\nis_limit_exceeded_memory(memory_limit) {\n\tis_max_limit_exceeded_memory(memory_limit)\n}\n\nis_req_exceeded_memory(memory_req) {\n\tis_max_request_exceeded_memory(memory_req)\n}\n\nis_req_exceeded_memory(memory_req) {\n\tis_min_request_exceeded_memory(memory_req)\n}\n\n# helpers\n\nis_max_limit_exceeded_memory(memory_limit) {\n\tmemory_limit_max := data.postureControlInputs.memory_limit_max[_]\n\tcompare_max(memory_limit_max, memory_limit)\n}\n\nis_min_limit_exceeded_memory(memory_limit) {\n\tmemory_limit_min := data.postureControlInputs.memory_limit_min[_]\n\tcompare_min(memory_limit_min, memory_limit)\n}\n\nis_max_request_exceeded_memory(memory_req) {\n\tmemory_req_max := data.postureControlInputs.memory_request_max[_]\n\tcompare_max(memory_req_max, memory_req)\n}\n\nis_min_request_exceeded_memory(memory_req) {\n\tmemory_req_min := data.postureControlInputs.memory_request_min[_]\n\tcompare_min(memory_req_min, memory_req)\n}\n\n\n##############\n# helpers\n\n# Compare according to unit - max\ncompare_max(max, given) {\n\tendswith(max, \"Mi\")\n\tendswith(given, \"Mi\")\n\tsplit_max := split(max, \"Mi\")[0]\n\tsplit_given := split(given, \"Mi\")[0]\n to_number(split_given) > to_number(split_max)\n}\n\ncompare_max(max, given) {\n\tendswith(max, \"M\")\n\tendswith(given, \"M\")\n\tsplit_max := split(max, \"M\")[0]\n\tsplit_given := split(given, \"M\")[0]\n to_number(split_given) > to_number(split_max)\n}\n\ncompare_max(max, given) {\n\tendswith(max, \"m\")\n\tendswith(given, \"m\")\n\tsplit_max := split(max, \"m\")[0]\n\tsplit_given := split(given, \"m\")[0]\n to_number(split_given) > to_number(split_max)\n}\n\ncompare_max(max, given) {\n\tnot is_special_measure(max)\n\tnot is_special_measure(given)\n\tgiven > max\n}\n\n################\n# Compare according to unit - min\ncompare_min(min, given) {\n\tendswith(min, \"Mi\")\n\tendswith(given, \"Mi\")\n\tsplit_min := split(min, \"Mi\")[0]\n\tsplit_given := split(given, \"Mi\")[0]\n\tto_number(split_given) < to_number(split_min)\n}\n\ncompare_min(min, given) {\n\tendswith(min, \"M\")\n\tendswith(given, \"M\")\n\tsplit_min := split(min, \"M\")[0]\n\tsplit_given := split(given, \"M\")[0]\n\tto_number(split_given) < to_number(split_min)\n\n}\n\ncompare_min(min, given) {\n\tendswith(min, \"m\")\n\tendswith(given, \"m\")\n\tsplit_min := 
split(min, \"m\")[0]\n\tsplit_given := split(given, \"m\")[0]\n\tto_number(split_given) < to_number(split_min)\n\n}\n\ncompare_min(min, given) {\n\tnot is_special_measure(min)\n\tnot is_special_measure(given)\n\tto_number(given) < to_number(min)\n\n}\n\n# Check that is same unit\nis_special_measure(unit) {\n\tendswith(unit, \"m\")\n}\n\nis_special_measure(unit) {\n\tendswith(unit, \"M\")\n}\n\nis_special_measure(unit) {\n\tendswith(unit, \"Mi\")\n}\n", "resourceEnumerator": "", "ruleLanguage": "Rego", "match": [ { "apiGroups": [ - "*" + "" ], "apiVersions": [ - "*" + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" ], "resources": [ "Deployment", "ReplicaSet", "DaemonSet", - "StatefulSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ "Job", - "Pod", "CronJob" ] } ], "ruleDependencies": [], - "configInputs": [ - "settings.postureControlInputs.memory_request_max", - "settings.postureControlInputs.memory_request_min", - "settings.postureControlInputs.memory_limit_max", - "settings.postureControlInputs.memory_limit_min" - ], "controlConfigInputs": [ { "path": "settings.postureControlInputs.memory_request_max", "name": "memory_request_max", - "description": "Ensure memory max requests are set" + "description": "Ensure a memory resource request is set and is under this defined maximum value." }, { "path": "settings.postureControlInputs.memory_request_min", "name": "memory_request_min", - "description": "Ensure memory min requests are set" + "description": "Ensure a memory resource request is set and is above this defined minimum value." }, { "path": "settings.postureControlInputs.memory_limit_max", "name": "memory_limit_max", - "description": "Ensure memory max limits are set" + "description": "Ensure a memory resource limit is set and is under this defined maximum value." }, { "path": "settings.postureControlInputs.memory_limit_min", "name": "memory_limit_min", - "description": "Ensure memory min limits are set" + "description": "Ensure a memory resource limit is set and is under this defined maximum value." 
} ], "description": "memory limits and requests are not set.", @@ -19294,13 +33479,10 @@ }, { "guid": "", - "name": "etcd-peer-tls-enabled", - "attributes": { - "armoBuiltin": true - }, + "name": "ensure-that-the-controller-manager-bind-address-argument-is-set-to-127.0.0.1", "creationTime": "", - "rule": "package armo_builtins\n\n# Check if peer tls is enabled in etcd cluster\ndeny[msga] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Etcd encryption for peer connection is not enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\twanted = [\n\t\t[\"--peer-cert-file\", \"\"],\n\t\t[\"--peer-key-file\", \"\"],\n\t]\n\n\tfix_paths = [{\n\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd) + i]),\n\t\t\"value\": sprintf(\"%s=%s\", wanted[i]),\n\t} |\n\t\tnot contains(full_cmd, wanted[i][0])\n\t]\n\n\tcount(fix_paths) > 0\n\n\tresult = {\n\t\t\"failed_paths\": [\"spec.containers[0].command\"],\n\t\t\"fix_paths\": fix_paths,\n\t}\n}\n", - "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_etcd_pod(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_etcd_pod(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tcount(obj.spec.containers) == 1\n\tendswith(split(obj.spec.containers[0].command[0], \" \")[0], \"etcd\")\n}\n", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n\n\ndeny[msg] {\n\tobj = input[_]\n\tis_controller_manager(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\n\tmsg := {\n\t\t\"alertMessage\": \"the Controller Manager API service is not bound to a localhost interface only\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n\nget_flag_value(cmd) = value {\n\tre := \" ?--bind-address=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, 1)\n\tcount(matchs) == 1\n\tvalue =matchs[0][1]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tval = get_flag_value(cmd[i])\n\tval != \"127.0.0.1\"\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--bind-address=127.0.0.1\"}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--bind-address\")\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--bind-address=127.0.0.1\"}],\n\t}\n}", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = 
input[_]\n\tis_controller_manager(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_controller_manager(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-controller-manager\")\n}\n", "ruleLanguage": "Rego", "match": [ { @@ -19316,21 +33498,335 @@ } ], "ruleDependencies": [], - "configInputs": null, "controlConfigInputs": null, - "description": "etcd should be configured to make use of TLS encryption for peer connections.", - "remediation": "Follow the etcd service documentation and configure peer TLS encryption as appropriate for your etcd cluster.\n\n Then, edit the etcd pod specification file `/etc/kubernetes/manifests/etcd.yaml` on the master node and set the below parameters.\n\n \n```\n--peer-client-file=\n--peer-key-file=\n\n```\n\n#### Impact Statement\netcd cluster peers would need to set up TLS for their communication.\n\n#### Default Value\n**Note:** This recommendation is applicable only for etcd clusters. If you are using only one etcd server in your environment then this recommendation is not applicable.\n\n By default, peer communication over TLS is not configured.", + "description": "Do not bind the Controller Manager service to non-loopback insecure addresses.", + "remediation": "Edit the Controller Manager pod specification file `/etc/kubernetes/manifests/kube-controller-manager.yaml` on the Control Plane node and ensure the correct value for the `--bind-address` parameter\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, the `--bind-address` parameter is set to 0.0.0.0", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "validate-kubelet-tls-configuration-updated", + "attributes": { + "hostSensorRule": "true" + }, + "creationTime": "", + "rule": "package armo_builtins\n\n# CIS 4.2.10 https://workbench.cisecurity.org/sections/1126668/recommendations/1838657\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--config\")\n\n\tres := not_set_arguments(command)\n\tcount(res) != 0\n\n\tfailed_args := extract_failed_object(res, \"cliArg\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v should be set\", [failed_args]),\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--config\")\n\n\tres := not_set_arguments(command)\n\tcount(res) == 2\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\n\tpropsResult := not_set_props(yamlConfig)\n\tcount(propsResult) != 0\n\n\tfailed_props := extract_failed_object(propsResult, \"configProp\")\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v must be set\", [failed_props]),\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": 
decodedConfigContent}},\n\t\t}},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--config\")\n\n\t# only 1 argument is set via cli\n\tres := not_set_arguments(command)\n\tcount(res) == 1\n\n\t# get yaml config equivalent\n\tnot_set_prop := res[0].configProp\n\n\tfailed_args := extract_failed_object(res, \"cliArg\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\n\tnot yamlConfig[not_set_prop]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v should be set\", [failed_args]),\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\nextract_failed_object(resultList, keyField) = failed_objects {\n\tfailed_objects_array = [mapped |\n\t\tsingleResult := resultList[_]\n\t\tmapped := singleResult[keyField]\n\t]\n\n\tfailed_objects = concat(\", \", failed_objects_array)\n}\n\nnot_set_arguments(cmd) = result {\n\twanted = [\n\t\t[\"--tls-cert-file\", \"tlsCertFile\"],\n\t\t[\"--tls-private-key-file\", \"tlsPrivateKeyFile\"],\n\t]\n\n\tresult = [{\n\t\t\"cliArg\": wanted[i][0],\n\t\t\"configProp\": wanted[i][1],\n\t} |\n\t\tnot contains(cmd, wanted[i][0])\n\t]\n}\n\nnot_set_props(yamlConfig) = result {\n\twanted = [\n\t\t[\"tlsCertFile\", \"--tls-cert-file\"],\n\t\t[\"tlsPrivateKeyFile\", \"--tls-private-key-file\"],\n\t]\n\n\tresult = [{\n\t\t\"cliArg\": wanted[i][1],\n\t\t\"configProp\": wanted[i][0],\n\t} |\n\t\tnot yamlConfig[wanted[i][0]]\n\t]\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletConfiguration", + "KubeletCommandLine" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate.", + "remediation": "Start the kubelet with the --tls-cert-file and --tls-private-key-file flags, providing the X509 certificate and its matching private key or if using config file set tlsCertFile and tlsPrivateKeyFile properties to the locations of the corresponding files.", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "workload-mounted-pvc", + "creationTime": "", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tresource := input[_]\n\tvolumes_path := get_volumes_path(resource)\n\tvolumes := object.get(resource, volumes_path, [])\n\tvolume := volumes[i]\n\tvolume.persistentVolumeClaim\n\n\tPVC := input[_]\n\tPVC.kind == \"PersistentVolumeClaim\"\n\tPVC.metadata.name == volume.persistentVolumeClaim.claimName\n\tis_same_namespace(PVC.metadata, resource.metadata)\n\n\tcontainers_path := get_containers_path(resource)\n\tcontainers := object.get(resource, containers_path, [])\n\tcontainer := containers[j]\n\tcontainer.volumeMounts\n\n \t# check if volume is mounted\n\tcontainer.volumeMounts[k].name == volume.name\n\n\tfailedPaths := 
sprintf(\"%s[%d].volumeMounts[%d]\", [concat(\".\", containers_path), j, k])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has mounted PVC\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [failedPaths],\n\t\t\"failedPaths\": [failedPaths],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t},\n \"relatedObjects\": [{\n \"object\": PVC\n }]\n\t}\n}\n\n\n# get_containers_path - get resource containers paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_containers_path(resource) := result {\n\tresource_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"Pod\"\nget_containers_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"CronJob\"\nget_containers_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_volume_path - get resource volumes paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_volumes_path(resource) := result {\n\tresource_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"Pod\"\nget_volumes_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"CronJob\"\nget_volumes_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "ConfigMap" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": null, + "controlConfigInputs": null, + "description": "fails if workload mounts PVC", + "remediation": "", "ruleQuery": "armo_builtins", "relevantCloudProviders": null }, { "guid": "", - "name": "user-id-less-than-thousands", + "name": "CVE-2022-0185", + "creationTime": "", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tnode := input[_]\n node.kind == \"Node\"\n\n parsed_kernel_version_arr := parse_kernel_version_to_array(node.status.nodeInfo.kernelVersion)\n is_azure := parsed_kernel_version_arr[4] == \"azure\"\n\n is_vulnerable_kernel_version(parsed_kernel_version_arr, is_azure)\n\n node.status.nodeInfo.operatingSystem == \"linux\"\n path := 
\"status.nodeInfo.kernelVersion\"\n\n linux_kernel_vars := [linux_kernel_var | linux_kernel_var = input[_]; linux_kernel_var.kind == \"LinuxKernelVariables\"]\n linux_kernel_vars_for_node := [linux_kernel_var | linux_kernel_var = linux_kernel_vars[_]; linux_kernel_var.metadata.name == node.metadata.name]\n data_userns_clones := [linux_kernel_var | linux_kernel_var = linux_kernel_vars_for_node[_].data[_]; is_unprivileged_userns_clone_enabled(linux_kernel_var)]\n count(data_userns_clones) > 0\n\n external_vector := {\n \"name\": node.metadata.name,\n\t\t\"namespace\": \"\",\n\t\t\"kind\": node.kind,\n\t\t\"relatedObjects\": linux_kernel_vars_for_node,\n \"kernelVersion\": node.status.nodeInfo.kernelVersion\n }\n\n\n \tmsga := {\n\t\t\t\"alertMessage\": \"You are vulnerable to CVE-2022-0185\",\n \t\t\"alertObject\": {\n \"externalObjects\": external_vector\n },\n \"reviewPaths\": [\"kernelVersion\"],\n\t\t\t\"failedPaths\": [\"kernelVersion\"],\n \"fixPaths\":[],\n\t}\n}\n\n# General Kernel versions are between 5.1.1 and 5.16.2\nis_vulnerable_kernel_version(parsed_kernel_version_arr, is_azure) {\n is_azure == false\n parsed_kernel_version_arr[0] == 5\n parsed_kernel_version_arr[1] >= 1\n parsed_kernel_version_arr[1] <= 16\n parsed_kernel_version_arr[2] < 2\n}\n\n# Azure kernel version with is 5.4.0-1067-azure\nis_vulnerable_kernel_version(parsed_kernel_version_arr, is_azure) {\n is_azure == true\n parsed_kernel_version_arr[0] == 5\n parsed_kernel_version_arr[1] >= 1\n parsed_kernel_version_arr[1] <= 4\n parsed_kernel_version_arr[2] == 0\n parsed_kernel_version_arr[3] < 1067\n}\n\nis_unprivileged_userns_clone_enabled(linux_kernel_var) {\n\tlinux_kernel_var.key == \"unprivileged_userns_clone\"\n linux_kernel_var.value == \"1\\n\"\n}\n\nparse_kernel_version_to_array(kernel_version_str) = output {\n\tversion_triplet := regex.find_n(`(\\d+\\.\\d+\\.\\d+)`, kernel_version_str,-1)\n version_triplet_array := split(version_triplet[0],\".\")\n\n build_vendor := regex.find_n(`-(\\d+)-(\\w+)`, kernel_version_str,-1)\n build_vendor_array := split(build_vendor[0],\"-\")\n\n output := [to_number(version_triplet_array[0]),to_number(version_triplet_array[1]),to_number(version_triplet_array[2]),to_number(build_vendor_array[1]),build_vendor_array[2]]\n}", + "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tnode := input[_]\n node.kind == \"Node\"\n \n node.status.nodeInfo.operatingSystem == \"linux\"\n\n linux_kernel_vars := [linux_kernel_var | linux_kernel_var = input[_]; linux_kernel_var.kind == \"LinuxKernelVariables\"]\n linux_kernel_vars_for_node := [linux_kernel_var | linux_kernel_var = linux_kernel_vars[_]; linux_kernel_var.metadata.name == node.metadata.name]\n\n external_vector := {\n \"name\": node.metadata.name,\n\t\t\"namespace\": \"\",\n\t\t\"kind\": node.kind,\n\t\t\"relatedObjects\": linux_kernel_vars_for_node,\n \"kernelVersion\": node.status.nodeInfo.kernelVersion\n }\n\n \tmsga := {\n\t\t\t\"alertMessage\": \"You are vulnerable to CVE-2022-0185\",\n \t\t\"alertObject\": {\n \"externalObjects\": external_vector\n },\n\t\t\t\"failedPaths\": [],\n \"fixPaths\":[],\n\t}\n}\n", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Node" + ] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "LinuxKernelVariables" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "", + "remediation": "", + 
"ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "rule-can-portforward-v1", "attributes": { - "armoBuiltin": true + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" }, "creationTime": "", - "rule": "package armo_builtins\n\n# TODO - FIX FAILED PATHS IF THE CONTROL WILL BE ACTIVE AGAIN\n\n# Fails if pod has container configured to run with id less than 1000\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n result := is_root_container(container, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v runs with id less than 1000\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [result],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if pod has container configured to run with id less than 1000\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"\"\n result := is_root_pod(pod, container, beggining_of_path)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v runs with id less than 1000\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [result],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n\n# Fails if workload has container configured to run with id less than 1000\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n result := is_root_container(container, beggining_of_path, i)\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v runs with id less than 1000\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [result],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if workload has container configured to run with id less than 1000\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.\"\n result := is_root_pod(wl.spec.template, container, beggining_of_path)\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v runs with id less than 1000\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [result],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has a container configured to run with id less than 1000\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_root_container(container, beggining_of_path, i)\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v runs with id less than 1000\", [container.name, wl.kind, 
wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [result],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n# Fails if workload has container configured to run with id less than 1000\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.\"\n result := is_root_pod(wl.spec.jobTemplate.spec.template, container, beggining_of_path)\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v runs with id less than 1000\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [result],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\nis_root_pod(pod, container, beggining_of_path) = path {\n\tnot container.securityContext.runAsGroup\n not container.securityContext.runAsUser\n pod.spec.securityContext.runAsUser < 1000\n\tnot pod.spec.securityContext.runAsGroup\n\tpath = sprintf(\"%vspec.securityContext.runAsUser\", [beggining_of_path])\n}\n\nis_root_pod(pod, container, beggining_of_path) = path {\n\tnot container.securityContext.runAsUser\n not container.securityContext.runAsGroup\n pod.spec.securityContext.runAsGroup < 1000\n\tnot pod.spec.securityContext.runAsUser\n\tpath = sprintf(\"%vspec.securityContext.runAsGroup\", [beggining_of_path])\n}\n\nis_root_pod(pod, container, beggining_of_path) = path {\n pod.spec.securityContext.runAsGroup > 1000\n\t pod.spec.securityContext.runAsUser < 1000\n\tpath = sprintf(\"%vspec.securityContext.runAsUser\", [beggining_of_path])\n}\n\nis_root_pod(pod, container, beggining_of_path) = path {\n pod.spec.securityContext.runAsGroup < 1000\n\tpod.spec.securityContext.runAsUser > 1000\n\tpath = sprintf(\"%vspec.securityContext.runAsGroup\", [beggining_of_path])\n}\n\nis_root_pod(pod, container, beggining_of_path) = path {\n pod.spec.securityContext.runAsGroup < 1000\n\t pod.spec.securityContext.runAsUser < 1000\n\tpath = sprintf(\"%vspec.securityContext\", [beggining_of_path])\n}\n\n\nis_root_container(container, beggining_of_path, i) = path {\n container.securityContext.runAsUser < 1000\n\tnot container.securityContext.runAsGroup\n\tpath = sprintf(\"%vcontainers[%v].securityContext.runAsUser\", [beggining_of_path, format_int(i, 10)])\n}\n\nis_root_container(container, beggining_of_path, i) = path {\n container.securityContext.runAsGroup < 1000\n\tnot container.securityContext.runAsUser\n\tpath = sprintf(\"%vcontainers[%v].securityContext.runAsGroup\", [beggining_of_path, format_int(i, 10)])\n}", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := 
[\"pods/portforward\", \"pods/*\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can do port forwarding\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "persistentvolumeclaim-in-default-namespace", + "creationTime": "", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "PersistentVolumeClaim" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "instance-metadata-api-access", + "attributes": { + "m$K8sThreatMatrix": "Credential Access::Instance Metadata API", + "hostSensorRule": "true" + }, + "creationTime": "", + "rule": "package armo_builtins\n\n\ndeny[msg] {\n\tobj = input[_]\n\tis_cloud_provider_info(obj)\n\n\tobj.data.providerMetaDataAPIAccess == 
true\n\n\n\tmsg := {\n\t\t\"alertMessage\": sprintf(\"Node '%s' has access to Instance Metadata Services of cloud provider.\", [obj.metadata.name]),\n\t\t\"alert\": true,\n\t\t\"alertScore\": 1,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": obj\n\t\t},\n\t\t\"packagename\": \"armo_builtins\"\n\t}\n\n}\n\n\n\nis_cloud_provider_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"cloudProviderInfo\"\n}", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "cloudProviderInfo" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Checks if there is access from the nodes to cloud providers instance metadata services", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "rule-excessive-delete-rights-v1", + "attributes": { + "m$K8sThreatMatrix": "Impact::Data Destruction", + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can can delete important resources\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"delete\", \"deletecollection\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\", \"apps\", \"batch\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"secrets\", \"pods\", \"services\", \"deployments\", \"replicasets\", \"daemonsets\", \"statefulsets\", \"jobs\", \"cronjobs\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can delete important resources\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + 
"match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "fails if user can delete important resources", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "k8s-audit-logs-enabled-native", + "attributes": { + "resourcesAggregator": "apiserver-pod", + "useFromKubescapeVersion": "v1.0.133" + }, + "creationTime": "", + "rule": "package armo_builtins\n\nimport data.cautils\n\n# Check if audit logs is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command\n\taudit_policy := [ command |command := cmd[_] ; contains(command, \"--audit-policy-file=\")]\n count(audit_policy) < 1\n\tpath := \"spec.containers[0].command\"\n\n\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\n\t\t}\n\t}\n}", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-that-the-api-server-service-account-extend-token-expiration-is-set-to-false", + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"service account token expiration extension is enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--service-account-extend-token-expiration=true\")\n\tfixed = replace(cmd[i], \"--service-account-extend-token-expiration=true\", \"--service-account-extend-token-expiration=false\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": fixed}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--service-account-extend-token-expiration\")\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": \"--service-account-extend-token-expiration=false\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == 
\"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Ensure that the --service-account-extend-token-expiration parameter is set to false to use shorter token lifetimes.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the below parameter.\n\n```\n--service-account-extend-token-expiration=false\n```\n\n#### Impact Statement\nService account tokens will expire according to the default shorter lifetime. Workloads using long-lived tokens may need to be updated to handle token refresh.\n\n#### Default Value\nBy default, `--service-account-extend-token-expiration` is set to `true`.", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "set-seccomp-profile", + "creationTime": "", + "rule": "package armo_builtins\n\n# Fails if pod does not define seccompProfile\ndeny[msga] {\n wl := input[_]\n wl.kind == \"Pod\"\n spec := wl.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if workload does not define seccompProfile\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n spec := wl.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if CronJob does not define seccompProfile\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n spec := wl.spec.jobTemplate.spec.template.spec\n\tpath_to_search := [\"securityContext\", \"seccompProfile\"]\n\tseccompProfile_not_defined(spec, path_to_search)\n\n\tpath_to_containers := [\"spec\", \"jobTemplate\", 
\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n seccompProfile_not_defined(container, path_to_search)\n\n\tfix_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]) \n\tfixPaths := [{\"path\": fix_path, \"value\": \"YOUR_VALUE\"}]\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not define seccompProfile\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nseccompProfile_not_defined(spec, path_to_search){\n\tobject.get(spec, path_to_search, \"\") == \"\"\n}", "resourceEnumerator": "", "ruleLanguage": "Rego", "match": [ @@ -19373,23 +33869,1922 @@ } ], "ruleDependencies": [], - "configInputs": null, "controlConfigInputs": null, - "description": "fails if container can run as high user (id less than 1000)", - "remediation": "Make sure that the user/group in the securityContext of pod/container is set to an id less than 1000.", + "description": "fails if container does not define seccompProfile", + "remediation": "Make sure you define seccompProfile at workload or container lever.", "ruleQuery": "armo_builtins", "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-that-the-kubelet-service-file-permissions-are-set-to-600-or-more-restrictive", + "attributes": { + "hostSensorRule": "true" + }, + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"serviceFiles\"]\n\tfiles := object.get(obj, file_obj_path, false)\n\tfile := files[file_index]\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tsprintf(\"%s/%d\", [concat(\"/\", file_obj_path), file_index]),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "controlConfigInputs": null, + "description": "Ensure that the `kubelet` service file has permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. 
For example,\n\n \n```\nchmod 600 /etc/systemd/system/kubelet.service.d/kubeadm.conf\n\n```", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "nginx-ingress-snippet-annotation-vulnerability", + "creationTime": "", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tis_nginx_image(image)\n\tis_tag_image(image)\n\n\t# Extracting version from image tag\n\ttag_version_match := regex.find_all_string_submatch_n(`[0-9]+\\.[0-9]+\\.[0-9]+`, image, -1)[0][0]\n image_version_str_arr := split(tag_version_match,\".\")\n\timage_version_arr := [to_number(image_version_str_arr[0]),to_number(image_version_str_arr[1]),to_number(image_version_str_arr[2])]\n\n\t# Check if vulnerable\n\tis_vulnerable(image_version_arr, deployment.metadata.namespace)\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25742. Deployment %v\", [deployment.metadata.name]),\n\t\t\t\"reviewPaths\": [path],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [deployment]},\n\t\t}\n}\n\n\nis_nginx_image(image) {\n\tcontains(image, \"nginx-controller\")\n}\n\nis_nginx_image(image) {\n\tcontains(image, \"ingress-controller\")\n}\n\nis_nginx_image(image) {\n\tcontains(image, \"ingress-nginx\")\n}\n\nis_allow_snippet_annotation_on(namespace) {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n\tconfigmap_on_ingress_namespace := [configmap | configmap= configmaps[_]; configmap.metadata.namespace == namespace]\n\tconfig_maps_with_snippet := [configmap | configmap= configmap_on_ingress_namespace[_]; configmap.data[\"allow-snippet-annotations\"] == \"false\"]\n\tcount(config_maps_with_snippet) < 1\n}\n\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 0\n\timage_version[1] < 49\n\tis_allow_snippet_annotation_on(namespace)\n}\n\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 0\n\timage_version[1] == 49\n\timage_version[2] == 0\n\tis_allow_snippet_annotation_on(namespace)\n}\n\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 1\n\timage_version[1] == 0\n\timage_version[2] == 0\n\tis_allow_snippet_annotation_on(namespace)\n}\n\nis_tag_image(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n not endswith(img, \"/\")\n}", + "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tisNginxImage(image)\n\tis_tag_image(image)\n\tisVulnerable(image, deployment.metadata.namespace)\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25742. 
%v\", [deployment]),\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [deployment]},\n\t\t}\n}\n\n\t\nisNginxImage(image) {\n\tcontains(image, \"nginx-controller\")\n}\n\nisNginxImage(image) {\n\tcontains(image, \"ingress-controller\")\n}\n\nisNginxImage(image) {\n\tcontains(image, \"ingress-nginx\")\n}\n\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n startswith(tag, \"v\")\n tag <= \"v0.49\"\n}\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n startswith(tag, \"v\")\n tag == \"v1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n startswith(tag, \"v\")\n\ttag <= \"v0.49\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n startswith(tag, \"v\")\n\ttag == \"v1.0.0\"\n}\n\n###### without 'v'\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n not startswith(tag, \"v\")\n tag <= \"0.49\"\n}\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n not startswith(tag, \"v\")\n tag == \"1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n not startswith(tag, \"v\")\n\ttag <= \"0.49\"\n}\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n not startswith(tag, \"v\")\n\ttag == \"1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n\tconfigmapOnIngressNamespace := [configmap | configmap= configmaps[_]; configmap.metadata.namespace == namespace]\n\tconfigMapsWithSnippet := [configmap | configmap= configmapOnIngressNamespace[_]; configmap.data[\"allow-snippet-annotations\"] == \"false\"]\n\tcount(configMapsWithSnippet) < 1\n}\n\n\nis_tag_image(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n not endswith(img, \"/\")\n}", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "*" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Deployment", + "ConfigMap" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-that-the-api-server-kubelet-certificate-authority-argument-is-set-as-appropriate", + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"TLS certificate authority file is not specified\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind 
== \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--kubelet-certificate-authority\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--kubelet-certificate-authority=\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Verify kubelet's certificate before establishing connection.", + "remediation": "Follow the Kubernetes documentation and setup the TLS connection between the apiserver and kubelets. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--kubelet-certificate-authority` parameter to the path to the cert file for the certificate authority.\n\n \n```\n--kubelet-certificate-authority=\n\n```\n\n#### Impact Statement\nYou require TLS to be configured on apiserver as well as kubelets.\n\n#### Default Value\nBy default, `--kubelet-certificate-authority` argument is not set.", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "kubelet-hostname-override", + "attributes": { + "hostSensorRule": "true" + }, + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.8 https://workbench.cisecurity.org/sections/1126668/recommendations/1838654\n\ndeny[msga] {\n\tkubelet_info := input[_]\n\tkubelet_info.kind == \"KubeletInfo\"\n\tkubelet_info.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tcommand := kubelet_info.data.cmdLine\n\n\tcontains(command, \"--hostname-override\")\n\n\texternal_obj := json.filter(kubelet_info, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Argument --hostname-override is set.\",\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Ensure that the --hostname-override argument is not set.", + "remediation": "Unset the --hostname-override argument.", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "k8s-common-labels-usage", + "creationTime": "", + "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the 
resource\n\n\n\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tfixPath := no_K8s_label_or_no_K8s_label_usage(pod, \"\")\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"in the following pod the kubernetes common labels are not defined: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 1,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tpodSpec := wl.spec.template\n\tbeggining_of_pod_path := \"spec.template.\"\n\tfixPath := no_K8s_label_usage(wl, podSpec, beggining_of_pod_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v the kubernetes common labels are is not defined:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 1,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n# handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tpodSpec := wl.spec.jobTemplate.spec.template\n\tbeggining_of_pod_path := \"spec.jobTemplate.spec.template.\"\n\tfixPath := no_K8s_label_usage(wl, podSpec, beggining_of_pod_path)\n\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs the kubernetes common labels are not defined: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 1,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n\n\n# There is no label-usage in WL and also for his Pod\nno_K8s_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tpath1 := no_K8s_label_or_no_K8s_label_usage(wl, \"\")\n\tpath2 := no_K8s_label_or_no_K8s_label_usage(podSpec, beggining_of_pod_path)\n\tpath = array.concat(path1, path2)\n}\n\n# There is label-usage for WL but not for his Pod\nno_K8s_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tnot no_K8s_label_or_no_K8s_label_usage(wl, \"\")\n\tpath := no_K8s_label_or_no_K8s_label_usage(podSpec, beggining_of_pod_path)\n}\n\n# There is no label-usage for WL but there is for his Pod\nno_K8s_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tnot no_K8s_label_or_no_K8s_label_usage(podSpec, beggining_of_pod_path)\n\tpath := no_K8s_label_or_no_K8s_label_usage(wl, \"\")\n}\n\nno_K8s_label_or_no_K8s_label_usage(wl, start_of_path) = path{\n\tnot wl.metadata.labels\n\tlabel_key := get_label_key(\"\")\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels[%v]\", [start_of_path, label_key]), \"value\": \"YOUR_VALUE\"}]\n}\n\nno_K8s_label_or_no_K8s_label_usage(wl, start_of_path) = path{\n\tmetadata := wl.metadata\n\tnot metadata.labels\n\tlabel_key := get_label_key(\"\")\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels[%v]\", [start_of_path, label_key]), \"value\": \"YOUR_VALUE\"}]\n}\n\nno_K8s_label_or_no_K8s_label_usage(wl, start_of_path) = path{\n\tlabels := wl.metadata.labels\n\tnot all_kubernetes_labels(labels)\n\tlabel_key := get_label_key(\"\")\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels[%v]\", [start_of_path, label_key]), \"value\": \"YOUR_VALUE\"}]\n}\n\nall_kubernetes_labels(labels){\n\trecommended_labels := data.postureControlInputs.k8sRecommendedLabels\n\trecommended_label := recommended_labels[_]\n\tlabels[recommended_label]\n}\n\n# get_label_key accepts a parameter so it's not considered a 
rule\nget_label_key(unused_param) = key {\n\trecommended_labels := data.postureControlInputs.k8sRecommendedLabels\n count(recommended_labels) > 0\n key := recommended_labels[0]\n} else = \"YOUR_LABEL\"\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.k8sRecommendedLabels", + "name": "Kubernetes Recommended Labels", + "description": "Kubescape checks that workloads have at least one of this list of configurable labels, as recommended in the Kubernetes documentation." + } + ], + "description": "Check if the list of label that start with app.kubernetes.io/ are defined.", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-that-the-certificate-authorities-file-permissions-are-set-to-600-or-more-restrictive", + "attributes": { + "hostSensorRule": "true" + }, + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"clientCAFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. 
actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "controlConfigInputs": null, + "description": "Ensure that the certificate authorities file has permissions of `600` or more restrictive.", + "remediation": "Run the following command to modify the file permissions of the `--client-ca-file`\n\n \n```\nchmod 600 \n\n```", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "psp-deny-hostipc", + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if there all PSPs have hostIPC set to true\n\t# if even one PSP has hostIPC set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.hostIPC == true\n\t}\n\n\t# return al the PSPs that have hostIPC set to true\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tpsp.spec.hostIPC == true\n\n\tpath := \"spec.hostIPC\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has hostIPC set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "v1beta1" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "has-image-signature", + "attributes": { + "useFromKubescapeVersion": "v2.1.3" + }, + "creationTime": "", + "rule": "package armo_builtins\n\ndeny[msga] {\n\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n not cosign.has_signature(container.image)\n\n failedPath := sprintf(\"spec.containers[%v].image\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image: %v is not signed\", [ container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [failedPath],\n\t\t\"failedPaths\": [failedPath],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t},\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\twl_kinds[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n not cosign.has_signature(container.image)\n\n\tfailedPath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"image: %v is not signed\", [ container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": 
[failedPath],\n\t\t\"failedPaths\": [failedPath],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t},\n\t}\n}\n\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n not cosign.has_signature(container.image)\n\n\tfailedPath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"image: %v is not signed\", [ container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [failedPath],\n\t\t\"failedPaths\": [failedPath],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t},\n\t}\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Ensures that all images contain some signature", + "remediation": "Replace the image with a signed image", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-that-the-admission-control-plugin-NodeRestriction-is-set", + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"NodeRestriction is not enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\nget_flag_values(cmd) = {\"origin\": origin, \"values\": values} {\n\tre := \" ?--enable-admission-plugins=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd, -1)\n\tcount(matchs) == 1\n\tvalues := [val | val := split(matchs[0][1], \",\")[j]; val != \"\"]\n\torigin := matchs[0][0]\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tflag := get_flag_values(cmd[i])\n\n\t# value check\n\tnot \"NodeRestriction\" in flag.values\n\n\t# get fixed and failed paths\n\tfixed_values := array.concat(flag.values, [\"NodeRestriction\"])\n\tfixed_flag = sprintf(\"%s=%s\", [\"--enable-admission-plugins\", concat(\",\", fixed_values)])\n\tfixed_cmd = replace(cmd[i], flag.origin, fixed_flag)\n\tpath := sprintf(\"spec.containers[0].command[%d]\", [i])\n\n\tresult := {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed_cmd,\n\t\t}],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd := concat(\" \", cmd)\n\tnot contains(full_cmd, \"--enable-admission-plugins\")\n\n\tpath = sprintf(\"spec.containers[0].command[%d]\", [count(cmd)])\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": 
path,\n\t\t\t\"value\": \"--enable-admission-plugins=NodeRestriction\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Limit the `Node` and `Pod` objects that a kubelet could modify.", + "remediation": "Follow the Kubernetes documentation and configure `NodeRestriction` plug-in on kubelets. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the `--enable-admission-plugins` parameter to a value that includes `NodeRestriction`.\n\n \n```\n--enable-admission-plugins=...,NodeRestriction,...\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, `NodeRestriction` is not set.", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-that-the-scheduler-pod-specification-file-permissions-are-set-to-600-or-more-restrictive", + "attributes": { + "hostSensorRule": "true" + }, + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"schedulerInfo\", \"specsFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "controlConfigInputs": null, + "description": "Ensure that the scheduler pod specification file has permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. 
For example,\n\n \n```\nchmod 600 /etc/kubernetes/manifests/kube-scheduler.yaml\n\n```", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "restrict-access-to-the-control-plane-endpoint", + "attributes": { + "hostSensorRule": "false", + "imageScanRelated": false + }, + "creationTime": "", + "rule": "\npackage armo_builtins\n\n# fails in case authorizedIPRanges is not set.\ndeny[msga] {\n\tobj := input[_]\n\tobj.apiVersion == \"management.azure.com/v1\"\n\tobj.kind == \"ClusterDescribe\"\n\tobj.metadata.provider == \"aks\"\n\tconfig = obj.data\n\n\tnot isAuthorizedIPRangesSet(config)\n\n\tmsga := {\n \t\"alertMessage\": \"Parameter 'authorizedIPRanges' was not set.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n \t\"failedPaths\": [],\n \t\"fixPaths\":[],\n \"fixCommand\": \"az aks update -n '' -g '' --api-server-authorized-ip-ranges '0.0.0.0/32'\",\n \t\"alertObject\": {\n\t\t\t\"externalObjects\": obj\n }\n }\n\n}\n\nisAuthorizedIPRangesSet(config) {\n\tcount(config.properties.apiServerAccessProfile.authorizedIPRanges) > 0\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": null, + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "ruleDependencies": null, + "controlConfigInputs": null, + "description": "Enable Endpoint Private Access to restrict access to the cluster's control plane to only an allowlist of authorized IPs.", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-that-the-api-server-encryption-providers-are-appropriately-configured", + "attributes": { + "hostSensorRule": "true" + }, + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Encryption config is set but not using one of the recommended providers\ndeny[msg] {\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\tconfig_file := obj.data.APIServerInfo.encryptionProviderConfigFile\n\tconfig_file_content = decode_config_file(base64.decode(config_file.content))\n\n\t# For each resource check if it does not have allowed provider\n\tfix_paths := [{\n\t\t\"path\": sprintf(\"resources[%d].providers[%d]\", [i, count(resource.providers)]),\n\t\t\"value\": \"{\\\"aescbc\\\" | \\\"secretbox\\\" | \\\"kms\\\" : }\", # must be string\n\t} |\n\t\tresource := config_file_content.resources[i]\n\t\tcount({true |\n\t\t\tsome provider in resource.providers\n\t\t\thas_one_of_keys(provider, [\"aescbc\", \"secretbox\", \"kms\"])\n\t\t}) == 0\n\t]\n\n\tcount(fix_paths) > 0\n\n\t# Add name to the failed object so that\n\t# it fit the format of the alert object\n\tfailed_obj := json.patch(config_file_content, [{\n\t\t\"op\": \"add\",\n\t\t\"path\": \"name\",\n\t\t\"value\": \"encryption-provider-config\",\n\t}])\n\n\tmsg := {\n\t\t\"alertMessage\": \"Encryption provider config is not using one of the allowed providers (aescbc, secretbox, kms)\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": failed_obj},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\ndecode_config_file(content) := parsed {\n\tparsed := yaml.unmarshal(content)\n} else := json.unmarshal(content)\n\nhas_key(x, k) {\n\t_ = x[k]\n}\n\nhas_one_of_keys(x, keys) 
{\n\thas_key(x, keys[_])\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": null, + "controlConfigInputs": null, + "description": "Where `etcd` encryption is used, appropriate providers should be configured.", + "remediation": "Follow the Kubernetes documentation and configure a `EncryptionConfig` file. In this file, choose `aescbc`, `kms` or `secretbox` as the encryption provider.\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, no encryption provider is set.", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-image-scanning-enabled-cloud", + "creationTime": "", + "rule": "package armo_builtins\nimport future.keywords.in\n\n# Check if image scanning enabled for EKS\ndeny[msga] {\n\tdescribe_repositories := input[_]\n\tdescribe_repositories.apiVersion == \"eks.amazonaws.com/v1\"\n\tdescribe_repositories.kind == \"DescribeRepositories\"\n\tdescribe_repositories.metadata.provider == \"eks\"\n\trepos := describe_repositories.data.Repositories\n\tsome repo in repos\n\tnot image_scanning_configured(repo)\n\t\n\n\tmsga := {\n\t\t\"alertMessage\": \"image scanning is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"aws ecr put-image-scanning-configuration --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": describe_repositories,\n\t\t},\n\t}\n}\n\nimage_scanning_configured(repo) {\n\trepo.ImageScanningConfiguration.ScanOnPush == true\n}", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": null, + "dynamicMatch": [ + { + "apiGroups": [ + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "DescribeRepositories" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": [ + "EKS" + ] + }, + { + "guid": "", + "name": "ensure-that-the-API-server-pod-specification-file-ownership-is-set-to-root-root", + "attributes": { + "hostSensorRule": "true" + }, + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"APIServerInfo\", \"specsFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "controlConfigInputs": null, + "description": "Ensure that the API server pod specification file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchown root:root /etc/kubernetes/manifests/kube-apiserver.yaml\n\n```", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-aws-policies-are-present", + "creationTime": "", + "rule": "package armo_builtins\n\n# deny if policies are not present on AWS\ndeny[msg] {\n\tpolicies := input[_]\n\tpolicies.kind == \"PolicyVersion\"\n\tpolicies.metadata.provider == \"eks\"\n\n\tmsg := {\n\t\t\"alertMessage\": \"Cluster has not policies to minimize access to Amazon ECR; Add some policy in order to minimize access on it.\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": policies\n\t\t}\n\t}\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": null, + "dynamicMatch": [ + { + "apiGroups": [ + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "PolicyVersion" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "fails if aws policies are not found", + "remediation": "Implement policies to minimize user access to Amazon ECR", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": [ + "EKS" + ] + }, + { + "guid": "", + "name": "CVE-2022-24348", + "creationTime": "", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"argocd:v\")\n\tis_vulnerable_image(image)\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-24348\",\n\t\t\t\"reviewPaths\": [path],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n\nis_vulnerable_image(image) {\n\tversion := split(image, \":v\")[1]\n\tversionTriplet := split(version, \".\")\n\tcount(versionTriplet) == 3\n\tmajor_version := to_number(versionTriplet[0])\n\tminorVersion := to_number(versionTriplet[1])\n\tsubVersion := to_number(versionTriplet[2]) \n\tisVulnerableVersion(major_version,minorVersion,subVersion)\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 1\n} \n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 2\n\tminorVersion == 0\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 2\n\tminorVersion == 
1\n\tsubVersion < 9\n}\n\nisVulnerableVersion(major_version, minorVersion, subVersion) {\n\tmajor_version == 2\n\tminorVersion == 2\n\tsubVersion < 4\n}\t\n\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tcontains(image, \"argocd:v\")\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n msga := {\n\t\t\t\"alertMessage\": \"You may be vulnerable to CVE-2022-24348\",\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": { \n \"k8SApiObjects\": [deployment]\n },\n\t\t}\n}\n", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "a", + "remediation": "a", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "rule-privilege-escalation", + "attributes": { + "mitre": "Privilege Escalation", + "mitreCode": "TA0004", + "m$K8sThreatMatrix": "Privilege Escalation::privileged container" + }, + "creationTime": "", + "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n# privileged pods\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tstart_of_path := \"spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following pods are defined as privileged: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\n# handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is defined as privileged:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n# handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tstart_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, start_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined as privileged: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"deletePaths\": path,\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n\n# Only SYS_ADMIN capabilite\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tnot container.securityContext.privileged == true\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path) > 
0\n}\n\n# Only securityContext.privileged == true\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tcontainer.securityContext.privileged == true\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) < 1\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.privileged\", [start_of_path, format_int(i, 10)])]\n}\n\n# SYS_ADMIN capabilite && securityContext.privileged == true\nisPrivilegedContainer(container, i, start_of_path) = path {\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [start_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) > 0\n\tcontainer.securityContext.privileged == true\n\tpath = array.concat(path1, [sprintf(\"%vcontainers[%v].securityContext.privileged\", [start_of_path, format_int(i, 10)])])\n}", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "determines if pods/deployments defined as privileged true", + "remediation": "avoid defining pods as privilleged", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "workload-mounted-configmap", + "creationTime": "", + "rule": "package armo_builtins\n\ndeny[msga] {\n\tresource := input[_]\n\tvolumes_path := get_volumes_path(resource)\n\tvolumes := object.get(resource, volumes_path, [])\n\tvolume := volumes[i]\n\tvolume.configMap\n\n\tconfigMap := input[_]\n\tconfigMap.kind == \"ConfigMap\"\n\tconfigMap.metadata.name == volume.configMap.name\n\tis_same_namespace(configMap.metadata, resource.metadata)\n\n\tcontainers_path := get_containers_path(resource)\n\tcontainers := object.get(resource, containers_path, [])\n\tcontainer := containers[j]\n\tcontainer.volumeMounts\n\n \t# check if volume is mounted\n\tcontainer.volumeMounts[k].name == volume.name\n\n\tfailedPaths := sprintf(\"%s[%d].volumeMounts[%d]\", [concat(\".\", containers_path), j, k])\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has mounted configMap\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [failedPaths],\n\t\t\"failedPaths\": [failedPaths],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t},\n \"relatedObjects\": [{\n \"object\": configMap\n }]\n\t}\n}\n\n\n\n# get_containers_path - get resource containers paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_containers_path(resource) := result {\n\tresource_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource containers paths for \"Pod\"\nget_containers_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"containers\"]\n}\n\n# get_containers_path - get resource 
containers paths for \"CronJob\"\nget_containers_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n}\n\n# get_volume_path - get resource volumes paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_volumes_path(resource) := result {\n\tresource_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresource_kinds[resource.kind]\n\tresult = [\"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"Pod\"\nget_volumes_path(resource) := result {\n\tresource.kind == \"Pod\"\n\tresult = [\"spec\", \"volumes\"]\n}\n\n# get_volumes_path - get resource volumes paths for \"CronJob\"\nget_volumes_path(resource) := result {\n\tresource.kind == \"CronJob\"\n\tresult = [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"volumes\"]\n}\n\n\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod", + "ConfigMap" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": null, + "controlConfigInputs": null, + "description": "fails if workload mounts ConfigMaps", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-default-service-accounts-has-only-default-roles", + "creationTime": "", + "rule": "package armo_builtins\n\n\n# deny if a default ServiceAccount has rules bound to it that are not defaults. 
\ndeny[msga] {\n\n wl := input[_]\n\tspec_template_spec_patterns := {\"RoleBinding\", \"ClusterRoleBinding\"}\n\tspec_template_spec_patterns[wl.kind]\n\n # filter service accounts\n wl.subjects[i].kind == \"ServiceAccount\"\n\n # filter defaults\n wl.subjects[i].name == \"default\"\n\n not wl.metadata.labels[\"kubernetes.io/bootstrapping\"] == \"rbac-defaults\"\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%s: %v has for ServiceAccount 'default' rules bound to it that are not defaults\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n \"deletePaths\": [sprintf(\"subjects[%d]\", [i])],\n \"failedPaths\": [sprintf(\"subjects[%d]\", [i])],\n \"fixPaths\":[],\n\t\t\"alertScore\": 7,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "set-seccomp-profile-RuntimeDefault", + "creationTime": "", + "rule": "package armo_builtins\n\n# Fails if pod does not define seccompProfile as RuntimeDefault\ndeny[msga] {\n wl := input[_]\n wl.kind == \"Pod\"\n wl_spec := wl.spec\n\tpath_to_containers := [\"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\t\n\tpath_to_search := [\"securityContext\", \"seccompProfile\", \"type\"]\n\n\tseccompProfile_result := get_seccompProfile_definition(wl_spec, container, i, path_to_containers, path_to_search)\n\tseccompProfile_result.failed == true\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not define seccompProfile as RuntimeDefault\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": seccompProfile_result.failed_path,\n\t\t\"failedPaths\": seccompProfile_result.failed_path,\n\t\t\"fixPaths\": seccompProfile_result.fix_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if workload does not define seccompProfile as RuntimeDefault\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n wl_spec := wl.spec.template.spec\n\tpath_to_containers := [\"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\t\n\tpath_to_search := [\"securityContext\", \"seccompProfile\", \"type\"]\n\n\tseccompProfile_result := get_seccompProfile_definition(wl_spec, container, i, path_to_containers, path_to_search)\n\tseccompProfile_result.failed == true\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v does not define seccompProfile as RuntimeDefault\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": seccompProfile_result.failed_path,\n\t\t\"failedPaths\": seccompProfile_result.failed_path,\n\t\t\"fixPaths\": seccompProfile_result.fix_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if CronJob does not define seccompProfile as RuntimeDefault\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n wl_spec := 
wl.spec.jobTemplate.spec.template.spec\n\tpath_to_containers := [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\", \"containers\"]\n\tcontainers := object.get(wl, path_to_containers, [])\n\tcontainer := containers[i]\n\t\n\tpath_to_search := [\"securityContext\", \"seccompProfile\", \"type\"]\n\n\tseccompProfile_result := get_seccompProfile_definition(wl_spec, container, i, path_to_containers, path_to_search)\n\tseccompProfile_result.failed == true\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v does not define seccompProfile as RuntimeDefault\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": seccompProfile_result.failed_path,\n\t\t\"failedPaths\": seccompProfile_result.failed_path,\n\t\t\"fixPaths\": seccompProfile_result.fix_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# container definition takes precedence\nget_seccompProfile_definition(wl, container, i, path_to_containers, path_to_search) = seccompProfile_result {\n\tcontainer.securityContext.seccompProfile.type == \"RuntimeDefault\"\n seccompProfile_result := {\"failed\": false, \"failed_path\": [], \"fix_path\": []}\n\n} else = seccompProfile_result {\n\tcontainer.securityContext.seccompProfile.type != \"RuntimeDefault\"\n failed_path := sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)])\n seccompProfile_result := {\"failed\": true, \"failed_path\": [failed_path], \"fix_path\": []}\n\n} else = seccompProfile_result {\n\twl.securityContext.seccompProfile.type == \"RuntimeDefault\" \n seccompProfile_result := {\"failed\": false, \"failed_path\": [], \"fix_path\": []}\n\n} else = seccompProfile_result {\n\twl.securityContext.seccompProfile.type != \"RuntimeDefault\" \n\tfailed_path := sprintf(\"%s.%s\", [trim_suffix(concat(\".\", path_to_containers), \".containers\"), concat(\".\", path_to_search)])\n seccompProfile_result := {\"failed\": true, \"failed_path\": [failed_path], \"fix_path\": []}\n\n} else = seccompProfile_result{\n\tfix_path := [{\"path\": sprintf(\"%s[%d].%s\", [concat(\".\", path_to_containers), i, concat(\".\", path_to_search)]), \"value\":\"RuntimeDefault\"}]\n\tseccompProfile_result := {\"failed\": true, \"failed_path\": [], \"fix_path\": fix_path}\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "fails if container does not define seccompProfile as RuntimeDefault", + "remediation": "Make sure you define seccompProfile as RuntimeDefault at workload or container lever.", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "exposed-critical-pods", + "attributes": { + "m$K8sThreatMatrix": "exposed-critical-pods", + "imageScanRelated": true + }, + "creationTime": "", + "rule": "package armo_builtins\n\n# regal ignore:rule-length\ndeny[msga] {\n services := [ x | x = input[_]; x.kind == \"Service\" ]\n pods := [ x | x = input[_]; x.kind == \"Pod\" ]\n vulns := [ x | x = input[_]; x.kind == \"ImageVulnerabilities\"]\n\n pod := pods[_]\n service := 
services[_]\n vuln := vulns[_]\n\n # vuln data is relevant\n count(vuln.data) > 0\n\n # service is external-facing\n filter_external_access(service)\n\n # pod has the current service\n service_to_pod(service, pod) > 0\n\n # get container image name\n container := pod.spec.containers[i]\n\n # image has vulnerabilities\n\n container.image == vuln.metadata.name\n\n # At least one critical vulnerabilities\n filter_critical_vulnerabilities(vuln)\n\n related_objects := [pod, vuln]\n\n path := sprintf(\"status.containerStatuses[%v].imageID\", [format_int(i, 10)])\n\n metadata = {\n \"name\": pod.metadata.name,\n \"namespace\": pod.metadata.namespace\n }\n\n external_objects = {\n \"apiVersion\": \"result.vulnscan.com/v1\",\n \"kind\": pod.kind,\n \"metadata\": metadata,\n \"relatedObjects\": related_objects\n }\n\n msga := {\n \"alertMessage\": sprintf(\"pod '%v' exposed with critical vulnerabilities\", [pod.metadata.name]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 7,\n\t\t\"reviewPaths\": [path],\n \"failedPaths\": [path],\n \"fixPaths\": [],\n \"alertObject\": {\n \"externalObjects\": external_objects\n }\n }\n}\n\nfilter_critical_vulnerabilities(vuln) {\n data := vuln.data[_]\n data.severity == \"Critical\"\n}\n\nfilter_external_access(service) {\n service.spec.type != \"ClusterIP\"\n}\n\nservice_to_pod(service, pod) = res {\n # Make sure we're looking on the same namespace\n service.metadata.namespace == pod.metadata.namespace\n\n service_selectors := [ x | x = service.spec.selector[_] ]\n\n res := count([ x | x = pod.metadata.labels[_]; x == service_selectors[_] ])\n}", + "resourceEnumerator": "package armo_builtins\n\n# regal ignore:rule-length\ndeny[msga] {\n services := [ x | x = input[_]; x.kind == \"Service\" ]\n pods := [ x | x = input[_]; x.kind == \"Pod\" ]\n vulns := [ x | x = input[_]; x.kind == \"ImageVulnerabilities\"]\n\n pod := pods[_]\n service := services[_]\n vuln := vulns[_]\n\n # vuln data is relevant\n count(vuln.data) > 0\n\n # service is external-facing\n filter_external_access(service)\n\n # pod has the current service\n service_to_pod(service, pod) > 0\n\n # get container image name\n container := pod.spec.containers[i]\n\n # image has vulnerabilities\n container.image == vuln.metadata.name\n\n related_objects := [pod, vuln]\n\n path := sprintf(\"status.containerStatuses[%v].imageID\", [format_int(i, 10)])\n\n metadata = {\n \"name\": pod.metadata.name,\n \"namespace\": pod.metadata.namespace\n }\n\n external_objects = {\n \"apiVersion\": \"result.vulnscan.com/v1\",\n \"kind\": pod.kind,\n \"metadata\": metadata,\n \"relatedObjects\": related_objects\n }\n\n msga := {\n \"alertMessage\": sprintf(\"pod '%v' exposed with critical vulnerabilities\", [pod.metadata.name]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 7,\n \"failedPaths\": [path],\n \"fixPaths\": [],\n \"alertObject\": {\n \"externalObjects\": external_objects\n }\n }\n}\n\nfilter_external_access(service) {\n service.spec.type != \"ClusterIP\"\n}\n\nservice_to_pod(service, pod) = res {\n # Make sure we're looking on the same namespace\n service.metadata.namespace == pod.metadata.namespace\n\n service_selectors := [ x | x = service.spec.selector[_] ]\n\n res := count([ x | x = pod.metadata.labels[_]; x == service_selectors[_] ])\n}", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Service", + "Pod" + ] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "armo.vuln.images", + "image.vulnscan.com" + ], + 
"apiVersions": [ + "v1" + ], + "resources": [ + "ImageVulnerabilities" + ] + } + ], + "ruleDependencies": null, + "controlConfigInputs": null, + "description": "Fails if pods have exposed services as well as critical vulnerabilities", + "remediation": "The image of the listed pods might have a fix in a newer version. Alternatively, the pod service might not need to be external facing", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "podtemplate-in-default-namespace", + "creationTime": "", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "PodTemplate" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "replicationcontroller-in-default-namespace", + "creationTime": "", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresult := is_default_namespace(resource.metadata)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is in the 'default' namespace\", [resource.kind, resource.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": failed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resource]\n\t\t}\n\t}\n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tmetadata.namespace == \"default\"\n\tfailed_path = \"metadata.namespace\"\n\tfixPath = \"\" \n}\n\nis_default_namespace(metadata) = [failed_path, fixPath] {\n\tnot metadata.namespace\n\tfailed_path = \"\"\n\tfixPath = {\"path\": \"metadata.namespace\", \"value\": \"YOUR_NAMESPACE\"}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ReplicationController" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": 
"ensure-clusters-are-created-with-private-nodes", + "attributes": { + "imageScanRelated": false, + "hostSensorRule": false + }, + "creationTime": "", + "rule": "\npackage armo_builtins\n\n# fails in case enablePrivateCluster is set to false.\ndeny[msga] {\n\tobj := input[_]\n\tobj.apiVersion == \"management.azure.com/v1\"\n\tobj.kind == \"ClusterDescribe\"\n\tobj.metadata.provider == \"aks\"\n\tconfig = obj.data\n\n\tnot isPrivateClusterEnabled(config)\n\n\tmsga := {\n \t\"alertMessage\": \"Cluster does not have private nodes.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 7,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[],\n \"fixCommand\": \"az aks create --resource-group --name --load-balancer-sku standard --enable-private-cluster --network-plugin azure --vnet-subnet-id --docker-bridge-address --dns-service-ip --service-cidr\",\n \t\"alertObject\": {\n\t\t\"externalObjects\": obj\n }\n }\n}\n\nisPrivateClusterEnabled(config) {\n\tconfig.properties.apiServerAccessProfile.enablePrivateCluster == true\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": null, + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "ruleDependencies": null, + "controlConfigInputs": null, + "description": "Disable public IP addresses for cluster nodes, so that they only have private IP addresses. Private Nodes are nodes with no public IP addresses.", + "remediation": "az aks create --resource-group --name --load-balancer-sku standard --enable-private-cluster --network-plugin azure --vnet-subnet-id --docker-bridge-address --dns-service-ip --service-cidr", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-that-the-api-server-kubelet-client-certificate-and-kubelet-client-key-arguments-are-set-as-appropriate", + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"certificate based kubelet authentication is not enabled\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\twanted = [\n\t\t\"--kubelet-client-certificate\",\n\t\t\"--kubelet-client-key\",\n\t]\n\n\tfix_paths = [{\n\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd) + i]),\n\t\t\"value\": sprintf(\"%s=\", [wanted[i]]),\n\t} |\n\t\twanted[i]\n\t\tnot contains(full_cmd, wanted[i])\n\t]\n\n\tcount(fix_paths) > 0\n\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": fix_paths,\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 
1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Enable certificate based kubelet authentication.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and kubelets. Then, edit API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the kubelet client certificate and key parameters as below.\n\n \n```\n--kubelet-client-certificate=\n--kubelet-client-key=\n\n```\n\n#### Impact Statement\nYou require TLS to be configured on apiserver as well as kubelets.\n\n#### Default Value\nBy default, certificate-based kubelet authentication is not set.", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "psp-deny-hostnetwork", + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\ndeny[msga] {\n\t# only fail resources if all PSPs have hostNetwork set to true\n\t# if even one PSP has hostNetwork set to false, then the rule will not fail\n\tevery psp in input {\n\t\tpsp.kind == \"PodSecurityPolicy\"\n\t\tpsp.spec.hostNetwork == true\n\t}\n\n\t# return all the PSPs that have hostNetwork set to true\n\tpsp := input[_]\n\tpsp.kind == \"PodSecurityPolicy\"\n\tpsp.spec.hostNetwork == true\n\n\tpath := \"spec.hostNetwork\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"PodSecurityPolicy: '%v' has hostNetwork set as true.\", [psp.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"deletePaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [psp]},\n\t}\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "policy" + ], + "apiVersions": [ + "v1beta1" + ], + "resources": [ + "PodSecurityPolicy" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-that-the-controller-manager.conf-file-permissions-are-set-to-600-or-more-restrictive", + "attributes": { + "hostSensorRule": "true" + }, + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevant resources\n\tobj = input[_]\n\tis_control_plane_info(obj)\n\n\tfile_obj_path := [\"data\", \"controllerManagerInfo\", \"configFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. 
actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_control_plane_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "controlConfigInputs": null, + "description": "Ensure that the `controller-manager.conf` file has permissions of 600 or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the Control Plane node. For example,\n\n \n```\nchmod 600 /etc/kubernetes/controller-manager.conf\n\n```", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-that-the-api-server-etcd-certfile-and-etcd-keyfile-arguments-are-set-as-appropriate", + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"etcd is not configured to use TLS properly\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\twanted = [\n\t\t[\"--etcd-certfile\", \"\"],\n\t\t[\"--etcd-keyfile\", \"\"],\n\t]\n\n\tfix_paths = [{\n\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd) + i]),\n\t\t\"value\": sprintf(\"%s=%s\", wanted[i]),\n\t} |\n\t\tnot contains(full_cmd, wanted[i][0])\n\t]\n\n\tcount(fix_paths) > 0\n\n\tresult = {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": fix_paths,\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "etcd should be configured to make use of TLS encryption for client connections.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the etcd certificate and key file parameters.\n\n \n```\n--etcd-certfile= \n--etcd-keyfile=\n\n```\n\n#### Impact Statement\nTLS and client certificate authentication must be configured for etcd.\n\n#### Default Value\nBy default, `--etcd-certfile` and `--etcd-keyfile` arguments are not set", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "anonymous-access-enabled", + "creationTime": "", + "rule": "package armo_builtins\n\n# Fails is rolebinding/clusterrolebinding gives permissions to anonymous user\ndeny[msga] {\n rolebindings := [rolebinding | rolebinding = input[_]; endswith(rolebinding.kind, \"Binding\")]\n rolebinding := rolebindings[_]\n subject := rolebinding.subjects[i]\n isAnonymous(subject)\n delete_path := sprintf(\"subjects[%d]\", [i])\n msga := {\n \"alertMessage\": sprintf(\"the following RoleBinding: %v gives permissions to anonymous users\", [rolebinding.metadata.name]),\n \"alertScore\": 9,\n \"deletePaths\": [delete_path],\n \"failedPaths\": [delete_path],\n \"packagename\": \"armo_builtins\",\n \"alertObject\": {\n \"k8sApiObjects\": [rolebinding]\n }\n }\n}\n\n\nisAnonymous(subject) {\n subject.name == \"system:anonymous\"\n}\n\nisAnonymous(subject) {\n subject.name == \"system:unauthenticated\"\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding", + "ClusterRoleBinding" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Fails in case anonymous or unauthenticated user has any rbac permissions (is bound by a RoleBinding/ClusterRoleBinding)", + "remediation": "Remove any RBAC rules which allow anonymous users to perform actions", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "label-usage-for-resources", + "creationTime": "", + "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tfixPath := no_label_or_no_label_usage(pod, \"\")\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"in the following pods a certain set of labels is not defined: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tpodSpec := wl.spec.template\n\tbeggining_of_pod_path := \"spec.template.\"\n\tfixPath := no_label_usage(wl, podSpec, beggining_of_pod_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v a certain set of labels is not defined:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n# handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tpodSpec := wl.spec.jobTemplate.spec.template\n\tbeggining_of_pod_path := \"spec.jobTemplate.spec.template.\"\n\tfixPath := no_label_usage(wl, podSpec, beggining_of_pod_path)\n\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the 
following cronjobs a certain set of labels is not defined: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPath,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n# There is no label-usage in WL and also for his Pod\nno_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tpath1 := no_label_or_no_label_usage(wl, \"\")\n\tpath2 := no_label_or_no_label_usage(podSpec, beggining_of_pod_path)\n\tpath = array.concat(path1, path2)\n}\n\n# There is label-usage for WL but not for his Pod\nno_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tnot no_label_or_no_label_usage(wl, \"\")\n\tpath := no_label_or_no_label_usage(podSpec, beggining_of_pod_path)\n}\n\n# There is no label-usage for WL but there is for his Pod\nno_label_usage(wl, podSpec, beggining_of_pod_path) = path{\n\tnot no_label_or_no_label_usage(podSpec, beggining_of_pod_path)\n\tpath := no_label_or_no_label_usage(wl, \"\")\n}\n\nno_label_or_no_label_usage(wl, start_of_path) = path{\n\tnot wl.metadata\n\tlabel_key := get_label_key(\"\")\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels[%v]\", [start_of_path, label_key]), \"value\": \"YOUR_VALUE\"}]\n}\n\nno_label_or_no_label_usage(wl, start_of_path) = path{\n\tmetadata := wl.metadata\n\tnot metadata.labels\n\tlabel_key := get_label_key(\"\")\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels[%v]\", [start_of_path, label_key]), \"value\": \"YOUR_VALUE\"}]\n}\n\nno_label_or_no_label_usage(wl, start_of_path) = path{\n\tlabels := wl.metadata.labels\n\tnot is_desired_label(labels)\n\tlabel_key := get_label_key(\"\")\n\tpath = [{\"path\": sprintf(\"%vmetadata.labels[%v]\", [start_of_path, label_key]), \"value\": \"YOUR_VALUE\"}]\n}\n\nis_desired_label(labels) {\n\trecommended_labels := data.postureControlInputs.recommendedLabels\n\trecommended_label := recommended_labels[_]\n\tlabels[recommended_label]\n}\n\n# get_label_key accepts a parameter so it's not considered a rule\nget_label_key(unused_param) = key {\n\trecommended_labels := data.postureControlInputs.recommendedLabels\n count(recommended_labels) > 0\n key := recommended_labels[0]\n} else = \"YOUR_LABEL\"\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.recommendedLabels", + "name": "Recommended Labels", + "description": "Kubescape checks that workloads have at least one label that identifies semantic attributes." + } + ], + "description": "check if a certain set of labels is defined, this is a configurable control. 
Initial list: app, tier, phase, version, owner, env.", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "resources-cpu-limits", + "creationTime": "", + "rule": "package armo_builtins\n\n\n# ==================================== no CPU limits =============================================\n# Fails if pod does not have container with CPU-limits\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tnot container.resources.limits.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v does not have CPU-limit or request\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload does not have container with CPU-limits\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n not container.resources.limits.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.template.spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob does not have container with CPU-limits\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n not container.resources.limits.cpu\n\n\tfixPaths := [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].resources.limits.cpu\", [format_int(i, 10)]), \"value\": \"YOUR_VALUE\"}]\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v does not have CPU-limit or request\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "CPU limits are not set.", + "remediation": "Ensure CPU limits are set.", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "alert-container-optimized-os-not-in-use", + "creationTime": "", + "rule": "package armo_builtins\nimport future.keywords.in\n\n\n# checks if a node is not using a \"Container-Optimized OS\". 
\n# \"Container-Optimized OS\" prefixes are configured in 'container_optimized_os_prefixes'. \n# deny if 'nodes.status.nodeInfo.osImage' not starting with at least one item in 'container_optimized_os_prefixes'.\ndeny[msga] {\n\n\tnodes := input[_]\n\tnodes.kind == \"Node\"\n\n\t# list of \"Container-Optimized OS\" images prefixes \n\tcontainer_optimized_os_prefixes = [\"Bottlerocket\"]\n\n\t# check if osImage starts with at least one prefix\n\tsome str in container_optimized_os_prefixes\n\tnot startswith(nodes.status.nodeInfo.osImage, str)\n\n\t# prepare message data.\n\talert_message := \"Prefer using Container-Optimized OS when possible\"\n\n\tfailedPaths:= [\"status.nodeInfo.osImage\"]\n\n\tmsga := {\n\t\t\"alertMessage\": alert_message,\n\t\t\"packagename\": \"armo_builtins\",\n\n\t\t\"alertScore\": 7,\n\t\t\"reviewPaths\": failedPaths,\n\t\t\"failedPaths\": failedPaths,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [nodes]\n\t\t}\n\t}\n}", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Node" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": [ + "EKS" + ] + }, + { + "guid": "", + "name": "ensure-that-the-api-server-token-auth-file-parameter-is-not-set", + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"API server TLS is not configured\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tre := \" ?--token-auth-file=(.+?)(?: |$)\"\n\tmatchs := regex.find_all_string_submatch_n(re, cmd[i], -1)\n\tcount(matchs) > 0\n\tfixed = replace(cmd[i], matchs[0][0], \"\")\n\tresult = get_result(sprintf(\"spec.containers[0].command[%d]\", [i]), fixed)\n}\n\n# Get fix and failed paths\nget_result(path, fixed) = result {\n\tfixed == \"\"\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [],\n\t}\n}\n\nget_result(path, fixed) = result {\n\tfixed != \"\"\n\tresult = {\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": path,\n\t\t\t\"value\": fixed,\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Do not use token 
based authentication.", + "remediation": "Follow the documentation and configure alternate mechanisms for authentication. Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and remove the `--token-auth-file=` parameter.\n\n#### Impact Statement\nYou will have to configure and use alternate authentication mechanisms such as certificates. Static token based authentication could not be used.\n\n#### Default Value\nBy default, `--token-auth-file` argument is not set.", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "verify-image-signature", + "attributes": { + "useFromKubescapeVersion": "v2.1.3" + }, + "creationTime": "", + "rule": "package armo_builtins\n\ndeny[msga] {\n\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n verified_keys := [trusted_key | trusted_key = data.postureControlInputs.trustedCosignPublicKeys[_]; cosign.verify(container.image, trusted_key)]\n count(verified_keys) == 0\n\n\tpath := sprintf(\"spec.containers[%v].image\", [i])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"signature not verified for image: %v\", [container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t},\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\tverified_keys := [trusted_key | trusted_key = data.postureControlInputs.trustedCosignPublicKeys[_]; cosign.verify(container.image, trusted_key)]\n count(verified_keys) == 0\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [i])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"signature not verified for image: %v\", [container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t},\n\t}\n}\n\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n verified_keys := [trusted_key | trusted_key = data.postureControlInputs.trustedCosignPublicKeys[_]; cosign.verify(container.image, trusted_key)]\n count(verified_keys) == 0\n\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [i])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"signature not verified for image: %v\", [container.image]),\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t},\n\t}\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.trustedCosignPublicKeys", + "name": "Trusted Cosign public keys", + 
"description": "A list of trusted Cosign public keys that are used for validating container image signatures." + } + ], + "description": "Verifies the signature of each image with given public keys", + "remediation": "Replace the image with an image that is signed correctly", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-that-the-kubeconfig-kubelet.conf-file-ownership-is-set-to-root-root", + "attributes": { + "hostSensorRule": "true" + }, + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubelet_info(obj)\n\n\tfile_obj_path := [\"data\", \"kubeConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeletInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "controlConfigInputs": null, + "description": "Ensure that the `kubelet.conf` file ownership is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. 
For example,\n\n \n```\nchown root:root /etc/kubernetes/kubelet.conf\n\n```", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "internal-networking", + "attributes": { + "m$K8sThreatMatrix": "Lateral Movement::Container internal networking, Discovery::Network mapping" + }, + "creationTime": "", + "rule": "package armo_builtins\n\n# input: network policies\n# apiversion: networking.k8s.io/v1\n# fails if no network policies are defined in a certain namespace\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\tpolicy_names := [policy.metadata.namespace | policy = input[_]; policy.kind == \"NetworkPolicy\"]\n\tnot list_contains(policy_names, namespace.metadata.name)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\nlist_contains(list, element) {\n some i\n list[i] == element\n}", + "resourceEnumerator": "package armo_builtins\n\n# input: network policies + namespaces\n# apiversion: networking.k8s.io/v1\n# returns all namespaces\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Namespace" + ] + }, + { + "apiGroups": [ + "networking.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "NetworkPolicy" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "lists namespaces in which no network policies are defined", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "rule-manual", + "attributes": { + "hostSensorRule": false, + "imageScanRelated": false, + "actionRequired": "manual review" + }, + "creationTime": "", + "rule": "\npackage armo_builtins\n\ndeny[msga] {\n\n\tmsga := {\n \t\"alertMessage\": \"Please check it manually.\",\n \t\"packagename\": \"armo_builtins\",\n \t\"alertScore\": 2,\n \t\"failedPaths\": [],\n \t\"fixPaths\":[],\n \"fixCommand\": \"\",\n \t\"alertObject\": {\n\t\t\t\"k8sObject\": []\n }\n }\n}", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "ruleDependencies": null, + "controlConfigInputs": null, + "description": "Due to the difficulty of performing a good check, the review is left manual to the user.", + "remediation": "", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "container-image-repository", + "attributes": { + "useUntilKubescapeVersion": "v2.3.8", + "m$K8sThreatMatrix": "Collection::Images from private registry" + }, + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.if\n\nuntrusted_image_repo[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\timage := container.image\n\tnot 
image_in_allowed_list(image)\n\tpath := sprintf(\"spec.containers[%v].image\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\nuntrusted_image_repo[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\timage := container.image\n not image_in_allowed_list(image)\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nuntrusted_image_repo[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\timage := container.image\n not image_in_allowed_list(image)\n\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"alertScore\": 2,\n \"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": [path],\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# image_in_allowed_list - rule to check if an image complies with imageRepositoryAllowList.\nimage_in_allowed_list(image){\n\n\t# see default-config-inputs.json for list values\n\tallowedlist := data.postureControlInputs.imageRepositoryAllowList\n\tregistry := allowedlist[_]\n\n\tregex.match(regexify(registry), docker_host_wrapper(image))\n}\n\n\n# docker_host_wrapper - wrap an image without a host with a docker hub host 'docker.io'.\n# An image that doesn't contain '/' is assumed to not having a host and therefore associated with docker hub.\ndocker_host_wrapper(image) := result if {\n\tnot contains(image, \"/\")\n\tresult := sprintf(\"docker.io/%s\", [image])\n} else := image\n\n\n# regexify - returns a registry regex to be searched only for the image host.\nregexify(registry) := result {\n\tendswith(registry, \"/\")\n\tresult = sprintf(\"^%s.*$\", [registry])\n} else := sprintf(\"^%s\\/.*$\", [registry])\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": [ + { + "path": "settings.postureControlInputs.imageRepositoryAllowList", + "name": "Allowed image repositories", + "description": "Kubescape checks that all container images are from repositories explicitly allowed in this list." 
+ } + ], + "description": "Fails if image is not from allowed repository", + "remediation": "", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "rule-can-approve-cert-signing-request", + "attributes": { + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133" + }, + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can approve certificate signing requests\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"update\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"certificates.k8s.io\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"certificatesigningrequests/approval\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can approve certificate signing requests\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role", + "ClusterRole", + "ClusterRoleBinding", + "RoleBinding" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "determines which users can approve certificate signing requests", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "rule-cni-enabled-aks", + "creationTime": "", + "rule": "package armo_builtins\n\n# fails if cni is not enabled like defined in:\n# https://learn.microsoft.com/en-us/azure/aks/use-network-policies#create-an-aks-cluster-and-enable-network-policy\ndeny[msga] {\n\tcluster_describe := input[_]\n\tcluster_describe.apiVersion == \"management.azure.com/v1\"\n\tcluster_describe.kind == \"ClusterDescribe\"\n\tcluster_describe.metadata.provider == \"aks\"\n\tproperties := 
cluster_describe.data.properties\n\n\tnot cni_enabled_aks(properties)\n\n\tmsga := {\n\t\t\"alertMessage\": \"cni is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": cluster_describe,\n\t\t},\n\t}\n}\n\ncni_enabled_aks(properties) {\n\tproperties.networkProfile.networkPlugin == \"azure\"\n\tproperties.networkProfile.networkPolicy == \"azure\"\n}\n\ncni_enabled_aks(properties) {\n\tproperties.networkProfile.networkPlugin == \"azure\"\n\tproperties.networkProfile.networkPolicy == \"calico\"\n}\n\ncni_enabled_aks(properties) {\n\tproperties.networkProfile.networkPlugin == \"kubenet\"\n\tproperties.networkProfile.networkPolicy == \"calico\"\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": null, + "dynamicMatch": [ + { + "apiGroups": [ + "management.azure.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ClusterDescribe" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": [ + "AKS" + ] + }, + { + "guid": "", + "name": "exec-into-container-v1", + "attributes": { + "resourcesAggregator": "subject-role-rolebinding", + "useFromKubescapeVersion": "v1.0.133", + "m$K8sThreatMatrix": "Privilege Escalation::Exec into container" + }, + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# input: regoResponseVectorObject\n# returns subjects that can exec into container\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) > 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) > 0\n\n\tresources := [\"pods/exec\", \"pods/*\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) > 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can exec into containers\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"reviewPaths\": finalpath,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == 
subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "RoleBinding", + "ClusterRoleBinding", + "Role", + "ClusterRole" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "determines which users have permissions to exec into pods", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "kubelet-authorization-mode-alwaysAllow", + "attributes": { + "hostSensorRule": "true" + }, + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# CIS 4.2.2 https://workbench.cisecurity.org/sections/1126668/recommendations/1838640\n\n# has cli\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--authorization-mode\")\n\tcontains(command, \"--authorization-mode=AlwaysAllow\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests are enabled\",\n\t\t\"alertScore\": 10,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n# has config\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--authorization-mode\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tyamlConfig.authorization.mode == \"AlwaysAllow\"\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests are enabled\",\n\t\t\"alertScore\": 10,\n\t\t\"reviewPaths\": [\"authorization.mode\"],\n\t\t\"failedPaths\": [\"authorization.mode\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n# has no config and cli\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--authorization-mode\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests are enabled\",\n\t\t\"alertScore\": 10,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--authorization-mode\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 6,\n\t\t\"reviewPaths\": [],\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": 
obj.data\n\t\t}}\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [], + "apiVersions": [], + "resources": [] + } + ], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeletInfo" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Do not allow all requests. Enable explicit authorization.", + "remediation": "Change authorization mode to Webhook.", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "review-roles-with-aws-iam-authenticator", + "creationTime": "", + "rule": "package armo_builtins\n\ndeny[msga] {\n resource := input[_]\n\tresource.kind == \"Role\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"For namespace '%v', make sure Kubernetes RBAC users are managed with AWS IAM Authenticator for Kubernetes or Upgrade to AWS CLI v1.16.156\", [resource.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": resource\n\t\t}\n\t}\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "rbac.authorization.k8s.io" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Role" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure_nodeinstancerole_has_right_permissions_for_ecr", + "attributes": { + "useFromKubescapeVersion": "v2.2.5" + }, + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.every\n\n# deny if a NodeInstanceRole has a policies not compliant with the following:\n# {\n# \"Version\": \"YYY-MM-DD\",\n# \"Statement\": [\n# {\n# \"Effect\": \"Allow\",\n# \"Action\": [\n# \"ecr:BatchCheckLayerAvailability\",\n# \"ecr:BatchGetImage\",\n# \"ecr:GetDownloadUrlForLayer\",\n# \"ecr:GetAuthorizationToken\"\n# ],\n# \"Resource\": \"*\"\n# }\n# ]\n# }\ndeny[msga] {\n\tresources := input[_]\n\tresources.kind == \"ListEntitiesForPolicies\"\n\tresources.metadata.provider == \"eks\"\n\n\trole_policies := resources.data.rolesPolicies\n\tnode_instance_role_policies := [key | role_policies[key]; contains(role_policies[key].PolicyRoles[_].RoleName, \"NodeInstance\")]\n\n\t# check if the policy satisfies the minimum prerequisites\n\tpolicies := input[_]\n\tpolicies.kind == \"PolicyVersion\"\n\tpolicies.metadata.provider == \"eks\"\n\n\t# node_instance_role_policies := [\"arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly\"]\n\tsome policy in node_instance_role_policies\n\t\tsome stat, _ in policies.data.policiesDocuments[policy].Statement\n\t\t\tnot isPolicyCompliant(policies, policy, stat)\n\n\tmsga := {\n\t\t\"alertMessage\": \"Cluster has none read-only access to ECR; Review AWS ECS worker node IAM role (NodeInstanceRole) IAM Policy Permissions to verify that they are set and the minimum required level.\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": resources\n\t\t}\n\t}\n}\n\nisPolicyCompliant(policies, policy, stat) {\n\t# allowed action provided by the CIS\n\tallowed_actions := 
[\"ecr:BatchCheckLayerAvailability\",\n \t \"ecr:BatchGetImage\",\n \t \"ecr:GetAuthorizationToken\",\n \t \"ecr:GetDownloadUrlForLayer\"]\n\tpolicies.data.policiesDocuments[policy].Statement[stat].Effect == \"Allow\"\n\tpolicies.data.policiesDocuments[policy].Statement[stat].Resource == \"*\"\n\tsorted_actions := sort(policies.data.policiesDocuments[policy].Statement[stat].Action)\n\tsorted_actions == allowed_actions\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": null, + "dynamicMatch": [ + { + "apiGroups": [ + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "ListEntitiesForPolicies" + ] + }, + { + "apiGroups": [ + "eks.amazonaws.com" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "PolicyVersion" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": [ + "EKS" + ] + }, + { + "guid": "", + "name": "etcd-encryption-native", + "attributes": { + "resourcesAggregator": "apiserver-pod", + "useFromKubescapeVersion": "v1.0.133" + }, + "creationTime": "", + "rule": "package armo_builtins\n\nimport data.cautils\n\n# Check if encryption in etcd is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n\tcmd := apiserverpod.spec.containers[0].command\n\tenc_command := [command | command := cmd[_]; contains(command, \"--encryption-provider-config=\")]\n\tcount(enc_command) < 1\n\tfixpath := {\"path\":sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]), \"value\": \"--encryption-provider-config=YOUR_VALUE\"}\n\n\tmsga := {\n\t\t\"alertMessage\": \"etcd encryption is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [fixpath],\n\t\t\"alertObject\": {\"k8sApiObjects\": [apiserverpod]},\n\t}\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "alert-fargate-not-in-use", + "creationTime": "", + "rule": "package armo_builtins\n\n\n\n\n# deny if fargate is not being used in any of the nodes in cluster.\n# a Node is identified as using fargate if it's name starts with 'fargate'.\ndeny[msga] {\n\n\n # get all nodes\n nodes := [node | node = input[_]; node.kind == \"Node\"]\n count(nodes) > 0\n\n # get all nodes without fargate\n nodes_not_fargate := [node | node = nodes[_]; not startswith(node.metadata.name, \"fargate\")]\n\n # if count of all nodes equals to count of nodes_not_fargate it means fargate is not being used.\n count(nodes) == count(nodes_not_fargate)\n\n\t# prepare message data.\n\talert_message := \"Consider Fargate for running untrusted workloads\"\n\n\tmsga := {\n\t\t\"alertMessage\": alert_message,\n\t\t\"packagename\": \"armo_builtins\",\n\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": nodes_not_fargate\n\t\t}\n\t}\n}", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Node" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "", + "remediation": "", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": 
null + }, + { + "guid": "", + "name": "if-proxy-kubeconfig-file-exists-ensure-permissions-are-set-to-600-or-more-restrictive", + "attributes": { + "hostSensorRule": "true" + }, + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubproxy_info(obj)\n\n\tfile_obj_path := [\"data\", \"kubeConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual permissions test\n\tallowed_perms := 384 # == 0o600\n\tnot cautils.unix_permissions_allow(allowed_perms, file.permissions)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"the permissions of %s are too permissive. maximum allowed: %o. actual: %o\", [file.path, allowed_perms, file.permissions])\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chmod %o %s\", [allowed_perms, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\nis_kubproxy_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeProxyInfo\"\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeProxyInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "controlConfigInputs": null, + "description": "If `kube-proxy` is running, and if it is using a file-based kubeconfig file, ensure that the proxy kubeconfig file has permissions of `600` or more restrictive.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. 
For example,\n\n \n```\nchmod 600 \n\n```", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-that-the-api-server-etcd-cafile-argument-is-set-as-appropriate", + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": \"API server is not configured to use SSL Certificate Authority file for etcd\",\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--etcd-cafile\")\n\tresult := {\n\t\t\"failed_paths\": [],\n\t\t\"fix_paths\": [{\n\t\t\t\"path\": sprintf(\"spec.containers[0].command[%d]\", [count(cmd)]),\n\t\t\t\"value\": \"--etcd-cafile=\",\n\t\t}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "etcd should be configured to make use of TLS encryption for client connections.", + "remediation": "Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. 
Then, edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the master node and set the etcd certificate authority file parameter.\n\n \n```\n--etcd-cafile=\n\n```\n\n#### Impact Statement\nTLS and client certificate authentication must be configured for etcd.\n\n#### Default Value\nBy default, `--etcd-cafile` is not set.", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "if-proxy-kubeconfig-file-exists-ensure-ownership-is-set-to-root-root", + "attributes": { + "hostSensorRule": "true" + }, + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\nimport data.cautils\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\tis_kubproxy_info(obj)\n\n\tfile_obj_path := [\"data\", \"kubeConfigFile\"]\n\tfile := object.get(obj, file_obj_path, false)\n\n\t# Actual ownership check\n\tallowed_user := \"root\"\n\tallowed_group := \"root\"\n\tnot allowed_ownership(file.ownership, allowed_user, allowed_group)\n\n\t# Build the message\n\t# filter out irrelevant host-sensor data\n\tobj_filtered := json.filter(obj, [\n\t\tconcat(\"/\", file_obj_path),\n\t\t\"apiVersion\",\n\t\t\"kind\",\n\t\t\"metadata\",\n\t])\n\n\talert := sprintf(\"%s is not owned by %s:%s (actual owners are %s:%s)\", [\n\t\tfile.path,\n\t\tallowed_user,\n\t\tallowed_group,\n\t\tfile.ownership.username,\n\t\tfile.ownership.groupname,\n\t])\n\n\tmsg := {\n\t\t\"alertMessage\": alert,\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": sprintf(\"chown %s:%s %s\", [allowed_user, allowed_group, file.path]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\t}\n}\n\n\nis_kubproxy_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"KubeProxyInfo\"\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.error # Do not fail if ownership is not found\n}\n\nallowed_ownership(ownership, user, group) {\n\townership.username == user\n\townership.groupname == group\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "KubeProxyInfo" + ] + } + ], + "ruleDependencies": [ + { + "packageName": "cautils" + } + ], + "controlConfigInputs": null, + "description": "If `kube-proxy` is running, ensure that the file ownership of its kubeconfig file is set to `root:root`.", + "remediation": "Run the below command (based on the file location on your system) on the each worker node. 
For example,\n\n \n```\nchown root:root \n\n```", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "set-procmount-default", + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.if\n\n# Fails if container does not define the \"procMount\" parameter as \"Default\"\ndeny[msga] {\n\t# checks at first if we the procMountType feature gate is enabled on the api-server\n\tobj := input[_]\n\tis_control_plane_info(obj)\n\tis_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n\t# checks if procMount paramenter has the right value in containers\n\tpod := input[_]\n\tpod.kind = \"Pod\"\n\n\t# retrieve container list\n\tcontainer := pod.spec.containers[i]\n\tnot procMountSetProperly(container.securityContext)\n\n\tfixPaths = [{\"path\": sprintf(\"spec.containers[%d].securityContext.procMount\", [i]), \"value\": \"Default\"}]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [pod]},\n\t}\n}\n\ndeny[msga] {\n\t# checks at first if we the procMountType feature gate is enabled on the api-server\n\tobj := input[_]\n\tis_control_plane_info(obj)\n\tis_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n\t# checks if we are managing the right workload kind\n\twl := input[_]\n\tmanifest_kind := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"}\n\tmanifest_kind[wl.kind]\n\n\t# retrieve container list\n\tcontainer := wl.spec.template.spec.containers[i]\n\tnot procMountSetProperly(container.securityContext)\n\n\tfixPaths = [{\"path\": sprintf(\"spec.template.spec.containers[%d].securityContext.procMount\", [i]), \"value\": \"Default\"}]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Workload: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [wl]},\n\t}\n}\n\ndeny[msga] {\n\t# checks at first if we the procMountType feature gate is enabled on the api-server\n\tobj := input[_]\n\tis_control_plane_info(obj)\n\tis_proc_mount_type_enabled(obj.data.APIServerInfo.cmdLine)\n\n\t# checks if we are managing the right workload kind\n\tcj := input[_]\n\tcj.kind = \"CronJob\"\n\n\t# retrieve container list\n\tcontainer := cj.spec.jobTemplate.spec.template.spec.containers[i]\n\tnot procMountSetProperly(container.securityContext)\n\n\tfixPaths = [{\"path\": sprintf(\"spec.jobTemplate.spec.template.spec.containers[%d].securityContext.procMount\", [i]), \"value\": \"Default\"}]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"CronJob: %v has containers that do not set 'securityContext.procMount' to 'Default'\", [cj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPaths,\n\t\t\"alertObject\": {\"k8sApiObjects\": [cj]},\n\t}\n}\n\n# check if we are managing ControlPlaneInfo\nis_control_plane_info(obj) if {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"ControlPlaneInfo\"\n}\n\n# check if ProcMountType feature-gate is enabled\nis_proc_mount_type_enabled(command) if {\n\tcontains(command, \"--feature-gates=\")\n\targs := regex.split(` +`, command)\n\tsome i\n\tregex.match(`ProcMountType=true`, args[i])\n}\n\n# procMountSetProperly checks if 
procMount has value of \"Default\".\nprocMountSetProperly(securityContext) if {\n\tsecurityContext.procMount == \"Default\"\n} else := false\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "ControlPlaneInfo" + ] + }, + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + }, + { + "apiGroups": [ + "apps" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Deployment", + "ReplicaSet", + "DaemonSet", + "StatefulSet" + ] + }, + { + "apiGroups": [ + "batch" + ], + "apiVersions": [ + "*" + ], + "resources": [ + "Job", + "CronJob" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Fails if container does not define securityContext.procMount to Default.", + "remediation": "Set securityContext.procMount to Default", + "ruleQuery": "armo_builtins", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-that-the-api-server-audit-log-maxbackup-argument-is-set-to-10-or-as-appropriate", + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tresult = invalid_flag(obj.spec.containers[0].command)\n\tmsg := {\n\t\t\"alertMessage\": result.alert,\n\t\t\"alertScore\": 2,\n\t\t\"reviewPaths\": result.failed_paths,\n\t\t\"failedPaths\": result.failed_paths,\n\t\t\"fixPaths\": result.fix_paths,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n\n# Assume flag set only once\ninvalid_flag(cmd) = result {\n\tcontains(cmd[i], \"--audit-log-maxbackup\")\n\tresult = {\n\t\t\"alert\": \"Please validate that the audit log max backup is set to an appropriate value\",\n\t\t\"failed_paths\": [sprintf(\"spec.containers[0].command[%v]\", [i])],\n\t\t\"fix_paths\": [],\n\t}\n}\n\ninvalid_flag(cmd) = result {\n\tfull_cmd = concat(\" \", cmd)\n\tnot contains(full_cmd, \"--audit-log-maxbackup\")\n\tpath = sprintf(\"spec.containers[0].command[%v]\", [count(cmd)])\n\tresult = {\n\t\t\"alert\": \"Audit log max backup is not set\",\n\t\t\"failed_paths\": [path],\n\t\t\"fix_paths\": [{\"path\": path, \"value\": \"--audit-log-maxbackup=YOUR_VALUE\"}],\n\t}\n}\n", + "resourceEnumerator": "package armo_builtins\n\ndeny[msg] {\n\tobj = input[_]\n\tis_api_server(obj)\n\tmsg := {\"alertObject\": {\"k8sApiObjects\": [obj]}}\n}\n\nis_api_server(obj) {\n\tobj.apiVersion == \"v1\"\n\tobj.kind == \"Pod\"\n\tobj.metadata.namespace == \"kube-system\"\n\tcount(obj.spec.containers) == 1\n\tcount(obj.spec.containers[0].command) > 0\n\tendswith(obj.spec.containers[0].command[0], \"kube-apiserver\")\n}\n", + "ruleLanguage": "Rego", + "match": [ + { + "apiGroups": [ + "" + ], + "apiVersions": [ + "v1" + ], + "resources": [ + "Pod" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "Retain 10 or an appropriate number of old log files.", + "remediation": "Edit the API server pod specification file `/etc/kubernetes/manifests/kube-apiserver.yaml` on the Control Plane node and set the `--audit-log-maxbackup` parameter to 10 or to an appropriate value.\n\n 
\n```\n--audit-log-maxbackup=10\n\n```\n\n#### Impact Statement\nNone\n\n#### Default Value\nBy default, auditing is not enabled.", + "ruleQuery": "", + "relevantCloudProviders": null + }, + { + "guid": "", + "name": "ensure-network-policy-is-enabled-eks", + "attributes": { + "hostSensorRule": "true" + }, + "creationTime": "", + "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# EKS supports Calico and Cilium add-ons, both supports Network Policy.\n# Deny if at least on of them is not in the list of CNINames.\n\ndeny[msg] {\n\t# Filter out irrelevent resources\n\tobj = input[_]\n\n is_CNIInfos(obj)\n\n\tnot \"Calico\" in obj.data.CNINames\n\tnot \"Cilium\" in obj.data.CNINames\n\n\t# filter out irrelevant host-sensor data\n obj_filtered := json.filter(obj, [\"apiVersion\", \"kind\", \"metadata\", \"data/CNINames\"])\n\n msg := {\n\t\t\"alertMessage\": \"CNI doesn't support Network Policies.\",\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"\",\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": obj_filtered},\n\n\t}\n}\n\nis_CNIInfos(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"CNIInfo\"\n}\n", + "resourceEnumerator": "", + "ruleLanguage": "Rego", + "match": [], + "dynamicMatch": [ + { + "apiGroups": [ + "hostdata.kubescape.cloud" + ], + "apiVersions": [ + "v1beta0" + ], + "resources": [ + "CNIInfo" + ] + } + ], + "ruleDependencies": [], + "controlConfigInputs": null, + "description": "", + "remediation": "", + "ruleQuery": "", + "relevantCloudProviders": [ + "EKS" + ] } ], "SystemPostureExceptionPolicies": [ { "guid": "", - "name": "exclude-minikube-kube-system-resources-1", + "name": "exclude-gke-kube-system-resources-1", "attributes": { "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -19412,12 +35807,38 @@ }, { "guid": "", - "name": "exclude-minikube-kube-system-resources-2", + "name": "exclude-gke-kube-system-resources-2", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "name": "kube-proxy-[A-Za-z0-9-]+", + "kind": "Pod", + "namespace": "kube-system" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "C-.*" + } + ] + }, + { + "guid": "", + "name": "exclude-gke-kube-system-resources-3", "attributes": { "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -19440,12 +35861,11 @@ }, { "guid": "", - "name": "exclude-minikube-kube-system-resources-3", + "name": "exclude-gke-kube-system-resources-4", "attributes": { "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -19453,9 +35873,9 @@ { "designatorType": "Attributes", "attributes": { - "kind": "Pod", + "kind": "DaemonSet", "namespace": "kube-system", - "name": "kube-proxy-.*" + "name": "metadata-proxy-v[0-9.]+" } } ], @@ -19468,12 +35888,11 @@ }, { "guid": "", - "name": "exclude-minikube-kube-system-resources-4", + "name": "exclude-gke-kube-system-resources-5", "attributes": { "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -19481,9 +35900,9 @@ { "designatorType": "Attributes", "attributes": { - "kind": "Deployment", + "kind": "DaemonSet", "namespace": 
"kube-system", - "name": "coredns" + "name": "node-local-dns" } } ], @@ -19496,12 +35915,416 @@ }, { "guid": "", - "name": "exclude-minikube-kube-system-resources-5", + "name": "exclude-gke-kube-system-resources-6", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "namespace": "kube-system", + "name": "gke-metrics-agent.*", + "kind": "DaemonSet" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "C-.*" + } + ] + }, + { + "guid": "", + "name": "exclude-gke-kube-system-resources-7", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "pdcsi-node-windows" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "C-.*" + } + ] + }, + { + "guid": "", + "name": "exclude-gke-kube-system-resources-8", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "anetd" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "C-.*" + } + ] + }, + { + "guid": "", + "name": "exclude-gke-kube-system-resources-9", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "netd" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "C-.*" + } + ] + }, + { + "guid": "", + "name": "exclude-gke-kube-system-resources-10", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "fluentbit-gke-big" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "C-.*" + } + ] + }, + { + "guid": "", + "name": "exclude-gke-kube-system-resources-11", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "name": "fluentbit-gke-small", + "kind": "DaemonSet", + "namespace": "kube-system" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "C-.*" + } + ] + }, + { + "guid": "", + "name": "exclude-gke-kube-system-resources-12", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "fluentbit-gke-max" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "C-.*" + } + ] + }, + { + "guid": "", + "name": "exclude-gke-kube-system-resources-13", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + 
"namespace": "kube-system", + "name": "fluentbit-gke.*" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "C-.*" + } + ] + }, + { + "guid": "", + "name": "exclude-gke-kube-system-resources-14", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "nccl-fastsocket-installer" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "C-.*" + } + ] + }, + { + "guid": "", + "name": "exclude-gke-kube-system-resources-15", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "name": "filestore-node", + "kind": "DaemonSet", + "namespace": "kube-system" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "C-.*" + } + ] + }, + { + "guid": "", + "name": "exclude-gke-kube-system-resources-16", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "pdcsi-node" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "C-.*" + } + ] + }, + { + "guid": "", + "name": "exclude-gke-kube-system-resources-17", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "ip-masq-agent" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "C-.*" + } + ] + }, + { + "guid": "", + "name": "exclude-gke-kube-system-resources-18", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "name": "anetd-win", + "kind": "DaemonSet", + "namespace": "kube-system" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "C-.*" + } + ] + }, + { + "guid": "", + "name": "exclude-gke-kube-system-resources-19", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "namespace": "kube-system", + "name": "gke-metadata-server", + "kind": "DaemonSet" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "C-.*" + } + ] + }, + { + "guid": "", + "name": "exclude-gke-kube-system-resources-20", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "gke-metrics-agent-windows" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "C-.*" + } + ] + }, + { + "guid": "", + "name": "exclude-gke-kube-system-resources-21", "attributes": { "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -19524,12 +36347,11 @@ }, { "guid": "", - "name": 
"exclude-minikube-kube-system-resources-6", + "name": "exclude-gke-kube-system-resources-22", "attributes": { "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -19537,8 +36359,9 @@ { "designatorType": "Attributes", "attributes": { - "kind": "Namespace", - "name": "kube-system" + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "nvidia-gpu-device-plugin" } } ], @@ -19551,12 +36374,416 @@ }, { "guid": "", - "name": "exclude-minikube-kube-system-resources-7", + "name": "exclude-gke-kube-system-resources-23", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "nvidia-gpu-device-plugin-large" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "C-.*" + } + ] + }, + { + "guid": "", + "name": "exclude-gke-kube-system-resources-24", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "nvidia-gpu-device-plugin-medium" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "C-.*" + } + ] + }, + { + "guid": "", + "name": "exclude-gke-kube-system-resources-25", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "image-package-extractor" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "C-.*" + } + ] + }, + { + "guid": "", + "name": "exclude-gke-kube-system-resources-26", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "name": "image-package-extractor-cleanup", + "kind": "CronJob", + "namespace": "kube-system" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "C-.*" + } + ] + }, + { + "guid": "", + "name": "exclude-gke-kube-system-resources-27", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "name": "nvidia-gpu-device-plugin-small", + "kind": "DaemonSet", + "namespace": "kube-system" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "C-.*" + } + ] + }, + { + "guid": "", + "name": "exclude-gke-kube-system-resources-28", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Service", + "namespace": "kube-system", + "name": "metrics-server" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "C-.*" + } + ] + }, + { + "guid": "", + "name": "exclude-gke-kube-system-resources-29", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "name": "kube-dns", + 
"kind": "Deployment", + "namespace": "kube-system" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "C-.*" + } + ] + }, + { + "guid": "", + "name": "exclude-gke-kube-system-resources-30", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "namespace": "kube-system", + "name": "egress-nat-controller" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "C-.*" + } + ] + }, + { + "guid": "", + "name": "exclude-gke-kube-system-resources-31", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "namespace": "kube-system", + "name": "event-exporter-gke" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "C-.*" + } + ] + }, + { + "guid": "", + "name": "exclude-gke-kube-system-resources-32", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "namespace": "kube-system", + "name": "antrea-controller" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "C-.*" + } + ] + }, + { + "guid": "", + "name": "exclude-gke-kube-system-resources-33", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "namespace": "kube-system", + "name": "antrea-controller-horizontal-autoscaler" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "C-.*" + } + ] + }, + { + "guid": "", + "name": "exclude-gke-kube-system-resources-34", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "namespace": "kube-system", + "name": "kube-dns-autoscaler" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "C-.*" + } + ] + }, + { + "guid": "", + "name": "exclude-gke-kube-system-resources-35", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "namespace": "kube-system", + "name": "metrics-server-v[0-9.]+" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "C-.*" + } + ] + }, + { + "guid": "", + "name": "exclude-gke-kube-system-resources-36", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "namespace": "kube-system", + "name": "konnectivity-agent-autoscaler" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "C-.*" + } + ] + }, + { + "guid": "", + "name": "exclude-gke-kube-system-resources-37", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + 
"designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "fluentd-elasticsearch" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "C-.*" + } + ] + }, + { + "guid": "", + "name": "exclude-gke-kube-system-resources-38", "attributes": { "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -19565,8 +36792,8 @@ "designatorType": "Attributes", "attributes": { "namespace": "kube-system", - "name": "storage-provisioner", - "kind": "Pod" + "name": "konnectivity-agent", + "kind": "Deployment" } } ], @@ -19579,12 +36806,11 @@ }, { "guid": "", - "name": "exclude-minikube-kube-system-resources-8", + "name": "exclude-gke-kube-system-resources-39", "attributes": { "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -19592,9 +36818,9 @@ { "designatorType": "Attributes", "attributes": { + "kind": "Deployment", "namespace": "kube-system", - "name": "kube-scheduler-.*", - "kind": "Pod" + "name": "l7-default-backend" } } ], @@ -19607,66 +36833,11 @@ }, { "guid": "", - "name": "exclude-minikube-kube-system-resources-9", + "name": "exclude-gke-kube-public-resources", "attributes": { "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Pod", - "namespace": "kube-system", - "name": "kube-controller-manager-.*" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "", - "controlID": "C-.*" - } - ] - }, - { - "guid": "", - "name": "exclude-minikube-kube-public-resources-1", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Namespace", - "name": "kube-public" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-minikube-kube-public-resources-2", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -19688,38 +36859,11 @@ }, { "guid": "", - "name": "exclude-minikube-kube-node-lease-resources-1", + "name": "exclude-gke-kube-node-lease-resources", "attributes": { "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Namespace", - "name": "kube-node-lease" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-minikube-kube-node-lease-resources-2", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -19746,7 +36890,6 @@ "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -19756,7 +36899,7 @@ "attributes": { "kind": "ServiceAccount", "namespace": "kube-system", - "name": "default" + "name": "konnectivity-agent-cpha" } } ], @@ -19773,7 +36916,6 @@ "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -19783,7 +36925,7 @@ "attributes": { "kind": "ServiceAccount", "namespace": "kube-system", - "name": 
"certificate-controller" + "name": "metrics-server" } } ], @@ -19800,7 +36942,6 @@ "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -19808,9 +36949,9 @@ { "designatorType": "Attributes", "attributes": { - "kind": "ServiceAccount", "namespace": "kube-system", - "name": "bootstrap-signer" + "name": "endpointslicemirroring-controller", + "kind": "ServiceAccount" } } ], @@ -19827,250 +36968,6 @@ "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "clusterrole-aggregation-controller" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-kube-system-service-accounts-5", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "namespace": "kube-system", - "name": "root-ca-cert-publisher", - "kind": "ServiceAccount" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-kube-system-service-accounts-6", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "pvc-protection-controller" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-kube-system-service-accounts-7", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "statefulset-controller" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-kube-system-service-accounts-8", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "ttl-controller" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-kube-system-service-accounts-9", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "coredns" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-kube-system-service-accounts-10", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "service-account-controller" - } - } - ], - "posturePolicies": [ - { - 
"frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-kube-system-service-accounts-11", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "horizontal-pod-autoscaler" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-kube-system-service-accounts-12", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "expand-controller" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-kube-system-service-accounts-13", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -20092,93 +36989,11 @@ }, { "guid": "", - "name": "exclude-kube-system-service-accounts-14", + "name": "exclude-kube-system-service-accounts-5", "attributes": { "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "name": "replication-controller", - "kind": "ServiceAccount", - "namespace": "kube-system" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-kube-system-service-accounts-16", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "resourcequota-controller" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-kube-system-service-accounts-17", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "endpoint-controller" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-kube-system-service-accounts-18", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -20200,12 +37015,11 @@ }, { "guid": "", - "name": "exclude-kube-system-service-accounts-19", + "name": "exclude-kube-system-service-accounts-6", "attributes": { "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -20213,196 +37027,7 @@ { "designatorType": "Attributes", "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "endpointslicemirroring-controller" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-kube-system-service-accounts-20", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - 
"alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "ephemeral-volume-controller" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-kube-system-service-accounts-21", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "node-controller" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-kube-system-service-accounts-22", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "namespace": "kube-system", - "name": "pv-protection-controller", - "kind": "ServiceAccount" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-kube-system-service-accounts-23", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "job-controller" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-kube-system-service-accounts-24", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "daemon-set-controller" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-kube-system-service-accounts-25", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "deployment-controller" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-kube-system-service-accounts-26", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "name": "generic-garbage-collector", + "name": "service-account-controller", "kind": "ServiceAccount", "namespace": "kube-system" } @@ -20416,120 +37041,11 @@ }, { "guid": "", - "name": "exclude-kube-system-service-accounts-27", + "name": "exclude-kube-system-service-accounts-7", "attributes": { "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "namespace": "kube-system", - "name": "persistent-volume-binder", - "kind": "ServiceAccount" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": 
"exclude-kube-system-service-accounts-28", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "storage-provisioner" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-kube-system-service-accounts-29", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "token-cleaner" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-kube-system-service-accounts-30", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "name": "kube-proxy", - "kind": "ServiceAccount", - "namespace": "kube-system" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-kube-system-service-accounts-31", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -20551,12 +37067,11 @@ }, { "guid": "", - "name": "exclude-kube-system-service-accounts-32", + "name": "exclude-kube-system-service-accounts-8", "attributes": { "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -20566,7 +37081,7 @@ "attributes": { "kind": "ServiceAccount", "namespace": "kube-system", - "name": "cronjob-controller" + "name": "clusterrole-aggregation-controller" } } ], @@ -20578,12 +37093,11 @@ }, { "guid": "", - "name": "exclude-kube-system-service-accounts-33", + "name": "exclude-kube-system-service-accounts-9", "attributes": { "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -20593,7 +37107,7 @@ "attributes": { "kind": "ServiceAccount", "namespace": "kube-system", - "name": "attachdetach-controller" + "name": "generic-garbage-collector" } } ], @@ -20605,12 +37119,11 @@ }, { "guid": "", - "name": "exclude-kube-system-service-accounts-34", + "name": "exclude-kube-system-service-accounts-10", "attributes": { "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -20620,7 +37133,7 @@ "attributes": { "kind": "ServiceAccount", "namespace": "kube-system", - "name": "service-controller" + "name": "certificate-controller" } } ], @@ -20632,12 +37145,11 @@ }, { "guid": "", - "name": "exclude-kube-system-service-accounts-35", + "name": "exclude-kube-system-service-accounts-11", "attributes": { "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -20645,9 +37157,9 @@ { "designatorType": "Attributes", "attributes": { - "kind": "ServiceAccount", "namespace": "kube-system", - "name": "disruption-controller" + "name": "daemon-set-controller", + "kind": "ServiceAccount" } } ], @@ -20659,12 +37171,453 @@ }, { "guid": "", - "name": "exclude-kube-system-service-accounts-36", + "name": 
"exclude-kube-system-service-accounts-12", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "cloud-provider" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-kube-system-service-accounts-13", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "ephemeral-volume-controller" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-kube-system-service-accounts-14", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "root-ca-cert-publisher" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-kube-system-service-accounts-16", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "bootstrap-signer" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-kube-system-service-accounts-18", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "expand-controller" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-kube-system-service-accounts-19", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "namespace": "kube-system", + "name": "disruption-controller", + "kind": "ServiceAccount" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-kube-system-service-accounts-20", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "namespace": "kube-system", + "name": "ttl-after-finished-controller", + "kind": "ServiceAccount" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-kube-system-service-accounts-21", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "job-controller" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-kube-system-service-accounts-22", + "attributes": { + 
"systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "pv-protection-controller" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-kube-system-service-accounts-23", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "persistent-volume-binder" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-kube-system-service-accounts-24", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "namespace": "kube-system", + "name": "pvc-protection-controller", + "kind": "ServiceAccount" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-kube-system-service-accounts-25", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "statefulset-controller" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-kube-system-service-accounts-26", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "deployment-controller" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-kube-system-service-accounts-27", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "node-controller" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-kube-system-service-accounts-28", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "name": "cronjob-controller", + "kind": "ServiceAccount", + "namespace": "kube-system" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-kube-system-service-accounts-29", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "namespace": "kube-system", + "name": "resourcequota-controller", + "kind": "ServiceAccount" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-kube-system-service-accounts-30", + "attributes": { + "systemException": true + }, + "policyType": 
"postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "name": "endpoint-controller", + "kind": "ServiceAccount", + "namespace": "kube-system" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-kube-system-service-accounts-31", "attributes": { "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -20686,12 +37639,11 @@ }, { "guid": "", - "name": "exclude-kube-system-service-accounts-37", + "name": "exclude-kube-system-service-accounts-32", "attributes": { "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -20701,7 +37653,683 @@ "attributes": { "kind": "ServiceAccount", "namespace": "kube-system", - "name": "ttl-after-finished-controller" + "name": "ttl-controller" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-kube-system-service-accounts-33", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "token-cleaner" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-kube-system-service-accounts-34", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "name": "kube-dns", + "kind": "ServiceAccount", + "namespace": "kube-system" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-kube-system-service-accounts-35", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "attachdetach-controller" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-kube-system-service-accounts-36", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "name": "kube-proxy", + "kind": "ServiceAccount", + "namespace": "kube-system" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-kube-system-service-accounts-37", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "namespace": "kube-system", + "name": "konnectivity-agent", + "kind": "ServiceAccount" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-kube-system-service-accounts-38", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "replication-controller" + } + } + ], + 
"posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-kube-system-service-accounts-39", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "name": "default", + "kind": "ServiceAccount", + "namespace": "kube-system" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-kube-system-service-accounts-40", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "name": "service-controller", + "kind": "ServiceAccount", + "namespace": "kube-system" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-kube-system-service-accounts-41", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "kube-dns-autoscaler" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-kube-system-service-accounts-42", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "netd" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-kube-system-service-accounts-43", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "metadata-proxy" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-kube-system-service-accounts-44", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "name": "antrea-controller", + "kind": "ServiceAccount", + "namespace": "kube-system" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-kube-system-service-accounts-45", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "namespace": "kube-system", + "name": "cilium", + "kind": "ServiceAccount" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-kube-system-service-accounts-46", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "node-local-dns" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-kube-system-service-accounts-47", + 
"attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "gke-metrics-agent" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-kube-system-service-accounts-48", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "egress-nat-controller" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-kube-system-service-accounts-49", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "antrea-agent" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-kube-system-service-accounts-50", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "event-exporter-sa" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-kube-system-service-accounts-51", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "antrea-cpha" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-kube-system-service-accounts-52", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "fluentbit-gke" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-kube-system-service-accounts-53", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "pdcsi-node-sa" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-kube-system-service-accounts-54", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "ip-masq-agent" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-kube-system-service-accounts-55", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" 
+ ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "filestorecsi-node-sa" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-kube-system-service-accounts-56", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "name": "gke-metadata-server", + "kind": "ServiceAccount", + "namespace": "kube-system" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-kube-system-users-and-groups-1", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "User", + "namespace": "kube-system", + "name": "system:vpa-recommender" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-kube-system-users-and-groups-2", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "User", + "namespace": "kube-system", + "name": "system:anet-operator" } } ], @@ -20718,7 +38346,6 @@ "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -20726,9 +38353,8 @@ { "designatorType": "Attributes", "attributes": { - "apiVersion": "rbac.authorization.k8s.io", - "name": "system:kube-scheduler", - "kind": "User" + "kind": "User", + "name": "system:clustermetrics" } } ], @@ -20745,7 +38371,6 @@ "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -20754,8 +38379,7 @@ "designatorType": "Attributes", "attributes": { "kind": "User", - "apiVersion": "rbac.authorization.k8s.io", - "name": "system:kube-controller-manager" + "name": "system:controller:glbc" } } ], @@ -20772,7 +38396,6 @@ "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -20780,7 +38403,231 @@ { "designatorType": "Attributes", "attributes": { - "apiVersion": "rbac.authorization.k8s.io", + "kind": "User", + "name": "system:l7-lb-controller" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-system-users-and-groups-4", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "User", + "name": "system:managed-certificate-controller" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-system-users-and-groups-5", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "User", + "name": "system:gke-common-webhooks" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-system-users-and-groups-6", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + 
"alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "User", + "name": "system:kube-scheduler" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-system-users-and-groups-8", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "User", + "name": "system:gcp-controller-manager" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-system-users-and-groups-9", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "name": "system:resource-tracker", + "kind": "User" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-system-users-and-groups-10", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "User", + "name": "system:storageversionmigrator" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-system-users-and-groups-11", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "User", + "name": "system:kube-controller-manager" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-system-users-and-groups-12", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "User", + "name": "system:kubestore-collector" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-system-users-and-groups-13", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { "name": "system:masters", "kind": "Group" } @@ -20794,12 +38641,11 @@ }, { "guid": "", - "name": "exclude-kubescape-prometheus-security-context", + "name": "exclude-system-resources-1", "attributes": { "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -20807,39 +38653,24 @@ { "designatorType": "Attributes", "attributes": { - "kind": "Deployment", - "name": "kubescape", - "namespace": "kubescape-prometheus" + "kind": "ValidatingWebhookConfiguration", + "name": "ca-validate-cfg" } } ], "posturePolicies": [ { - "frameworkName": "", - "controlID": "c-0055" - }, - { - "frameworkName": "", - "controlID": "c-0017" - }, - { - "frameworkName": "", - "controlID": "C-0210" - }, - { - "frameworkName": "", - "controlID": "C-0211" + "frameworkName": "" } ] }, { "guid": "", - "name": "exclude-kubescape-prometheus-deployment-allowed-registry", + "name": "exclude-system-resources-2", "attributes": { "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ 
-20847,31 +38678,785 @@ { "designatorType": "Attributes", "attributes": { - "kind": "Deployment", - "name": "kubescape", - "namespace": "kubescape-prometheus" + "kind": "ValidatingWebhookConfiguration", + "name": "flowcontrol-guardrails.config.common-webhooks.networking.gke.io" } } ], "posturePolicies": [ { - "frameworkName": "", - "controlID": "c-0001" - }, - { - "frameworkName": "", - "controlID": "c-0078" + "frameworkName": "" } ] }, { "guid": "", - "name": "exclude-kubescape-prometheus-deployment-ingress-and-egress", + "name": "exclude-system-resources-3", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ValidatingWebhookConfiguration", + "name": "validation-webhook.snapshot.storage.gke.io" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-system-resources-4", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ValidatingWebhookConfiguration", + "name": "gmp-operator.gmp-system.monitoring.googleapis.com" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-system-resources-5", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ValidatingWebhookConfiguration", + "name": "warden-validating.config.common-webhooks.networking.gke.io" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-system-resources-6", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ValidatingWebhookConfiguration", + "name": "nodelimit.config.common-webhooks.networking.gke.io" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-system-resources-7", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ValidatingWebhookConfiguration", + "name": "gkepolicy.config.common-webhooks.networking.gke.io" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-system-resources-8", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ValidatingWebhookConfiguration", + "name": "validation-webhook.snapshot.storage.k8s.io" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-system-resources-9", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "APIService", + "name": "v1beta1.metrics.k8s.io" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": 
"exclude-system-resources-10", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "MutatingWebhookConfiguration", + "name": "pod-ready.config.common-webhooks.networking.gke.io" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-system-resources-11", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "MutatingWebhookConfiguration", + "name": "ca-mutate-cfg" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-system-resources-12", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "MutatingWebhookConfiguration", + "name": "neg-annotation.config.common-webhooks.networking.gke.io" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-system-resources-13", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "name": "mutate-scheduler-profile.config.common-webhooks.networking.gke.io", + "kind": "MutatingWebhookConfiguration" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-system-resources-14", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "MutatingWebhookConfiguration", + "name": "sasecret-redacter.config.common-webhooks.networking.gke.io" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-system-resources-15", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "MutatingWebhookConfiguration", + "name": "workload-defaulter.config.common-webhooks.networking.gke.io" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-system-resources-16", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "MutatingWebhookConfiguration", + "name": "admissionwebhookcontroller.config.common-webhooks.networking.gke.io" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-system-resources-17", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "MutatingWebhookConfiguration", + "name": "gke-vpa-webhook-config" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-system-resources-18", + "attributes": { + "systemException": 
true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "MutatingWebhookConfiguration", + "name": "filestorecsi-mutation-webhook.storage.k8s.io" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-system-resources-19", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Namespace", + "name": "kube-system" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-system-resources-20", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Namespace", + "name": "gmp-public" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-system-resources-21", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Namespace", + "name": "gmp-system" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-service-accounts-1", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "name": "kube-controller-manager", + "namespace": "kube-system", + "kind": "ServiceAccount" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-service-accounts-2", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "kube-scheduler", + "namespace": "kube-system" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-service-accounts-3", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "route-controller", + "namespace": "kube-system" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-service-accounts-4", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "superadmin", + "namespace": "kube-system" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-service-accounts-5", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "name": "pkgextract-service", + "namespace": "kube-system", + "kind": "ServiceAccount" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] 
+ }, + { + "guid": "", + "name": "exclude-service-accounts-7", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "default", + "namespace": "gmp-system" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-service-accounts-8", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "collector", + "namespace": "gmp-system" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-service-accounts-9", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "namespace": "gmp-system", + "kind": "ServiceAccount", + "name": "operator" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-service-accounts-10", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "collector", + "namespace": "gmp-public" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-service-accounts-11", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "StatefulSet", + "name": "alertmanager", + "namespace": "gmp-system" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-service-accounts-12", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "name": "collector", + "namespace": "gmp-system" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-service-accounts-13", "attributes": { "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -20880,15 +39465,66 @@ "designatorType": "Attributes", "attributes": { "kind": "Deployment", - "name": "kubescape", - "namespace": "kubescape-prometheus" + "name": "rule-evaluator", + "namespace": "gmp-system" } } ], "posturePolicies": [ { - "frameworkName": "", - "controlID": "c-0030" + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-service-accounts-14", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "name": "gmp-operator", + "namespace": "gmp-system" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-service-accounts-15", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + 
"alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "name": "gke-metrics-agent-conf", + "namespace": "kube-system", + "kind": "ConfigMap" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" } ] }, @@ -20899,7 +39535,6 @@ "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -20927,7 +39562,6 @@ "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -20955,7 +39589,6 @@ "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -20963,9 +39596,9 @@ { "designatorType": "Attributes", "attributes": { - "kind": "Pod", "namespace": "kube-system", - "name": "metrics-server-[A-Za-z0-9]+-[A-Za-z0-9]+" + "name": "metrics-server-[A-Za-z0-9]+-[A-Za-z0-9]+", + "kind": "Pod" } } ], @@ -20983,7 +39616,6 @@ "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -21004,6 +39636,33 @@ } ] }, + { + "guid": "", + "name": "exclude-eks-resources-7", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "namespace": "kube-system", + "name": "coredns", + "kind": "Deployment" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "C-.*" + } + ] + }, { "guid": "", "name": "exclude-eks-resources-8", @@ -21011,7 +39670,114 @@ "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "namespace": "kube-system", + "name": "eventrouter" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "C-.*" + } + ] + }, + { + "guid": "", + "name": "exclude-eks-resources-9", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "name": "ebs-csi-controller", + "kind": "Deployment", + "namespace": "kube-system" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "C-.*" + } + ] + }, + { + "guid": "", + "name": "exclude-eks-resources-10", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "namespace": "kube-system", + "name": "ebs-csi-node", + "kind": "DaemonSet" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "C-.*" + } + ] + }, + { + "guid": "", + "name": "exclude-eks-resources-11", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "name": "ebs-csi-node-windows", + "kind": "DaemonSet", + "namespace": "kube-system" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "C-.*" + } + ] + }, + { + "guid": "", + "name": "exclude-eks-resources-12", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", "actions": [ "alertOnly" ], @@ -21034,12 +39800,11 @@ }, { "guid": "", - "name": "exclude-eks-resources-9", + "name": 
"exclude-eks-resources-13", "attributes": { "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -21047,9 +39812,36 @@ { "designatorType": "Attributes", "attributes": { + "namespace": "kube-system", + "name": "coredns-[A-Za-z0-9]+", + "kind": "ReplicaSet" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "C-.*" + } + ] + }, + { + "guid": "", + "name": "exclude-eks-resources-14", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "name": "metrics-server-[A-Za-z0-9]+", "kind": "ReplicaSet", - "namespace": "kube-system", - "name": "coredns-[A-Za-z0-9]+" + "namespace": "kube-system" } } ], @@ -21062,68 +39854,11 @@ }, { "guid": "", - "name": "exclude-eks-resources-10", + "name": "exclude-eks-resources-16", "attributes": { "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ReplicaSet", - "namespace": "kube-system", - "name": "metrics-server-[A-Za-z0-9]+" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "", - "controlID": "C-.*" - } - ] - }, - { - "guid": "", - "name": "exclude-eks-resources-11", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Service", - "namespace": "kube-system", - "name": "metrics-server" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "", - "controlID": "C-.*" - } - ] - }, - { - "guid": "", - "name": "exclude-eks-resources-12", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -21146,12 +39881,11 @@ }, { "guid": "", - "name": "exclude-eks-resources-13", + "name": "exclude-eks-resources-17", "attributes": { "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -21159,9 +39893,9 @@ { "designatorType": "Attributes", "attributes": { + "kind": "ServiceAccount", "namespace": "kube-system", - "name": "aws-cloud-provider", - "kind": "ServiceAccount" + "name": "aws-cloud-provider" } } ], @@ -21174,12 +39908,11 @@ }, { "guid": "", - "name": "exclude-eks-resources-14", + "name": "exclude-eks-resources-18", "attributes": { "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -21202,12 +39935,11 @@ }, { "guid": "", - "name": "exclude-eks-resources-15", + "name": "exclude-eks-resources-19", "attributes": { "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -21215,9 +39947,9 @@ { "designatorType": "Attributes", "attributes": { + "namespace": "kube-system", "name": "eks-admin", - "kind": "ServiceAccount", - "namespace": "kube-system" + "kind": "ServiceAccount" } } ], @@ -21230,12 +39962,11 @@ }, { "guid": "", - "name": "exclude-eks-resources-16", + "name": "exclude-eks-resources-20", "attributes": { "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -21258,12 +39989,11 @@ }, { "guid": "", - "name": "exclude-eks-resources-17", + "name": 
"exclude-eks-resources-21", "attributes": { "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -21286,12 +40016,11 @@ }, { "guid": "", - "name": "exclude-eks-resources-18", + "name": "exclude-eks-resources-22", "attributes": { "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -21314,12 +40043,38 @@ }, { "guid": "", - "name": "exclude-eks-resources-19", + "name": "exclude-eks-resources-23", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "namespace": "kube-system", + "name": "vpc-resource-controller", + "kind": "ServiceAccount" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "C-.*" + } + ] + }, + { + "guid": "", + "name": "exclude-eks-resources-24", "attributes": { "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -21329,7 +40084,7 @@ "attributes": { "kind": "ServiceAccount", "namespace": "kube-system", - "name": "vpc-resource-controller" + "name": "eventrouter" } } ], @@ -21342,12 +40097,11 @@ }, { "guid": "", - "name": "exclude-eks-resources-20", + "name": "exclude-eks-resources-25", "attributes": { "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -21355,8 +40109,9 @@ { "designatorType": "Attributes", "attributes": { - "kind": "User", - "name": "eks:fargate-manager" + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "ebs-csi-controller-sa" } } ], @@ -21369,12 +40124,64 @@ }, { "guid": "", - "name": "exclude-eks-resources-21", + "name": "exclude-eks-resources-26", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "ebs-csi-node-sa" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "C-.*" + } + ] + }, + { + "guid": "", + "name": "exclude-eks-resources-27", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "name": "eks:fargate-manager", + "kind": "User" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "C-.*" + } + ] + }, + { + "guid": "", + "name": "exclude-eks-resources-28", "attributes": { "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -21396,12 +40203,11 @@ }, { "guid": "", - "name": "exclude-eks-resources-22", + "name": "exclude-eks-resources-29", "attributes": { "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -21423,12 +40229,11 @@ }, { "guid": "", - "name": "exclude-eks-resources-23", + "name": "exclude-eks-resources-30", "attributes": { "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -21450,12 +40255,11 @@ }, { "guid": "", - "name": "exclude-eks-resources-24", + "name": "exclude-eks-resources-31", "attributes": { "systemException": true }, "policyType": "postureExceptionPolicy", - 
"creationTime": "", "actions": [ "alertOnly" ], @@ -21477,12 +40281,37 @@ }, { "guid": "", - "name": "exclude-aks-kube-system-deployments-1", + "name": "exclude-eks-resources-32", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "name": "system:public-info-viewer", + "kind": "ClusterRoleBinding" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "C-.*" + } + ] + }, + { + "guid": "", + "name": "exclude-kubescape-deployment-security-context-1", "attributes": { "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -21491,1624 +40320,77 @@ "designatorType": "Attributes", "attributes": { "kind": "Deployment", - "namespace": "kube-system", - "name": "coredns" - } - } - ], - "posturePolicies": [] - }, - { - "guid": "", - "name": "exclude-aks-kube-system-deployments-2", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "namespace": "kube-system", - "name": "coredns-autoscaler", - "kind": "Deployment" - } - } - ], - "posturePolicies": [] - }, - { - "guid": "", - "name": "exclude-aks-kube-system-deployments-3", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "namespace": "kube-system", - "name": "konnectivity-agent", - "kind": "Deployment" - } - } - ], - "posturePolicies": [] - }, - { - "guid": "", - "name": "exclude-aks-kube-system-deployments-4", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "namespace": "kube-system", - "name": "metrics-server", - "kind": "Deployment" - } - } - ], - "posturePolicies": [] - }, - { - "guid": "", - "name": "exclude-aks-kube-system-deployments-5", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "name": "omsagent-rs", - "kind": "Deployment", - "namespace": "kube-system" - } - } - ], - "posturePolicies": [] - }, - { - "guid": "", - "name": "exclude-aks-kube-system-pods-1", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Pod", - "namespace": "kube-system", - "name": "azure-ip-masq-agent-[A-Za-z0-9]+" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "", - "controlID": "C-.*" - } - ] - }, - { - "guid": "", - "name": "exclude-aks-kube-system-pods-2", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Pod", - "namespace": "kube-system", - "name": "cloud-node-manager-[A-Za-z0-9]+" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "", - "controlID": "C-.*" - } - ] - }, - { - 
"guid": "", - "name": "exclude-aks-kube-system-pods-3", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Pod", - "namespace": "kube-system", - "name": "coredns-autoscaler--[A-Za-z0-9]+-[A-Za-z0-9]+" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "", - "controlID": "C-.*" - } - ] - }, - { - "guid": "", - "name": "exclude-aks-kube-system-pods-5", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "name": "csi-azuredisk-node-[A-Za-z0-9]+", - "kind": "Pod", - "namespace": "kube-system" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "", - "controlID": "C-.*" - } - ] - }, - { - "guid": "", - "name": "exclude-aks-kube-system-pods-6", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Pod", - "namespace": "kube-system", - "name": "csi-azurefile-node-[A-Za-z0-9]+" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "", - "controlID": "C-.*" - } - ] - }, - { - "guid": "", - "name": "exclude-aks-kube-system-pods-7", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "namespace": "kube-system", - "name": "konnectivity-agent-[A-Za-z0-9]+-[A-Za-z0-9]+", - "kind": "Pod" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "", - "controlID": "C-.*" - } - ] - }, - { - "guid": "", - "name": "exclude-aks-kube-system-pods-10", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Pod", - "namespace": "kube-system", - "name": "omsagent-[A-Za-z0-9]+" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "", - "controlID": "C-.*" - } - ] - }, - { - "guid": "", - "name": "exclude-aks-kube-system-pods-11", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "name": "omsagent-rs-[A-Za-z0-9]+-[A-Za-z0-9]+", - "kind": "Pod", - "namespace": "kube-system" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "", - "controlID": "C-.*" - } - ] - }, - { - "guid": "", - "name": "exclude-aks-kube-system-services-1", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Service", - "namespace": "kube-system", - "name": "kube-dns" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-aks-kube-system-services-2", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - 
"attributes": { - "kind": "Service", - "namespace": "kube-system", - "name": "metrics-server" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-aks-kube-system-daemonsets-1", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "azure-ip-masq-agent" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-aks-kube-system-daemonsets-2", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "namespace": "kube-system", - "name": "cloud-node-manager", - "kind": "DaemonSet" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-aks-kube-system-daemonsets-3", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "cloud-node-manager-windows" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-aks-kube-system-daemonsets-4", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "csi-azuredisk-node" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-aks-kube-system-daemonsets-5", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "csi-azuredisk-node-win" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-aks-kube-system-daemonsets-6", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "name": "csi-azurefile-node", - "kind": "DaemonSet", - "namespace": "kube-system" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-aks-kube-system-daemonsets-7", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "csi-azurefile-node-win" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-aks-kube-system-daemonsets-8", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - 
"designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "kube-proxy" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-aks-kube-system-daemonsets-9", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "namespace": "kube-system", - "name": "omsagent", - "kind": "DaemonSet" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-aks-kube-system-daemonsets-10", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "omsagent-win" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-aks-kube-system-replicasets-1", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ReplicaSet", - "namespace": "kube-system", - "name": "coredns-autoscaler-[A-Za-z0-9]+" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-aks-kube-system-replicasets-2", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "namespace": "kube-system", - "name": "coredns-[A-Za-z0-9]+", - "kind": "ReplicaSet" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-aks-kube-system-replicasets-3", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "namespace": "kube-system", - "name": "konnectivity-agent-[A-Za-z0-9]+", - "kind": "ReplicaSet" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-aks-kube-system-replicasets-4", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ReplicaSet", - "namespace": "kube-system", - "name": "metrics-server-[A-Za-z0-9]+" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-aks-kube-system-replicasets-5", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "name": "omsagent-rs-[A-Za-z0-9]+", - "kind": "ReplicaSet", - "namespace": "kube-system" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-aks-kube-system-namespaces-1", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - 
"actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Namespace", - "name": "kube-system" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-aks-kube-system-sa-2", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "azure-cloud-provider" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-aks-kube-system-sa-5", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "name": "cloud-node-manager", - "kind": "ServiceAccount", - "namespace": "kube-system" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-aks-kube-system-sa-8", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "coredns-autoscaler" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-aks-kube-system-sa-10", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "csi-azuredisk-node-sa" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-aks-kube-system-sa-11", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "csi-azurefile-node-sa" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-aks-kube-system-sa-24", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "konnectivity-agent" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-aks-kube-system-sa-26", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "namespace": "kube-system", - "name": "metrics-server", - "kind": "ServiceAccount" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-aks-kube-system-sa-29", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - 
"resources": [ - { - "designatorType": "Attributes", - "attributes": { - "name": "omsagent", - "kind": "ServiceAccount", - "namespace": "kube-system" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-aks-kube-system-sa-45", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ConfigMap", - "namespace": "default", - "name": "kube-root-ca.crt" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-aks-kube-system-sa-46", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ConfigMap", - "namespace": "kube-node-lease", - "name": "kube-root-ca.crt" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-aks-kube-system-sa-47", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ConfigMap", - "namespace": "kube-public", - "name": "kube-root-ca.crt" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-aks-kube-system-sa-48", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ConfigMap", - "namespace": "kube-system", - "name": "azure-ip-masq-agent-config-reconciled" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-aks-kube-system-sa-49", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "namespace": "kube-system", - "name": "cluster-autoscaler-status", - "kind": "ConfigMap" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-aks-kube-system-sa-50", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ConfigMap", - "namespace": "kube-system", - "name": "container-azm-ms-aks-k8scluster" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-aks-kube-system-sa-51", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "namespace": "kube-system", - "name": "coredns", - "kind": "ConfigMap" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-aks-kube-system-sa-52", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - 
{ - "designatorType": "Attributes", - "attributes": { - "kind": "ConfigMap", - "namespace": "kube-system", - "name": "coredns-autoscaler" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-aks-kube-system-sa-53", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ConfigMap", - "namespace": "kube-system", - "name": "coredns-custom" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-aks-kube-system-sa-54", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "name": "extension-apiserver-authentication", - "kind": "ConfigMap", - "namespace": "kube-system" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-aks-kube-system-sa-55", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ConfigMap", - "namespace": "kube-system", - "name": "kube-root-ca.crt" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-aks-kube-system-sa-56", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ConfigMap", - "namespace": "kube-system", - "name": "omsagent-rs-config" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-aks-kube-system-sa-57", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "name": "overlay-upgrade-data", - "kind": "ConfigMap", - "namespace": "kube-system" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-aks-kube-system-sa-58", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "MutatingWebhookConfiguration", - "name": "aks-webhook-admission-controller" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-aks-kube-system-sa-59", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "MutatingWebhookConfiguration", - "name": "aks-node-mutating-webhook" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-aks-kube-system-sa-60", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": 
"Attributes", - "attributes": { - "name": "aks-node-validating-webhook", - "kind": "ValidatingWebhookConfiguration" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-aks-kube-system-sa-61", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Group", - "name": "system:masters" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-aks-kube-system-sa-62", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Group", - "name": "system:nodes" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-aks-kube-system-sa-63", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "User", - "name": "clusterAdmin" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-aks-kube-system-sa-64", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "User", - "name": "system:kube-controller-manager" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-aks-kube-system-sa-65", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "User", - "name": "system:kube-scheduler" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-default-namespace-resources-1", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ConfigMap", "name": "kubescape", - "namespace": "default" + "namespace": "kubescape" } } ], "posturePolicies": [ { - "frameworkName": "" + "frameworkName": "", + "controlID": "c-0076" + }, + { + "frameworkName": "", + "controlID": "c-0237" + }, + { + "frameworkName": "", + "controlID": "c-0055" + }, + { + "frameworkName": "", + "controlID": "c-0056" + }, + { + "frameworkName": "", + "controlID": "c-0017" + }, + { + "frameworkName": "", + "controlID": "c-0018" + }, + { + "frameworkName": "", + "controlID": "c-0013" + }, + { + "frameworkName": "", + "controlID": "c-0030" + }, + { + "frameworkName": "", + "controlID": "c-0210" + }, + { + "frameworkName": "", + "controlID": "c-0260" + }, + { + "frameworkName": "", + "controlID": "c-0207" + }, + { + "frameworkName": "", + "controlID": "c-0211" + }, + { + "frameworkName": "", + "controlID": "c-0058" + }, + { + "frameworkName": "", + "controlID": "c-0038" } ] }, { "guid": "", - "name": "exclude-default-namespace-resources-2", + "name": 
"exclude-kubescape-deployment-security-context-2", "attributes": { "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -23116,25 +40398,1093 @@ { "designatorType": "Attributes", "attributes": { - "kind": "Namespace", + "kind": "Deployment", + "name": "operator", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "c-0076" + }, + { + "frameworkName": "", + "controlID": "c-0237" + }, + { + "frameworkName": "", + "controlID": "c-0055" + }, + { + "frameworkName": "", + "controlID": "c-0056" + }, + { + "frameworkName": "", + "controlID": "c-0017" + }, + { + "frameworkName": "", + "controlID": "c-0018" + }, + { + "frameworkName": "", + "controlID": "c-0013" + }, + { + "frameworkName": "", + "controlID": "c-0030" + }, + { + "frameworkName": "", + "controlID": "c-0210" + }, + { + "frameworkName": "", + "controlID": "c-0260" + }, + { + "frameworkName": "", + "controlID": "c-0207" + }, + { + "frameworkName": "", + "controlID": "c-0211" + }, + { + "frameworkName": "", + "controlID": "c-0058" + }, + { + "frameworkName": "", + "controlID": "c-0038" + } + ] + }, + { + "guid": "", + "name": "exclude-kubescape-deployment-security-context-3", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "name": "gateway", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "c-0076" + }, + { + "frameworkName": "", + "controlID": "c-0237" + }, + { + "frameworkName": "", + "controlID": "c-0055" + }, + { + "frameworkName": "", + "controlID": "c-0056" + }, + { + "frameworkName": "", + "controlID": "c-0017" + }, + { + "frameworkName": "", + "controlID": "c-0018" + }, + { + "frameworkName": "", + "controlID": "c-0013" + }, + { + "frameworkName": "", + "controlID": "c-0030" + }, + { + "frameworkName": "", + "controlID": "c-0210" + }, + { + "frameworkName": "", + "controlID": "c-0260" + }, + { + "frameworkName": "", + "controlID": "c-0207" + }, + { + "frameworkName": "", + "controlID": "c-0211" + }, + { + "frameworkName": "", + "controlID": "c-0058" + }, + { + "frameworkName": "", + "controlID": "c-0038" + } + ] + }, + { + "guid": "", + "name": "exclude-kubescape-deployment-security-context-4", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "name": "synchronizer", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "c-0076" + }, + { + "frameworkName": "", + "controlID": "c-0237" + }, + { + "frameworkName": "", + "controlID": "c-0055" + }, + { + "frameworkName": "", + "controlID": "c-0056" + }, + { + "frameworkName": "", + "controlID": "c-0017" + }, + { + "frameworkName": "", + "controlID": "c-0018" + }, + { + "frameworkName": "", + "controlID": "c-0013" + }, + { + "frameworkName": "", + "controlID": "c-0030" + }, + { + "frameworkName": "", + "controlID": "c-0210" + }, + { + "frameworkName": "", + "controlID": "c-0260" + }, + { + "frameworkName": "", + "controlID": "c-0207" + }, + { + "frameworkName": "", + "controlID": "c-0211" + }, + { + "frameworkName": "", + "controlID": "c-0058" + }, + { + "frameworkName": "", + "controlID": "c-0038" + } + ] + }, + { + 
"guid": "", + "name": "exclude-kubescape-deployment-security-context-5", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "name": "kubevuln", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "c-0076" + }, + { + "frameworkName": "", + "controlID": "c-0237" + }, + { + "frameworkName": "", + "controlID": "c-0055" + }, + { + "frameworkName": "", + "controlID": "c-0056" + }, + { + "frameworkName": "", + "controlID": "c-0017" + }, + { + "frameworkName": "", + "controlID": "c-0018" + }, + { + "frameworkName": "", + "controlID": "c-0013" + }, + { + "frameworkName": "", + "controlID": "c-0030" + }, + { + "frameworkName": "", + "controlID": "c-0210" + }, + { + "frameworkName": "", + "controlID": "c-0260" + }, + { + "frameworkName": "", + "controlID": "c-0207" + }, + { + "frameworkName": "", + "controlID": "c-0211" + }, + { + "frameworkName": "", + "controlID": "c-0058" + }, + { + "frameworkName": "", + "controlID": "c-0038" + } + ] + }, + { + "guid": "", + "name": "exclude-kubescape-deployment-security-context-6", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "StatefulSet", + "name": "kollector", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "c-0076" + }, + { + "frameworkName": "", + "controlID": "c-0237" + }, + { + "frameworkName": "", + "controlID": "c-0055" + }, + { + "frameworkName": "", + "controlID": "c-0056" + }, + { + "frameworkName": "", + "controlID": "c-0017" + }, + { + "frameworkName": "", + "controlID": "c-0018" + }, + { + "frameworkName": "", + "controlID": "c-0013" + }, + { + "frameworkName": "", + "controlID": "c-0030" + }, + { + "frameworkName": "", + "controlID": "c-0210" + }, + { + "frameworkName": "", + "controlID": "c-0260" + }, + { + "frameworkName": "", + "controlID": "c-0207" + }, + { + "frameworkName": "", + "controlID": "c-0211" + }, + { + "frameworkName": "", + "controlID": "c-0058" + }, + { + "frameworkName": "", + "controlID": "c-0038" + } + ] + }, + { + "guid": "", + "name": "exclude-kubescape-deployment-security-context-7", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "namespace": "kubescape", + "kind": "Deployment", + "name": "storage" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "c-0076" + }, + { + "frameworkName": "", + "controlID": "c-0237" + }, + { + "frameworkName": "", + "controlID": "c-0055" + }, + { + "frameworkName": "", + "controlID": "c-0056" + }, + { + "frameworkName": "", + "controlID": "c-0017" + }, + { + "frameworkName": "", + "controlID": "c-0018" + }, + { + "frameworkName": "", + "controlID": "c-0013" + }, + { + "frameworkName": "", + "controlID": "c-0030" + }, + { + "frameworkName": "", + "controlID": "c-0210" + }, + { + "frameworkName": "", + "controlID": "c-0260" + }, + { + "frameworkName": "", + "controlID": "c-0207" + }, + { + "frameworkName": "", + "controlID": "c-0211" + }, + { + "frameworkName": "", + "controlID": "c-0058" + }, + { + "frameworkName": "", + "controlID": "c-0038" + } + ] + }, + { + "guid": "", + 
"name": "exclude-kubescape-deployment-security-context-8", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "name": "otel-collector", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "c-0076" + }, + { + "frameworkName": "", + "controlID": "c-0237" + }, + { + "frameworkName": "", + "controlID": "c-0055" + }, + { + "frameworkName": "", + "controlID": "c-0056" + }, + { + "frameworkName": "", + "controlID": "c-0017" + }, + { + "frameworkName": "", + "controlID": "c-0018" + }, + { + "frameworkName": "", + "controlID": "c-0013" + }, + { + "frameworkName": "", + "controlID": "c-0030" + }, + { + "frameworkName": "", + "controlID": "c-0210" + }, + { + "frameworkName": "", + "controlID": "c-0260" + }, + { + "frameworkName": "", + "controlID": "c-0207" + }, + { + "frameworkName": "", + "controlID": "c-0211" + }, + { + "frameworkName": "", + "controlID": "c-0058" + }, + { + "frameworkName": "", + "controlID": "c-0038" + } + ] + }, + { + "guid": "", + "name": "exclude-kubescape-deployment-security-context-9", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "name": "node-agent", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "c-0076" + }, + { + "frameworkName": "", + "controlID": "c-0237" + }, + { + "frameworkName": "", + "controlID": "c-0055" + }, + { + "frameworkName": "", + "controlID": "c-0056" + }, + { + "frameworkName": "", + "controlID": "c-0017" + }, + { + "frameworkName": "", + "controlID": "c-0018" + }, + { + "frameworkName": "", + "controlID": "c-0013" + }, + { + "frameworkName": "", + "controlID": "c-0030" + }, + { + "frameworkName": "", + "controlID": "c-0210" + }, + { + "frameworkName": "", + "controlID": "c-0260" + }, + { + "frameworkName": "", + "controlID": "c-0207" + }, + { + "frameworkName": "", + "controlID": "c-0211" + }, + { + "frameworkName": "", + "controlID": "c-0058" + }, + { + "frameworkName": "", + "controlID": "c-0038" + } + ] + }, + { + "guid": "", + "name": "exclude-kubescape-deployment-allowed-registry-1", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "namespace": "kubescape", + "kind": "Deployment", + "name": "kubescape" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "c-0078" + } + ] + }, + { + "guid": "", + "name": "exclude-kubescape-deployment-allowed-registry-2", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "name": "operator", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "c-0078" + } + ] + }, + { + "guid": "", + "name": "exclude-kubescape-deployment-allowed-registry-3", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "name": 
"gateway", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "c-0078" + } + ] + }, + { + "guid": "", + "name": "exclude-kubescape-deployment-allowed-registry-4", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "name": "kubevuln", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "c-0078" + } + ] + }, + { + "guid": "", + "name": "exclude-kubescape-deployment-allowed-registry-5", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "StatefulSet", + "name": "kollector", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "c-0078" + } + ] + }, + { + "guid": "", + "name": "exclude-kubescape-deployment-ingress-and-egress-1", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "name": "kubescape", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "c-0030" + }, + { + "frameworkName": "", + "controlID": "c-0013" + } + ] + }, + { + "guid": "", + "name": "exclude-kubescape-deployment-ingress-and-egress-2", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "name": "operator", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "c-0030" + }, + { + "frameworkName": "", + "controlID": "c-0013" + } + ] + }, + { + "guid": "", + "name": "exclude-kubescape-deployment-ingress-and-egress-3", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "name": "gateway", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "c-0030" + }, + { + "frameworkName": "", + "controlID": "c-0013" + } + ] + }, + { + "guid": "", + "name": "exclude-kubescape-deployment-ingress-and-egress-4", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "name": "kubevuln", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "c-0030" + }, + { + "frameworkName": "", + "controlID": "c-0013" + } + ] + }, + { + "guid": "", + "name": "exclude-kubescape-deployment-ingress-and-egress-5", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "StatefulSet", + "name": "kollector", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "c-0030" + }, + { + "frameworkName": "", + 
"controlID": "c-0013" + } + ] + }, + { + "guid": "", + "name": "exclude-kubescape-deployment-ingress-and-egress", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "name": "node-agent", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "c-0045" + }, + { + "frameworkName": "", + "controlID": "c-0046" + }, + { + "frameworkName": "", + "controlID": "c-0048" + }, + { + "frameworkName": "", + "controlID": "c-0057" + }, + { + "frameworkName": "", + "controlID": "c-0013" + }, + { + "frameworkName": "", + "controlID": "c-0016" + }, + { + "frameworkName": "", + "controlID": "c-0030" + }, + { + "frameworkName": "", + "controlID": "c-0034" + }, + { + "frameworkName": "", + "controlID": "c-0074" + }, + { + "frameworkName": "", + "controlID": "c-0055" + }, + { + "frameworkName": "", + "controlID": "c-0056" + }, + { + "frameworkName": "", + "controlID": "c-0017" + }, + { + "frameworkName": "", + "controlID": "c-0018" + } + ] + }, + { + "guid": "", + "name": "exclude-ks-service-account", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "ks-sa", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "c-0007" + }, + { + "frameworkName": "", + "controlID": "c-0015" + } + ] + }, + { + "guid": "", + "name": "exclude-kubescape-service-account", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "namespace": "kubescape", + "kind": "ServiceAccount", + "name": "kubescape-sa" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "c-0007" + }, + { + "frameworkName": "", + "controlID": "c-0015" + } + ] + }, + { + "guid": "", + "name": "exclude-kubescape-default-service-account", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "namespace": "kubescape", + "kind": "ServiceAccount", "name": "default" } } ], "posturePolicies": [ { - "frameworkName": "" + "frameworkName": "", + "controlID": "c-0034" + }, + { + "frameworkName": "", + "controlID": "c-0189" + }, + { + "frameworkName": "", + "controlID": "c-0190" } ] }, { "guid": "", - "name": "exclude-default-namespace-resources-3", + "name": "exclude-kubescape-service-accounts-1", "attributes": { "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -23143,8 +41493,801 @@ "designatorType": "Attributes", "attributes": { "kind": "ServiceAccount", - "name": "default", - "namespace": "default" + "name": "ks-sa", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "c-0034" + }, + { + "frameworkName": "", + "controlID": "c-0207" + }, + { + "frameworkName": "", + "controlID": "c-0013" + }, + { + "frameworkName": "", + "controlID": "c-0015" + }, + { + "frameworkName": "", + "controlID": "c-0053" + }, + { + "frameworkName": "", + "controlID": "c-0186" + } + ] + }, + { + "guid": "", + "name": 
"exclude-kubescape-service-accounts-2", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "name": "storage", + "namespace": "kubescape", + "kind": "ServiceAccount" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "c-0034" + }, + { + "frameworkName": "", + "controlID": "c-0207" + }, + { + "frameworkName": "", + "controlID": "c-0013" + }, + { + "frameworkName": "", + "controlID": "c-0015" + }, + { + "frameworkName": "", + "controlID": "c-0053" + }, + { + "frameworkName": "", + "controlID": "c-0186" + } + ] + }, + { + "guid": "", + "name": "exclude-kubescape-service-accounts-3", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "name": "kubescape-sa", + "namespace": "kubescape", + "kind": "ServiceAccount" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "c-0034" + }, + { + "frameworkName": "", + "controlID": "c-0207" + }, + { + "frameworkName": "", + "controlID": "c-0013" + }, + { + "frameworkName": "", + "controlID": "c-0015" + }, + { + "frameworkName": "", + "controlID": "c-0053" + }, + { + "frameworkName": "", + "controlID": "c-0186" + } + ] + }, + { + "guid": "", + "name": "exclude-kubescape-service-accounts-4", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "node-agent", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "c-0034" + }, + { + "frameworkName": "", + "controlID": "c-0207" + }, + { + "frameworkName": "", + "controlID": "c-0013" + }, + { + "frameworkName": "", + "controlID": "c-0015" + }, + { + "frameworkName": "", + "controlID": "c-0053" + }, + { + "frameworkName": "", + "controlID": "c-0186" + } + ] + }, + { + "guid": "", + "name": "exclude-kubescape-service-accounts-5", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "kubevuln", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "c-0034" + }, + { + "frameworkName": "", + "controlID": "c-0207" + }, + { + "frameworkName": "", + "controlID": "c-0013" + }, + { + "frameworkName": "", + "controlID": "c-0015" + }, + { + "frameworkName": "", + "controlID": "c-0053" + }, + { + "frameworkName": "", + "controlID": "c-0186" + } + ] + }, + { + "guid": "", + "name": "exclude-kubescape-service-accounts-6", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "storage-sa", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "c-0034" + }, + { + "frameworkName": "", + "controlID": "c-0207" + }, + { + "frameworkName": "", + "controlID": "c-0013" + }, + { + "frameworkName": "", + "controlID": "c-0015" + }, + { + "frameworkName": "", + "controlID": "c-0053" + }, + { + "frameworkName": 
"", + "controlID": "c-0186" + } + ] + }, + { + "guid": "", + "name": "exclude-kubescape-service-accounts-7", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "synchronizer", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "c-0034" + }, + { + "frameworkName": "", + "controlID": "c-0207" + }, + { + "frameworkName": "", + "controlID": "c-0013" + }, + { + "frameworkName": "", + "controlID": "c-0015" + }, + { + "frameworkName": "", + "controlID": "c-0053" + }, + { + "frameworkName": "", + "controlID": "c-0186" + } + ] + }, + { + "guid": "", + "name": "exclude-kubescape-service-accounts-8", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "node-agent-service-account", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "c-0034" + }, + { + "frameworkName": "", + "controlID": "c-0207" + }, + { + "frameworkName": "", + "controlID": "c-0013" + }, + { + "frameworkName": "", + "controlID": "c-0015" + }, + { + "frameworkName": "", + "controlID": "c-0053" + }, + { + "frameworkName": "", + "controlID": "c-0186" + } + ] + }, + { + "guid": "", + "name": "exclude-kubescape-otel", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "name": "otel-collector", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "c-0017" + }, + { + "frameworkName": "", + "controlID": "c-0018" + }, + { + "frameworkName": "", + "controlID": "c-0030" + }, + { + "frameworkName": "", + "controlID": "c-0055" + }, + { + "frameworkName": "", + "controlID": "c-0056" + } + ] + }, + { + "guid": "", + "name": "exclude-kubescape-host-scanner-resources-1", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "name": "host-scanner", + "namespace": "kubescape-host-scanner", + "kind": "DaemonSet" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-kubescape-host-scanner-resources-2", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "name": "host-scanner", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-schedulers-1", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "CronJob", + "name": "kubevuln-schedule-.*", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "c-0030" + }, + { + "frameworkName": "", + "controlID": "c-0055" + }, + { + "frameworkName": "", + "controlID": "c-0260" 
+ }, + { + "frameworkName": "", + "controlID": "c-0013" + }, + { + "frameworkName": "", + "controlID": "c-0056" + }, + { + "frameworkName": "", + "controlID": "c-0018" + }, + { + "frameworkName": "", + "controlID": "c-0026" + }, + { + "frameworkName": "", + "controlID": "c-0076" + }, + { + "frameworkName": "", + "controlID": "c-0077" + }, + { + "frameworkName": "", + "controlID": "c-0210" + }, + { + "frameworkName": "", + "controlID": "c-0211" + } + ] + }, + { + "guid": "", + "name": "exclude-schedulers-2", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "name": "kubescape-registry-scan-.*", + "namespace": "kubescape", + "kind": "CronJob" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "c-0030" + }, + { + "frameworkName": "", + "controlID": "c-0055" + }, + { + "frameworkName": "", + "controlID": "c-0260" + }, + { + "frameworkName": "", + "controlID": "c-0013" + }, + { + "frameworkName": "", + "controlID": "c-0056" + }, + { + "frameworkName": "", + "controlID": "c-0018" + }, + { + "frameworkName": "", + "controlID": "c-0026" + }, + { + "frameworkName": "", + "controlID": "c-0076" + }, + { + "frameworkName": "", + "controlID": "c-0077" + }, + { + "frameworkName": "", + "controlID": "c-0210" + }, + { + "frameworkName": "", + "controlID": "c-0211" + } + ] + }, + { + "guid": "", + "name": "exclude-schedulers-3", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "CronJob", + "name": "kubevuln-scheduler", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "c-0030" + }, + { + "frameworkName": "", + "controlID": "c-0055" + }, + { + "frameworkName": "", + "controlID": "c-0260" + }, + { + "frameworkName": "", + "controlID": "c-0013" + }, + { + "frameworkName": "", + "controlID": "c-0056" + }, + { + "frameworkName": "", + "controlID": "c-0018" + }, + { + "frameworkName": "", + "controlID": "c-0026" + }, + { + "frameworkName": "", + "controlID": "c-0076" + }, + { + "frameworkName": "", + "controlID": "c-0077" + }, + { + "frameworkName": "", + "controlID": "c-0210" + }, + { + "frameworkName": "", + "controlID": "c-0211" + } + ] + }, + { + "guid": "", + "name": "exclude-schedulers-4", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "namespace": "kubescape", + "kind": "CronJob", + "name": "kubescape-scheduler" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "c-0030" + }, + { + "frameworkName": "", + "controlID": "c-0055" + }, + { + "frameworkName": "", + "controlID": "c-0260" + }, + { + "frameworkName": "", + "controlID": "c-0013" + }, + { + "frameworkName": "", + "controlID": "c-0056" + }, + { + "frameworkName": "", + "controlID": "c-0018" + }, + { + "frameworkName": "", + "controlID": "c-0026" + }, + { + "frameworkName": "", + "controlID": "c-0076" + }, + { + "frameworkName": "", + "controlID": "c-0077" + }, + { + "frameworkName": "", + "controlID": "c-0210" + }, + { + "frameworkName": "", + "controlID": "c-0211" + } + ] + }, + { + "guid": "", + "name": "exclude-storage-apiserver", + "attributes": { + "systemException": true + }, + 
"policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "name": "storage-apiserver", + "namespace": "kubescape", + "kind": "Deployment" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "c-0030" + }, + { + "frameworkName": "", + "controlID": "c-0034" + }, + { + "frameworkName": "", + "controlID": "c-0260" + }, + { + "frameworkName": "", + "controlID": "c-0055" + }, + { + "frameworkName": "", + "controlID": "c-0056" + }, + { + "frameworkName": "", + "controlID": "c-0017" + }, + { + "frameworkName": "", + "controlID": "c-0018" + }, + { + "frameworkName": "", + "controlID": "c-0076" + } + ] + }, + { + "guid": "", + "name": "exclude-ns", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Namespace", + "name": "kubescape" } } ], @@ -23161,7 +42304,6 @@ "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -23190,7 +42332,7 @@ }, { "frameworkName": "", - "controlID": "c-0013 " + "controlID": "c-0013" }, { "frameworkName": "", @@ -23210,15 +42352,11 @@ }, { "frameworkName": "", - "controlID": "c-0004" + "controlID": "C-0270" }, { "frameworkName": "", - "controlID": "c-0050" - }, - { - "frameworkName": "", - "controlID": "c-0009" + "controlID": "C-0271" }, { "frameworkName": "", @@ -23232,512 +42370,11 @@ }, { "guid": "", - "name": "exclude-kubescape-deployment-security-context-1", + "name": "exclude-minikube-kube-system-resources-3", "attributes": { "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "name": "kubescape", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "", - "controlID": "c-0055" - }, - { - "frameworkName": "", - "controlID": "c-0017" - }, - { - "frameworkName": "", - "controlID": "C-0210" - }, - { - "frameworkName": "", - "controlID": "C-0211" - } - ] - }, - { - "guid": "", - "name": "exclude-kubescape-deployment-security-context-2", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "name": "operator", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "", - "controlID": "c-0055" - }, - { - "frameworkName": "", - "controlID": "c-0017" - }, - { - "frameworkName": "", - "controlID": "C-0210" - }, - { - "frameworkName": "", - "controlID": "C-0211" - } - ] - }, - { - "guid": "", - "name": "exclude-kubescape-deployment-security-context-3", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "name": "gateway", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "", - "controlID": "c-0055" - }, - { - "frameworkName": "", - "controlID": "c-0017" - }, - { - "frameworkName": "", - "controlID": "C-0210" - }, - { - "frameworkName": "", - "controlID": "C-0211" - } - ] - }, - { - "guid": "", - "name": 
"exclude-kubescape-deployment-security-context-4", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "name": "kubevuln", - "namespace": "kubescape", - "kind": "Deployment" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "", - "controlID": "c-0055" - }, - { - "frameworkName": "", - "controlID": "c-0017" - }, - { - "frameworkName": "", - "controlID": "C-0210" - }, - { - "frameworkName": "", - "controlID": "C-0211" - } - ] - }, - { - "guid": "", - "name": "exclude-kubescape-deployment-security-context-5", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "StatefulSet", - "name": "kollector", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "", - "controlID": "c-0055" - }, - { - "frameworkName": "", - "controlID": "c-0017" - }, - { - "frameworkName": "", - "controlID": "C-0210" - }, - { - "frameworkName": "", - "controlID": "C-0211" - } - ] - }, - { - "guid": "", - "name": "exclude-kubescape-deployment-allowed-registry-1", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "name": "kubescape", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "", - "controlID": "c-0001" - }, - { - "frameworkName": "", - "controlID": "c-0078" - } - ] - }, - { - "guid": "", - "name": "exclude-kubescape-deployment-allowed-registry-2", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "name": "operator", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "", - "controlID": "c-0001" - }, - { - "frameworkName": "", - "controlID": "c-0078" - } - ] - }, - { - "guid": "", - "name": "exclude-kubescape-deployment-allowed-registry-3", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "name": "gateway", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "", - "controlID": "c-0001" - }, - { - "frameworkName": "", - "controlID": "c-0078" - } - ] - }, - { - "guid": "", - "name": "exclude-kubescape-deployment-allowed-registry-4", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "name": "kubevuln", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "", - "controlID": "c-0001" - }, - { - "frameworkName": "", - "controlID": "c-0078" - } - ] - }, - { - "guid": "", - "name": "exclude-kubescape-deployment-allowed-registry-5", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": 
"", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "StatefulSet", - "name": "kollector", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "", - "controlID": "c-0001" - }, - { - "frameworkName": "", - "controlID": "c-0078" - } - ] - }, - { - "guid": "", - "name": "exclude-kubescape-deployment-ingress-and-egress-1", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "name": "kubescape", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "", - "controlID": "c-0030" - } - ] - }, - { - "guid": "", - "name": "exclude-kubescape-deployment-ingress-and-egress-2", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "name": "operator", - "namespace": "kubescape", - "kind": "Deployment" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "", - "controlID": "c-0030" - } - ] - }, - { - "guid": "", - "name": "exclude-kubescape-deployment-ingress-and-egress-3", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "name": "gateway", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "", - "controlID": "c-0030" - } - ] - }, - { - "guid": "", - "name": "exclude-kubescape-deployment-ingress-and-egress-4", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "name": "kubevuln", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "", - "controlID": "c-0030" - } - ] - }, - { - "guid": "", - "name": "exclude-kubescape-deployment-ingress-and-egress-5", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "StatefulSet", - "name": "kollector", - "namespace": "kubescape" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "", - "controlID": "c-0030" - } - ] - }, - { - "guid": "", - "name": "exclude-gke-kube-system-resources-2", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -23745,9 +42382,9 @@ { "designatorType": "Attributes", "attributes": { + "kind": "Pod", "namespace": "kube-system", - "name": "kube-proxy-[A-Za-z0-9-]+", - "kind": "Pod" + "name": "kube-proxy-.*" } } ], @@ -23760,12 +42397,11 @@ }, { "guid": "", - "name": "exclude-gke-kube-system-resources-4", + "name": "exclude-minikube-kube-system-resources-5", "attributes": { "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -23773,9 +42409,9 @@ { "designatorType": "Attributes", "attributes": { - "kind": "DaemonSet", + "kind": "Deployment", "namespace": 
"kube-system", - "name": "metadata-proxy-v[0-9.]+" + "name": "sealed-secrets-controller" } } ], @@ -23788,68 +42424,11 @@ }, { "guid": "", - "name": "exclude-gke-kube-system-resources-5", + "name": "exclude-minikube-kube-system-resources-6", "attributes": { "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "node-local-dns" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "", - "controlID": "C-.*" - } - ] - }, - { - "guid": "", - "name": "exclude-gke-kube-system-resources-6", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "gke-metrics-agent.*" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "", - "controlID": "C-.*" - } - ] - }, - { - "guid": "", - "name": "exclude-gke-kube-system-resources-7", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -23858,7 +42437,7 @@ "designatorType": "Attributes", "attributes": { "namespace": "kube-system", - "name": "pdcsi-node-windows", + "name": "tpu-device-plugin", "kind": "DaemonSet" } } @@ -23872,12 +42451,11 @@ }, { "guid": "", - "name": "exclude-gke-kube-system-resources-8", + "name": "exclude-minikube-kube-system-resources-7", "attributes": { "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -23885,35 +42463,7 @@ { "designatorType": "Attributes", "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "anetd" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "", - "controlID": "C-.*" - } - ] - }, - { - "guid": "", - "name": "exclude-gke-kube-system-resources-9", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "name": "netd", + "name": "runsc-metric-server", "kind": "DaemonSet", "namespace": "kube-system" } @@ -23928,12 +42478,11 @@ }, { "guid": "", - "name": "exclude-gke-kube-system-resources-10", + "name": "exclude-minikube-kube-system-resources-8", "attributes": { "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -23941,872 +42490,222 @@ { "designatorType": "Attributes", "attributes": { - "name": "fluentbit-gke-big", "kind": "DaemonSet", + "namespace": "kube-system", + "name": "nvidia-gpu-.*" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "C-.*" + } + ] + }, + { + "guid": "", + "name": "exclude-minikube-kube-system-resources-10", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "name": "kube-system", + "kind": "Namespace" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "C-.*" + } + ] + }, + { + "guid": "", + "name": "exclude-minikube-kube-system-resources-11", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + 
"actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Pod", + "namespace": "kube-system", + "name": "storage-provisioner" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "C-.*" + } + ] + }, + { + "guid": "", + "name": "exclude-minikube-kube-system-resources-12", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Pod", + "namespace": "kube-system", + "name": "kube-scheduler-.*" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "C-.*" + } + ] + }, + { + "guid": "", + "name": "exclude-minikube-kube-system-resources-13", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Pod", + "namespace": "kube-system", + "name": "kube-controller-manager-.*" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "C-.*" + } + ] + }, + { + "guid": "", + "name": "exclude-minikube-kube-public-resources-1", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Namespace", + "name": "kube-public" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-minikube-kube-node-lease-resources-1", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Namespace", + "name": "kube-node-lease" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-kube-system-service-accounts-65", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "coredns" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-kube-system-service-accounts-67", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "name": "horizontal-pod-autoscaler", + "kind": "ServiceAccount", "namespace": "kube-system" } } ], - "posturePolicies": [ - { - "frameworkName": "", - "controlID": "C-.*" - } - ] - }, - { - "guid": "", - "name": "exclude-gke-kube-system-resources-11", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "fluentbit-gke-small" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "", - "controlID": "C-.*" - } - ] - }, - { - "guid": "", - "name": "exclude-gke-kube-system-resources-12", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ 
- { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "fluentbit-gke-max" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "", - "controlID": "C-.*" - } - ] - }, - { - "guid": "", - "name": "exclude-gke-kube-system-resources-13", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "namespace": "kube-system", - "name": "fluentbit-gke.*", - "kind": "DaemonSet" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "", - "controlID": "C-.*" - } - ] - }, - { - "guid": "", - "name": "exclude-gke-kube-system-resources-14", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "name": "nccl-fastsocket-installer", - "kind": "DaemonSet", - "namespace": "kube-system" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "", - "controlID": "C-.*" - } - ] - }, - { - "guid": "", - "name": "exclude-gke-kube-system-resources-15", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "filestore-node" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "", - "controlID": "C-.*" - } - ] - }, - { - "guid": "", - "name": "exclude-gke-kube-system-resources-16", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "namespace": "kube-system", - "name": "pdcsi-node", - "kind": "DaemonSet" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "", - "controlID": "C-.*" - } - ] - }, - { - "guid": "", - "name": "exclude-gke-kube-system-resources-17", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "ip-masq-agent" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "", - "controlID": "C-.*" - } - ] - }, - { - "guid": "", - "name": "exclude-gke-kube-system-resources-18", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "name": "anetd-win", - "kind": "DaemonSet", - "namespace": "kube-system" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "", - "controlID": "C-.*" - } - ] - }, - { - "guid": "", - "name": "exclude-gke-kube-system-resources-19", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "gke-metadata-server" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "", - "controlID": "C-.*" - } - ] - }, - { - "guid": "", - "name": 
"exclude-gke-kube-system-resources-20", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "gke-metrics-agent-windows" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "", - "controlID": "C-.*" - } - ] - }, - { - "guid": "", - "name": "exclude-gke-kube-system-resources-22", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "namespace": "kube-system", - "name": "nvidia-gpu-device-plugin" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "", - "controlID": "C-.*" - } - ] - }, - { - "guid": "", - "name": "exclude-gke-kube-system-resources-24", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "namespace": "kube-system", - "name": "kube-dns" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "", - "controlID": "C-.*" - } - ] - }, - { - "guid": "", - "name": "exclude-gke-kube-system-resources-25", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "namespace": "kube-system", - "name": "egress-nat-controller" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "", - "controlID": "C-.*" - } - ] - }, - { - "guid": "", - "name": "exclude-gke-kube-system-resources-26", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "namespace": "kube-system", - "name": "event-exporter-gke", - "kind": "Deployment" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "", - "controlID": "C-.*" - } - ] - }, - { - "guid": "", - "name": "exclude-gke-kube-system-resources-27", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "namespace": "kube-system", - "name": "antrea-controller" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "", - "controlID": "C-.*" - } - ] - }, - { - "guid": "", - "name": "exclude-gke-kube-system-resources-28", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "namespace": "kube-system", - "name": "antrea-controller-horizontal-autoscaler" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "", - "controlID": "C-.*" - } - ] - }, - { - "guid": "", - "name": "exclude-gke-kube-system-resources-29", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": 
"Attributes", - "attributes": { - "namespace": "kube-system", - "name": "kube-dns-autoscaler", - "kind": "Deployment" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "", - "controlID": "C-.*" - } - ] - }, - { - "guid": "", - "name": "exclude-gke-kube-system-resources-30", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "namespace": "kube-system", - "name": "metrics-server-v[0-9.]+" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "", - "controlID": "C-.*" - } - ] - }, - { - "guid": "", - "name": "exclude-gke-kube-system-resources-31", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "namespace": "kube-system", - "name": "konnectivity-agent-autoscaler" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "", - "controlID": "C-.*" - } - ] - }, - { - "guid": "", - "name": "exclude-gke-kube-system-resources-32", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "namespace": "kube-system", - "name": "fluentd-elasticsearch", - "kind": "DaemonSet" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "", - "controlID": "C-.*" - } - ] - }, - { - "guid": "", - "name": "exclude-gke-kube-system-resources-33", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "Deployment", - "namespace": "kube-system", - "name": "konnectivity-agent" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "", - "controlID": "C-.*" - } - ] - }, - { - "guid": "", - "name": "exclude-gke-kube-system-resources-34", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "name": "l7-default-backend", - "kind": "Deployment", - "namespace": "kube-system" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "", - "controlID": "C-.*" - } - ] - }, - { - "guid": "", - "name": "exclude-kube-system-service-accounts-38", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "konnectivity-agent-cpha" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-kube-system-service-accounts-49", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "cloud-provider" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": 
"exclude-kube-system-service-accounts-71", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "kube-dns" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-kube-system-service-accounts-78", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "kube-dns-autoscaler" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-kube-system-service-accounts-79", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "netd" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-kube-system-service-accounts-80", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "metadata-proxy" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-kube-system-service-accounts-81", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "namespace": "kube-system", - "name": "antrea-controller", - "kind": "ServiceAccount" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-kube-system-service-accounts-82", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "cilium" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-kube-system-service-accounts-83", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "node-local-dns" - } - } - ], "posturePolicies": [ { "frameworkName": "" @@ -24820,7 +42719,6 @@ "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -24830,7 +42728,7 @@ "attributes": { "kind": "ServiceAccount", "namespace": "kube-system", - "name": "gke-metrics-agent" + "name": "storage-provisioner" } } ], @@ -24842,12 +42740,11 @@ }, { "guid": "", - "name": "exclude-kube-system-service-accounts-85", + "name": "exclude-system-users-and-groups-14", 
"attributes": { "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -24855,251 +42752,8 @@ { "designatorType": "Attributes", "attributes": { - "namespace": "kube-system", - "name": "egress-nat-controller", - "kind": "ServiceAccount" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-kube-system-service-accounts-86", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "antrea-agent" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-kube-system-service-accounts-87", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "name": "event-exporter-sa", - "kind": "ServiceAccount", - "namespace": "kube-system" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-kube-system-service-accounts-88", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "antrea-cpha" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-kube-system-service-accounts-89", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "fluentbit-gke" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-kube-system-service-accounts-90", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "pdcsi-node-sa" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-kube-system-service-accounts-91", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "ip-masq-agent" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-kube-system-service-accounts-92", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "filestorecsi-node-sa" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - 
"name": "exclude-kube-system-service-accounts-93", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "ServiceAccount", - "namespace": "kube-system", - "name": "gke-metadata-server" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-kube-system-users-and-groups-1", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "namespace": "kube-system", - "name": "system:vpa-recommender", + "apiVersion": "rbac.authorization.k8s.io", + "name": "system:kube-scheduler", "kind": "User" } } @@ -25110,241 +42764,6 @@ } ] }, - { - "guid": "", - "name": "exclude-kube-system-users-and-groups-2", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "User", - "namespace": "kube-system", - "name": "system:anet-operator" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-system-users-and-groups-4", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "User", - "name": "system:clustermetrics" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-system-users-and-groups-5", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "User", - "name": "system:controller:glbc" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-system-users-and-groups-6", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "User", - "name": "system:l7-lb-controller" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-system-users-and-groups-7", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "User", - "name": "system:managed-certificate-controller" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-system-users-and-groups-8", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "User", - "name": "system:gke-common-webhooks" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-system-users-and-groups-11", - "attributes": { - 
"systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "User", - "name": "system:gcp-controller-manager" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-system-users-and-groups-12", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "User", - "name": "system:resource-tracker" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-system-users-and-groups-13", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "User", - "name": "system:storageversionmigrator" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, { "guid": "", "name": "exclude-system-users-and-groups-15", @@ -25352,7 +42771,6 @@ "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -25360,8 +42778,9 @@ { "designatorType": "Attributes", "attributes": { - "kind": "User", - "name": "system:kubestore-collector" + "apiVersion": "rbac.authorization.k8s.io", + "name": "system:kube-controller-manager", + "kind": "User" } } ], @@ -25373,12 +42792,11 @@ }, { "guid": "", - "name": "exclude-system-resources-1", + "name": "exclude-system-users-and-groups-16", "attributes": { "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -25386,8 +42804,9 @@ { "designatorType": "Attributes", "attributes": { - "kind": "ValidatingWebhookConfiguration", - "name": "ca-validate-cfg" + "apiVersion": "rbac.authorization.k8s.io", + "name": "system:masters", + "kind": "Group" } } ], @@ -25399,12 +42818,11 @@ }, { "guid": "", - "name": "exclude-system-resources-2", + "name": "exclude-default-namespace-resources-1", "attributes": { "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -25412,8 +42830,9 @@ { "designatorType": "Attributes", "attributes": { - "kind": "ValidatingWebhookConfiguration", - "name": "flowcontrol-guardrails.config.common-webhooks.networking.gke.io" + "kind": "ConfigMap", + "name": "kubescape", + "namespace": "default" } } ], @@ -25425,12 +42844,11 @@ }, { "guid": "", - "name": "exclude-system-resources-3", + "name": "exclude-default-namespace-resources-2", "attributes": { "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -25438,8 +42856,8 @@ { "designatorType": "Attributes", "attributes": { - "name": "validation-webhook.snapshot.storage.gke.io", - "kind": "ValidatingWebhookConfiguration" + "kind": "Namespace", + "name": "default" } } ], @@ -25451,12 +42869,11 @@ }, { "guid": "", - "name": "exclude-system-resources-4", + "name": "exclude-default-namespace-resources-3", "attributes": { "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -25464,8 +42881,9 @@ { "designatorType": "Attributes", "attributes": { - "kind": "ValidatingWebhookConfiguration", - "name": 
"nodelimit.config.common-webhooks.networking.gke.io" + "kind": "ServiceAccount", + "name": "default", + "namespace": "default" } } ], @@ -25477,12 +42895,11 @@ }, { "guid": "", - "name": "exclude-system-resources-5", + "name": "exclude-otel", "attributes": { "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -25490,8 +42907,347 @@ { "designatorType": "Attributes", "attributes": { - "kind": "ValidatingWebhookConfiguration", - "name": "gkepolicy.config.common-webhooks.networking.gke.io" + "kind": "Deployment", + "name": "otel-collector", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "c-0034" + } + ] + }, + { + "guid": "", + "name": "exclude-service-accounts-16", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "operator", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "c-0007" + }, + { + "frameworkName": "", + "controlID": "c-0186" + }, + { + "frameworkName": "", + "controlID": "c-0053" + }, + { + "frameworkName": "", + "controlID": "c-0015" + } + ] + }, + { + "guid": "", + "name": "exclude-service-accounts-17", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "kubescape", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "c-0007" + }, + { + "frameworkName": "", + "controlID": "c-0186" + }, + { + "frameworkName": "", + "controlID": "c-0053" + }, + { + "frameworkName": "", + "controlID": "c-0015" + } + ] + }, + { + "guid": "", + "name": "exclude-service-accounts-18", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "kollector", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "c-0007" + }, + { + "frameworkName": "", + "controlID": "c-0186" + }, + { + "frameworkName": "", + "controlID": "c-0053" + }, + { + "frameworkName": "", + "controlID": "c-0015" + } + ] + }, + { + "guid": "", + "name": "exclude-service-accounts-19", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "name": "storage-aggregated-apiserver-sa", + "namespace": "kubescape" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "c-0007" + }, + { + "frameworkName": "", + "controlID": "c-0186" + }, + { + "frameworkName": "", + "controlID": "c-0053" + }, + { + "frameworkName": "", + "controlID": "c-0015" + } + ] + }, + { + "guid": "", + "name": "exclude-service-accounts-20", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "namespace": "kubescape", + "kind": "ServiceAccount", + "name": "storage" + } + } + ], + "posturePolicies": [ + { + 
"frameworkName": "", + "controlID": "c-0034" + }, + { + "frameworkName": "", + "controlID": "c-0190" + } + ] + }, + { + "guid": "", + "name": "exclude-service-accounts-21", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "namespace": "kubescape", + "kind": "ServiceAccount", + "name": "node-agent" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "c-0034" + }, + { + "frameworkName": "", + "controlID": "c-0190" + } + ] + }, + { + "guid": "", + "name": "exclude-kubescape-prometheus-security-context", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "name": "kubescape", + "namespace": "kubescape-prometheus" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "c-0055" + }, + { + "frameworkName": "", + "controlID": "c-0017" + }, + { + "frameworkName": "", + "controlID": "c-0210" + }, + { + "frameworkName": "", + "controlID": "c-0211" + } + ] + }, + { + "guid": "", + "name": "exclude-kubescape-prometheus-deployment-allowed-registry", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "name": "kubescape", + "namespace": "kubescape-prometheus", + "kind": "Deployment" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "c-0078" + } + ] + }, + { + "guid": "", + "name": "exclude-kubescape-prometheus-deployment-ingress-and-egress", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "name": "kubescape", + "namespace": "kubescape-prometheus" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "c-0030" + } + ] + }, + { + "guid": "", + "name": "exclude-aks-kube-system-deployments-1", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "namespace": "kube-system", + "name": "coredns", + "kind": "Deployment" } } ], @@ -25503,12 +43259,11 @@ }, { "guid": "", - "name": "exclude-system-resources-6", + "name": "exclude-aks-kube-system-deployments-2", "attributes": { "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -25516,8 +43271,9 @@ { "designatorType": "Attributes", "attributes": { - "kind": "ValidatingWebhookConfiguration", - "name": "validation-webhook.snapshot.storage.k8s.io" + "kind": "Deployment", + "namespace": "kube-system", + "name": "coredns-autoscaler" } } ], @@ -25529,12 +43285,11 @@ }, { "guid": "", - "name": "exclude-system-resources-7", + "name": "exclude-aks-kube-system-deployments-3", "attributes": { "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -25542,8 +43297,9 @@ { "designatorType": "Attributes", "attributes": { - "kind": "APIService", - "name": "v1beta1.metrics.k8s.io" + "kind": "Deployment", + "namespace": "kube-system", + "name": "konnectivity-agent" } } ], @@ 
-25555,12 +43311,1215 @@ }, { "guid": "", - "name": "exclude-system-resources-8", + "name": "exclude-aks-kube-system-deployments-6", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "namespace": "kube-system", + "name": "metrics-server" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-aks-kube-system-deployments-8", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "csi-azuredisk-node-win" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-aks-kube-system-deployments-9", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "azure-ip-masq-agent" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-aks-kube-system-deployments-10", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "namespace": "kube-system", + "name": "cloud-node-manager", + "kind": "DaemonSet" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-aks-kube-system-deployments-11", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "cloud-node-manager-windows" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-aks-kube-system-deployments-13", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Deployment", + "namespace": "kube-system", + "name": "omsagent-rs" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-aks-kube-system-pods-1", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Pod", + "namespace": "kube-system", + "name": "azure-ip-masq-agent-[A-Za-z0-9]+" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "C-.*" + } + ] + }, + { + "guid": "", + "name": "exclude-aks-kube-system-pods-2", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Pod", + "namespace": "kube-system", + "name": "cloud-node-manager-[A-Za-z0-9]+" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "C-.*" + } + ] + }, + { + "guid": "", + "name": 
"exclude-aks-kube-system-pods-3", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Pod", + "namespace": "kube-system", + "name": "coredns-autoscaler--[A-Za-z0-9]+-[A-Za-z0-9]+" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "C-.*" + } + ] + }, + { + "guid": "", + "name": "exclude-aks-kube-system-pods-5", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Pod", + "namespace": "kube-system", + "name": "csi-azuredisk-node-[A-Za-z0-9]+" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "C-.*" + } + ] + }, + { + "guid": "", + "name": "exclude-aks-kube-system-pods-6", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Pod", + "namespace": "kube-system", + "name": "csi-azurefile-node-[A-Za-z0-9]+" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "C-.*" + } + ] + }, + { + "guid": "", + "name": "exclude-aks-kube-system-pods-7", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "namespace": "kube-system", + "name": "konnectivity-agent-[A-Za-z0-9]+-[A-Za-z0-9]+", + "kind": "Pod" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "C-.*" + } + ] + }, + { + "guid": "", + "name": "exclude-aks-kube-system-pods-10", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Pod", + "namespace": "kube-system", + "name": "omsagent-[A-Za-z0-9]+" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "C-.*" + } + ] + }, + { + "guid": "", + "name": "exclude-aks-kube-system-pods-11", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Pod", + "namespace": "kube-system", + "name": "omsagent-rs-[A-Za-z0-9]+-[A-Za-z0-9]+" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "", + "controlID": "C-.*" + } + ] + }, + { + "guid": "", + "name": "exclude-aks-kube-system-services-1", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Service", + "namespace": "kube-system", + "name": "kube-dns" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-aks-kube-system-services-2", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "Service", + "namespace": "kube-system", + "name": "metrics-server" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": 
"exclude-aks-kube-system-daemonsets-4", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "csi-azuredisk-node" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-aks-kube-system-daemonsets-6", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "csi-azurefile-node" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-aks-kube-system-daemonsets-7", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "csi-azurefile-node-win" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-aks-kube-system-daemonsets-8", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "namespace": "kube-system", + "name": "kube-proxy", + "kind": "DaemonSet" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-aks-kube-system-daemonsets-9", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "DaemonSet", + "namespace": "kube-system", + "name": "omsagent" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-aks-kube-system-daemonsets-10", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "name": "omsagent-win", + "kind": "DaemonSet", + "namespace": "kube-system" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-aks-kube-system-replicasets-1", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ReplicaSet", + "namespace": "kube-system", + "name": "coredns-autoscaler-[A-Za-z0-9]+" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-aks-kube-system-replicasets-2", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "namespace": "kube-system", + "name": "coredns-[A-Za-z0-9]+", + "kind": "ReplicaSet" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-aks-kube-system-replicasets-3", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + 
"alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ReplicaSet", + "namespace": "kube-system", + "name": "konnectivity-agent-[A-Za-z0-9]+" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-aks-kube-system-replicasets-4", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "namespace": "kube-system", + "name": "metrics-server-[A-Za-z0-9]+", + "kind": "ReplicaSet" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-aks-kube-system-replicasets-5", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "name": "omsagent-rs-[A-Za-z0-9]+", + "kind": "ReplicaSet", + "namespace": "kube-system" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-aks-kube-system-sa-2", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "name": "azure-cloud-provider", + "kind": "ServiceAccount", + "namespace": "kube-system" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-aks-kube-system-sa-6", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "name": "cloud-node-manager", + "kind": "ServiceAccount", + "namespace": "kube-system" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-aks-kube-system-sa-9", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "coredns-autoscaler" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-aks-kube-system-sa-11", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "csi-azuredisk-node-sa" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-aks-kube-system-sa-12", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ServiceAccount", + "namespace": "kube-system", + "name": "csi-azurefile-node-sa" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-aks-kube-system-sa-30", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "name": "omsagent", + "kind": 
"ServiceAccount", + "namespace": "kube-system" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-aks-kube-system-sa-46", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "namespace": "default", + "name": "kube-root-ca.crt", + "kind": "ConfigMap" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-aks-kube-system-sa-47", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ConfigMap", + "namespace": "kube-node-lease", + "name": "kube-root-ca.crt" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-aks-kube-system-sa-48", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "namespace": "kube-public", + "name": "kube-root-ca.crt", + "kind": "ConfigMap" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-aks-kube-system-sa-49", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ConfigMap", + "namespace": "kube-system", + "name": "azure-ip-masq-agent-config-reconciled" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-aks-kube-system-sa-50", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ConfigMap", + "namespace": "kube-system", + "name": "cluster-autoscaler-status" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-aks-kube-system-sa-51", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ConfigMap", + "namespace": "kube-system", + "name": "container-azm-ms-aks-k8scluster" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-aks-kube-system-sa-52", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ConfigMap", + "namespace": "kube-system", + "name": "coredns" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-aks-kube-system-sa-53", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "name": "coredns-autoscaler", + "kind": "ConfigMap", + "namespace": "kube-system" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-aks-kube-system-sa-54", + 
"attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ConfigMap", + "namespace": "kube-system", + "name": "coredns-custom" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-aks-kube-system-sa-55", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ConfigMap", + "namespace": "kube-system", + "name": "extension-apiserver-authentication" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-aks-kube-system-sa-56", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "namespace": "kube-system", + "name": "kube-root-ca.crt", + "kind": "ConfigMap" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-aks-kube-system-sa-57", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "name": "omsagent-rs-config", + "kind": "ConfigMap", + "namespace": "kube-system" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-aks-kube-system-sa-58", + "attributes": { + "systemException": true + }, + "policyType": "postureExceptionPolicy", + "actions": [ + "alertOnly" + ], + "resources": [ + { + "designatorType": "Attributes", + "attributes": { + "kind": "ConfigMap", + "namespace": "kube-system", + "name": "overlay-upgrade-data" + } + } + ], + "posturePolicies": [ + { + "frameworkName": "" + } + ] + }, + { + "guid": "", + "name": "exclude-aks-kube-system-sa-59", "attributes": { "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -25569,7 +44528,7 @@ "designatorType": "Attributes", "attributes": { "kind": "MutatingWebhookConfiguration", - "name": "pod-ready.config.common-webhooks.networking.gke.io" + "name": "aks-webhook-admission-controller" } } ], @@ -25581,12 +44540,11 @@ }, { "guid": "", - "name": "exclude-system-resources-9", + "name": "exclude-aks-kube-system-sa-60", "attributes": { "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -25594,7 +44552,7 @@ { "designatorType": "Attributes", "attributes": { - "name": "ca-mutate-cfg", + "name": "aks-node-mutating-webhook", "kind": "MutatingWebhookConfiguration" } } @@ -25607,12 +44565,11 @@ }, { "guid": "", - "name": "exclude-system-resources-10", + "name": "exclude-aks-kube-system-sa-61", "attributes": { "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -25620,8 +44577,8 @@ { "designatorType": "Attributes", "attributes": { - "kind": "MutatingWebhookConfiguration", - "name": "neg-annotation.config.common-webhooks.networking.gke.io" + "kind": "ValidatingWebhookConfiguration", + "name": "aks-node-validating-webhook" } } ], @@ -25633,12 +44590,11 @@ }, { "guid": "", - "name": "exclude-system-resources-11", + "name": 
"exclude-aks-kube-system-sa-63", "attributes": { "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -25646,8 +44602,8 @@ { "designatorType": "Attributes", "attributes": { - "kind": "MutatingWebhookConfiguration", - "name": "mutate-scheduler-profile.config.common-webhooks.networking.gke.io" + "kind": "Group", + "name": "system:nodes" } } ], @@ -25659,12 +44615,11 @@ }, { "guid": "", - "name": "exclude-system-resources-12", + "name": "exclude-aks-kube-system-sa-64", "attributes": { "systemException": true }, "policyType": "postureExceptionPolicy", - "creationTime": "", "actions": [ "alertOnly" ], @@ -25672,139 +44627,8 @@ { "designatorType": "Attributes", "attributes": { - "kind": "MutatingWebhookConfiguration", - "name": "sasecret-redacter.config.common-webhooks.networking.gke.io" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-system-resources-13", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "MutatingWebhookConfiguration", - "name": "workload-defaulter.config.common-webhooks.networking.gke.io" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-system-resources-14", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "MutatingWebhookConfiguration", - "name": "admissionwebhookcontroller.config.common-webhooks.networking.gke.io" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-system-resources-15", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "MutatingWebhookConfiguration", - "name": "gke-vpa-webhook-config" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-system-resources-16", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "MutatingWebhookConfiguration", - "name": "filestorecsi-mutation-webhook.storage.k8s.io" - } - } - ], - "posturePolicies": [ - { - "frameworkName": "" - } - ] - }, - { - "guid": "", - "name": "exclude-host-scanner-resources", - "attributes": { - "systemException": true - }, - "policyType": "postureExceptionPolicy", - "creationTime": "", - "actions": [ - "alertOnly" - ], - "resources": [ - { - "designatorType": "Attributes", - "attributes": { - "kind": "DaemonSet", - "name": "host-scanner", - "namespace": "kubescape" + "kind": "User", + "name": "clusterAdmin" } } ],