mirror of https://github.com/aquasecurity/kube-hunter.git
synced 2026-02-15 18:40:19 +00:00

Compare commits: 31 commits on branch feature/mu...
Commits in this comparison (SHA1):

- c849b597b2
- eb31026d8e
- a578726495
- c442172715
- d7df38fc95
- 9ce385a190
- ebd8e2e405
- 585b490f19
- 6c4ad4f6fd
- e6a3c12098
- 2a7020682e
- e1896f3983
- fc7fbbf1fc
- 7c62cc21af
- c17aa17096
- 4204879251
- a746bd0eb1
- b379e64314
- 00eb0dfa87
- 8d045fb1a8
- 83b19d4208
- 473e4fe2b5
- f67f08225c
- c96312b91e
- a7d26452fb
- e63efddf9f
- 6689005544
- 0b90e0e43d
- 65eefed721
- 599e9967e3
- 5745f4a32b
.flake8 (2 changes)

@@ -1,5 +1,5 @@
 [flake8]
-ignore = E203, E266, E501, W503, B903, T499
+ignore = E203, E266, E501, W503, B903, T499, B020
 max-line-length = 120
 max-complexity = 18
 select = B,C,E,F,W,B9,T4
.github/workflows/publish.yml (vendored, 36 changes)

@@ -8,8 +8,8 @@ env:
   ALIAS: aquasecurity
   REP: kube-hunter

 jobs:
-  publish:
-    name: Publish
+  dockerhub:
+    name: Publish To Docker Hub
     runs-on: ubuntu-18.04
     steps:
      - name: Check Out Repo
@@ -39,7 +39,7 @@ jobs:
          password: ${{ secrets.ECR_SECRET_ACCESS_KEY }}
      - name: Get version
        id: get_version
-        uses: crazy-max/ghaction-docker-meta@v1
+        uses: crazy-max/ghaction-docker-meta@v3
        with:
          images: ${{ env.REP }}
          tag-semver: |
@@ -63,3 +63,33 @@ jobs:

      - name: Image digest
        run: echo ${{ steps.docker_build.outputs.digest }}

+  pypi:
+    name: Publish To PyPI
+    runs-on: ubuntu-18.04
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v2
+
+      - name: Set up Python
+        uses: actions/setup-python@v2
+        with:
+          python-version: '3.9'
+
+      - name: Install dependencies
+        shell: bash
+        run: |
+          pip install -U pip
+          make deps
+
+      - name: Build project
+        shell: bash
+        run: |
+          python -m pip install wheel
+          make build
+
+      - name: Publish distribution package to PyPI
+        if: startsWith(github.ref, 'refs/tags')
+        uses: pypa/gh-action-pypi-publish@master
+        with:
+          password: ${{ secrets.PYPI_API_TOKEN }}
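The new PyPI job above only publishes on tag refs (`if: startsWith(github.ref, 'refs/tags')`), so cutting a release from a workstation would just be a matter of pushing a tag. A sketch, with an illustrative version number:

```bash
git tag v0.6.8
git push origin v0.6.8
```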
.github/workflows/release.yml (vendored, 10 changes)

@@ -10,7 +10,7 @@ name: Release
 jobs:
   build:
     name: Upload Release Asset
-    runs-on: ubuntu-16.04
+    runs-on: ubuntu-18.04
     steps:
      - name: Checkout code
        uses: actions/checkout@v2
@@ -18,12 +18,14 @@ jobs:
      - name: Set up Python
        uses: actions/setup-python@v2
        with:
-          python-version: '3.9'
+          python-version: '3.8'

      - name: Install dependencies
        shell: bash
        run: |
-          python -m pip install -U pip
-          python -m pip install -r requirements-dev.txt
+          pip install -U pip
+          pip install pyinstaller
+          make deps

      - name: Build project
        shell: bash
.github/workflows/test.yml (vendored, 10 changes)

@@ -13,7 +13,7 @@ jobs:
       fail-fast: false
       matrix:
         python-version: ["3.6", "3.7", "3.8", "3.9"]
-        os: [ubuntu-20.04, ubuntu-18.04, ubuntu-16.04]
+        os: [ubuntu-20.04, ubuntu-18.04]

     steps:
      - uses: actions/checkout@v2
@@ -38,11 +38,11 @@ jobs:
            ${{ matrix.os }}-${{ matrix.python-version }}-

      - name: Install dependencies
        shell: bash
        run: |
-          python -m pip install -U pip
-          python -m pip install -U wheel
-          python -m pip install -r requirements.txt
-          python -m pip install -r requirements-dev.txt
+          pip install -U pip
+          make dev-deps
+          make install

      - name: Test
        shell: bash
Dockerfile

@@ -26,4 +26,7 @@ RUN apk add --no-cache \
 COPY --from=builder /usr/local/lib/python3.8/site-packages /usr/local/lib/python3.8/site-packages
 COPY --from=builder /usr/local/bin/kube-hunter /usr/local/bin/kube-hunter

+# Add default plugins: https://github.com/aquasecurity/kube-hunter-plugins
+RUN pip install kube-hunter-arp-spoof>=0.0.3 kube-hunter-dns-spoof>=0.0.3

 ENTRYPOINT ["kube-hunter"]
Makefile (2 changes)

@@ -31,7 +31,7 @@ lint-check:

 .PHONY: test
 test:
-	pytest
+	python -m pytest

 .PHONY: build
 build:
||||
126
README.md
126
README.md
@@ -18,7 +18,8 @@ kube-hunter hunts for security weaknesses in Kubernetes clusters. The tool was d
|
||||
|
||||
**Run kube-hunter**: kube-hunter is available as a container (aquasec/kube-hunter), and we also offer a web site at [kube-hunter.aquasec.com](https://kube-hunter.aquasec.com) where you can register online to receive a token allowing you to see and share the results online. You can also run the Python code yourself as described below.
|
||||
|
||||
**Explore vulnerabilities**: The kube-hunter knowledge base includes articles about discoverable vulnerabilities and issues. When kube-hunter reports an issue, it will show its VID (Vulnerability ID) so you can look it up in the KB at https://aquasecurity.github.io/kube-hunter/
|
||||
**Explore vulnerabilities**: The kube-hunter knowledge base includes articles about discoverable vulnerabilities and issues. When kube-hunter reports an issue, it will show its VID (Vulnerability ID) so you can look it up in the KB at https://aquasecurity.github.io/kube-hunter/
|
||||
_If you're interested in kube-hunter's integration with the Kubernetes ATT&CK Matrix [Continue Reading](#kuberentes-attck-matrix)_
|
||||
|
||||
**Contribute**: We welcome contributions, especially new hunter modules that perform additional tests. If you would like to develop your modules please read [Guidelines For Developing Your First kube-hunter Module](https://github.com/aquasecurity/kube-hunter/blob/main/CONTRIBUTING.md).
|
||||
|
||||
@@ -27,24 +28,44 @@ kube-hunter hunts for security weaknesses in Kubernetes clusters. The tool was d
|
||||
Table of Contents
|
||||
=================
|
||||
|
||||
* [Hunting](#hunting)
|
||||
* [Where should I run kube-hunter?](#where-should-i-run-kube-hunter)
|
||||
* [Scanning options](#scanning-options)
|
||||
* [Active Hunting](#active-hunting)
|
||||
* [List of tests](#list-of-tests)
|
||||
* [Nodes Mapping](#nodes-mapping)
|
||||
* [Output](#output)
|
||||
* [Dispatching](#dispatching)
|
||||
* [Advanced Usage](#advanced-usage)
|
||||
* [Deployment](#deployment)
|
||||
* [On Machine](#on-machine)
|
||||
* [Prerequisites](#prerequisites)
|
||||
* [Container](#container)
|
||||
* [Pod](#pod)
|
||||
* [Contribution](#contribution)
|
||||
|
||||
## Hunting
|
||||
- [Table of Contents](#table-of-contents)
|
||||
- [Kubernetes ATT&CK Matrix](#kubernetes-attck-matrix)
|
||||
- [Hunting](#hunting)
|
||||
- [Where should I run kube-hunter?](#where-should-i-run-kube-hunter)
|
||||
- [Scanning options](#scanning-options)
|
||||
- [Authentication](#authentication)
|
||||
- [Active Hunting](#active-hunting)
|
||||
- [List of tests](#list-of-tests)
|
||||
- [Nodes Mapping](#nodes-mapping)
|
||||
- [Output](#output)
|
||||
- [Dispatching](#dispatching)
|
||||
- [Advanced Usage](#advanced-usage)
|
||||
- [Azure Quick Scanning](#azure-quick-scanning)
|
||||
- [Custom Hunting](#custom-hunting)
|
||||
- [Deployment](#deployment)
|
||||
- [On Machine](#on-machine)
|
||||
- [Prerequisites](#prerequisites)
|
||||
- [Install with pip](#install-with-pip)
|
||||
- [Run from source](#run-from-source)
|
||||
- [Container](#container)
|
||||
- [Pod](#pod)
|
||||
- [Contribution](#contribution)
|
||||
- [License](#license)
|
||||
|
||||
---
|
||||
## Kubernetes ATT&CK Matrix
|
||||
|
||||
kube-hunter now supports the new format of the Kubernetes ATT&CK matrix.
|
||||
While kube-hunter's vulnerabilities are a collection of creative techniques designed to mimic an attacker in the cluster (or outside it)
|
||||
The Mitre's ATT&CK defines a more general standardised categories of techniques to do so.
|
||||
|
||||
You can think of kube-hunter vulnerabilities as small steps for an attacker, which follows the track of a more general technique he would aim for.
|
||||
Most of kube-hunter's hunters and vulnerabilities can closly fall under those techniques, That's why we moved to follow the Matrix standard.
|
||||
|
||||
_Some kube-hunter vulnerabities which we could not map to Mitre technique, are prefixed with the `General` keyword_
|
||||

|
||||
|
||||
## Hunting
|
||||
### Where should I run kube-hunter?
|
||||
|
||||
There are three different ways to run kube-hunter, each providing a different approach to detecting weaknesses in your cluster:
|
||||
@@ -53,7 +74,8 @@ Run kube-hunter on any machine (including your laptop), select Remote scanning a
|
||||
|
||||
You can run kube-hunter directly on a machine in the cluster, and select the option to probe all the local network interfaces.
|
||||
|
||||
You can also run kube-hunter in a pod within the cluster. This indicates how exposed your cluster would be if one of your application pods is compromised (through a software vulnerability, for example).
|
||||
You can also run kube-hunter in a pod within the cluster. This indicates how exposed your cluster would be if one of your application pods is compromised (through a software vulnerability, for example). (_`--pod` flag_)
|
||||
|
||||
|
||||
### Scanning options
|
||||
|
||||
@@ -76,6 +98,26 @@ To specify interface scanning, you can use the `--interface` option (this will s
|
||||
To specify a specific CIDR to scan, use the `--cidr` option. Example:
|
||||
`kube-hunter --cidr 192.168.0.0/24`
|
||||
|
||||
4. **Kubernetes node auto-discovery**
|
||||
|
||||
Set `--k8s-auto-discover-nodes` flag to query Kubernetes for all nodes in the cluster, and then attempt to scan them all. By default, it will use [in-cluster config](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) to connect to the Kubernetes API. If you'd like to use an explicit kubeconfig file, set `--kubeconfig /location/of/kubeconfig/file`.
|
||||
|
||||
Also note, that this is always done when using `--pod` mode.
|
||||
|
||||
### Authentication
|
||||
In order to mimic an attacker in it's early stages, kube-hunter requires no authentication for the hunt.
|
||||
|
||||
* **Impersonate** - You can provide kube-hunter with a specific service account token to use when hunting by manually passing the JWT Bearer token of the service-account secret with the `--service-account-token` flag.
|
||||
|
||||
Example:
|
||||
```bash
|
||||
$ kube-hunter --active --service-account-token eyJhbGciOiJSUzI1Ni...
|
||||
```
|
||||
|
||||
* When runing with `--pod` flag, kube-hunter uses the service account token [mounted inside the pod](https://kubernetes.io/docs/reference/access-authn-authz/service-accounts-admin/) to authenticate to services it finds during the hunt.
|
||||
* if specified, `--service-account-token` flag takes priority when running as a pod
|
||||
|
||||
|
||||
### Active Hunting
|
||||
|
||||
Active hunting is an option in which kube-hunter will exploit vulnerabilities it finds, to explore for further vulnerabilities.
|
||||
@@ -115,11 +157,49 @@ Available dispatch methods are:
|
||||
* KUBEHUNTER_HTTP_DISPATCH_URL (defaults to: https://localhost)
|
||||
* KUBEHUNTER_HTTP_DISPATCH_METHOD (defaults to: POST)
|
||||
|
||||
### Advanced Usage
|
||||
#### Azure Quick Scanning
|
||||
When running **as a Pod in an Azure environment**, kube-hunter will fetch subnets from the Instance Metadata Service. Naturally this makes the discovery process take longer.
|
||||
|
||||
## Advanced Usage
|
||||
### Azure Quick Scanning
|
||||
When running **as a Pod in an Azure or AWS environment**, kube-hunter will fetch subnets from the Instance Metadata Service. Naturally this makes the discovery process take longer.
|
||||
To hardlimit subnet scanning to a `/24` CIDR, use the `--quick` option.
|
||||
|
||||
### Custom Hunting
|
||||
Custom hunting enables advanced users to have control over what hunters gets registered at the start of a hunt.
|
||||
**If you know what you are doing**, this can help if you want to adjust kube-hunter's hunting and discovery process for your needs.
|
||||
|
||||
Example:
|
||||
```
|
||||
kube-hunter --custom <HunterName1> <HunterName2>
|
||||
```
|
||||
Enabling Custom hunting removes all hunters from the hunting process, except the given whitelisted hunters.
|
||||
|
||||
The `--custom` flag reads a list of hunters class names, in order to view all of kube-hunter's class names, you can combine the flag `--raw-hunter-names` with the `--list` flag.
|
||||
|
||||
Example:
|
||||
```
|
||||
kube-hunter --active --list --raw-hunter-names
|
||||
```
|
||||
|
||||
**Notice**: Due to kube-huner's architectural design, the following "Core Hunters/Classes" will always register (even when using custom hunting):
|
||||
* HostDiscovery
|
||||
* _Generates ip addresses for the hunt by given configurations_
|
||||
* _Automatically discovers subnets using cloud Metadata APIs_
|
||||
* FromPodHostDiscovery
|
||||
* _Auto discover attack surface ip addresses for the hunt by using Pod based environment techniques_
|
||||
* _Automatically discovers subnets using cloud Metadata APIs_
|
||||
* PortDiscovery
|
||||
* _Port scanning given ip addresses for known kubernetes services ports_
|
||||
* Collector
|
||||
* _Collects discovered vulnerabilities and open services for future report_
|
||||
* StartedInfo
|
||||
* _Prints the start message_
|
||||
* SendFullReport
|
||||
* _Dispatching the report based on given configurations_
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
## Deployment
|
||||
There are three methods for deploying kube-hunter:
|
||||
|
||||
@@ -165,7 +245,7 @@ python3 kube_hunter
|
||||
_If you want to use pyinstaller/py2exe you need to first run the install_imports.py script._
|
||||
|
||||
### Container
|
||||
Aqua Security maintains a containerized version of kube-hunter at `aquasec/kube-hunter`. This container includes this source code, plus an additional (closed source) reporting plugin for uploading results into a report that can be viewed at [kube-hunter.aquasec.com](https://kube-hunter.aquasec.com). Please note, that running the `aquasec/kube-hunter` container and uploading reports data are subject to additional [terms and conditions](https://kube-hunter.aquasec.com/eula.html).
|
||||
Aqua Security maintains a containerized version of kube-hunter at `aquasec/kube-hunter:aqua`. This container includes this source code, plus an additional (closed source) reporting plugin for uploading results into a report that can be viewed at [kube-hunter.aquasec.com](https://kube-hunter.aquasec.com). Please note, that running the `aquasec/kube-hunter` container and uploading reports data are subject to additional [terms and conditions](https://kube-hunter.aquasec.com/eula.html).
|
||||
|
||||
The Dockerfile in this repository allows you to build a containerized version without the reporting plugin.
|
||||
|
||||
|
||||
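Taken together, the discovery and authentication options added in this README can be combined in a single run. A hypothetical invocation from a workstation with cluster credentials (the kubeconfig path and token file are placeholders; all three flags are documented above):

```bash
# auto-discover nodes via an explicit kubeconfig, hunt actively,
# and authenticate with a specific service account token
kube-hunter --active \
    --k8s-auto-discover-nodes --kubeconfig ~/.kube/config \
    --service-account-token "$(cat token.jwt)"
```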
Gemfile.lock

@@ -197,9 +197,9 @@ GEM
       html-pipeline (~> 2.2)
       jekyll (>= 3.0, < 5.0)
     kramdown (2.3.0)
-      rexml
+      rexml (>= 3.2.5)
     kramdown-parser-gfm (1.1.0)
-      kramdown (~> 2.0)
+      kramdown (>= 2.3.1)
     liquid (4.0.3)
     listen (3.4.0)
       rb-fsevent (~> 0.10, >= 0.10.3)
@@ -212,7 +212,7 @@ GEM
       jekyll-seo-tag (~> 2.1)
     minitest (5.14.3)
     multipart-post (2.1.1)
-    nokogiri (1.11.1)
+    nokogiri (>= 1.11.4)
       mini_portile2 (~> 2.5.0)
       racc (~> 1.4)
     octokit (4.20.0)
docs/_kb/KHV002.md
@@ -2,6 +2,7 @@
 vid: KHV002
 title: Kubernetes version disclosure
 categories: [Information Disclosure]
+severity: low
 ---

 # {{ page.vid }} - {{ page.title }}

docs/_kb/KHV003.md
@@ -2,6 +2,7 @@
 vid: KHV003
 title: Azure Metadata Exposure
 categories: [Information Disclosure]
+severity: high
 ---

 # {{ page.vid }} - {{ page.title }}

docs/_kb/KHV004.md
@@ -2,6 +2,7 @@
 vid: KHV004
 title: Azure SPN Exposure
 categories: [Identity Theft]
+severity: medium
 ---

 # {{ page.vid }} - {{ page.title }}

docs/_kb/KHV005.md
@@ -2,6 +2,7 @@
 vid: KHV005
 title: Access to Kubernetes API
 categories: [Information Disclosure, Unauthenticated Access]
+severity: high
 ---

 # {{ page.vid }} - {{ page.title }}

docs/_kb/KHV006.md
@@ -2,6 +2,7 @@
 vid: KHV006
 title: Insecure (HTTP) access to Kubernetes API
 categories: [Unauthenticated Access]
+severity: high
 ---

 # {{ page.vid }} - {{ page.title }}

docs/_kb/KHV007.md
@@ -2,6 +2,7 @@
 vid: KHV007
 title: Specific Access to Kubernetes API
 categories: [Access Risk]
+severity: high
 ---

 # {{ page.vid }} - {{ page.title }}

docs/_kb/KHV020.md
@@ -2,6 +2,7 @@
 vid: KHV020
 title: Possible Arp Spoof
 categories: [IdentityTheft]
+severity: high
 ---

 # {{ page.vid }} - {{ page.title }}

docs/_kb/KHV021.md
@@ -2,6 +2,7 @@
 vid: KHV021
 title: Certificate Includes Email Address
 categories: [Information Disclosure]
+severity: low
 ---

 # {{ page.vid }} - {{ page.title }}

docs/_kb/KHV022.md
@@ -2,6 +2,7 @@
 vid: KHV022
 title: Critical Privilege Escalation CVE
 categories: [Privilege Escalation]
+severity: critical
 ---

 # {{ page.vid }} - {{ page.title }}

docs/_kb/KHV023.md
@@ -2,6 +2,7 @@
 vid: KHV023
 title: Denial of Service to Kubernetes API Server
 categories: [Denial Of Service]
+severity: medium
 ---

 # {{ page.vid }} - {{ page.title }}

docs/_kb/KHV024.md
@@ -2,6 +2,7 @@
 vid: KHV024
 title: Possible Ping Flood Attack
 categories: [Denial Of Service]
+severity: medium
 ---

 # {{ page.vid }} - {{ page.title }}

docs/_kb/KHV025.md
@@ -2,6 +2,7 @@
 vid: KHV025
 title: Possible Reset Flood Attack
 categories: [Denial Of Service]
+severity: medium
 ---

 # {{ page.vid }} - {{ page.title }}

docs/_kb/KHV026.md
@@ -2,6 +2,7 @@
 vid: KHV026
 title: Arbitrary Access To Cluster Scoped Resources
 categories: [PrivilegeEscalation]
+severity: high
 ---

 # {{ page.vid }} - {{ page.title }}

docs/_kb/KHV027.md
@@ -2,6 +2,7 @@
 vid: KHV027
 title: Kubectl Vulnerable To CVE-2019-11246
 categories: [Remote Code Execution]
+severity: medium
 ---

 # {{ page.vid }} - {{ page.title }}

docs/_kb/KHV028.md
@@ -2,6 +2,7 @@
 vid: KHV028
 title: Kubectl Vulnerable To CVE-2019-1002101
 categories: [Remote Code Execution]
+severity: medium
 ---

 # {{ page.vid }} - {{ page.title }}

docs/_kb/KHV029.md
@@ -2,6 +2,7 @@
 vid: KHV029
 title: Dashboard Exposed
 categories: [Remote Code Execution]
+severity: critical
 ---

 # {{ page.vid }} - {{ page.title }}
@@ -12,4 +13,5 @@ An open Kubernetes Dashboard was detected. The Kubernetes Dashboard can be used

 ## Remediation

 Do not leave the Dashboard insecured.

docs/_kb/KHV030.md
@@ -2,6 +2,7 @@
 vid: KHV030
 title: Possible DNS Spoof
 categories: [Identity Theft]
+severity: high
 ---

 # {{ page.vid }} - {{ page.title }}

docs/_kb/KHV031.md
@@ -2,6 +2,7 @@
 vid: KHV031
 title: Etcd Remote Write Access Event
 categories: [Remote Code Execution]
+severity: critical
 ---

 # {{ page.vid }} - {{ page.title }}

docs/_kb/KHV032.md
@@ -2,6 +2,7 @@
 vid: KHV032
 title: Etcd Remote Read Access Event
 categories: [Access Risk]
+severity: critical
 ---

 # {{ page.vid }} - {{ page.title }}

docs/_kb/KHV033.md
@@ -2,6 +2,7 @@
 vid: KHV033
 title: Etcd Remote version disclosure
 categories: [Information Disclosure]
+severity: medium
 ---

 # {{ page.vid }} - {{ page.title }}

docs/_kb/KHV034.md
@@ -2,6 +2,7 @@
 vid: KHV034
 title: Etcd is accessible using insecure connection (HTTP)
 categories: [Unauthenticated Access]
+severity: high
 ---

 # {{ page.vid }} - {{ page.title }}

docs/_kb/KHV036.md
@@ -2,6 +2,7 @@
 vid: KHV036
 title: Anonymous Authentication
 categories: [Remote Code Execution]
+severity: critical
 ---

 # {{ page.vid }} - {{ page.title }}

docs/_kb/KHV037.md
@@ -2,6 +2,7 @@
 vid: KHV037
 title: Exposed Container Logs
 categories: [Information Disclosure]
+severity: high
 ---

 # {{ page.vid }} - {{ page.title }}

docs/_kb/KHV038.md
@@ -2,6 +2,7 @@
 vid: KHV038
 title: Exposed Running Pods
 categories: [Information Disclosure]
+severity: high
 ---

 # {{ page.vid }} - {{ page.title }}

docs/_kb/KHV039.md
@@ -2,6 +2,7 @@
 vid: KHV039
 title: Exposed Exec On Container
 categories: [Remote Code Execution]
+severity: high
 ---

 # {{ page.vid }} - {{ page.title }}

docs/_kb/KHV040.md
@@ -2,6 +2,7 @@
 vid: KHV040
 title: Exposed Run Inside Container
 categories: [Remote Code Execution]
+severity: high
 ---

 # {{ page.vid }} - {{ page.title }}

docs/_kb/KHV041.md
@@ -2,6 +2,7 @@
 vid: KHV041
 title: Exposed Port Forward
 categories: [Remote Code Execution]
+severity: high
 ---

 # {{ page.vid }} - {{ page.title }}

docs/_kb/KHV042.md
@@ -2,6 +2,7 @@
 vid: KHV042
 title: Exposed Attaching To Container
 categories: [Remote Code Execution]
+severity: high
 ---

 # {{ page.vid }} - {{ page.title }}

docs/_kb/KHV043.md
@@ -2,6 +2,7 @@
 vid: KHV043
 title: Cluster Health Disclosure
 categories: [Information Disclosure]
+severity: high
 ---

 # {{ page.vid }} - {{ page.title }}

docs/_kb/KHV044.md
@@ -2,6 +2,7 @@
 vid: KHV044
 title: Privileged Container
 categories: [Access Risk]
+severity: high
 ---

 # {{ page.vid }} - {{ page.title }}

docs/_kb/KHV045.md
@@ -2,6 +2,7 @@
 vid: KHV045
 title: Exposed System Logs
 categories: [Information Disclosure]
+severity: high
 ---

 # {{ page.vid }} - {{ page.title }}

docs/_kb/KHV046.md
@@ -2,6 +2,7 @@
 vid: KHV046
 title: Exposed Kubelet Cmdline
 categories: [Information Disclosure]
+severity: high
 ---

 # {{ page.vid }} - {{ page.title }}

docs/_kb/KHV047.md
@@ -2,6 +2,7 @@
 vid: KHV047
 title: Pod With Mount To /var/log
 categories: [Privilege Escalation]
+severity: high
 ---

 # {{ page.vid }} - {{ page.title }}

docs/_kb/KHV049.md
@@ -2,6 +2,7 @@
 vid: KHV049
 title: kubectl proxy Exposed
 categories: [Information Disclosure]
+severity: high
 ---

 # {{ page.vid }} - {{ page.title }}

docs/_kb/KHV050.md
@@ -2,6 +2,7 @@
 vid: KHV050
 title: Read access to Pod service account token
 categories: [Access Risk]
+severity: medium
 ---

 # {{ page.vid }} - {{ page.title }}

docs/_kb/KHV051.md
@@ -2,6 +2,7 @@
 vid: KHV051
 title: Exposed Existing Privileged Containers Via Secure Kubelet Port
 categories: [Access Risk]
+severity: high
 ---

 # {{ page.vid }} - {{ page.title }}

docs/_kb/KHV052.md
@@ -2,6 +2,7 @@
 vid: KHV052
 title: Exposed Pods
 categories: [Information Disclosure]
+severity: high
 ---

 # {{ page.vid }} - {{ page.title }}
docs/_kb/KHV053.md (new file, 25 lines)

@@ -0,0 +1,25 @@
---
vid: KHV053
title: AWS Metadata Exposure
categories: [Information Disclosure]
severity: high
---

# {{ page.vid }} - {{ page.title }}

## Issue description

AWS EC2 provides an internal HTTP endpoint that exposes information from the cloud platform to workloads running in an instance. The endpoint is accessible to every workload running in the instance. An attacker that is able to execute a pod in the cluster may be able to query the metadata service and discover additional information about the environment.

## Remediation

* Limit access to the instance metadata service. Consider using a local firewall such as `iptables` to disable access from some or all processes/users to the instance metadata service.

* Disable the metadata service (via instance metadata options or IAM), or at a minimum enforce the use of IMDSv2 on an instance to require token-based access to the service.

* Modify the HTTP PUT response hop limit on the instance to 1. This will only allow access to the service from the instance itself rather than from within a pod.

## References

- [AWS Instance Metadata service](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html)
- [EC2 Instance Profiles](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2_instance-profiles.html)
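As a concrete sketch of the second and third remediation points in KHV053, IMDSv2 and a hop limit of 1 can be enforced on an existing instance with the AWS CLI (the instance ID is a placeholder):

```bash
aws ec2 modify-instance-metadata-options \
    --instance-id i-0123456789abcdef0 \
    --http-tokens required \
    --http-put-response-hop-limit 1 \
    --http-endpoint enabled
```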
job.yaml (6 changes)

@@ -5,11 +5,13 @@ metadata:
   name: kube-hunter
 spec:
   template:
+    metadata:
+      labels:
+        app: kube-hunter
     spec:
       containers:
         - name: kube-hunter
-          image: aquasec/kube-hunter
+          image: aquasec/kube-hunter:0.6.8
          command: ["kube-hunter"]
          args: ["--pod"]
      restartPolicy: Never
  backoffLimit: 4
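With the image tag now pinned, a typical way to run the job and collect its report might be (a sketch; the resource names match the manifest above):

```bash
kubectl apply -f job.yaml
kubectl wait --for=condition=complete job/kube-hunter --timeout=300s
kubectl logs job/kube-hunter
```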
kube-hunter.png (binary file, not shown)
Before: 19 KiB, After: 25 KiB
CONTRIBUTING.md

@@ -76,7 +76,7 @@ in order to prevent circular dependency bug.
 Following the above example, let's figure out the imports:
 ```python
 from kube_hunter.core.types import Hunter
-from kube_hunter.core.events import handler
+from kube_hunter.core.events.event_handler import handler

 from kube_hunter.core.events.types import OpenPortEvent

@@ -206,7 +206,7 @@ __Make sure to return the event from the execute method, or the event will not g
 For example, if you don't want to hunt services found on a localhost IP, you can create the following module, in the `kube_hunter/modules/report/`
 ```python
-from kube_hunter.core.events import handler
+from kube_hunter.core.events.event_handler import handler
 from kube_hunter.core.events.types import Service, EventFilterBase

 @handler.subscribe(Service)

@@ -222,7 +222,7 @@ That means other Hunters that are subscribed to this Service will not get trigge
 That opens up a wide variety of possible operations, as this not only can __filter out__ events, but you can actually __change event attributes__, for example:

 ```python
-from kube_hunter.core.events import handler
+from kube_hunter.core.events.event_handler import handler
 from kube_hunter.core.types import InformationDisclosure
 from kube_hunter.core.events.types import Vulnerability, EventFilterBase
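For reference, a complete filter module under the new import path could look like the sketch below. The hunter-facing pieces (`handler.subscribe`, `EventFilterBase`, returning `None` to drop an event) are shown in the hunks above, while the `host` attribute check is illustrative:

```python
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import Service, EventFilterBase


@handler.subscribe(Service)
class SkipLocalhostFilter(EventFilterBase):
    """Drops services discovered on a localhost IP (illustrative)"""

    def execute(self):
        # Returning None removes the event from the hunt;
        # returning the event (possibly modified) lets it through.
        if getattr(self.event, "host", None) == "127.0.0.1":
            return None
        return self.event
```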
kube_hunter/__main__.py

@@ -1,6 +1,7 @@
 #!/usr/bin/env python3
 # flake8: noqa: E402

+from functools import partial
 import logging
 import threading

@@ -21,10 +22,16 @@ config = Config(
     log_file=args.log_file,
     mapping=args.mapping,
     network_timeout=args.network_timeout,
+    num_worker_threads=args.num_worker_threads,
     pod=args.pod,
     quick=args.quick,
     remote=args.remote,
     statistics=args.statistics,
+    k8s_auto_discover_nodes=args.k8s_auto_discover_nodes,
+    service_account_token=args.service_account_token,
+    kubeconfig=args.kubeconfig,
+    enable_cve_hunting=args.enable_cve_hunting,
+    custom=args.custom,
 )
 setup_logger(args.log, args.log_file)
 set_config(config)

@@ -32,7 +39,7 @@ set_config(config)
 # Running all other registered plugins before execution
 pm.hook.load_plugin(args=args)

-from kube_hunter.core.events import handler
+from kube_hunter.core.events.event_handler import handler
 from kube_hunter.core.events.types import HuntFinished, HuntStarted
 from kube_hunter.modules.discovery.hosts import RunningAsPodEvent, HostScanEvent
 from kube_hunter.modules.report import get_reporter, get_dispatcher

@@ -69,16 +76,20 @@ def interactive_set_config():
     return True


-def list_hunters():
+def list_hunters(class_names=False):
     print("\nPassive Hunters:\n----------------")
     for hunter, docs in handler.passive_hunters.items():
         name, doc = hunter.parse_docs(docs)
+        if class_names:
+            name = hunter.__name__
         print(f"* {name}\n  {doc}\n")

     if config.active:
         print("\n\nActive Hunters:\n---------------")
         for hunter, docs in handler.active_hunters.items():
             name, doc = hunter.parse_docs(docs)
+            if class_names:
+                name = hunter.__name__
             print(f"* {name}\n  {doc}\n")

@@ -88,10 +99,13 @@ hunt_started = False

 def main():
     global hunt_started
-    scan_options = [config.pod, config.cidr, config.remote, config.interface]
+    scan_options = [config.pod, config.cidr, config.remote, config.interface, config.k8s_auto_discover_nodes]
     try:
         if args.list:
-            list_hunters()
+            if args.raw_hunter_names:
+                list_hunters(class_names=True)
+            else:
+                list_hunters()
             return

         if not any(scan_options):
kube_hunter/conf/__init__.py

@@ -1,7 +1,11 @@
-from dataclasses import dataclass
+from dataclasses import dataclass, field
 from typing import Any, Optional


+def get_default_core_hunters():
+    return ["FromPodHostDiscovery", "HostDiscovery", "PortDiscovery", "SendFullReport", "Collector", "StartedInfo"]
+
+
 @dataclass
 class Config:
     """Config is a configuration container.

@@ -16,11 +20,13 @@ class Config:
     - log_file: Log File path
     - mapping: Report only found components
     - network_timeout: Timeout for network operations
+    - num_worker_threads: Add a flag --threads to change the default 800 thread count of the event handler
     - pod: From pod scanning mode
     - quick: Quick scanning mode
     - remote: Hosts to scan
     - report: Output format
     - statistics: Include hunters statistics
+    - enable_cve_hunting: enables cve hunting, shows cve results
     """

     active: bool = False

@@ -31,11 +37,19 @@ class Config:
     log_file: Optional[str] = None
     mapping: bool = False
     network_timeout: float = 5.0
+    num_worker_threads: int = 800
     pod: bool = False
     quick: bool = False
     remote: Optional[str] = None
     reporter: Optional[Any] = None
     statistics: bool = False
+    k8s_auto_discover_nodes: bool = False
+    service_account_token: Optional[str] = None
+    kubeconfig: Optional[str] = None
+    enable_cve_hunting: bool = False
+    custom: Optional[list] = None
+    raw_hunter_names: bool = False
+    core_hunters: list = field(default_factory=get_default_core_hunters)


 _config: Optional[Config] = None
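A minimal sketch of how the new fields compose with `set_config`/`get_config` (both appear elsewhere in this diff; the hunter names passed to `custom` are hypothetical):

```python
from kube_hunter.conf import Config, set_config, get_config

# Whitelist two hunters; core hunters always register regardless
# (see allowed_for_custom_registration in the event handler below).
set_config(Config(active=True, custom=["KubeletDiscovery", "SecurePortHunter"]))

config = get_config()
assert "HostDiscovery" in config.core_hunters  # populated by the default factory
```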
@@ -4,10 +4,6 @@ DEFAULT_LEVEL = logging.INFO
 DEFAULT_LEVEL_NAME = logging.getLevelName(DEFAULT_LEVEL)
 LOG_FORMAT = "%(asctime)s %(levelname)s %(name)s %(message)s"

-# Suppress logging from scapy
-logging.getLogger("scapy.runtime").setLevel(logging.CRITICAL)
-logging.getLogger("scapy.loading").setLevel(logging.CRITICAL)
-

 def setup_logger(level_name, logfile):
     # Remove any existing handlers
@@ -46,8 +46,58 @@ def parser_add_arguments(parser):
         help="One or more remote ip/dns to hunt",
     )

+    parser.add_argument(
+        "-c",
+        "--custom",
+        nargs="+",
+        metavar="HUNTERS",
+        default=list(),
+        help="Custom hunting. Only given hunter names will register in the hunt. "
+        "For a list of options, run with `--list --raw-hunter-names`",
+    )
+
+    parser.add_argument(
+        "--raw-hunter-names",
+        action="store_true",
+        help="Use in combination with `--list` to display hunter class names to pass to the custom hunting flag",
+    )
+
+    parser.add_argument(
+        "--k8s-auto-discover-nodes",
+        action="store_true",
+        help="Enables automatic detection of all nodes in a Kubernetes cluster "
+        "by querying the Kubernetes API server. "
+        "It supports both in-cluster config (when running as a pod), "
+        "and a specific kubectl config file (use --kubeconfig to set this). "
+        "By default, when this flag is set, it will use in-cluster config. "
+        "NOTE: this is automatically switched on in --pod mode.",
+    )
+
+    parser.add_argument(
+        "--service-account-token",
+        type=str,
+        metavar="JWT_TOKEN",
+        help="Manually specify the service account jwt token to use for authenticating in the hunting process. "
+        "NOTE: this overrides loading the pod's bound service account token when running in --pod mode",
+    )
+
+    parser.add_argument(
+        "--kubeconfig",
+        type=str,
+        metavar="KUBECONFIG",
+        default=None,
+        help="Specify the kubeconfig file to use for Kubernetes nodes auto discovery "
+        "(to be used in conjunction with the --k8s-auto-discover-nodes flag)",
+    )
+
     parser.add_argument("--active", action="store_true", help="Enables active hunting")

+    parser.add_argument(
+        "--enable-cve-hunting",
+        action="store_true",
+        help="Show cluster CVEs based on discovered version (depending on the vendor, this may produce false positives)",
+    )
+
     parser.add_argument(
         "--log",
         type=str,

@@ -83,6 +133,14 @@ def parser_add_arguments(parser):

     parser.add_argument("--network-timeout", type=float, default=5.0, help="network operations timeout")

+    parser.add_argument(
+        "--num-worker-threads",
+        type=int,
+        default=800,
+        help="In some environments the default thread count (800) can cause the process to crash. "
+        "In the case of a crash, try lowering the thread count",
+    )
+

 def parse_args(add_args_hook):
     """
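As the `--num-worker-threads` help text suggests, constrained environments can dial the worker pool down; for example:

```bash
kube-hunter --pod --num-worker-threads 200
```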
kube_hunter/core/events/__init__.py

@@ -1,3 +1,2 @@
 # flake8: noqa: E402
-from .handler import EventQueue, handler
 from . import types
kube_hunter/core/events/event_handler.py (new file, 370 lines)

import logging
import time
from collections import defaultdict
from queue import Queue
from threading import Thread

from kube_hunter.conf import get_config
from kube_hunter.core.types import ActiveHunter, HunterBase
from kube_hunter.core.events.types import Vulnerability, EventFilterBase, MultipleEventsContainer

logger = logging.getLogger(__name__)


# Inherits Queue object, handles events asynchronously
class EventQueue(Queue):
    def __init__(self, num_worker=10):
        super().__init__()
        self.passive_hunters = dict()
        self.active_hunters = dict()
        self.all_hunters = dict()

        self.running = True
        self.workers = list()

        # -- Regular Subscription --
        # Structure: key: Event Class, value: tuple(Registered Hunter, Predicate Function)
        self.hooks = defaultdict(list)
        self.filters = defaultdict(list)
        # --------------------------

        # -- Multiple Subscription --
        # Structure: key: Event Class, value: tuple(Registered Hunter, Predicate Function)
        self.multi_hooks = defaultdict(list)

        # When subscribing to multiple events, this gets populated with required event classes
        # Structure: key: Hunter Class, value: set(RequiredEventClass1, RequiredEventClass2)
        self.hook_dependencies = defaultdict(set)

        # To keep track of fulfilled dependencies, we need a structure which saves historical instantiated
        # events mapped to a registered hunter.
        # We use a 2-dimensional dictionary in order to fulfill two demands:
        #   * correctly count published required events
        #   * save historical events fired, easily sorted by their type
        #
        # Structure: hook_fulfilled_deps[hunter_class] -> fulfilled_events_for_hunter[event_class] -> [EventObject, EventObject2]
        self.hook_fulfilled_deps = defaultdict(lambda: defaultdict(list))
        # ---------------------------

        for _ in range(num_worker):
            t = Thread(target=self.worker)
            t.daemon = True
            t.start()
            self.workers.append(t)

        t = Thread(target=self.notifier)
        t.daemon = True
        t.start()

    """
    ######################################################
    + ----------------- Public Methods ----------------- +
    ######################################################
    """

    def subscribe(self, event, hook=None, predicate=None, is_register=True):
        """
        The Subscribe Decorator - For Regular Registration
        Use this to register for one event only. Your hunter will execute each time this event is published.

        @param event - Event class to subscribe to
        @param predicate - Optional: Function that will be called with the published event as a parameter before trigger.
                           If its return value is False, the Hunter will not run (default=None).
        @param hook - Hunter class to register for (ignore when using as a decorator)
        """

        def wrapper(hook):
            self.subscribe_event(event, hook=hook, predicate=predicate, is_register=is_register)
            return hook

        return wrapper

    def subscribe_many(self, events, hook=None, predicates=None, is_register=True):
        """
        The Subscribe Many Decorator - For Multiple Registration.
        When your attack needs several prerequisites to exist in the cluster, you need to register for multiple events.
        Your hunter will execute once for every new combination of required events.
        For example:
        1. event A was published 3 times
        2. event B was published once
        3. event B was published again
        Your hunter will execute 2 times:
        * (on step 2) with the newest version of A
        * (on step 3) with the newest version of A and the newest version of B

        @param events - List of event classes to subscribe to
        @param predicates - Optional: List of functions that will be called with the published event as a parameter before trigger.
                            If a predicate's return value is False, the Hunter will not run (default=None).
        @param hook - Hunter class to register for (ignore when using as a decorator)
        """

        def wrapper(hook):
            self.subscribe_events(events, hook=hook, predicates=predicates, is_register=is_register)
            return hook

        return wrapper

    def subscribe_once(self, event, hook=None, predicate=None, is_register=True):
        """
        The Subscribe Once Decorator - For Single Trigger Registration.
        Use this when you want your hunter to execute only once in your entire program run.
        Wraps the subscribe_event method.

        @param event - Event class to subscribe to
        @param predicate - Optional: Function that will be called with the published event as a parameter before trigger.
                           If its return value is False, the Hunter will not run (default=None).
        @param hook - Hunter class to register for (ignore when using as a decorator)
        """

        def wrapper(hook):
            # installing a __new__ magic method on the hunter
            # which will remove the hunter from the list upon creation
            def __new__unsubscribe_self(self, cls):
                handler.hooks[event].remove((hook, predicate))
                return object.__new__(self)

            hook.__new__ = __new__unsubscribe_self

            self.subscribe_event(event, hook=hook, predicate=predicate, is_register=is_register)

            return hook

        return wrapper

    def publish_event(self, event, caller=None):
        """
        The Publish Event Method - For Publishing Events To Kube-Hunter's Queue
        """
        # Document that the hunter published a vulnerability (if it's indeed a vulnerability)
        # For statistics options
        self._increase_vuln_count(event, caller)

        # sets the event's parent to be its publisher hunter.
        self._set_event_chain(event, caller)

        # applying filters on the event, before publishing it to subscribers.
        # if a filter returned None, not proceeding to publish
        event = self.apply_filters(event)
        if event:
            # If the event was rewritten, make sure it's linked again
            self._set_event_chain(event, caller)

            # Regular Hunter registrations - publish logic
            # Here we iterate over all the registered-to events:
            for hooked_event in self.hooks.keys():
                # We check if the event we want to publish is an inherited class of the current registered-to iterated event,
                # meaning - if this is a relevant event:
                if hooked_event in event.__class__.__mro__:
                    # If so, we want to publish to all registered hunters.
                    for hook, predicate in self.hooks[hooked_event]:
                        if predicate and not predicate(event):
                            continue

                        self.put(hook(event))
                        logger.debug(f"Event {event.__class__} got published to hunter - {hook} with {event}")

            # Multiple Hunter registrations - publish logic
            # Here we iterate over all the registered-to events:
            for hooked_event in self.multi_hooks.keys():
                # We check if the event we want to publish is an inherited class of the current registered-to iterated event,
                # meaning - if this is a relevant event:
                if hooked_event in event.__class__.__mro__:
                    # now we iterate over the corresponding registered hunters.
                    for hook, predicate in self.multi_hooks[hooked_event]:
                        if predicate and not predicate(event):
                            continue

                        self._update_multi_hooks(hook, event)

                        if self._is_all_fulfilled_for_hunter(hook):
                            events_container = MultipleEventsContainer(self._get_latest_events_from_multi_hooks(hook))
                            self.put(hook(events_container))
                            logger.debug(
                                f"Multiple subscription requirements were met for hunter {hook}. events container was \
published with {self.hook_fulfilled_deps[hook].keys()}"
                            )

    """
    ######################################################
    + ---------------- Private Methods ----------------- +
    + ---------------- (Backend Logic) ----------------- +
    ######################################################
    """

    def _get_latest_events_from_multi_hooks(self, hook):
        """
        Iterates over fulfilled deps for the hunter, fetching the latest appended event of each class from history
        """
        latest_events = list()
        for event_class in self.hook_fulfilled_deps[hook].keys():
            latest_events.append(self.hook_fulfilled_deps[hook][event_class][-1])
        return latest_events

    def _update_multi_hooks(self, hook, event):
        """
        Updates published events in the multi hooks fulfilled store.
        """
        self.hook_fulfilled_deps[hook][event.__class__].append(event)

    def _is_all_fulfilled_for_hunter(self, hook):
        """
        Returns True if the multi hook is fulfilled, False otherwise
        """
        # Check if the first dimension already contains all necessary event classes
        return len(self.hook_fulfilled_deps[hook].keys()) == len(self.hook_dependencies[hook])

    def _set_event_chain(self, event, caller):
        """
        Sets the event's attribute chain.
        Here we link the event with its publisher (Hunter),
        so the next hunter that catches this event can access the previous one's attributes.

        @param event: the event object to be chained
        @param caller: the Hunter object that published this event.
        """
        if caller:
            event.previous = caller.event
            event.hunter = caller.__class__

    def _register_hunters(self, hook=None):
        """
        This method is called when a Hunter registers itself to the handler.
        This is done in order to track and correctly configure the current run of the program.

        passive_hunters, active_hunters, all_hunters
        """
        config = get_config()
        if ActiveHunter in hook.__mro__:
            if not config.active:
                return False
            else:
                self.active_hunters[hook] = hook.__doc__
        elif HunterBase in hook.__mro__:
            self.passive_hunters[hook] = hook.__doc__

        if HunterBase in hook.__mro__:
            self.all_hunters[hook] = hook.__doc__

        return True

    def _register_filter(self, event, hook=None, predicate=None):
        if hook not in self.filters[event]:
            self.filters[event].append((hook, predicate))
            logging.debug("{} filter subscribed to {}".format(hook, event))

    def _register_hook(self, event, hook=None, predicate=None):
        if hook not in self.hooks[event]:
            self.hooks[event].append((hook, predicate))
            logging.debug("{} subscribed to {}".format(hook, event))

    def allowed_for_custom_registration(self, target_hunter):
        """
        Check if the custom input list contains the hunter we are about to register for events.
        If the hunter is considered a Core hunter, as specified in `config.core_hunters`, we allow it anyway.

        Returns True if:
        1. custom (partial) hunting is disabled
        2. custom hunting is enabled and the hunter is a core hunter class
        3. custom hunting is enabled and the hunter is specified in config.custom

        @param target_hunter: hunter class for registration check
        """
        config = get_config()
        if not config.custom:
            return True

        hunter_class_name = target_hunter.__name__
        if hunter_class_name in config.core_hunters or hunter_class_name in config.custom:
            return True

        return False

    def subscribe_event(self, event, hook=None, predicate=None, is_register=True):
        if not is_register:
            return
        if not self.allowed_for_custom_registration(hook):
            return
        if not self._register_hunters(hook):
            return

        # registering filters
        if EventFilterBase in hook.__mro__:
            self._register_filter(event, hook, predicate)
        # registering hunters
        else:
            self._register_hook(event, hook, predicate)

    def subscribe_events(self, events, hook=None, predicates=None, is_register=True):
        if not is_register:
            return
        if not self.allowed_for_custom_registration(hook):
            return
        if not self._register_hunters(hook):
            return

        if predicates is None:
            predicates = [None] * len(events)

        # registering filters.
        if EventFilterBase in hook.__mro__:
            for event, predicate in zip(events, predicates):
                self._register_filter(event, hook, predicate)
        # registering hunters.
        else:
            for event, predicate in zip(events, predicates):
                self.multi_hooks[event].append((hook, predicate))

            self.hook_dependencies[hook] = frozenset(events)

    def apply_filters(self, event):
        # if filters are subscribed, apply them on the event
        for hooked_event in self.filters.keys():
            if hooked_event in event.__class__.__mro__:
                for filter_hook, predicate in self.filters[hooked_event]:
                    if predicate and not predicate(event):
                        continue

                    logger.debug(f"Event {event.__class__} filtered with {filter_hook}")
                    event = filter_hook(event).execute()
                    # if the filter decided to remove the event, returning None
                    if not event:
                        return None
        return event

    def _increase_vuln_count(self, event, caller):
        config = get_config()
        if config.statistics and caller:
            if Vulnerability in event.__class__.__mro__:
                caller.__class__.publishedVulnerabilities += 1

    # executes callbacks on a dedicated thread as a daemon
    def worker(self):
        while self.running:
            try:
                hook = self.get()
                logger.debug(f"Executing {hook.__class__} with {hook.event.__dict__}")
                hook.execute()
            except Exception as ex:
                logger.debug(ex, exc_info=True)
            finally:
                self.task_done()
        logger.debug("closing thread...")

    def notifier(self):
        time.sleep(2)
        # should consider locking on unfinished_tasks
        while self.unfinished_tasks > 0:
            logger.debug(f"{self.unfinished_tasks} tasks left")
            time.sleep(3)
            if self.unfinished_tasks == 1:
                logger.debug("final hook is hanging")

    # stops execution of all daemons
    def free(self):
        self.running = False
        with self.mutex:
            self.queue.clear()


config = get_config()
handler = EventQueue(config.num_worker_threads)
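To illustrate the new multiple-subscription mechanism, here is a sketch of a hunter that fires only once both of its prerequisite events have been published. The event and hunter names are hypothetical, while `subscribe_many`, `MultipleEventsContainer`, and `get_by_class` are as defined in this changeset:

```python
from kube_hunter.core.types import Hunter
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import Event


class KubeletFound(Event):  # hypothetical prerequisite event
    pass


class TokenFound(Event):  # hypothetical prerequisite event
    pass


@handler.subscribe_many([KubeletFound, TokenFound])
class CombinedAccessHunter(Hunter):
    """Combines kubelet exposure with a harvested token (illustrative)"""

    def __init__(self, events):
        # The handler instantiates the hunter with a MultipleEventsContainer
        # holding the newest instance of each required event class.
        self.kubelet = events.get_by_class(KubeletFound)
        self.token = events.get_by_class(TokenFound)

    def execute(self):
        pass  # act on both facts here
```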
kube_hunter/core/events/handler.py (deleted, 160 lines)

@@ -1,160 +0,0 @@
import logging
import time
from collections import defaultdict
from queue import Queue
from threading import Thread

from kube_hunter.conf import get_config
from kube_hunter.core.types import ActiveHunter, HunterBase
from kube_hunter.core.events.types import Vulnerability, EventFilterBase

logger = logging.getLogger(__name__)


# Inherits Queue object, handles events asynchronously
class EventQueue(Queue):
    def __init__(self, num_worker=10):
        super().__init__()
        self.passive_hunters = dict()
        self.active_hunters = dict()
        self.all_hunters = dict()

        self.hooks = defaultdict(list)
        self.filters = defaultdict(list)
        self.running = True
        self.workers = list()

        for _ in range(num_worker):
            t = Thread(target=self.worker)
            t.daemon = True
            t.start()
            self.workers.append(t)

        t = Thread(target=self.notifier)
        t.daemon = True
        t.start()

    # decorator wrapping for easy subscription
    def subscribe(self, event, hook=None, predicate=None):
        def wrapper(hook):
            self.subscribe_event(event, hook=hook, predicate=predicate)
            return hook

        return wrapper

    # wrapper takes care of the subscribe once mechanism
    def subscribe_once(self, event, hook=None, predicate=None):
        def wrapper(hook):
            # installing a __new__ magic method on the hunter
            # which will remove the hunter from the list upon creation
            def __new__unsubscribe_self(self, cls):
                handler.hooks[event].remove((hook, predicate))
                return object.__new__(self)

            hook.__new__ = __new__unsubscribe_self

            self.subscribe_event(event, hook=hook, predicate=predicate)
            return hook

        return wrapper

    # getting uninstantiated event object
    def subscribe_event(self, event, hook=None, predicate=None):
        config = get_config()
        if ActiveHunter in hook.__mro__:
            if not config.active:
                return
            self.active_hunters[hook] = hook.__doc__
        elif HunterBase in hook.__mro__:
            self.passive_hunters[hook] = hook.__doc__

        if HunterBase in hook.__mro__:
            self.all_hunters[hook] = hook.__doc__

        # registering filters
        if EventFilterBase in hook.__mro__:
            if hook not in self.filters[event]:
                self.filters[event].append((hook, predicate))
                logger.debug(f"{hook} filter subscribed to {event}")

        # registering hunters
        elif hook not in self.hooks[event]:
            self.hooks[event].append((hook, predicate))
            logger.debug(f"{hook} subscribed to {event}")

    def apply_filters(self, event):
        # if filters are subscribed, apply them on the event
        for hooked_event in self.filters.keys():
            if hooked_event in event.__class__.__mro__:
                for filter_hook, predicate in self.filters[hooked_event]:
                    if predicate and not predicate(event):
                        continue

                    logger.debug(f"Event {event.__class__} filtered with {filter_hook}")
                    event = filter_hook(event).execute()
                    # if the filter decided to remove the event, returning None
                    if not event:
                        return None
        return event

    # getting instantiated event object
    def publish_event(self, event, caller=None):
        config = get_config()

        # setting event chain
        if caller:
            event.previous = caller.event
            event.hunter = caller.__class__

        # applying filters on the event, before publishing it to subscribers.
        # if a filter returned None, not proceeding to publish
        event = self.apply_filters(event)
        if event:
            # If the event was rewritten, make sure it's linked to its parent ('previous') event
            if caller:
                event.previous = caller.event
                event.hunter = caller.__class__

            for hooked_event in self.hooks.keys():
                if hooked_event in event.__class__.__mro__:
                    for hook, predicate in self.hooks[hooked_event]:
                        if predicate and not predicate(event):
                            continue

                        if config.statistics and caller:
                            if Vulnerability in event.__class__.__mro__:
                                caller.__class__.publishedVulnerabilities += 1

                        logger.debug(f"Event {event.__class__} got published with {event}")
                        self.put(hook(event))

    # executes callbacks on a dedicated thread as a daemon
    def worker(self):
        while self.running:
            try:
                hook = self.get()
                logger.debug(f"Executing {hook.__class__} with {hook.event.__dict__}")
                hook.execute()
            except Exception as ex:
                logger.debug(ex, exc_info=True)
            finally:
                self.task_done()
        logger.debug("closing thread...")

    def notifier(self):
        time.sleep(2)
        # should consider locking on unfinished_tasks
        while self.unfinished_tasks > 0:
            logger.debug(f"{self.unfinished_tasks} tasks left")
            time.sleep(3)
            if self.unfinished_tasks == 1:
                logger.debug("final hook is hanging")

    # stops execution of all daemons
    def free(self):
        self.running = False
        with self.mutex:
            self.queue.clear()


handler = EventQueue(800)
@@ -3,15 +3,32 @@ import threading
import requests

from kube_hunter.conf import get_config
from kube_hunter.core.types import (
    InformationDisclosure,
    DenialOfService,
    RemoteCodeExec,
    IdentityTheft,
    PrivilegeEscalation,
    AccessRisk,
    UnauthenticatedAccess,
    KubernetesCluster,
from kube_hunter.core.types import KubernetesCluster
from kube_hunter.core.types.vulnerabilities import (
    GeneralSensitiveInformationTechnique,
    ExposedSensitiveInterfacesTechnique,
    MountServicePrincipalTechnique,
    ListK8sSecretsTechnique,
    AccessContainerServiceAccountTechnique,
    AccessK8sApiServerTechnique,
    AccessKubeletAPITechnique,
    AccessK8sDashboardTechnique,
    InstanceMetadataApiTechnique,
    ExecIntoContainerTechnique,
    SidecarInjectionTechnique,
    NewContainerTechnique,
    GeneralPersistenceTechnique,
    HostPathMountPrivilegeEscalationTechnique,
    PrivilegedContainerTechnique,
    ClusterAdminBindingTechnique,
    ARPPoisoningTechnique,
    CoreDNSPoisoningTechnique,
    DataDestructionTechnique,
    GeneralDefenseEvasionTechnique,
    ConnectFromProxyServerTechnique,
    CVERemoteCodeExecutionCategory,
    CVEPrivilegeEscalationCategory,
    CVEDenialOfServiceTechnique,
)

logger = logging.getLogger(__name__)
@@ -62,6 +79,20 @@ class Event:
        return history


class MultipleEventsContainer(Event):
    """
    This is the class of the object a hunter receives when it is registered to multiple events.
    """

    def __init__(self, events):
        self.events = events

    def get_by_class(self, event_class):
        for event in self.events:
            if event.__class__ == event_class:
                return event


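A quick sketch of how a multi-subscribed hunter unpacks the container (RunningAsPodEvent takes no arguments per this diff; the OpenPortEvent construction is an assumption):

container = MultipleEventsContainer([OpenPortEvent(port=8443), RunningAsPodEvent()])
container.get_by_class(OpenPortEvent)   # -> the OpenPortEvent instance
container.get_by_class(NewHostEvent)    # -> None, since no such event was gathered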
class Service:
    def __init__(self, name, path="", secure=True):
        self.name = name
@@ -69,6 +100,12 @@ class Service:
        self.path = path
        self.role = "Node"

        # if a service account token was specified, we load it to the Service class
        # We load it here because generally all kubernetes services can be authenticated with the token
        config = get_config()
        if config.service_account_token:
            self.auth_token = config.service_account_token

    def get_name(self):
        return self.name

@@ -82,13 +119,30 @@ class Service:
class Vulnerability:
    severity = dict(
        {
            InformationDisclosure: "medium",
            DenialOfService: "medium",
            RemoteCodeExec: "high",
            IdentityTheft: "high",
            PrivilegeEscalation: "high",
            AccessRisk: "low",
            UnauthenticatedAccess: "low",
            GeneralSensitiveInformationTechnique: "low",
            ExposedSensitiveInterfacesTechnique: "high",
            MountServicePrincipalTechnique: "high",
            ListK8sSecretsTechnique: "high",
            AccessContainerServiceAccountTechnique: "low",
            AccessK8sApiServerTechnique: "medium",
            AccessKubeletAPITechnique: "medium",
            AccessK8sDashboardTechnique: "medium",
            InstanceMetadataApiTechnique: "high",
            ExecIntoContainerTechnique: "high",
            SidecarInjectionTechnique: "high",
            NewContainerTechnique: "high",
            GeneralPersistenceTechnique: "high",
            HostPathMountPrivilegeEscalationTechnique: "high",
            PrivilegedContainerTechnique: "high",
            ClusterAdminBindingTechnique: "high",
            ARPPoisoningTechnique: "medium",
            CoreDNSPoisoningTechnique: "high",
            DataDestructionTechnique: "high",
            GeneralDefenseEvasionTechnique: "high",
            ConnectFromProxyServerTechnique: "low",
            CVERemoteCodeExecutionCategory: "high",
            CVEPrivilegeEscalationCategory: "high",
            CVEDenialOfServiceTechnique: "medium",
        }
    )

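A minimal sketch of how a report might resolve a severity from this table (the helper below is illustrative, not necessarily the method the class defines):

def get_severity(vulnerability):
    # categories missing from the mapping fall back to "low"
    return Vulnerability.severity.get(vulnerability.category, "low")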
@@ -191,20 +245,23 @@ class ReportDispatched(Event):


class K8sVersionDisclosure(Vulnerability, Event):
    """The kubernetes version could be obtained from the {} endpoint """
    """The kubernetes version could be obtained from the {} endpoint"""

    def __init__(self, version, from_endpoint, extra_info=""):
    def __init__(self, version, from_endpoint, extra_info="", category=None):
        Vulnerability.__init__(
            self,
            KubernetesCluster,
            "K8s Version Disclosure",
            category=InformationDisclosure,
            category=ExposedSensitiveInterfacesTechnique,
            vid="KHV002",
        )
        self.version = version
        self.from_endpoint = from_endpoint
        self.extra_info = extra_info
        self.evidence = version
        # depending on where the version came from, we might want to override the category
        if category:
            self.category = category

    def explain(self):
        return self.__doc__.format(self.from_endpoint) + self.extra_info

@@ -1,88 +0,0 @@
class HunterBase:
    publishedVulnerabilities = 0

    @staticmethod
    def parse_docs(docs):
        """returns tuple of (name, docs)"""
        if not docs:
            return __name__, "<no documentation>"
        docs = docs.strip().split("\n")
        for i, line in enumerate(docs):
            docs[i] = line.strip()
        return docs[0], " ".join(docs[1:]) if len(docs[1:]) else "<no documentation>"

    @classmethod
    def get_name(cls):
        name, _ = cls.parse_docs(cls.__doc__)
        return name

    def publish_event(self, event):
        handler.publish_event(event, caller=self)


class ActiveHunter(HunterBase):
    pass


class Hunter(HunterBase):
    pass


class Discovery(HunterBase):
    pass


class KubernetesCluster:
    """Kubernetes Cluster"""

    name = "Kubernetes Cluster"


class KubectlClient:
    """The kubectl client binary is used by the user to interact with the cluster"""

    name = "Kubectl Client"


class Kubelet(KubernetesCluster):
    """The kubelet is the primary "node agent" that runs on each node"""

    name = "Kubelet"


class Azure(KubernetesCluster):
    """Azure Cluster"""

    name = "Azure"


class InformationDisclosure:
    name = "Information Disclosure"


class RemoteCodeExec:
    name = "Remote Code Execution"


class IdentityTheft:
    name = "Identity Theft"


class UnauthenticatedAccess:
    name = "Unauthenticated Access"


class AccessRisk:
    name = "Access Risk"


class PrivilegeEscalation(KubernetesCluster):
    name = "Privilege Escalation"


class DenialOfService:
    name = "Denial of Service"


# import is in the bottom to break import loops
from .events import handler  # noqa
kube_hunter/core/types/__init__.py (new file, 4 lines)
@@ -0,0 +1,4 @@
# flake8: noqa: E402
from .hunters import *
from .components import *
from .vulnerabilities import *
kube_hunter/core/types/components.py (new file, 28 lines)
@@ -0,0 +1,28 @@
class KubernetesCluster:
    """Kubernetes Cluster"""

    name = "Kubernetes Cluster"


class KubectlClient:
    """The kubectl client binary is used by the user to interact with the cluster"""

    name = "Kubectl Client"


class Kubelet(KubernetesCluster):
    """The kubelet is the primary "node agent" that runs on each node"""

    name = "Kubelet"


class AWS(KubernetesCluster):
    """AWS Cluster"""

    name = "AWS"


class Azure(KubernetesCluster):
    """Azure Cluster"""

    name = "Azure"
kube_hunter/core/types/hunters.py (new file, 36 lines)
@@ -0,0 +1,36 @@
class HunterBase:
    publishedVulnerabilities = 0

    @staticmethod
    def parse_docs(docs):
        """returns tuple of (name, docs)"""
        if not docs:
            return __name__, "<no documentation>"
        docs = docs.strip().split("\n")
        for i, line in enumerate(docs):
            docs[i] = line.strip()
        return docs[0], " ".join(docs[1:]) if len(docs[1:]) else "<no documentation>"

    @classmethod
    def get_name(cls):
        name, _ = cls.parse_docs(cls.__doc__)
        return name

    def publish_event(self, event):
        # Import here to avoid circular import from events package.
        # imports are cached in python so this should not affect runtime
        from ..events.event_handler import handler  # noqa

        handler.publish_event(event, caller=self)


class ActiveHunter(HunterBase):
    pass


class Hunter(HunterBase):
    pass


class Discovery(HunterBase):
    pass
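To see what parse_docs produces, consider a small illustrative hunter (the class is hypothetical):

class DemoHunter(Hunter):
    """Demo Hunter
    Probes a demo service for misconfiguration
    """

name, docs = DemoHunter.parse_docs(DemoHunter.__doc__)
# name == "Demo Hunter"
# docs == "Probes a demo service for misconfiguration"
# DemoHunter.get_name() returns the same first line.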
kube_hunter/core/types/vulnerabilities.py (new file, 188 lines)
@@ -0,0 +1,188 @@
"""
Vulnerabilities are divided into 2 main categories.

MITRE Category
--------------
A vulnerability that correlates to a technique in the official MITRE ATT&CK matrix for Kubernetes

CVE Category
------------
A "general" category definition. The category is usually determined by the severity of the CVE
"""


class MITRECategory:
    @classmethod
    def get_name(cls):
        """
        Returns the full name of a MITRE technique: <MITRE CATEGORY> // <MITRE TECHNIQUE>
        Should only be used on a direct technique class at the end of the MITRE inheritance chain.

        Example inheritance:
        MITRECategory -> InitialAccessCategory -> ExposedSensitiveInterfacesTechnique
        """
        inheritance_chain = cls.__mro__
        if len(inheritance_chain) >= 4:
            # -3 == index of the MITRE category class, counting from the end (object is last)
            mitre_category_class = inheritance_chain[-3]
            return f"{mitre_category_class.name} // {cls.name}"


class CVECategory:
    @classmethod
    def get_name(cls):
        """
        Returns the full name of the category: CVE // <CVE Category name>
        """
        return f"CVE // {cls.name}"


"""
MITRE ATT&CK Technique Categories
"""


class InitialAccessCategory(MITRECategory):
    name = "Initial Access"


class ExecutionCategory(MITRECategory):
    name = "Execution"


class PersistenceCategory(MITRECategory):
    name = "Persistence"


class PrivilegeEscalationCategory(MITRECategory):
    name = "Privilege Escalation"


class DefenseEvasionCategory(MITRECategory):
    name = "Defense Evasion"


class CredentialAccessCategory(MITRECategory):
    name = "Credential Access"


class DiscoveryCategory(MITRECategory):
    name = "Discovery"


class LateralMovementCategory(MITRECategory):
    name = "Lateral Movement"


class CollectionCategory(MITRECategory):
    name = "Collection"


class ImpactCategory(MITRECategory):
    name = "Impact"


"""
MITRE ATT&CK Techniques
"""


class GeneralSensitiveInformationTechnique(InitialAccessCategory):
    name = "General Sensitive Information"


class ExposedSensitiveInterfacesTechnique(InitialAccessCategory):
    name = "Exposed sensitive interfaces"


class MountServicePrincipalTechnique(CredentialAccessCategory):
    name = "Mount service principal"


class ListK8sSecretsTechnique(CredentialAccessCategory):
    name = "List K8S secrets"


class AccessContainerServiceAccountTechnique(CredentialAccessCategory):
    name = "Access container service account"


class AccessK8sApiServerTechnique(DiscoveryCategory):
    name = "Access the K8S API Server"


class AccessKubeletAPITechnique(DiscoveryCategory):
    name = "Access Kubelet API"


class AccessK8sDashboardTechnique(DiscoveryCategory):
    name = "Access Kubernetes Dashboard"


class InstanceMetadataApiTechnique(DiscoveryCategory):
    name = "Instance Metadata API"


class ExecIntoContainerTechnique(ExecutionCategory):
    name = "Exec into container"


class SidecarInjectionTechnique(ExecutionCategory):
    name = "Sidecar injection"


class NewContainerTechnique(ExecutionCategory):
    name = "New container"


class GeneralPersistenceTechnique(PersistenceCategory):
    name = "General Persistence"


class HostPathMountPrivilegeEscalationTechnique(PrivilegeEscalationCategory):
    name = "hostPath mount"


class PrivilegedContainerTechnique(PrivilegeEscalationCategory):
    name = "Privileged container"


class ClusterAdminBindingTechnique(PrivilegeEscalationCategory):
    name = "Cluster-admin binding"


class ARPPoisoningTechnique(LateralMovementCategory):
    name = "ARP poisoning and IP spoofing"


class CoreDNSPoisoningTechnique(LateralMovementCategory):
    name = "CoreDNS poisoning"


class DataDestructionTechnique(ImpactCategory):
    name = "Data Destruction"


class GeneralDefenseEvasionTechnique(DefenseEvasionCategory):
    name = "General Defense Evasion"


class ConnectFromProxyServerTechnique(DefenseEvasionCategory):
    name = "Connect from Proxy server"


"""
CVE Categories
"""


class CVERemoteCodeExecutionCategory(CVECategory):
    name = "Remote Code Execution (CVE)"


class CVEPrivilegeEscalationCategory(CVECategory):
    name = "Privilege Escalation (CVE)"


class CVEDenialOfServiceTechnique(CVECategory):
    name = "Denial Of Service (CVE)"
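A quick check of the naming helpers defined above:

ExposedSensitiveInterfacesTechnique.get_name()
# -> "Initial Access // Exposed sensitive interfaces"
# (__mro__ is [technique, category, MITRECategory, object], so index -3 is the category)

CVERemoteCodeExecutionCategory.get_name()
# -> "CVE // Remote Code Execution (CVE)"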
@@ -2,7 +2,7 @@ import logging
import requests

from kube_hunter.core.types import Discovery
from kube_hunter.core.events import handler
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import OpenPortEvent, Service, Event, EventFilterBase

from kube_hunter.conf import get_config

@@ -3,7 +3,7 @@ import logging
import requests

from kube_hunter.conf import get_config
from kube_hunter.core.events import handler
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import Event, OpenPortEvent, Service
from kube_hunter.core.types import Discovery


@@ -1,4 +1,4 @@
from kube_hunter.core.events import handler
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import Event, OpenPortEvent, Service
from kube_hunter.core.types import Discovery


@@ -1,16 +1,19 @@
import json
import os
import sys
import socket
import logging
import itertools
import requests

from enum import Enum
from netaddr import IPNetwork, IPAddress, AddrFormatError
from netifaces import AF_INET, ifaddresses, interfaces, gateways

from kube_hunter.conf import get_config
from kube_hunter.core.events import handler
from kube_hunter.modules.discovery.kubernetes_client import list_all_k8s_cluster_nodes
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import Event, NewHostEvent, Vulnerability
from kube_hunter.core.types import Discovery, InformationDisclosure, Azure
from kube_hunter.core.types import Discovery, AWS, Azure, InstanceMetadataApiTechnique

logger = logging.getLogger(__name__)

@@ -18,11 +21,17 @@ logger = logging.getLogger(__name__)
class RunningAsPodEvent(Event):
    def __init__(self):
        self.name = "Running from within a pod"
        self.auth_token = self.get_service_account_file("token")
        self.client_cert = self.get_service_account_file("ca.crt")
        self.namespace = self.get_service_account_file("namespace")
        self.kubeservicehost = os.environ.get("KUBERNETES_SERVICE_HOST", None)

        # if service account token was manually specified, we don't load the token file
        config = get_config()
        if config.service_account_token:
            self.auth_token = config.service_account_token
        else:
            self.auth_token = self.get_service_account_file("token")

    # Event's logical location to be used mainly for reports.
    def location(self):
        location = "Local to Pod"
@@ -40,6 +49,21 @@ class RunningAsPodEvent(Event):
            pass


class AWSMetadataApi(Vulnerability, Event):
    """Access to the AWS Metadata API exposes information about the machines associated with the cluster"""

    def __init__(self, cidr):
        Vulnerability.__init__(
            self,
            AWS,
            "AWS Metadata Exposure",
            category=InstanceMetadataApiTechnique,
            vid="KHV053",
        )
        self.cidr = cidr
        self.evidence = f"cidr: {cidr}"


class AzureMetadataApi(Vulnerability, Event):
    """Access to the Azure Metadata API exposes information about the machines associated with the cluster"""

@@ -48,7 +72,7 @@ class AzureMetadataApi(Vulnerability, Event):
            self,
            Azure,
            "Azure Metadata Exposure",
            category=InformationDisclosure,
            category=InstanceMetadataApiTechnique,
            vid="KHV003",
        )
        self.cidr = cidr
@@ -99,16 +123,25 @@ class FromPodHostDiscovery(Discovery):

    def execute(self):
        config = get_config()
        # Attempt to read all hosts from the Kubernetes API
        for host in list_all_k8s_cluster_nodes(config.kubeconfig):
            self.publish_event(NewHostEvent(host=host))
        # Scan any hosts that the user specified
        if config.remote or config.cidr:
            self.publish_event(HostScanEvent())
        else:
            # Discover cluster subnets, we'll scan all these hosts
            cloud = None
            cloud, subnets = None, list()
            if self.is_azure_pod():
                subnets, cloud = self.azure_metadata_discovery()
            else:
                subnets = self.gateway_discovery()
            elif self.is_aws_pod_v1():
                subnets, cloud = self.aws_metadata_v1_discovery()
            elif self.is_aws_pod_v2():
                subnets, cloud = self.aws_metadata_v2_discovery()

            gateway_subnet = self.gateway_discovery()
            if gateway_subnet:
                subnets.append(gateway_subnet)

            should_scan_apiserver = False
            if self.event.kubeservicehost:
@@ -122,6 +155,50 @@ class FromPodHostDiscovery(Discovery):
        if should_scan_apiserver:
            self.publish_event(NewHostEvent(host=IPAddress(self.event.kubeservicehost), cloud=cloud))

    def is_aws_pod_v1(self):
        config = get_config()
        try:
            # Instance Metadata Service v1
            logger.debug("From pod attempting to access AWS Metadata v1 API")
            if (
                requests.get(
                    "http://169.254.169.254/latest/meta-data/",
                    timeout=config.network_timeout,
                ).status_code
                == 200
            ):
                return True
        except requests.exceptions.ConnectionError:
            logger.debug("Failed to connect AWS metadata server v1")
        except Exception:
            logger.debug("Unknown error when trying to connect to AWS metadata v1 API")
        return False

    def is_aws_pod_v2(self):
        config = get_config()
        try:
            # Instance Metadata Service v2
            logger.debug("From pod attempting to access AWS Metadata v2 API")
            token = requests.put(
                "http://169.254.169.254/latest/api/token/",
                headers={"X-aws-ec2-metadata-token-ttl-seconds": "21600"},
                timeout=config.network_timeout,
            ).text
            if (
                requests.get(
                    "http://169.254.169.254/latest/meta-data/",
                    headers={"X-aws-ec2-metadata-token": token},
                    timeout=config.network_timeout,
                ).status_code
                == 200
            ):
                return True
        except requests.exceptions.ConnectionError:
            logger.debug("Failed to connect AWS metadata server v2")
        except Exception:
            logger.debug("Unknown error when trying to connect to AWS metadata v2 API")
        return False

    def is_azure_pod(self):
        config = get_config()
        try:
@@ -137,12 +214,97 @@ class FromPodHostDiscovery(Discovery):
            return True
        except requests.exceptions.ConnectionError:
            logger.debug("Failed to connect Azure metadata server")
            return False
        except Exception:
            logger.debug("Unknown error when trying to connect to Azure metadata server")
            return False

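The IMDSv2 token dance used above, as a standalone sketch (endpoints and headers are AWS's documented instance metadata API, reachable only from inside an EC2 instance):

import requests


def imds_v2_reachable(timeout=5.0):
    # a session token must be obtained with PUT before any metadata read
    token = requests.put(
        "http://169.254.169.254/latest/api/token",
        headers={"X-aws-ec2-metadata-token-ttl-seconds": "21600"},
        timeout=timeout,
    ).text
    response = requests.get(
        "http://169.254.169.254/latest/meta-data/",
        headers={"X-aws-ec2-metadata-token": token},
        timeout=timeout,
    )
    return response.status_code == 200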
    # for pod scanning
    def gateway_discovery(self):
        """ Retrieving default gateway of pod, which is usually also a contact point with the host """
        return [[gateways()["default"][AF_INET][0], "24"]]
        """Retrieving default gateway of pod, which is usually also a contact point with the host"""
        # read the default gateway directly from the kernel
        # netifaces currently does not have a maintainer, so we dropped it and support Linux only for this case
        # TODO: implement WMI queries for windows support
        # https://stackoverflow.com/a/6556951
        if sys.platform in ["linux", "linux2"]:
            ip = None
            try:
                from pyroute2 import IPDB

                ip = IPDB()
                gateway_ip = ip.routes["default"]["gateway"]
                return [gateway_ip, "24"]
            except Exception as x:
                logging.debug(f"Exception while fetching default gateway from container - {x}")
            finally:
                if ip is not None:
                    ip.release()
        else:
            logging.debug("Not running in a linux env, will not scan default subnet")

        return False
    # querying AWS's interface metadata api v1 | works only from a pod
    def aws_metadata_v1_discovery(self):
        config = get_config()
        logger.debug("From pod attempting to access aws's metadata v1")
        mac_address = requests.get(
            "http://169.254.169.254/latest/meta-data/mac",
            timeout=config.network_timeout,
        ).text
        logger.debug(f"Extracted mac from aws's metadata v1: {mac_address}")

        cidr = requests.get(
            f"http://169.254.169.254/latest/meta-data/network/interfaces/macs/{mac_address}/subnet-ipv4-cidr-block",
            timeout=config.network_timeout,
        ).text
        logger.debug(f"Trying to extract cidr from aws's metadata v1: {cidr}")

        try:
            cidr = cidr.split("/")
            address, subnet = (cidr[0], cidr[1])
            subnet = subnet if not config.quick else "24"
            cidr = f"{address}/{subnet}"
            logger.debug(f"From pod discovered subnet {cidr}")

            self.publish_event(AWSMetadataApi(cidr=cidr))
            return [(address, subnet)], "AWS"
        except Exception as x:
            logger.debug(f"ERROR: could not parse cidr from aws metadata api: {cidr} - {x}")

        return [], "AWS"

    # querying AWS's interface metadata api v2 | works only from a pod
    def aws_metadata_v2_discovery(self):
        config = get_config()
        logger.debug("From pod attempting to access aws's metadata v2")
        token = requests.put(
            "http://169.254.169.254/latest/api/token",
            headers={"X-aws-ec2-metadata-token-ttl-seconds": "21600"},
            timeout=config.network_timeout,
        ).text
        mac_address = requests.get(
            "http://169.254.169.254/latest/meta-data/mac",
            headers={"X-aws-ec2-metadata-token": token},
            timeout=config.network_timeout,
        ).text
        cidr = requests.get(
            f"http://169.254.169.254/latest/meta-data/network/interfaces/macs/{mac_address}/subnet-ipv4-cidr-block",
            headers={"X-aws-ec2-metadata-token": token},
            timeout=config.network_timeout,
        ).text.split("/")

        try:
            address, subnet = (cidr[0], cidr[1])
            subnet = subnet if not config.quick else "24"
            cidr = f"{address}/{subnet}"
            logger.debug(f"From pod discovered subnet {cidr}")

            self.publish_event(AWSMetadataApi(cidr=cidr))

            return [(address, subnet)], "AWS"
        except Exception as x:
            logger.debug(f"ERROR: could not parse cidr from aws metadata api: {cidr} - {x}")

        return [], "AWS"
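A worked example of the cidr parsing both discovery methods share; the subnet-ipv4-cidr-block endpoint returns a string such as "172.31.16.0/20", and the quick-scan flag coarsens the mask to /24:

cidr = "172.31.16.0/20".split("/")   # hypothetical metadata response
address, subnet = cidr[0], cidr[1]   # "172.31.16.0", "20"
quick = True                         # i.e. config.quick
subnet = subnet if not quick else "24"
print(f"{address}/{subnet}")         # -> 172.31.16.0/24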
    # querying azure's interface metadata api | works only from a pod
    def azure_metadata_discovery(self):
@@ -188,6 +350,9 @@ class HostDiscovery(Discovery):
        elif len(config.remote) > 0:
            for host in config.remote:
                self.publish_event(NewHostEvent(host=host))
        elif config.k8s_auto_discover_nodes:
            for host in list_all_k8s_cluster_nodes(config.kubeconfig):
                self.publish_event(NewHostEvent(host=host))

    # for normal scanning
    def scan_interfaces(self):
@@ -196,13 +361,62 @@ class HostDiscovery(Discovery):

    # generate all subnets from all internal network interfaces
    def generate_interfaces_subnet(self, sn="24"):
        for ifaceName in interfaces():
            for ip in [i["addr"] for i in ifaddresses(ifaceName).setdefault(AF_INET, [])]:
                if not self.event.localhost and InterfaceTypes.LOCALHOST.value in ip.__str__():
        if sys.platform == "win32":
            return self.generate_interfaces_subnet_windows()
        elif sys.platform in ["linux", "linux2"]:
            return self.generate_interfaces_subnet_linux()

    def generate_interfaces_subnet_linux(self, sn="24"):
        try:
            from pyroute2 import IPRoute

            ip = IPRoute()
            for i in ip.get_addr():
                # whitelist only ipv4 ips
                if i["family"] == socket.AF_INET:
                    ipaddress = i.get_attr("IFA_ADDRESS")
                    # TODO: add this instead of hardcoded 24 subnet, (add a flag for full scan option)
                    # subnet = i['prefixlen']

                    # unless specified explicitly with localhost scan flag, skip localhost ip addresses
                    if not self.event.localhost and ipaddress.startswith(InterfaceTypes.LOCALHOST.value):
                        continue

                    ip_network = IPNetwork(f"{ipaddress}/{sn}")
                    for ip in ip_network:
                        yield ip
        except Exception as x:
            logging.debug(f"Exception while generating subnet scan from local interfaces: {x}")
        finally:
            ip.release()

    def generate_interfaces_subnet_windows(self, sn="24"):
        from subprocess import check_output

        local_subnets = (
            check_output(
                "powershell -NoLogo -NoProfile -NonInteractive -ExecutionPolicy bypass -Command "
                ' "& {'
                "Get-NetIPConfiguration | Get-NetIPAddress | Where-Object {$_.AddressFamily -eq 'IPv4'}"
                " | Select-Object -Property IPAddress, PrefixLength | ConvertTo-Json "
                ' "}',
                shell=True,
            )
            .decode()
            .strip()
        )
        try:
            subnets = json.loads(local_subnets)
            for subnet in subnets:
                if not self.event.localhost and subnet["IPAddress"].startswith(InterfaceTypes.LOCALHOST.value):
                    continue
                for ip in IPNetwork(f"{ip}/{sn}"):
                ip_network = IPNetwork(f"{subnet['IPAddress']}/{sn}")
                for ip in ip_network:
                    yield ip

        except Exception as x:
            logging.debug(f"ERROR: Could not extract interface information using powershell - {x}")


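Both generators lean on netaddr's IPNetwork to expand a single interface address into scan targets:

from netaddr import IPNetwork

ips = list(IPNetwork("10.0.0.5/24"))
len(ips)                # 256: 10.0.0.0 through 10.0.0.255, network and broadcast included
print(ips[0], ips[-1])  # 10.0.0.0 10.0.0.255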
# for comparing prefixes
class InterfaceTypes(Enum):

@@ -2,7 +2,7 @@ import logging
import subprocess

from kube_hunter.core.types import Discovery
from kube_hunter.core.events import handler
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import HuntStarted, Event

logger = logging.getLogger(__name__)

@@ -5,7 +5,7 @@ from enum import Enum

from kube_hunter.conf import get_config
from kube_hunter.core.types import Discovery
from kube_hunter.core.events import handler
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import OpenPortEvent, Event, Service

urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

kube_hunter/modules/discovery/kubernetes_client.py (new file, 27 lines)
@@ -0,0 +1,27 @@
import logging
import kubernetes


def list_all_k8s_cluster_nodes(kube_config=None, client=None):
    logger = logging.getLogger(__name__)
    try:
        if kube_config:
            logger.debug("Attempting to use kubeconfig file: %s", kube_config)
            kubernetes.config.load_kube_config(config_file=kube_config)
        else:
            logger.debug("Attempting to use in cluster Kubernetes config")
            kubernetes.config.load_incluster_config()
    except kubernetes.config.config_exception.ConfigException as ex:
        logger.debug(f"Failed to initiate Kubernetes client: {ex}")
        return

    try:
        if client is None:
            client = kubernetes.client.CoreV1Api()
        ret = client.list_node(watch=False)
        logger.info("Listed %d nodes in the cluster" % len(ret.items))
        for item in ret.items:
            for addr in item.status.addresses:
                yield addr.address
    except Exception as ex:
        logger.debug(f"Failed to list nodes from Kubernetes: {ex}")
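Consuming this generator outside the hunt flow is straightforward (the kubeconfig path below is an assumption):

from kube_hunter.modules.discovery.kubernetes_client import list_all_k8s_cluster_nodes

for address in list_all_k8s_cluster_nodes(kube_config="/home/user/.kube/config"):
    print(address)  # one line per node address record (InternalIP, Hostname, ...)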
@@ -2,7 +2,7 @@ import logging
from socket import socket

from kube_hunter.core.types import Discovery
from kube_hunter.core.events import handler
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import NewHostEvent, OpenPortEvent

logger = logging.getLogger(__name__)

@@ -3,7 +3,7 @@ import requests

from kube_hunter.conf import get_config
from kube_hunter.core.types import Discovery
from kube_hunter.core.events import handler
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import Service, Event, OpenPortEvent

logger = logging.getLogger(__name__)

@@ -2,12 +2,10 @@
from . import (
    aks,
    apiserver,
    arp,
    capabilities,
    certificates,
    cves,
    dashboard,
    dns,
    etcd,
    kubelet,
    mounts,

@@ -5,9 +5,9 @@ import requests

from kube_hunter.conf import get_config
from kube_hunter.modules.hunting.kubelet import ExposedPodsHandler, SecureKubeletPortHunter
from kube_hunter.core.events import handler
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import Event, Vulnerability
from kube_hunter.core.types import Hunter, ActiveHunter, IdentityTheft, Azure
from kube_hunter.core.types import Hunter, ActiveHunter, MountServicePrincipalTechnique, Azure

logger = logging.getLogger(__name__)

@@ -20,7 +20,7 @@ class AzureSpnExposure(Vulnerability, Event):
            self,
            Azure,
            "Azure SPN Exposure",
            category=IdentityTheft,
            category=MountServicePrincipalTechnique,
            vid="KHV004",
        )
        self.container = container

@@ -5,13 +5,18 @@ import requests

from kube_hunter.conf import get_config
from kube_hunter.modules.discovery.apiserver import ApiServer
from kube_hunter.core.events import handler
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import Vulnerability, Event, K8sVersionDisclosure
from kube_hunter.core.types import Hunter, ActiveHunter, KubernetesCluster
from kube_hunter.core.types import (
    AccessRisk,
    InformationDisclosure,
    UnauthenticatedAccess,
from kube_hunter.core.types.vulnerabilities import (
    AccessK8sApiServerTechnique,
    ExposedSensitiveInterfacesTechnique,
    GeneralDefenseEvasionTechnique,
    DataDestructionTechnique,
    ClusterAdminBindingTechnique,
    NewContainerTechnique,
    PrivilegedContainerTechnique,
    SidecarInjectionTechnique,
)

logger = logging.getLogger(__name__)
@@ -24,10 +29,10 @@ class ServerApiAccess(Vulnerability, Event):
    def __init__(self, evidence, using_token):
        if using_token:
            name = "Access to API using service account token"
            category = InformationDisclosure
            category = AccessK8sApiServerTechnique
        else:
            name = "Unauthenticated access to API"
            category = UnauthenticatedAccess
            category = ExposedSensitiveInterfacesTechnique
        Vulnerability.__init__(
            self,
            KubernetesCluster,
@@ -44,7 +49,7 @@ class ServerApiHTTPAccess(Vulnerability, Event):

    def __init__(self, evidence):
        name = "Insecure (HTTP) access to API"
        category = UnauthenticatedAccess
        category = ExposedSensitiveInterfacesTechnique
        Vulnerability.__init__(
            self,
            KubernetesCluster,
@@ -59,7 +64,7 @@ class ApiInfoDisclosure(Vulnerability, Event):
    """Information Disclosure depending upon RBAC permissions and Kube-Cluster Setup"""

    def __init__(self, evidence, using_token, name):
        category = InformationDisclosure
        category = AccessK8sApiServerTechnique
        if using_token:
            name += " using default service account token"
        else:
@@ -75,28 +80,28 @@ class ApiInfoDisclosure(Vulnerability, Event):


class ListPodsAndNamespaces(ApiInfoDisclosure):
    """ Accessing pods might give an attacker valuable information"""
    """Accessing pods might give an attacker valuable information"""

    def __init__(self, evidence, using_token):
        ApiInfoDisclosure.__init__(self, evidence, using_token, "Listing pods")


class ListNamespaces(ApiInfoDisclosure):
    """ Accessing namespaces might give an attacker valuable information """
    """Accessing namespaces might give an attacker valuable information"""

    def __init__(self, evidence, using_token):
        ApiInfoDisclosure.__init__(self, evidence, using_token, "Listing namespaces")


class ListRoles(ApiInfoDisclosure):
    """ Accessing roles might give an attacker valuable information """
    """Accessing roles might give an attacker valuable information"""

    def __init__(self, evidence, using_token):
        ApiInfoDisclosure.__init__(self, evidence, using_token, "Listing roles")


class ListClusterRoles(ApiInfoDisclosure):
    """ Accessing cluster roles might give an attacker valuable information """
    """Accessing cluster roles might give an attacker valuable information"""

    def __init__(self, evidence, using_token):
        ApiInfoDisclosure.__init__(self, evidence, using_token, "Listing cluster roles")
@@ -111,21 +116,21 @@ class CreateANamespace(Vulnerability, Event):
            self,
            KubernetesCluster,
            name="Created a namespace",
            category=AccessRisk,
            category=GeneralDefenseEvasionTechnique,
        )
        self.evidence = evidence


class DeleteANamespace(Vulnerability, Event):

    """ Deleting a namespace might give an attacker the option to affect application behavior """
    """Deleting a namespace might give an attacker the option to affect application behavior"""

    def __init__(self, evidence):
        Vulnerability.__init__(
            self,
            KubernetesCluster,
            name="Delete a namespace",
            category=AccessRisk,
            category=DataDestructionTechnique,
        )
        self.evidence = evidence

@@ -136,7 +141,7 @@ class CreateARole(Vulnerability, Event):
    """

    def __init__(self, evidence):
        Vulnerability.__init__(self, KubernetesCluster, name="Created a role", category=AccessRisk)
        Vulnerability.__init__(self, KubernetesCluster, name="Created a role", category=GeneralDefenseEvasionTechnique)
        self.evidence = evidence


@@ -150,7 +155,7 @@ class CreateAClusterRole(Vulnerability, Event):
            self,
            KubernetesCluster,
            name="Created a cluster role",
            category=AccessRisk,
            category=ClusterAdminBindingTechnique,
        )
        self.evidence = evidence

@@ -165,7 +170,7 @@ class PatchARole(Vulnerability, Event):
            self,
            KubernetesCluster,
            name="Patched a role",
            category=AccessRisk,
            category=ClusterAdminBindingTechnique,
        )
        self.evidence = evidence

@@ -180,85 +185,85 @@ class PatchAClusterRole(Vulnerability, Event):
            self,
            KubernetesCluster,
            name="Patched a cluster role",
            category=AccessRisk,
            category=ClusterAdminBindingTechnique,
        )
        self.evidence = evidence


class DeleteARole(Vulnerability, Event):
    """ Deleting a role might allow an attacker to affect access to resources in the namespace"""
    """Deleting a role might allow an attacker to affect access to resources in the namespace"""

    def __init__(self, evidence):
        Vulnerability.__init__(
            self,
            KubernetesCluster,
            name="Deleted a role",
            category=AccessRisk,
            category=DataDestructionTechnique,
        )
        self.evidence = evidence


class DeleteAClusterRole(Vulnerability, Event):
    """ Deleting a cluster role might allow an attacker to affect access to resources in the cluster"""
    """Deleting a cluster role might allow an attacker to affect access to resources in the cluster"""

    def __init__(self, evidence):
        Vulnerability.__init__(
            self,
            KubernetesCluster,
            name="Deleted a cluster role",
            category=AccessRisk,
            category=DataDestructionTechnique,
        )
        self.evidence = evidence


class CreateAPod(Vulnerability, Event):
    """ Creating a new pod allows an attacker to run custom code"""
    """Creating a new pod allows an attacker to run custom code"""

    def __init__(self, evidence):
        Vulnerability.__init__(
            self,
            KubernetesCluster,
            name="Created A Pod",
            category=AccessRisk,
            category=NewContainerTechnique,
        )
        self.evidence = evidence


class CreateAPrivilegedPod(Vulnerability, Event):
    """ Creating a new PRIVILEGED pod would gain an attacker FULL CONTROL over the cluster"""
    """Creating a new PRIVILEGED pod would gain an attacker FULL CONTROL over the cluster"""

    def __init__(self, evidence):
        Vulnerability.__init__(
            self,
            KubernetesCluster,
            name="Created A PRIVILEGED Pod",
            category=AccessRisk,
            category=PrivilegedContainerTechnique,
        )
        self.evidence = evidence


class PatchAPod(Vulnerability, Event):
    """ Patching a pod allows an attacker to compromise and control it """
    """Patching a pod allows an attacker to compromise and control it"""

    def __init__(self, evidence):
        Vulnerability.__init__(
            self,
            KubernetesCluster,
            name="Patched A Pod",
            category=AccessRisk,
            category=SidecarInjectionTechnique,
        )
        self.evidence = evidence


class DeleteAPod(Vulnerability, Event):
    """ Deleting a pod allows an attacker to disturb applications on the cluster """
    """Deleting a pod allows an attacker to disturb applications on the cluster"""

    def __init__(self, evidence):
        Vulnerability.__init__(
            self,
            KubernetesCluster,
            name="Deleted A Pod",
            category=AccessRisk,
            category=DataDestructionTechnique,
        )
        self.evidence = evidence

@@ -377,7 +382,7 @@ class AccessApiServerWithToken(AccessApiServer):
        super().__init__(event)
        assert self.event.auth_token
        self.headers = {"Authorization": f"Bearer {self.event.auth_token}"}
        self.category = InformationDisclosure
        self.category = AccessK8sApiServerTechnique
        self.with_token = True


@@ -1,71 +0,0 @@
import logging

from scapy.all import ARP, IP, ICMP, Ether, sr1, srp

from kube_hunter.conf import get_config
from kube_hunter.core.events import handler
from kube_hunter.core.events.types import Event, Vulnerability
from kube_hunter.core.types import ActiveHunter, KubernetesCluster, IdentityTheft
from kube_hunter.modules.hunting.capabilities import CapNetRawEnabled

logger = logging.getLogger(__name__)


class PossibleArpSpoofing(Vulnerability, Event):
    """A malicious pod running on the cluster could potentially run an ARP Spoof attack
    and perform a MITM between pods on the node."""

    def __init__(self):
        Vulnerability.__init__(
            self,
            KubernetesCluster,
            "Possible Arp Spoof",
            category=IdentityTheft,
            vid="KHV020",
        )


@handler.subscribe(CapNetRawEnabled)
class ArpSpoofHunter(ActiveHunter):
    """Arp Spoof Hunter
    Checks for the possibility of running an ARP spoof
    attack from within a pod (results are based on the running node)
    """

    def __init__(self, event):
        self.event = event

    def try_getting_mac(self, ip):
        config = get_config()
        ans = sr1(ARP(op=1, pdst=ip), timeout=config.network_timeout, verbose=0)
        return ans[ARP].hwsrc if ans else None

    def detect_l3_on_host(self, arp_responses):
        """ returns True for an existence of an L3 network plugin """
        logger.debug("Attempting to detect L3 network plugin using ARP")
        unique_macs = list({response[ARP].hwsrc for _, response in arp_responses})

        # if LAN addresses not unique
        if len(unique_macs) == 1:
            # if an ip outside the subnets gets a mac address
            outside_mac = self.try_getting_mac("1.1.1.1")
            # outside mac is the same as lan macs
            if outside_mac == unique_macs[0]:
                return True
        # only one mac address for whole LAN and outside
        return False

    def execute(self):
        config = get_config()
        self_ip = sr1(IP(dst="1.1.1.1", ttl=1) / ICMP(), verbose=0, timeout=config.network_timeout)[IP].dst
        arp_responses, _ = srp(
            Ether(dst="ff:ff:ff:ff:ff:ff") / ARP(op=1, pdst=f"{self_ip}/24"),
            timeout=config.network_timeout,
            verbose=0,
        )

        # arp enabled on cluster and more than one pod on node
        if len(arp_responses) > 1:
            # L3 plugin not installed
            if not self.detect_l3_on_host(arp_responses):
                self.publish_event(PossibleArpSpoofing())
@@ -2,9 +2,9 @@ import socket
import logging

from kube_hunter.modules.discovery.hosts import RunningAsPodEvent
from kube_hunter.core.events import handler
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import Event, Vulnerability
from kube_hunter.core.types import Hunter, AccessRisk, KubernetesCluster
from kube_hunter.core.types import Hunter, ARPPoisoningTechnique, KubernetesCluster

logger = logging.getLogger(__name__)

@@ -20,7 +20,7 @@ class CapNetRawEnabled(Event, Vulnerability):
            self,
            KubernetesCluster,
            name="CAP_NET_RAW Enabled",
            category=AccessRisk,
            category=ARPPoisoningTechnique,
        )

@@ -3,8 +3,8 @@ import logging
import base64
import re

from kube_hunter.core.types import Hunter, KubernetesCluster, InformationDisclosure
from kube_hunter.core.events import handler
from kube_hunter.core.types import Hunter, KubernetesCluster, GeneralSensitiveInformationTechnique
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import Vulnerability, Event, Service

logger = logging.getLogger(__name__)
@@ -21,7 +21,7 @@ class CertificateEmail(Vulnerability, Event):
            self,
            KubernetesCluster,
            "Certificate Includes Email Address",
            category=InformationDisclosure,
            category=GeneralSensitiveInformationTechnique,
            vid="KHV021",
        )
        self.email = email

@@ -2,19 +2,21 @@ import logging
from packaging import version

from kube_hunter.conf import get_config
from kube_hunter.core.events import handler
from kube_hunter.core.events.types import Vulnerability, Event, K8sVersionDisclosure
from kube_hunter.core.events.event_handler import handler

from kube_hunter.core.events.types import K8sVersionDisclosure, Vulnerability, Event
from kube_hunter.core.types import (
    Hunter,
    KubernetesCluster,
    RemoteCodeExec,
    PrivilegeEscalation,
    DenialOfService,
    KubectlClient,
    KubernetesCluster,
    CVERemoteCodeExecutionCategory,
    CVEPrivilegeEscalationCategory,
    CVEDenialOfServiceTechnique,
)
from kube_hunter.modules.discovery.kubectl import KubectlClientEvent

logger = logging.getLogger(__name__)
config = get_config()


class ServerApiVersionEndPointAccessPE(Vulnerability, Event):
@@ -25,7 +27,7 @@ class ServerApiVersionEndPointAccessPE(Vulnerability, Event):
            self,
            KubernetesCluster,
            name="Critical Privilege Escalation CVE",
            category=PrivilegeEscalation,
            category=CVEPrivilegeEscalationCategory,
            vid="KHV022",
        )
        self.evidence = evidence
@@ -40,7 +42,7 @@ class ServerApiVersionEndPointAccessDos(Vulnerability, Event):
            self,
            KubernetesCluster,
            name="Denial of Service to Kubernetes API Server",
            category=DenialOfService,
            category=CVEDenialOfServiceTechnique,
            vid="KHV023",
        )
        self.evidence = evidence
@@ -55,7 +57,7 @@ class PingFloodHttp2Implementation(Vulnerability, Event):
            self,
            KubernetesCluster,
            name="Possible Ping Flood Attack",
            category=DenialOfService,
            category=CVEDenialOfServiceTechnique,
            vid="KHV024",
        )
        self.evidence = evidence
@@ -70,7 +72,7 @@ class ResetFloodHttp2Implementation(Vulnerability, Event):
            self,
            KubernetesCluster,
            name="Possible Reset Flood Attack",
            category=DenialOfService,
            category=CVEDenialOfServiceTechnique,
            vid="KHV025",
        )
        self.evidence = evidence
@@ -85,7 +87,7 @@ class ServerApiClusterScopedResourcesAccess(Vulnerability, Event):
            self,
            KubernetesCluster,
            name="Arbitrary Access To Cluster Scoped Resources",
            category=PrivilegeEscalation,
            category=CVEPrivilegeEscalationCategory,
            vid="KHV026",
        )
        self.evidence = evidence
@@ -100,7 +102,7 @@ class IncompleteFixToKubectlCpVulnerability(Vulnerability, Event):
            self,
            KubectlClient,
            "Kubectl Vulnerable To CVE-2019-11246",
            category=RemoteCodeExec,
            category=CVERemoteCodeExecutionCategory,
            vid="KHV027",
        )
        self.binary_version = binary_version
@@ -116,7 +118,7 @@ class KubectlCpVulnerability(Vulnerability, Event):
            self,
            KubectlClient,
            "Kubectl Vulnerable To CVE-2019-1002101",
            category=RemoteCodeExec,
            category=CVERemoteCodeExecutionCategory,
            vid="KHV028",
        )
        self.binary_version = binary_version
@@ -199,7 +201,7 @@ class CveUtils:
        return vulnerable


@handler.subscribe_once(K8sVersionDisclosure)
@handler.subscribe_once(K8sVersionDisclosure, is_register=config.enable_cve_hunting)
class K8sClusterCveHunter(Hunter):
    """K8s CVE Hunter
    Checks if Node is running a Kubernetes version vulnerable to
@@ -224,6 +226,7 @@ class K8sClusterCveHunter(Hunter):
            self.publish_event(vulnerability(self.event.version))


# Removed due to incomplete implementation for multiple vendors' revisions of kubernetes
@handler.subscribe(KubectlClientEvent)
class KubectlCVEHunter(Hunter):
    """Kubectl CVE Hunter

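The is_register flag added above gates registration at import time, so CVE hunting can be switched off entirely. A hedged sketch of the same pattern (the demo hunter is hypothetical; the decorator signature is taken from this diff):

config = get_config()


@handler.subscribe_once(K8sVersionDisclosure, is_register=config.enable_cve_hunting)
class DemoVersionHunter(Hunter):
    """Demo Version Hunter
    Fires at most once per hunt, and only when CVE hunting was enabled
    """

    def __init__(self, event):
        self.event = event

    def execute(self):
        logger.debug(f"saw version {self.event.version}")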
@@ -3,8 +3,8 @@ import json
import requests

from kube_hunter.conf import get_config
from kube_hunter.core.types import Hunter, RemoteCodeExec, KubernetesCluster
from kube_hunter.core.events import handler
from kube_hunter.core.types import Hunter, AccessK8sDashboardTechnique, KubernetesCluster
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import Vulnerability, Event
from kube_hunter.modules.discovery.dashboard import KubeDashboardEvent

@@ -19,7 +19,7 @@ class DashboardExposed(Vulnerability, Event):
            self,
            KubernetesCluster,
            "Dashboard Exposed",
            category=RemoteCodeExec,
            category=AccessK8sDashboardTechnique,
            vid="KHV029",
        )
        self.evidence = "nodes: {}".format(" ".join(nodes)) if nodes else None

@@ -1,90 +0,0 @@
import re
import logging

from scapy.all import IP, ICMP, UDP, DNS, DNSQR, ARP, Ether, sr1, srp1, srp

from kube_hunter.conf import get_config
from kube_hunter.core.events import handler
from kube_hunter.core.events.types import Event, Vulnerability
from kube_hunter.core.types import ActiveHunter, KubernetesCluster, IdentityTheft
from kube_hunter.modules.hunting.arp import PossibleArpSpoofing

logger = logging.getLogger(__name__)


class PossibleDnsSpoofing(Vulnerability, Event):
    """A malicious pod running on the cluster could potentially run a DNS Spoof attack
    and perform a MITM attack on applications running in the cluster."""

    def __init__(self, kubedns_pod_ip):
        Vulnerability.__init__(
            self,
            KubernetesCluster,
            "Possible DNS Spoof",
            category=IdentityTheft,
            vid="KHV030",
        )
        self.kubedns_pod_ip = kubedns_pod_ip
        self.evidence = f"kube-dns at: {self.kubedns_pod_ip}"


# Only triggered with RunningAsPod base event
@handler.subscribe(PossibleArpSpoofing)
class DnsSpoofHunter(ActiveHunter):
    """DNS Spoof Hunter
    Checks for the possibility for a malicious pod to compromise DNS requests of the cluster
    (results are based on the running node)
    """

    def __init__(self, event):
        self.event = event

    def get_cbr0_ip_mac(self):
        config = get_config()
        res = srp1(Ether() / IP(dst="1.1.1.1", ttl=1) / ICMP(), verbose=0, timeout=config.network_timeout)
        return res[IP].src, res.src

    def extract_nameserver_ip(self):
        with open("/etc/resolv.conf") as f:
            # finds first nameserver in /etc/resolv.conf
            match = re.search(r"nameserver (\d+.\d+.\d+.\d+)", f.read())
            if match:
                return match.group(1)

    def get_kube_dns_ip_mac(self):
        config = get_config()
        kubedns_svc_ip = self.extract_nameserver_ip()

        # getting actual pod ip of kube-dns service, by comparing the src mac of a dns response and arp scanning.
        dns_info_res = srp1(
            Ether() / IP(dst=kubedns_svc_ip) / UDP(dport=53) / DNS(rd=1, qd=DNSQR()),
            verbose=0,
            timeout=config.network_timeout,
        )
        kubedns_pod_mac = dns_info_res.src
        self_ip = dns_info_res[IP].dst

        arp_responses, _ = srp(
            Ether(dst="ff:ff:ff:ff:ff:ff") / ARP(op=1, pdst=f"{self_ip}/24"),
            timeout=config.network_timeout,
            verbose=0,
        )
        for _, response in arp_responses:
            if response[Ether].src == kubedns_pod_mac:
                return response[ARP].psrc, response.src

    def execute(self):
        config = get_config()
        logger.debug("Attempting to get kube-dns pod ip")
        self_ip = sr1(IP(dst="1.1.1.1", ttl=1) / ICMP(), verbose=0, timeout=config.network_timeout)[IP].dst
        cbr0_ip, cbr0_mac = self.get_cbr0_ip_mac()

        kubedns = self.get_kube_dns_ip_mac()
        if kubedns:
            kubedns_ip, kubedns_mac = kubedns
            logger.debug(f"ip={self_ip} kubednsip={kubedns_ip} cbr0ip={cbr0_ip}")
            if kubedns_mac != cbr0_mac:
                # if self pod in the same subnet as kube-dns pod
                self.publish_event(PossibleDnsSpoofing(kubedns_pod_ip=kubedns_ip))
        else:
            logger.debug("Could not get kubedns identity")
@@ -2,16 +2,16 @@ import logging
import requests

from kube_hunter.conf import get_config
from kube_hunter.core.events import handler
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import Vulnerability, Event, OpenPortEvent
from kube_hunter.core.types import (
    ActiveHunter,
    Hunter,
    KubernetesCluster,
    InformationDisclosure,
    RemoteCodeExec,
    UnauthenticatedAccess,
    AccessRisk,
    GeneralSensitiveInformationTechnique,
    GeneralPersistenceTechnique,
    ListK8sSecretsTechnique,
    ExposedSensitiveInterfacesTechnique,
)

logger = logging.getLogger(__name__)
@@ -29,7 +29,7 @@ class EtcdRemoteWriteAccessEvent(Vulnerability, Event):
            self,
            KubernetesCluster,
            name="Etcd Remote Write Access Event",
            category=RemoteCodeExec,
            category=GeneralPersistenceTechnique,
            vid="KHV031",
        )
        self.evidence = write_res
@@ -43,7 +43,7 @@ class EtcdRemoteReadAccessEvent(Vulnerability, Event):
            self,
            KubernetesCluster,
            name="Etcd Remote Read Access Event",
            category=AccessRisk,
            category=ListK8sSecretsTechnique,
            vid="KHV032",
        )
        self.evidence = keys
@@ -58,7 +58,7 @@ class EtcdRemoteVersionDisclosureEvent(Vulnerability, Event):
            self,
            KubernetesCluster,
            name="Etcd Remote version disclosure",
            category=InformationDisclosure,
            category=GeneralSensitiveInformationTechnique,
            vid="KHV033",
        )
        self.evidence = version
@@ -74,7 +74,7 @@ class EtcdAccessEnabledWithoutAuthEvent(Vulnerability, Event):
            self,
            KubernetesCluster,
            name="Etcd is accessible using insecure connection (HTTP)",
            category=UnauthenticatedAccess,
            category=ExposedSensitiveInterfacesTechnique,
            vid="KHV034",
        )
        self.evidence = version

@@ -9,16 +9,19 @@ import urllib3
|
||||
import uuid
|
||||
|
||||
from kube_hunter.conf import get_config
|
||||
from kube_hunter.core.events import handler
|
||||
from kube_hunter.core.events.event_handler import handler
|
||||
from kube_hunter.core.events.types import Vulnerability, Event, K8sVersionDisclosure
|
||||
from kube_hunter.core.types import (
|
||||
Hunter,
|
||||
ActiveHunter,
|
||||
KubernetesCluster,
|
||||
Kubelet,
|
||||
InformationDisclosure,
|
||||
RemoteCodeExec,
|
||||
AccessRisk,
|
||||
ExposedSensitiveInterfacesTechnique,
|
||||
ExecIntoContainerTechnique,
|
||||
GeneralDefenseEvasionTechnique,
|
||||
GeneralSensitiveInformationTechnique,
|
||||
PrivilegedContainerTechnique,
|
||||
AccessKubeletAPITechnique,
|
||||
)
|
||||
from kube_hunter.modules.discovery.kubelet import (
|
||||
ReadOnlyKubeletEvent,
|
||||
@@ -35,7 +38,7 @@ class ExposedPodsHandler(Vulnerability, Event):
|
||||
|
||||
def __init__(self, pods):
|
||||
Vulnerability.__init__(
|
||||
self, component=Kubelet, name="Exposed Pods", category=InformationDisclosure, vid="KHV052"
|
||||
self, component=Kubelet, name="Exposed Pods", category=AccessKubeletAPITechnique, vid="KHV052"
|
||||
)
|
||||
self.pods = pods
|
||||
self.evidence = f"count: {len(self.pods)}"
|
||||
@@ -50,7 +53,7 @@ class AnonymousAuthEnabled(Vulnerability, Event):
|
||||
self,
|
||||
component=Kubelet,
|
||||
name="Anonymous Authentication",
|
||||
category=RemoteCodeExec,
|
||||
category=ExposedSensitiveInterfacesTechnique,
|
||||
vid="KHV036",
|
||||
)
|
||||
|
||||
@@ -63,7 +66,7 @@ class ExposedContainerLogsHandler(Vulnerability, Event):
            self,
            component=Kubelet,
            name="Exposed Container Logs",
            category=InformationDisclosure,
            category=AccessKubeletAPITechnique,
            vid="KHV037",
        )

@@ -77,7 +80,7 @@ class ExposedRunningPodsHandler(Vulnerability, Event):
            self,
            component=Kubelet,
            name="Exposed Running Pods",
            category=InformationDisclosure,
            category=AccessKubeletAPITechnique,
            vid="KHV038",
        )
        self.count = count

@@ -92,7 +95,7 @@ class ExposedExecHandler(Vulnerability, Event):
            self,
            component=Kubelet,
            name="Exposed Exec On Container",
            category=RemoteCodeExec,
            category=ExecIntoContainerTechnique,
            vid="KHV039",
        )
@@ -105,7 +108,7 @@ class ExposedRunHandler(Vulnerability, Event):
            self,
            component=Kubelet,
            name="Exposed Run Inside Container",
            category=RemoteCodeExec,
            category=ExecIntoContainerTechnique,
            vid="KHV040",
        )

@@ -118,7 +121,7 @@ class ExposedPortForwardHandler(Vulnerability, Event):
            self,
            component=Kubelet,
            name="Exposed Port Forward",
            category=RemoteCodeExec,
            category=GeneralDefenseEvasionTechnique,
            vid="KHV041",
        )

@@ -132,7 +135,7 @@ class ExposedAttachHandler(Vulnerability, Event):
            self,
            component=Kubelet,
            name="Exposed Attaching To Container",
            category=RemoteCodeExec,
            category=ExecIntoContainerTechnique,
            vid="KHV042",
        )
@@ -146,7 +149,7 @@ class ExposedHealthzHandler(Vulnerability, Event):
            self,
            component=Kubelet,
            name="Cluster Health Disclosure",
            category=InformationDisclosure,
            category=GeneralSensitiveInformationTechnique,
            vid="KHV043",
        )
        self.status = status

@@ -163,7 +166,7 @@ the whole cluster"""
            self,
            component=KubernetesCluster,
            name="Exposed Existing Privileged Container(s) Via Secure Kubelet Port",
            category=AccessRisk,
            category=PrivilegedContainerTechnique,
            vid="KHV051",
        )
        self.exposed_existing_privileged_containers = exposed_existing_privileged_containers

@@ -178,7 +181,7 @@ class PrivilegedContainers(Vulnerability, Event):
            self,
            component=KubernetesCluster,
            name="Privileged Container",
            category=AccessRisk,
            category=PrivilegedContainerTechnique,
            vid="KHV044",
        )
        self.containers = containers

@@ -193,7 +196,7 @@ class ExposedSystemLogs(Vulnerability, Event):
            self,
            component=Kubelet,
            name="Exposed System Logs",
            category=InformationDisclosure,
            category=AccessKubeletAPITechnique,
            vid="KHV045",
        )
@@ -206,7 +209,7 @@ class ExposedKubeletCmdline(Vulnerability, Event):
            self,
            component=Kubelet,
            name="Exposed Kubelet Cmdline",
            category=InformationDisclosure,
            category=AccessKubeletAPITechnique,
            vid="KHV046",
        )
        self.cmdline = cmdline

@@ -303,7 +306,7 @@ class SecureKubeletPortHunter(Hunter):
    """

    class DebugHandlers:
        """ all methods will return the handler name if successful """
        """all methods will return the handler name if successful"""

        def __init__(self, path, pod, session=None):
            self.path = path + ("/" if not path.endswith("/") else "")
@@ -3,14 +3,9 @@ import re
import uuid

from kube_hunter.conf import get_config
from kube_hunter.core.events import handler
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import Event, Vulnerability
from kube_hunter.core.types import (
    ActiveHunter,
    Hunter,
    KubernetesCluster,
    PrivilegeEscalation,
)
from kube_hunter.core.types import ActiveHunter, Hunter, KubernetesCluster, HostPathMountPrivilegeEscalationTechnique
from kube_hunter.modules.hunting.kubelet import (
    ExposedPodsHandler,
    ExposedRunHandler,
@@ -28,7 +23,7 @@ class WriteMountToVarLog(Vulnerability, Event):
            self,
            KubernetesCluster,
            "Pod With Mount To /var/log",
            category=PrivilegeEscalation,
            category=HostPathMountPrivilegeEscalationTechnique,
            vid="KHV047",
        )
        self.pods = pods

@@ -44,7 +39,7 @@ class DirectoryTraversalWithKubelet(Vulnerability, Event):
            self,
            KubernetesCluster,
            "Root Traversal Read On The Kubelet",
            category=PrivilegeEscalation,
            category=HostPathMountPrivilegeEscalationTechnique,
        )
        self.output = output
        self.evidence = f"output: {self.output}"

@@ -77,15 +72,17 @@ class VarLogMountHunter(Hunter):
        self.publish_event(WriteMountToVarLog(pods=pe_pods))


@handler.subscribe(ExposedRunHandler)
@handler.subscribe_many([ExposedRunHandler, WriteMountToVarLog])
class ProveVarLogMount(ActiveHunter):
    """Prove /var/log Mount Hunter
    Tries to read /etc/shadow on the host by running commands inside a pod with host mount to /var/log
    """

    def __init__(self, event):
        self.event = event
        self.base_path = f"https://{self.event.host}:{self.event.port}"
        self.write_mount_event = self.event.get_by_class(WriteMountToVarLog)
        self.event = self.write_mount_event

        self.base_path = f"https://{self.write_mount_event.host}:{self.write_mount_event.port}"

    def run(self, command, container):
        run_url = KubeletHandlers.RUN.value.format(
@@ -96,20 +93,6 @@ class ProveVarLogMount(ActiveHunter):
        )
        return self.event.session.post(f"{self.base_path}/{run_url}", verify=False).text

    # TODO: replace with multiple subscription to WriteMountToVarLog as well
    def get_varlog_mounters(self):
        config = get_config()
        logger.debug("accessing /pods manually on ProveVarLogMount")
        pods = self.event.session.get(
            f"{self.base_path}/" + KubeletHandlers.PODS.value,
            verify=False,
            timeout=config.network_timeout,
        ).json()["items"]
        for pod in pods:
            volume = VarLogMountHunter(ExposedPodsHandler(pods=pods)).has_write_mount_to(pod, "/var/log")
            if volume:
                yield pod, volume

    def mount_path_from_mountname(self, pod, mount_name):
        """returns container name, and container mount path correlated to mount_name"""
        for container in pod["spec"]["containers"]:

@@ -138,7 +121,7 @@ class ProveVarLogMount(ActiveHunter):
        return content

    def execute(self):
        for pod, volume in self.get_varlog_mounters():
        for pod, volume in self.write_mount_event.pe_pods():
            for container, mount_path in self.mount_path_from_mountname(pod, volume["name"]):
                logger.debug("Correlated container to mount_name")
                cont = {
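Note on the change above: moving from subscribe to subscribe_many means ProveVarLogMount now fires only once both ExposedRunHandler and WriteMountToVarLog have been published, and the object it receives is queried per event class via get_by_class, which removes the need to re-fetch /pods manually. A minimal sketch of the pattern; the hunter name and body are illustrative, only the decorator and get_by_class calls come from this diff:

@handler.subscribe_many([ExposedRunHandler, WriteMountToVarLog])
class IllustrativeMultiEventHunter(ActiveHunter):
    """Illustrative only: triggered once per complete set of subscribed events."""

    def __init__(self, event):
        # each underlying event is fetched by its class, as done above
        self.run_event = event.get_by_class(ExposedRunHandler)
        self.mount_event = event.get_by_class(WriteMountToVarLog)

    def execute(self):
        # a real hunter would combine both events here
        pass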
@@ -4,13 +4,13 @@ import requests
from enum import Enum

from kube_hunter.conf import get_config
from kube_hunter.core.events import handler
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import Event, Vulnerability, K8sVersionDisclosure
from kube_hunter.core.types import (
    ActiveHunter,
    Hunter,
    KubernetesCluster,
    InformationDisclosure,
    ConnectFromProxyServerTechnique,
)
from kube_hunter.modules.discovery.dashboard import KubeDashboardEvent
from kube_hunter.modules.discovery.proxy import KubeProxyEvent

@@ -26,7 +26,7 @@ class KubeProxyExposed(Vulnerability, Event):
            self,
            KubernetesCluster,
            "Proxy Exposed",
            category=InformationDisclosure,
            category=ConnectFromProxyServerTechnique,
            vid="KHV049",
        )

@@ -123,5 +123,6 @@ class K8sVersionDisclosureProve(ActiveHunter):
            version=version_metadata["gitVersion"],
            from_endpoint="/version",
            extra_info="on kube-proxy",
            category=ConnectFromProxyServerTechnique,
        )
    )
@@ -1,37 +1,37 @@
import logging
import os

from kube_hunter.core.events import handler
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import Vulnerability, Event
from kube_hunter.core.types import Hunter, KubernetesCluster, AccessRisk
from kube_hunter.core.types import Hunter, KubernetesCluster, AccessContainerServiceAccountTechnique
from kube_hunter.modules.discovery.hosts import RunningAsPodEvent

logger = logging.getLogger(__name__)


class ServiceAccountTokenAccess(Vulnerability, Event):
    """ Accessing the pod service account token gives an attacker the option to use the server API """
    """Accessing the pod service account token gives an attacker the option to use the server API"""

    def __init__(self, evidence):
        Vulnerability.__init__(
            self,
            KubernetesCluster,
            name="Read access to pod's service account token",
            category=AccessRisk,
            category=AccessContainerServiceAccountTechnique,
            vid="KHV050",
        )
        self.evidence = evidence


class SecretsAccess(Vulnerability, Event):
    """ Accessing the pod's secrets within a compromised pod might disclose valuable data to a potential attacker"""
    """Accessing the pod's secrets within a compromised pod might disclose valuable data to a potential attacker"""

    def __init__(self, evidence):
        Vulnerability.__init__(
            self,
            component=KubernetesCluster,
            name="Access to pod's secrets",
            category=AccessRisk,
            category=AccessContainerServiceAccountTechnique,
        )
        self.evidence = evidence
@@ -36,7 +36,7 @@ class BaseReporter:
            {
                "location": vuln.location(),
                "vid": vuln.get_vid(),
                "category": vuln.category.name,
                "category": vuln.category.get_name(),
                "severity": vuln.get_severity(),
                "vulnerability": vuln.get_name(),
                "description": vuln.explain(),
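The reporters now ask the category class for its display name instead of reading a name attribute directly. A plausible shape for the technique classes behind get_name() is sketched below; this is an assumption for illustration only (the real definitions live in kube_hunter.core.types.vulnerabilities and may differ), and both attribute values are placeholders:

class IllustrativeTechnique:
    # hypothetical attributes, not taken from this diff
    name = "Access Kubelet API"

    @classmethod
    def get_name(cls):
        # what the reporters render in the "MITRE Category" column
        return cls.name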
@@ -2,7 +2,7 @@ import logging
import threading

from kube_hunter.conf import get_config
from kube_hunter.core.events import handler
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import (
    Event,
    Service,
@@ -12,10 +12,7 @@ class HTTPDispatcher:
        dispatch_url = os.environ.get("KUBEHUNTER_HTTP_DISPATCH_URL", "https://localhost/")
        try:
            r = requests.request(
                dispatch_method,
                dispatch_url,
                json=report,
                headers={"Content-Type": "application/json"},
                dispatch_method, dispatch_url, json=report, headers={"Content-Type": "application/json"}, verify=False
            )
            r.raise_for_status()
            logger.info(f"Report was dispatched to: {dispatch_url}")
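For context, the dispatcher reads its target from the environment (dispatch_method is set just above this hunk, outside the diff; in released versions it comes from the sibling variable KUBEHUNTER_HTTP_DISPATCH_METHOD, defaulting to POST). A minimal way to point it at a collector; the endpoint below is a placeholder, and per the change above TLS certificate verification is now disabled (verify=False):

import os

# placeholder endpoint; kube-hunter will dispatch the JSON report here
os.environ["KUBEHUNTER_HTTP_DISPATCH_URL"] = "https://collector.example.local/report"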
@@ -83,7 +83,7 @@ class PlainReporter(BaseReporter):
        column_names = [
            "ID",
            "Location",
            "Category",
            "MITRE Category",
            "Vulnerability",
            "Description",
            "Evidence",

@@ -91,7 +91,7 @@ class PlainReporter(BaseReporter):
        vuln_table = PrettyTable(column_names, hrules=ALL)
        vuln_table.align = "l"
        vuln_table.max_width = MAX_TABLE_WIDTH
        vuln_table.sortby = "Category"
        vuln_table.sortby = "MITRE Category"
        vuln_table.reversesort = True
        vuln_table.padding_width = 1
        vuln_table.header_style = "upper"

@@ -101,10 +101,11 @@ class PlainReporter(BaseReporter):
            evidence = str(vuln.evidence)
            if len(evidence) > EVIDENCE_PREVIEW:
                evidence = evidence[:EVIDENCE_PREVIEW] + "..."

            row = [
                vuln.get_vid(),
                vuln.location(),
                vuln.category.name,
                vuln.category.get_name(),
                vuln.get_name(),
                vuln.explain(),
                evidence,
@@ -1,5 +1,3 @@
-r requirements.txt

flake8
pytest >= 2.9.1
requests-mock >= 1.8

@@ -31,8 +31,7 @@ zip_safe = False
packages = find:
install_requires =
    netaddr
    netifaces
    scapy>=2.4.3
    pyroute2
    requests
    PrettyTable
    urllib3>=1.24.3

@@ -41,6 +40,7 @@ install_requires =
    packaging
    dataclasses
    pluggy
    kubernetes==12.0.1
setup_requires =
    setuptools>=30.3.0
    setuptools_scm
@@ -1,11 +1,13 @@
# flake8: noqa: E402
import requests_mock
import json

from kube_hunter.conf import Config, set_config
from kube_hunter.core.events.types import NewHostEvent

set_config(Config())

from kube_hunter.core.events.types import NewHostEvent


def test_presetcloud():
    """Testing if it doesn't try to run get_cloud if the cloud type is already set.
@@ -1,10 +1,10 @@
# flake8: noqa: E402

from kube_hunter.conf import Config, set_config
from kube_hunter.conf import Config, set_config, get_config

set_config(Config(active=True))

from kube_hunter.core.events.handler import handler
from kube_hunter.core.events.event_handler import handler
from kube_hunter.modules.discovery.apiserver import ApiServiceDiscovery
from kube_hunter.modules.discovery.dashboard import KubeDashboard as KubeDashboardDiscovery
from kube_hunter.modules.discovery.etcd import EtcdRemoteAccess as EtcdRemoteAccessDiscovery

@@ -20,12 +20,12 @@ from kube_hunter.modules.hunting.apiserver import (
    AccessApiServerActive,
    AccessApiServerWithToken,
)
from kube_hunter.modules.hunting.arp import ArpSpoofHunter
from kube_hunter.modules.hunting.capabilities import PodCapabilitiesHunter
from kube_hunter.modules.hunting.certificates import CertificateDiscovery
from kube_hunter.modules.hunting.cves import K8sClusterCveHunter, KubectlCVEHunter

from kube_hunter.modules.hunting.cves import K8sClusterCveHunter
from kube_hunter.modules.hunting.cves import KubectlCVEHunter
from kube_hunter.modules.hunting.dashboard import KubeDashboard
from kube_hunter.modules.hunting.dns import DnsSpoofHunter
from kube_hunter.modules.hunting.etcd import EtcdRemoteAccess, EtcdRemoteAccessActive
from kube_hunter.modules.hunting.kubelet import (
    ProveAnonymousAuth,

@@ -40,6 +40,8 @@ from kube_hunter.modules.hunting.mounts import VarLogMountHunter, ProveVarLogMou
from kube_hunter.modules.hunting.proxy import KubeProxy, ProveProxyExposed, K8sVersionDisclosureProve
from kube_hunter.modules.hunting.secrets import AccessSecrets

config = get_config()

PASSIVE_HUNTERS = {
    ApiServiceDiscovery,
    KubeDashboardDiscovery,

@@ -56,7 +58,6 @@ PASSIVE_HUNTERS = {
    ApiVersionHunter,
    PodCapabilitiesHunter,
    CertificateDiscovery,
    K8sClusterCveHunter,
    KubectlCVEHunter,
    KubeDashboard,
    EtcdRemoteAccess,

@@ -67,11 +68,12 @@ PASSIVE_HUNTERS = {
    AccessSecrets,
}

# if config.enable_cve_hunting:
#     PASSIVE_HUNTERS.append(K8sClusterCveHunter)

ACTIVE_HUNTERS = {
    ProveAzureSpnExposure,
    AccessApiServerActive,
    ArpSpoofHunter,
    DnsSpoofHunter,
    EtcdRemoteAccessActive,
    ProveRunHandler,
    ProveContainerLogsHandler,
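The commented-out block above hints at gating the CVE hunters behind configuration. If it is revived, note that PASSIVE_HUNTERS is a set, so add (rather than the commented append) would be the fitting call; a sketch under that assumption, with enable_cve_hunting treated as a hypothetical config flag:

config = get_config()

if getattr(config, "enable_cve_hunting", False):  # hypothetical flag
    PASSIVE_HUNTERS.add(K8sClusterCveHunter)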
@@ -3,9 +3,11 @@ import time
from kube_hunter.conf import Config, set_config
from kube_hunter.core.types import Hunter
from kube_hunter.core.events.types import Event, Service
from kube_hunter.core.events import handler
from kube_hunter.core.events.event_handler import handler

counter = 0
first_run = True

set_config(Config())

@@ -19,6 +21,16 @@ class RegularEvent(Service, Event):
        Service.__init__(self, "Test Service")


class AnotherRegularEvent(Service, Event):
    def __init__(self):
        Service.__init__(self, "Test Service (another)")


class DifferentRegularEvent(Service, Event):
    def __init__(self):
        Service.__init__(self, "Test Service (different)")


@handler.subscribe_once(OnceOnlyEvent)
class OnceHunter(Hunter):
    def __init__(self, event):
@@ -33,8 +45,36 @@ class RegularHunter(Hunter):
        counter += 1


@handler.subscribe_many([DifferentRegularEvent, AnotherRegularEvent])
class SmartHunter(Hunter):
    def __init__(self, events):
        global counter, first_run
        counter += 1

        # we add an attribute on the second scan.
        # here we test that we get the latest event
        different_event = events.get_by_class(DifferentRegularEvent)
        if first_run:
            first_run = False
            assert not different_event.new_value
        else:
            assert different_event.new_value


@handler.subscribe_many([DifferentRegularEvent, AnotherRegularEvent])
class SmartHunter2(Hunter):
    def __init__(self, events):
        global counter
        counter += 1

        # check if we can access the events
        assert events.get_by_class(DifferentRegularEvent).__class__ == DifferentRegularEvent
        assert events.get_by_class(AnotherRegularEvent).__class__ == AnotherRegularEvent


def test_subscribe_mechanism():
    global counter
    counter = 0

    # first test normal subscribe and publish works
    handler.publish_event(RegularEvent())
@@ -43,13 +83,47 @@ def test_subscribe_mechanism():

    time.sleep(0.02)
    assert counter == 3


def test_subscribe_once_mechanism():
    global counter
    counter = 0

    # testing the subscribe_once mechanism
    handler.publish_event(OnceOnlyEvent())
    handler.publish_event(OnceOnlyEvent())
    # testing the multiple subscription mechanism
    handler.publish_event(OnceOnlyEvent())

    time.sleep(0.02)
    # should have been triggered once
    assert counter == 1
    counter = 0

    handler.publish_event(OnceOnlyEvent())
    handler.publish_event(OnceOnlyEvent())
    handler.publish_event(OnceOnlyEvent())
    time.sleep(0.02)

    assert counter == 0


def test_subscribe_many_mechanism():
    global counter
    counter = 0

    # testing the multiple subscription mechanism
    handler.publish_event(DifferentRegularEvent())
    handler.publish_event(DifferentRegularEvent())
    handler.publish_event(DifferentRegularEvent())
    handler.publish_event(DifferentRegularEvent())
    handler.publish_event(DifferentRegularEvent())
    handler.publish_event(AnotherRegularEvent())

    time.sleep(0.02)
    # We expect SmartHunter and SmartHunter2 to each be executed once, hence the counter should be 2
    assert counter == 2
    counter = 0

    # Test using most recent event
    newer_version_event = DifferentRegularEvent()
    newer_version_event.new_value = True
    handler.publish_event(newer_version_event)

    assert counter == 2
@@ -8,7 +8,7 @@ set_config(Config())

from kube_hunter.modules.discovery.apiserver import ApiServer, ApiServiceDiscovery
from kube_hunter.core.events.types import Event
from kube_hunter.core.events import handler
from kube_hunter.core.events.event_handler import handler

counter = 0
@@ -1,4 +1,12 @@
# flake8: noqa: E402
from kube_hunter.modules.discovery.hosts import (
    FromPodHostDiscovery,
    RunningAsPodEvent,
    HostScanEvent,
    HostDiscoveryHelpers,
)
from kube_hunter.core.types import Hunter
from kube_hunter.core.events.event_handler import handler
import json
import requests_mock
import pytest

@@ -9,19 +17,10 @@ from kube_hunter.conf import Config, get_config, set_config

set_config(Config())

from kube_hunter.core.events import handler
from kube_hunter.core.types import Hunter
from kube_hunter.modules.discovery.hosts import (
    FromPodHostDiscovery,
    RunningAsPodEvent,
    HostScanEvent,
    HostDiscoveryHelpers,
)


class TestFromPodHostDiscovery:
    @staticmethod
    def _make_response(*subnets: List[tuple]) -> str:
    def _make_azure_response(*subnets: List[tuple]) -> str:
        return json.dumps(
            {
                "network": {

@@ -32,6 +31,10 @@ class TestFromPodHostDiscovery:
            }
        )

    @staticmethod
    def _make_aws_response(*data: List[str]) -> str:
        return "\n".join(data)

    def test_is_azure_pod_request_fail(self):
        f = FromPodHostDiscovery(RunningAsPodEvent())
@@ -47,12 +50,125 @@ class TestFromPodHostDiscovery:
        with requests_mock.Mocker() as m:
            m.get(
                "http://169.254.169.254/metadata/instance?api-version=2017-08-01",
                text=TestFromPodHostDiscovery._make_response(("3.4.5.6", "255.255.255.252")),
                text=TestFromPodHostDiscovery._make_azure_response(("3.4.5.6", "255.255.255.252")),
            )
            result = f.is_azure_pod()

        assert result

    def test_is_aws_pod_v1_request_fail(self):
        f = FromPodHostDiscovery(RunningAsPodEvent())

        with requests_mock.Mocker() as m:
            m.get("http://169.254.169.254/latest/meta-data/", status_code=404)
            result = f.is_aws_pod_v1()

        assert not result

    def test_is_aws_pod_v1_success(self):
        f = FromPodHostDiscovery(RunningAsPodEvent())

        with requests_mock.Mocker() as m:
            m.get(
                "http://169.254.169.254/latest/meta-data/",
                text=TestFromPodHostDiscovery._make_aws_response(
                    "\n".join(
                        (
                            "ami-id",
                            "ami-launch-index",
                            "ami-manifest-path",
                            "block-device-mapping/",
                            "events/",
                            "hostname",
                            "iam/",
                            "instance-action",
                            "instance-id",
                            "instance-type",
                            "local-hostname",
                            "local-ipv4",
                            "mac",
                            "metrics/",
                            "network/",
                            "placement/",
                            "profile",
                            "public-hostname",
                            "public-ipv4",
                            "public-keys/",
                            "reservation-id",
                            "security-groups",
                            "services/",
                        )
                    ),
                ),
            )
            result = f.is_aws_pod_v1()

        assert result

    def test_is_aws_pod_v2_request_fail(self):
        f = FromPodHostDiscovery(RunningAsPodEvent())

        with requests_mock.Mocker() as m:
            m.put(
                "http://169.254.169.254/latest/api/token/",
                headers={"X-aws-ec2-metatadata-token-ttl-seconds": "21600"},
                status_code=404,
            )
            m.get(
                "http://169.254.169.254/latest/meta-data/",
                headers={"X-aws-ec2-metatadata-token": "token"},
                status_code=404,
            )
            result = f.is_aws_pod_v2()

        assert not result

    def test_is_aws_pod_v2_success(self):
        f = FromPodHostDiscovery(RunningAsPodEvent())

        with requests_mock.Mocker() as m:
            m.put(
                "http://169.254.169.254/latest/api/token/",
                headers={"X-aws-ec2-metatadata-token-ttl-seconds": "21600"},
                text=TestFromPodHostDiscovery._make_aws_response("token"),
            )
            m.get(
                "http://169.254.169.254/latest/meta-data/",
                headers={"X-aws-ec2-metatadata-token": "token"},
                text=TestFromPodHostDiscovery._make_aws_response(
                    "\n".join(
                        (
                            "ami-id",
                            "ami-launch-index",
                            "ami-manifest-path",
                            "block-device-mapping/",
                            "events/",
                            "hostname",
                            "iam/",
                            "instance-action",
                            "instance-id",
                            "instance-type",
                            "local-hostname",
                            "local-ipv4",
                            "mac",
                            "metrics/",
                            "network/",
                            "placement/",
                            "profile",
                            "public-hostname",
                            "public-ipv4",
                            "public-keys/",
                            "reservation-id",
                            "security-groups",
                            "services/",
                        )
                    ),
                ),
            )
            result = f.is_aws_pod_v2()

        assert result

    def test_execute_scan_cidr(self):
        set_config(Config(cidr="1.2.3.4/30"))
        f = FromPodHostDiscovery(RunningAsPodEvent())
30
tests/discovery/test_k8s.py
Normal file
@@ -0,0 +1,30 @@
from kube_hunter.conf import Config, set_config

from kube_hunter.modules.discovery.kubernetes_client import list_all_k8s_cluster_nodes
from unittest.mock import MagicMock, patch

set_config(Config())


def test_client_yields_ips():
    client = MagicMock()
    response = MagicMock()
    client.list_node.return_value = response
    response.items = [MagicMock(), MagicMock()]
    response.items[0].status.addresses = [MagicMock(), MagicMock()]
    response.items[0].status.addresses[0].address = "127.0.0.1"
    response.items[0].status.addresses[1].address = "127.0.0.2"
    response.items[1].status.addresses = [MagicMock()]
    response.items[1].status.addresses[0].address = "127.0.0.3"

    with patch("kubernetes.config.load_incluster_config") as m:
        output = list(list_all_k8s_cluster_nodes(client=client))
        m.assert_called_once()

    assert output == ["127.0.0.1", "127.0.0.2", "127.0.0.3"]


def test_client_uses_kubeconfig():
    with patch("kubernetes.config.load_kube_config") as m:
        list(list_all_k8s_cluster_nodes(kube_config="/location", client=MagicMock()))
        m.assert_called_once_with(config_file="/location")
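The two tests above pin down the behavior of list_all_k8s_cluster_nodes: load in-cluster credentials by default, load the given kubeconfig when kube_config is passed, and yield every address of every node. A sketch consistent with those assertions; the shipped implementation in kube_hunter/modules/discovery/kubernetes_client.py may differ in details such as logging or error handling:

import kubernetes


def list_all_k8s_cluster_nodes(kube_config=None, client=None):
    # pick credentials: explicit kubeconfig file, else the in-cluster service account
    if kube_config:
        kubernetes.config.load_kube_config(config_file=kube_config)
    else:
        kubernetes.config.load_incluster_config()

    if client is None:
        client = kubernetes.client.CoreV1Api()

    # yield every address of every node, as asserted in test_client_yields_ips
    for node in client.list_node().items:
        for address in node.status.addresses:
            yield address.address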
@@ -1,4 +1,5 @@
# flake8: noqa: E402
from kube_hunter.core.types.vulnerabilities import AccessK8sApiServerTechnique
import requests_mock
import time

@@ -21,8 +22,8 @@ from kube_hunter.modules.hunting.apiserver import (
from kube_hunter.modules.hunting.apiserver import ApiServerPassiveHunterFinished
from kube_hunter.modules.hunting.apiserver import CreateANamespace, DeleteANamespace
from kube_hunter.modules.discovery.apiserver import ApiServer
from kube_hunter.core.types import UnauthenticatedAccess, InformationDisclosure
from kube_hunter.core.events import handler
from kube_hunter.core.types import ExposedSensitiveInterfacesTechnique, AccessK8sApiServerTechnique
from kube_hunter.core.events.event_handler import handler

counter = 0
@@ -181,10 +182,10 @@ class test_ListClusterRoles:
class test_ServerApiAccess:
    def __init__(self, event):
        print("ServerApiAccess")
        if event.category == UnauthenticatedAccess:
        if event.category == ExposedSensitiveInterfacesTechnique:
            assert event.auth_token is None
        else:
            assert event.category == InformationDisclosure
            assert event.category == AccessK8sApiServerTechnique
            assert event.auth_token == "so-secret"
        global counter
        counter += 1
@@ -5,7 +5,7 @@ set_config(Config())

from kube_hunter.core.events.types import Event
from kube_hunter.modules.hunting.certificates import CertificateDiscovery, CertificateEmail
from kube_hunter.core.events import handler
from kube_hunter.core.events.event_handler import handler


def test_CertificateDiscovery():
Some files were not shown because too many files have changed in this diff.