Compare commits

...

73 Commits

Author SHA1 Message Date
Kiran Bodipi
bc47f08e88 fix: resolved severity discrepancy between kube-hunter report and docs for khv043 (#551) 2024-03-19 14:30:55 +02:00
Kiran Bodipi
3e1347290b fix: resolved severity discrepancy between kube-hunter report and docs (#550) 2024-03-11 14:22:47 +02:00
Andreas Lindhé
7479aae9ba Fix broken link to Trivy (#546)
Fixes #545
2023-11-15 15:30:45 +02:00
Itay Shakury
e8827b24f6 add maintenance notice (#544) 2023-11-11 01:05:12 +02:00
Itay Shakury
ff9f2c536f update logo (#520) 2022-09-04 09:39:33 +03:00
danielsagi
eb31026d8e Removing netifaces due to lack of maintainer (#519)
* removed the dependency on netifaces entirely by using psutil and manually parsing /proc/net/route to figure out the default gateway

* Checking if /proc/net/route is accessible before committing to parsing it

* changed to using pyroute2 instead of manually parsing /proc/net/route and psutil for interface enum

* added pyroute2 as a dependency

* fixed bug in subnets appending

* added windows support using a powershell snippet for interface enum
2022-08-25 21:31:02 +03:00
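A hedged sketch of the approach this commit describes, assuming pyroute2's `IPRoute` API (illustrative, not the actual kube-hunter code):

```python
# Read the default gateway and enumerate interface addresses without netifaces,
# using pyroute2 as the commit describes. Attribute names follow rtnetlink.
from socket import AF_INET

from pyroute2 import IPRoute

with IPRoute() as ipr:
    # Default gateway: the RTA_GATEWAY attribute of the default route(s)
    gateways = [r.get_attr("RTA_GATEWAY") for r in ipr.get_default_routes(family=AF_INET)]
    # Interface enumeration: one IFA_ADDRESS per configured IPv4 address
    addresses = [a.get_attr("IFA_ADDRESS") for a in ipr.get_addr(family=AF_INET)]

print("default gateways:", gateways)
print("local addresses:", addresses)
```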
danielsagi
a578726495 update manifest to 0.6.8 (#509) 2022-05-13 12:49:12 +03:00
rhtenhove
c442172715 pin image version (#504)
* pin image version to job

* change docker tag format

* update semver GA
2022-05-13 00:27:39 +03:00
danielsagi
d7df38fc95 Fix: Removed automatic import of handler object (#506)
* removed automatic import of the handler object in the events package and renamed handler.py to event_handler.py to solve a name collision
2022-05-12 22:12:31 +03:00
danielsagi
9ce385a190 ignore E402 flake8 on test_cloud 2022-05-07 10:22:17 +03:00
danielsagi
ebd8e2e405 Moved config initialize to start of test_cloud.py to solve bug in testing 2022-05-07 10:22:17 +03:00
danielsagi
585b490f19 Changed help message of --num-worker-threads flag 2022-05-07 10:22:17 +03:00
Florian Bachmann
6c4ad4f6fd Solves: Make thread count configurable #433 2022-05-07 09:29:00 +03:00
danielsagi
e6a3c12098 Remove scapy usage (#500)
* removed arp and dns hunters usage due to their violation of the scapy GPL2 license

* added installation of arp and dns hunters to Dockerfile

* added explicit new version to plugins in dockerfile installation

* ignore B020 flake8
2022-05-07 09:09:09 +03:00
danielsagi
2a7020682e Update image tag of aqua version 2022-03-28 17:33:22 +03:00
Owen Rumney
e1896f3983 docs: lowercase the severities for AVD (#495)
Signed-off-by: Owen Rumney <owen@owenrumney.co.uk>
2022-03-25 09:03:43 +00:00
jerbia
fc7fbbf1fc Added severity to the kube-hunter found issues (#492) 2022-03-22 11:03:05 +02:00
danielsagi
7c62cc21af Feature: Custom Hunting (#489)
* added partial and partial-names flags: a mechanism for whitelisting hunter subscription for custom hunts

* changed name from partial to custom

* ran black to format

* flake8 formatting

* added documentation in readme for Custom hunting and made Advanced Usage a higher level topic

* added Collector, StartedInfo and SendFullReport to the core_hunters

* changed old name class-names to raw-hunter-names

* fixed bug in import loop
2022-01-28 18:54:36 +02:00
Juvenile
c17aa17096 ignore https certificate verification (#484) 2022-01-22 16:06:39 +02:00
testn
4204879251 Update README.md (#487)
Fix typo
2022-01-22 16:05:20 +02:00
danielsagi
a746bd0eb1 Added correct exception handling for discovery of Metadata apis (#488)
* Added correct exception handling for discovery of Metadata apis

* fixed linting issues
2022-01-22 15:56:04 +02:00
danielsagi
b379e64314 Added MITRE documentation in README (#485)
* Added documentation about differences between vulnerabilities and the attack matrix techniques

* moved docs to start of README, also created MITRE image, showing covered areas of kube-hunter

* fixed link in readme
2022-01-14 00:00:29 +02:00
danielsagi
00eb0dfa87 Switched CVE Hunting to optional & Minor core feature (#482)
* Removed automatic registration of the k8s CVE hunter

* Made CVE hunting optional, default set to not run
2021-10-16 17:49:00 +03:00
danielsagi
8d045fb1a8 Fix all of github action workflows (#481)
* fixed all of workflows
2021-10-16 17:23:41 +03:00
danielsagi
83b19d4208 Feature: Changed vulnerability categories to support MITRE ATT&CK (#474)
* Refactored all categories to the new MITRE attack matrix format

* Changed format of vulnerabilities table to display the mitre technique related to the vulnerability
2021-09-30 15:25:30 +03:00
danielsagi
473e4fe2b5 Make gateway discovery always run when running as pod #471 2021-07-23 21:09:28 +03:00
danielsagi
f67f08225c changed exception logs to debug logs in kubernetes_client nodes discovery (#470) 2021-07-22 15:57:25 +03:00
danielsagi
c96312b91e updated gemfile (#464) 2021-06-24 21:15:18 +03:00
danielsagi
a7d26452fb Feature: New Service Account Token Flag (#463)
* added service account token flag to use in hunting

* added flag to main parsing config creation

* fixed linting issues

* added documentation on the service-account-token flag

* minor readme change
2021-06-24 20:58:43 +03:00
danielsagi
e63efddf9f Support multiple subscription on ProveVarLogMount active hunter (#461)
* removed the redundant repeat call to /pods from the /var/log mount hunter by using multiple subscription

* fixed new linting

* fixed linting with exceptions
2021-06-24 18:43:14 +03:00
Mikolaj Pawlikowski
6689005544 K8s autodiscovery (#453)
* Add a new dependency on Kubernetes package

* Add and store a new flag about automatic nodes discovery from a pod

* Implement the listing of nodes

* Add tests to cover the k8s node listing

* Fix the k8s listing test to ensure the load incluster function is actually called

* Add more help to the k8s node discovery flags, and cross-reference them.

* Add a note on the Kubernetes auto-discovery in the main README file

* Move the kubernetes discovery from conf to modules/discovery

* When running with --pods, run the Kubernetes auto discovery

* Also mention that the auto discovery is always on when using --pod

Co-authored-by: Mikolaj Pawlikowski <mpawlikowsk1@bloomberg.net>
2021-06-05 15:53:07 +03:00
danielsagi
0b90e0e43d Bugfix - Aws metadata api discovery (#455)
* fixed aws metadata bug

* added new black reformatting
2021-05-27 21:41:43 +03:00
danielsagi
65eefed721 Multiple Subscriptions Mechanism (#448)
* Add multiple subscription mechanism

* PR: address comments

* improved implementation, solved a couple of bugs, added documentation to almost the whole backend process

* added corresponding tests to the new method of the multiple subscription

* fixed linting issue

* fixed linting #2

Co-authored-by: Raito Bezarius <masterancpp@gmail.com>
2021-04-25 19:27:41 +03:00
danielsagi
599e9967e3 added pypi publish workflow (#450) 2021-04-23 14:37:31 +03:00
Tommy McCormick
5745f4a32b Add discovery for AWS metadata (#447) 2021-04-21 20:57:17 +03:00
danielsagi
1a26653007 Added Advanced Usage section to the readme, documenting azure quick scanning (#441) 2021-04-08 19:20:09 +03:00
miwithro
cdd9f9d432 Update KHV003.md (#439) 2021-03-16 17:17:55 +02:00
Simarpreet Singh
99678f3cac deps: Update github pages dependencies (#431)
Signed-off-by: Simarpreet Singh <simar@linux.com>
2021-01-17 16:03:04 +02:00
danielsagi
cdbc3dc12b Bug Fix: False Negative On AKS Hunting (#420)
* removed false negative in AzureSpnHunter when /run is disabled

* changed to use direct imported class

* fixed multiple bugs in azure spn hunting, and improved efficiency

* fixed bug in cloud identification. TODO: remove the outsourcing for cloud provider

* removed unused config variable

* fixed tests to use already parsed pods as the given previous event has changed
2021-01-07 19:46:00 +02:00
Carol Valencia
d208b43532 feat: github actions to publish ecr and docker (#429)
* feat: github actions to publish ecr and docker

* test: github actions to publish ecr and docker

* chore: yaml lint github actions

* chore: yaml lint github actions

* fix: secrets envs for github action

* chore: build and push action for ecr/docker

Co-authored-by: Carol Valencia <krol3@users.noreply.github.com>
2020-12-26 21:31:53 +02:00
Itay Shakury
42250d9f62 move from master branch to main (#427) 2020-12-17 16:16:16 +02:00
danielsagi
d94d86a4c1 Created a Vulnerability Disclosure README (#423)
* Created a vulnerability disclosure readme

* Update SECURITY.md

Co-authored-by: Liz Rice <liz@lizrice.com>

* Update SECURITY.md

Co-authored-by: Liz Rice <liz@lizrice.com>

* Update SECURITY.md

Co-authored-by: Liz Rice <liz@lizrice.com>

Co-authored-by: Liz Rice <liz@lizrice.com>
2020-12-17 15:16:28 +02:00
danielsagi
a1c2c3ee3e Updated kramdown (#424)
Updated kramdown to a newer patched version, the old version was not patched to CVE-2020-14001
2020-12-17 11:50:02 +00:00
danielsagi
6aeee7f49d Improvements and bug fixed in Release workflow (#425)
* changed ubuntu to an older version for glibc compatibility in the pyinstaller steps, and added a step to parse the release tag

* removed parsing of release tag

* changed flow name

* removed 'release' from the release name
2020-12-08 21:46:24 +02:00
danielsagi
f95df8172b added a release workflow for a linux binary (#421) 2020-12-04 13:45:03 +02:00
danielsagi
a3ad928f29 Bug Fix: Pyinstaller prettytable error (#419)
* added a hooks folder for specific problematic imports when compiling with pyinstaller; added a fix for the prettytable import

* fixed typo

* lint fix
2020-12-04 13:43:37 +02:00
danielsagi
22d6676e08 Removed Travis and Greetings workflows (#415)
* removed greetings workflow, and travis

* Update the build status badge to point to Github Actions
2020-12-04 13:42:38 +02:00
danielsagi
b9e0ef30e8 Removed Old Dependency For CAP_NET_RAW (#416)
* removed the old dependency on cap_net_raw by stopping the use of tracerouting when running as a pod

* removed unused imports
2020-12-03 17:11:18 +02:00
RDxR10
693d668d0a Update apiserver.py (#397)
* Update apiserver.py

Added description of KHV007

* fixed linting issues

Co-authored-by: danielsagi <danielsagi2009@gmail.com>
2020-11-28 19:41:06 +02:00
RDxR10
2e4684658f Update certificates.py (#398)
* Update certificates.py

Regex expression update for email

* fixed linting issues

Co-authored-by: danielsagi <danielsagi2009@gmail.com>
2020-11-28 18:55:14 +02:00
Hugo van Kemenade
f5e8b14818 Migrate tests to GitHub Actions (#395) (#399)
Co-authored-by: danielsagi <danielsagi2009@gmail.com>
2020-11-28 17:34:30 +02:00
danielsagi
05094a9415 Fix lint comments (#414)
* removed unused get query to port forward

* moved existing code to comments

Co-authored-by: Liz Rice <liz@lizrice.com>
2020-11-28 17:16:57 +02:00
danielsagi
8acedf2e7d updated screenshot of aqua's site (#412) 2020-11-27 16:04:38 +02:00
danielsagi
14ca1b8bce Fixed false positive on test_run_handler (#411)
* fixed wrong check on test run handler

* changed the testing method to use a 404 with a real POST method
2020-11-19 17:41:33 +02:00
danielsagi
5a578fd8ab More intuitive message when ProveSystemLogs fails (#409)
* fixed the wrong message shown when proving audit logs

* fixed linting
2020-11-18 11:35:13 +02:00
danielsagi
bf7023d01c Added docs for exposed pods (#407)
* added doc _kb for exposed pods

* correlated the new khv to the Exposed pods vulnerability

* fixed linting
2020-11-17 15:22:06 +02:00
danielsagi
d7168af7d5 Change KB links to avd (#406)
* changed link to point to avd

* changed kb_links to live in the base report module and updated them to point to avd; json output now returns the full avd url for the vulnerability

* switched to adding a new avd_reference instead of changing the VID

* added newline to fix linting
2020-11-17 14:03:18 +02:00
Hugo van Kemenade
35873baa12 Upgrade syntax for supported Python versions (#394) (#401)
Co-authored-by: danielsagi <danielsagi2009@gmail.com>
2020-11-16 20:40:28 +02:00
Sinith
a476d9383f Update KHV005.md (#403) 2020-11-08 18:42:41 +02:00
Hugo van Kemenade
6a3c7a885a Support Python 3.9 (#393) (#400)
Co-authored-by: danielsagi <danielsagi2009@gmail.com>
2020-11-07 15:59:44 +02:00
A N U S H
b6be309651 Added Greeting Github Actions (#382)
* Added Greeting Github Actions

* feat: Updated the Message

Co-authored-by: danielsagi <danielsagi2009@gmail.com>
2020-11-07 15:16:14 +02:00
Monish Singh
0d5b3d57d3 added the link of contribution page (#383)
* added the link of contribution page

users can directly go to the contribution page from here after reading the readme file

* added it to the table of contents

* Done

sorry for my prev. mistake, now it's fixed.

Co-authored-by: danielsagi <danielsagi2009@gmail.com>
2020-11-07 15:07:39 +02:00
Milind Chawre
69057acf9b Adding --log-file option (#329) (#387) 2020-11-07 15:01:30 +02:00
Itay Shakury
e63200139e fix azure spn hunter (#372)
* fix azure spn hunter

* fix issues

* restore tests

* code style

Co-authored-by: danielsagi <danielsagi2009@gmail.com>
2020-10-19 13:53:50 +03:00
Itay Shakury
ad4cfe1c11 update gitignore (#371)
Co-authored-by: danielsagi <danielsagi2009@gmail.com>
2020-10-19 13:03:46 +03:00
Zoltán Reegn
24b5a709ad Increase evidence field length in plain report (#385)
Given that the Description tends to go over 100 characters as well, it
seems appropriate to loosen the restriction of the evidence field.

Fixes #111

Co-authored-by: danielsagi <danielsagi2009@gmail.com>
2020-10-19 12:49:43 +03:00
Jeff Rescignano
9cadc0ee41 Optimize images (#389) 2020-10-19 12:27:22 +03:00
danielsagi
3950a1c2f2 Fixed bug in etcd hunting (#364)
* fixed etcd version hunting typo

* changed self.protocol in other places in etcd hunting; this was a typo, as protocol is a property of events, not hunters

Co-authored-by: Daniel Sagi <daniel@example.com>
Co-authored-by: Liz Rice <liz@lizrice.com>
2020-09-04 13:28:03 +01:00
Sanka Sathyaji
7530e6fee3 Update job.yml for Kubernetes cluster jobs (#367)
Existing job.yml has the wrong command, ["python", "kube-hunter.py"]; it should change to command ["kube-hunter"]

Co-authored-by: Liz Rice <liz@lizrice.com>
2020-09-04 12:15:24 +01:00
danielsagi
72ae8c0719 reformatted files to pass new linting (#369)
Co-authored-by: Daniel Sagi <daniel@example.com>
2020-09-04 12:01:16 +01:00
danielsagi
b341124c20 Fixed bug in certificate hunting (#365)
* stripping was incorrect due to multiple newlines in the certificate returned from ssl.get_server_certificate

* changed ' to " for linting

Co-authored-by: Daniel Sagi <daniel@example.com>
2020-09-03 15:06:51 +01:00
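A hedged illustration of the failure mode this commit describes: `ssl.get_server_certificate` can return a PEM string with multiple trailing newlines, so stripping the BEGIN/END markers by fixed offsets can corrupt the payload (illustrative sketch, not the actual kube-hunter fix):

```python
# Strip PEM markers line by line so extra blank lines cannot shift the payload.
import ssl

pem = ssl.get_server_certificate(("example.com", 443))  # hostname illustrative
body_lines = [line for line in pem.strip().splitlines() if line and not line.startswith("-----")]
base64_body = "".join(body_lines)
print(base64_body[:60], "...")
```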
danielsagi
3e06647b4c Added multistage build for Dockerfile (#362)
* removed unnecessary files from the final image using a multistage build

* added ebtables and tcpdump packages to multistage

Co-authored-by: Daniel Sagi <daniel@example.com>
2020-08-21 14:42:02 +03:00
danielsagi
cd1f79a658 fixed typo (#363) 2020-08-14 19:09:06 +03:00
115 changed files with 2570 additions and 985 deletions

View File

@@ -1,5 +1,5 @@
[flake8]
ignore = E203, E266, E501, W503, B903, T499
ignore = E203, E266, E501, W503, B903, T499, B020
max-line-length = 120
max-complexity = 18
select = B,C,E,F,W,B9,T4

View File

@@ -7,7 +7,7 @@
Please include a summary of the change and which issue is fixed. Also include relevant motivation and context. List any dependencies that are required for this change.
## Contribution Guidelines
Please Read through the [Contribution Guidelines](https://github.com/aquasecurity/kube-hunter/blob/master/CONTRIBUTING.md).
Please Read through the [Contribution Guidelines](https://github.com/aquasecurity/kube-hunter/blob/main/CONTRIBUTING.md).
## Fixed Issues

14
.github/workflows/lint.yml vendored Normal file
View File

@@ -0,0 +1,14 @@
---
name: Lint
on: [push, pull_request]
jobs:
  build:
    runs-on: ubuntu-20.04
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-python@v2
      - uses: pre-commit/action@v2.0.0
      - uses: ibiqlik/action-yamllint@v3

95
.github/workflows/publish.yml vendored Normal file
View File

@@ -0,0 +1,95 @@
---
name: Publish
on:
  push:
    tags:
      - "v*"
env:
  ALIAS: aquasecurity
  REP: kube-hunter
jobs:
  dockerhub:
    name: Publish To Docker Hub
    runs-on: ubuntu-18.04
    steps:
      - name: Check Out Repo
        uses: actions/checkout@v2
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      - name: Set up Docker Buildx
        id: buildx
        uses: docker/setup-buildx-action@v1
      - name: Cache Docker layers
        uses: actions/cache@v2
        with:
          path: /tmp/.buildx-cache
          key: ${{ runner.os }}-buildxarch-${{ github.sha }}
          restore-keys: |
            ${{ runner.os }}-buildxarch-
      - name: Login to Docker Hub
        uses: docker/login-action@v1
        with:
          username: ${{ secrets.DOCKERHUB_USER }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Login to ECR
        uses: docker/login-action@v1
        with:
          registry: public.ecr.aws
          username: ${{ secrets.ECR_ACCESS_KEY_ID }}
          password: ${{ secrets.ECR_SECRET_ACCESS_KEY }}
      - name: Get version
        id: get_version
        uses: crazy-max/ghaction-docker-meta@v3
        with:
          images: ${{ env.REP }}
          tag-semver: |
            {{version}}
      - name: Build and push - Docker/ECR
        id: docker_build
        uses: docker/build-push-action@v2
        with:
          context: .
          platforms: linux/amd64
          builder: ${{ steps.buildx.outputs.name }}
          push: true
          tags: |
            ${{ secrets.DOCKERHUB_USER }}/${{ env.REP }}:${{ steps.get_version.outputs.version }}
            public.ecr.aws/${{ env.ALIAS }}/${{ env.REP }}:${{ steps.get_version.outputs.version }}
            ${{ secrets.DOCKERHUB_USER }}/${{ env.REP }}:latest
            public.ecr.aws/${{ env.ALIAS }}/${{ env.REP }}:latest
          cache-from: type=local,src=/tmp/.buildx-cache/release
          cache-to: type=local,mode=max,dest=/tmp/.buildx-cache/release
      - name: Image digest
        run: echo ${{ steps.docker_build.outputs.digest }}
  pypi:
    name: Publish To PyPI
    runs-on: ubuntu-18.04
    steps:
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: '3.9'
      - name: Install dependencies
        shell: bash
        run: |
          pip install -U pip
          make deps
      - name: Build project
        shell: bash
        run: |
          python -m pip install wheel
          make build
      - name: Publish distribution package to PyPI
        if: startsWith(github.ref, 'refs/tags')
        uses: pypa/gh-action-pypi-publish@master
        with:
          password: ${{ secrets.PYPI_API_TOKEN }}

55
.github/workflows/release.yml vendored Normal file
View File

@@ -0,0 +1,55 @@
---
on:
  push:
    # Sequence of patterns matched against refs/tags
    tags:
      - 'v*'  # Push events to matching v*, i.e. v1.0, v20.15.10
name: Release
jobs:
  build:
    name: Upload Release Asset
    runs-on: ubuntu-18.04
    steps:
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: '3.8'
      - name: Install dependencies
        shell: bash
        run: |
          pip install -U pip
          pip install pyinstaller
          make deps
      - name: Build project
        shell: bash
        run: |
          make pyinstaller
      - name: Create Release
        id: create_release
        uses: actions/create-release@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          tag_name: ${{ github.ref }}
          release_name: ${{ github.ref }}
          draft: false
          prerelease: false
      - name: Upload Release Asset
        id: upload-release-asset
        uses: actions/upload-release-asset@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          upload_url: ${{ steps.create_release.outputs.upload_url }}
          asset_path: ./dist/kube-hunter
          asset_name: kube-hunter-linux-x86_64-${{ github.ref }}
          asset_content_type: application/octet-stream

55
.github/workflows/test.yml vendored Normal file
View File

@@ -0,0 +1,55 @@
---
name: Test
on: [push, pull_request]
env:
  FORCE_COLOR: 1
jobs:
  build:
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        python-version: ["3.6", "3.7", "3.8", "3.9"]
        os: [ubuntu-20.04, ubuntu-18.04]
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
      - name: Get pip cache dir
        id: pip-cache
        run: |
          echo "::set-output name=dir::$(pip cache dir)"
      - name: Cache
        uses: actions/cache@v2
        with:
          path: ${{ steps.pip-cache.outputs.dir }}
          key:
            ${{ matrix.os }}-${{ matrix.python-version }}-${{ hashFiles('requirements-dev.txt') }}
          restore-keys: |
            ${{ matrix.os }}-${{ matrix.python-version }}-
      - name: Install dependencies
        shell: bash
        run: |
          pip install -U pip
          make dev-deps
          make install
      - name: Test
        shell: bash
        run: |
          make test
      - name: Upload coverage
        uses: codecov/codecov-action@v1
        with:
          name: ${{ matrix.os }} Python ${{ matrix.python-version }}

1
.gitignore vendored
View File

@@ -24,6 +24,7 @@ var/
*.egg
*.spec
.eggs
pip-wheel-metadata
# Directory Cache Files
.DS_Store

View File

@@ -1,10 +1,11 @@
---
repos:
  - repo: https://github.com/psf/black
    rev: stable
    hooks:
      - id: black
  - repo: https://gitlab.com/pycqa/flake8
    rev: 3.7.9
    hooks:
      - id: flake8
        additional_dependencies: [flake8-bugbear]

View File

@@ -1,20 +0,0 @@
group: travis_latest
language: python
cache: pip
python:
  - "3.6"
  - "3.7"
  - "3.8"
install:
  - pip install -r requirements.txt
  - pip install -r requirements-dev.txt
before_script:
  - make lint-check
script:
  - make test
after_success:
  - bash <(curl -s https://codecov.io/bash)
notifications:
  email:
    on_success: change
    on_failure: always

6
.yamllint Normal file
View File

@@ -0,0 +1,6 @@
---
extends: default
rules:
  line-length: disable
  truthy: disable

View File

@@ -16,4 +16,17 @@ RUN make deps
COPY . .
RUN make install
FROM python:3.8-alpine
RUN apk add --no-cache \
    tcpdump \
    ebtables && \
    apk upgrade --no-cache
COPY --from=builder /usr/local/lib/python3.8/site-packages /usr/local/lib/python3.8/site-packages
COPY --from=builder /usr/local/bin/kube-hunter /usr/local/bin/kube-hunter
# Add default plugins: https://github.com/aquasecurity/kube-hunter-plugins
RUN pip install "kube-hunter-arp-spoof>=0.0.3" "kube-hunter-dns-spoof>=0.0.3"
ENTRYPOINT ["kube-hunter"]

BIN
MITRE.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 115 KiB

View File

@@ -31,7 +31,7 @@ lint-check:
.PHONY: test
test:
	pytest
	python -m pytest
.PHONY: build
build:

143
README.md
View File

@@ -1,42 +1,56 @@
![kube-hunter](https://github.com/aquasecurity/kube-hunter/blob/master/kube-hunter.png)
[![Build Status](https://travis-ci.org/aquasecurity/kube-hunter.svg?branch=master)](https://travis-ci.org/aquasecurity/kube-hunter)
[![codecov](https://codecov.io/gh/aquasecurity/kube-hunter/branch/master/graph/badge.svg)](https://codecov.io/gh/aquasecurity/kube-hunter)
[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black)
[![License](https://img.shields.io/github/license/aquasecurity/kube-hunter)](https://github.com/aquasecurity/kube-hunter/blob/master/LICENSE)
[![Docker image](https://images.microbadger.com/badges/image/aquasec/kube-hunter.svg)](https://microbadger.com/images/aquasec/kube-hunter "Get your own image badge on microbadger.com")
## Notice
kube-hunter is not under active development anymore. If you're interested in scanning Kubernetes clusters for known vulnerabilities, we recommend using [Trivy](https://github.com/aquasecurity/trivy). Specifically, Trivy's Kubernetes [misconfiguration scanning](https://blog.aquasec.com/trivy-kubernetes-cis-benchmark-scanning) and [KBOM vulnerability scanning](https://blog.aquasec.com/scanning-kbom-for-vulnerabilities-with-trivy). Learn more in the [Trivy Docs](https://aquasecurity.github.io/trivy/).
---
kube-hunter hunts for security weaknesses in Kubernetes clusters. The tool was developed to increase awareness and visibility for security issues in Kubernetes environments. **You should NOT run kube-hunter on a Kubernetes cluster that you don't own!**
**Run kube-hunter**: kube-hunter is available as a container (aquasec/kube-hunter), and we also offer a web site at [kube-hunter.aquasec.com](https://kube-hunter.aquasec.com) where you can register online to receive a token allowing you to see and share the results online. You can also run the Python code yourself as described below.
**Explore vulnerabilities**: The kube-hunter knowledge base includes articles about discoverable vulnerabilities and issues. When kube-hunter reports an issue, it will show its VID (Vulnerability ID) so you can look it up in the KB at https://aquasecurity.github.io/kube-hunter/
_If you're interested in kube-hunter's integration with the Kubernetes ATT&CK Matrix, [Continue Reading](#kubernetes-attck-matrix)_
**Contribute**: We welcome contributions, especially new hunter modules that perform additional tests. If you would like to develop your modules please read [Guidelines For Developing Your First kube-hunter Module](https://github.com/aquasecurity/kube-hunter/blob/master/CONTRIBUTING.md).
[kube-hunter demo video](https://youtu.be/s2-6rTkH8a8?t=57s)
[![kube-hunter demo video](https://github.com/aquasecurity/kube-hunter/blob/master/kube-hunter-screenshot.png)](https://youtu.be/s2-6rTkH8a8?t=57s)
## Table of Contents
Table of Contents
=================
- [Table of Contents](#table-of-contents)
- [Kubernetes ATT&CK Matrix](#kubernetes-attck-matrix)
- [Hunting](#hunting)
  - [Where should I run kube-hunter?](#where-should-i-run-kube-hunter)
  - [Scanning options](#scanning-options)
  - [Authentication](#authentication)
  - [Active Hunting](#active-hunting)
  - [List of tests](#list-of-tests)
  - [Nodes Mapping](#nodes-mapping)
  - [Output](#output)
  - [Dispatching](#dispatching)
- [Advanced Usage](#advanced-usage)
  - [Azure Quick Scanning](#azure-quick-scanning)
  - [Custom Hunting](#custom-hunting)
- [Deployment](#deployment)
  - [On Machine](#on-machine)
    - [Prerequisites](#prerequisites)
    - [Install with pip](#install-with-pip)
    - [Run from source](#run-from-source)
  - [Container](#container)
  - [Pod](#pod)
- [Contribution](#contribution)
- [License](#license)
## Kubernetes ATT&CK Matrix
kube-hunter now supports the new format of the Kubernetes ATT&CK matrix.
While kube-hunter's vulnerabilities are a collection of creative techniques designed to mimic an attacker in the cluster (or outside it),
MITRE's ATT&CK defines more general, standardised categories of techniques for doing so.
You can think of kube-hunter vulnerabilities as the small steps of an attacker, following the track of a more general technique they aim for.
Most of kube-hunter's hunters and vulnerabilities fall closely under those techniques; that's why we moved to follow the Matrix standard.
_Some kube-hunter vulnerabilities which we could not map to a MITRE technique are prefixed with the `General` keyword_
![kube-hunter](./MITRE.png)
* [Hunting](#hunting)
  * [Where should I run kube-hunter?](#where-should-i-run-kube-hunter)
  * [Scanning options](#scanning-options)
  * [Active Hunting](#active-hunting)
  * [List of tests](#list-of-tests)
  * [Nodes Mapping](#nodes-mapping)
  * [Output](#output)
  * [Dispatching](#dispatching)
* [Deployment](#deployment)
  * [On Machine](#on-machine)
    * [Prerequisites](#prerequisites)
  * [Container](#container)
  * [Pod](#pod)
## Hunting
### Where should I run kube-hunter?
There are three different ways to run kube-hunter, each providing a different approach to detecting weaknesses in your cluster:
@@ -45,7 +59,8 @@ Run kube-hunter on any machine (including your laptop), select Remote scanning a
You can run kube-hunter directly on a machine in the cluster, and select the option to probe all the local network interfaces.
You can also run kube-hunter in a pod within the cluster. This indicates how exposed your cluster would be if one of your application pods is compromised (through a software vulnerability, for example).
You can also run kube-hunter in a pod within the cluster. This indicates how exposed your cluster would be if one of your application pods is compromised (through a software vulnerability, for example). (_`--pod` flag_)
### Scanning options
@@ -68,6 +83,26 @@ To specify interface scanning, you can use the `--interface` option (this will s
To specify a specific CIDR to scan, use the `--cidr` option. Example:
`kube-hunter --cidr 192.168.0.0/24`
4. **Kubernetes node auto-discovery**
Set `--k8s-auto-discover-nodes` flag to query Kubernetes for all nodes in the cluster, and then attempt to scan them all. By default, it will use [in-cluster config](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) to connect to the Kubernetes API. If you'd like to use an explicit kubeconfig file, set `--kubeconfig /location/of/kubeconfig/file`.
Also note that this is always done when using `--pod` mode.
### Authentication
In order to mimic an attacker in its early stages, kube-hunter requires no authentication for the hunt.
* **Impersonate** - You can provide kube-hunter with a specific service account token to use when hunting by manually passing the JWT Bearer token of the service-account secret with the `--service-account-token` flag.
Example:
```bash
$ kube-hunter --active --service-account-token eyJhbGciOiJSUzI1Ni...
```
* When running with the `--pod` flag, kube-hunter uses the service account token [mounted inside the pod](https://kubernetes.io/docs/reference/access-authn-authz/service-accounts-admin/) to authenticate to services it finds during the hunt.
* If specified, the `--service-account-token` flag takes priority when running as a pod
### Active Hunting
Active hunting is an option in which kube-hunter will exploit vulnerabilities it finds in order to explore for further vulnerabilities.
@@ -107,6 +142,49 @@ Available dispatch methods are:
* KUBEHUNTER_HTTP_DISPATCH_URL (defaults to: https://localhost)
* KUBEHUNTER_HTTP_DISPATCH_METHOD (defaults to: POST)
## Advanced Usage
### Azure Quick Scanning
When running **as a Pod in an Azure or AWS environment**, kube-hunter will fetch subnets from the Instance Metadata Service. Naturally, this makes the discovery process take longer.
To hard-limit subnet scanning to a `/24` CIDR, use the `--quick` option.
### Custom Hunting
Custom hunting enables advanced users to control which hunters get registered at the start of a hunt.
**If you know what you are doing**, this can help you adjust kube-hunter's hunting and discovery process to your needs.
Example:
```
kube-hunter --custom <HunterName1> <HunterName2>
```
Enabling custom hunting removes all hunters from the hunting process except the given whitelisted hunters.
The `--custom` flag takes a list of hunter class names; to view all of kube-hunter's class names, combine the `--raw-hunter-names` flag with the `--list` flag.
Example:
```
kube-hunter --active --list --raw-hunter-names
```
**Notice**: Due to kube-hunter's architectural design, the following "Core Hunters/Classes" will always register (even when using custom hunting):
* HostDiscovery
  * _Generates IP addresses for the hunt from the given configuration_
  * _Automatically discovers subnets using cloud Metadata APIs_
* FromPodHostDiscovery
  * _Auto-discovers attack surface IP addresses for the hunt using Pod-based environment techniques_
  * _Automatically discovers subnets using cloud Metadata APIs_
* PortDiscovery
  * _Port-scans given IP addresses for known Kubernetes service ports_
* Collector
  * _Collects discovered vulnerabilities and open services for the final report_
* StartedInfo
  * _Prints the start message_
* SendFullReport
  * _Dispatches the report based on the given configuration_
## Deployment
There are three methods for deploying kube-hunter:
@@ -152,7 +230,7 @@ python3 kube_hunter
_If you want to use pyinstaller/py2exe you need to first run the install_imports.py script._
### Container
Aqua Security maintains a containerized version of kube-hunter at `aquasec/kube-hunter`. This container includes this source code, plus an additional (closed source) reporting plugin for uploading results into a report that can be viewed at [kube-hunter.aquasec.com](https://kube-hunter.aquasec.com). Please note, that running the `aquasec/kube-hunter` container and uploading reports data are subject to additional [terms and conditions](https://kube-hunter.aquasec.com/eula.html).
Aqua Security maintains a containerized version of kube-hunter at `aquasec/kube-hunter:aqua`. This container includes this source code, plus an additional (closed source) reporting plugin for uploading results into a report that can be viewed at [kube-hunter.aquasec.com](https://kube-hunter.aquasec.com). Please note, that running the `aquasec/kube-hunter` container and uploading reports data are subject to additional [terms and conditions](https://kube-hunter.aquasec.com/eula.html).
The Dockerfile in this repository allows you to build a containerized version without the reporting plugin.
@@ -174,5 +252,8 @@ The example `job.yaml` file defines a Job that will run kube-hunter in a pod, us
* Find the pod name with `kubectl describe job kube-hunter`
* View the test results with `kubectl logs <pod name>`
## Contribution
To read the contribution guidelines, <a href="https://github.com/aquasecurity/kube-hunter/blob/main/CONTRIBUTING.md"> Click here </a>
## License
This repository is available under the [Apache License 2.0](https://github.com/aquasecurity/kube-hunter/blob/master/LICENSE).
This repository is available under the [Apache License 2.0](https://github.com/aquasecurity/kube-hunter/blob/main/LICENSE).

17
SECURITY.md Normal file
View File

@@ -0,0 +1,17 @@
# Security Policy
## Supported Versions
| Version | Supported |
| --------- | ------------------ |
| 0.4.x | :white_check_mark: |
| 0.3.x | :white_check_mark: |
## Reporting a Vulnerability
We encourage you to find vulnerabilities in kube-hunter.
The process is simple: just report a Bug issue, and we will take a look at it.
If you prefer to disclose privately, you can write to one of the security maintainers at:
| Name | Email |
| ----------- | ------------------ |
| Daniel Sagi | daniel.sagi@aquasec.com |

View File

@@ -1,11 +1,12 @@
GEM
remote: https://rubygems.org/
specs:
activesupport (4.2.11.1)
i18n (~> 0.7)
activesupport (6.0.3.4)
concurrent-ruby (~> 1.0, >= 1.0.2)
i18n (>= 0.7, < 2)
minitest (~> 5.1)
thread_safe (~> 0.3, >= 0.3.4)
tzinfo (~> 1.1)
zeitwerk (~> 2.2, >= 2.2.2)
addressable (2.7.0)
public_suffix (>= 2.0.2, < 5.0)
coffee-script (2.4.1)
@@ -15,65 +16,67 @@ GEM
colorator (1.1.0)
commonmarker (0.17.13)
ruby-enum (~> 0.5)
concurrent-ruby (1.1.5)
dnsruby (1.61.3)
addressable (~> 2.5)
em-websocket (0.5.1)
concurrent-ruby (1.1.7)
dnsruby (1.61.5)
simpleidn (~> 0.1)
em-websocket (0.5.2)
eventmachine (>= 0.12.9)
http_parser.rb (~> 0.6.0)
ethon (0.12.0)
ffi (>= 1.3.0)
eventmachine (1.2.7)
execjs (2.7.0)
faraday (0.17.0)
faraday (1.3.0)
faraday-net_http (~> 1.0)
multipart-post (>= 1.2, < 3)
ffi (1.11.1)
ruby2_keywords
faraday-net_http (1.0.1)
ffi (1.14.2)
forwardable-extended (2.6.0)
gemoji (3.0.1)
github-pages (201)
activesupport (= 4.2.11.1)
github-pages (209)
github-pages-health-check (= 1.16.1)
jekyll (= 3.8.5)
jekyll-avatar (= 0.6.0)
jekyll (= 3.9.0)
jekyll-avatar (= 0.7.0)
jekyll-coffeescript (= 1.1.1)
jekyll-commonmark-ghpages (= 0.1.6)
jekyll-default-layout (= 0.1.4)
jekyll-feed (= 0.11.0)
jekyll-feed (= 0.15.1)
jekyll-gist (= 1.5.0)
jekyll-github-metadata (= 2.12.1)
jekyll-mentions (= 1.4.1)
jekyll-optional-front-matter (= 0.3.0)
jekyll-github-metadata (= 2.13.0)
jekyll-mentions (= 1.6.0)
jekyll-optional-front-matter (= 0.3.2)
jekyll-paginate (= 1.1.0)
jekyll-readme-index (= 0.2.0)
jekyll-redirect-from (= 0.14.0)
jekyll-relative-links (= 0.6.0)
jekyll-remote-theme (= 0.4.0)
jekyll-readme-index (= 0.3.0)
jekyll-redirect-from (= 0.16.0)
jekyll-relative-links (= 0.6.1)
jekyll-remote-theme (= 0.4.2)
jekyll-sass-converter (= 1.5.2)
jekyll-seo-tag (= 2.5.0)
jekyll-sitemap (= 1.2.0)
jekyll-swiss (= 0.4.0)
jekyll-seo-tag (= 2.6.1)
jekyll-sitemap (= 1.4.0)
jekyll-swiss (= 1.0.0)
jekyll-theme-architect (= 0.1.1)
jekyll-theme-cayman (= 0.1.1)
jekyll-theme-dinky (= 0.1.1)
jekyll-theme-hacker (= 0.1.1)
jekyll-theme-hacker (= 0.1.2)
jekyll-theme-leap-day (= 0.1.1)
jekyll-theme-merlot (= 0.1.1)
jekyll-theme-midnight (= 0.1.1)
jekyll-theme-minimal (= 0.1.1)
jekyll-theme-modernist (= 0.1.1)
jekyll-theme-primer (= 0.5.3)
jekyll-theme-primer (= 0.5.4)
jekyll-theme-slate (= 0.1.1)
jekyll-theme-tactile (= 0.1.1)
jekyll-theme-time-machine (= 0.1.1)
jekyll-titles-from-headings (= 0.5.1)
jemoji (= 0.10.2)
kramdown (= 1.17.0)
liquid (= 4.0.0)
listen (= 3.1.5)
jekyll-titles-from-headings (= 0.5.3)
jemoji (= 0.12.0)
kramdown (= 2.3.0)
kramdown-parser-gfm (= 1.1.0)
liquid (= 4.0.3)
mercenary (~> 0.3)
minima (= 2.5.0)
minima (= 2.5.1)
nokogiri (>= 1.10.4, < 2.0)
rouge (= 3.11.0)
rouge (= 3.23.0)
terminal-table (~> 1.4)
github-pages-health-check (1.16.1)
addressable (~> 2.3)
@@ -81,27 +84,27 @@ GEM
octokit (~> 4.0)
public_suffix (~> 3.0)
typhoeus (~> 1.3)
html-pipeline (2.12.0)
html-pipeline (2.14.0)
activesupport (>= 2)
nokogiri (>= 1.4)
http_parser.rb (0.6.0)
i18n (0.9.5)
concurrent-ruby (~> 1.0)
jekyll (3.8.5)
jekyll (3.9.0)
addressable (~> 2.4)
colorator (~> 1.0)
em-websocket (~> 0.5)
i18n (~> 0.7)
jekyll-sass-converter (~> 1.0)
jekyll-watch (~> 2.0)
kramdown (~> 1.14)
kramdown (>= 1.17, < 3)
liquid (~> 4.0)
mercenary (~> 0.3.3)
pathutil (~> 0.9)
rouge (>= 1.7, < 4)
safe_yaml (~> 1.0)
jekyll-avatar (0.6.0)
jekyll (~> 3.0)
jekyll-avatar (0.7.0)
jekyll (>= 3.0, < 5.0)
jekyll-coffeescript (1.1.1)
coffee-script (~> 2.2)
coffee-script-source (~> 1.11.1)
@@ -114,36 +117,37 @@ GEM
rouge (>= 2.0, < 4.0)
jekyll-default-layout (0.1.4)
jekyll (~> 3.0)
jekyll-feed (0.11.0)
jekyll (~> 3.3)
jekyll-feed (0.15.1)
jekyll (>= 3.7, < 5.0)
jekyll-gist (1.5.0)
octokit (~> 4.2)
jekyll-github-metadata (2.12.1)
jekyll (~> 3.4)
jekyll-github-metadata (2.13.0)
jekyll (>= 3.4, < 5.0)
octokit (~> 4.0, != 4.4.0)
jekyll-mentions (1.4.1)
jekyll-mentions (1.6.0)
html-pipeline (~> 2.3)
jekyll (~> 3.0)
jekyll-optional-front-matter (0.3.0)
jekyll (~> 3.0)
jekyll (>= 3.7, < 5.0)
jekyll-optional-front-matter (0.3.2)
jekyll (>= 3.0, < 5.0)
jekyll-paginate (1.1.0)
jekyll-readme-index (0.2.0)
jekyll (~> 3.0)
jekyll-redirect-from (0.14.0)
jekyll (~> 3.3)
jekyll-relative-links (0.6.0)
jekyll (~> 3.3)
jekyll-remote-theme (0.4.0)
jekyll-readme-index (0.3.0)
jekyll (>= 3.0, < 5.0)
jekyll-redirect-from (0.16.0)
jekyll (>= 3.3, < 5.0)
jekyll-relative-links (0.6.1)
jekyll (>= 3.3, < 5.0)
jekyll-remote-theme (0.4.2)
addressable (~> 2.0)
jekyll (~> 3.5)
rubyzip (>= 1.2.1, < 3.0)
jekyll (>= 3.5, < 5.0)
jekyll-sass-converter (>= 1.0, <= 3.0.0, != 2.0.0)
rubyzip (>= 1.3.0, < 3.0)
jekyll-sass-converter (1.5.2)
sass (~> 3.4)
jekyll-seo-tag (2.5.0)
jekyll (~> 3.3)
jekyll-sitemap (1.2.0)
jekyll (~> 3.3)
jekyll-swiss (0.4.0)
jekyll-seo-tag (2.6.1)
jekyll (>= 3.3, < 5.0)
jekyll-sitemap (1.4.0)
jekyll (>= 3.7, < 5.0)
jekyll-swiss (1.0.0)
jekyll-theme-architect (0.1.1)
jekyll (~> 3.5)
jekyll-seo-tag (~> 2.0)
@@ -153,8 +157,8 @@ GEM
jekyll-theme-dinky (0.1.1)
jekyll (~> 3.5)
jekyll-seo-tag (~> 2.0)
jekyll-theme-hacker (0.1.1)
jekyll (~> 3.5)
jekyll-theme-hacker (0.1.2)
jekyll (> 3.5, < 5.0)
jekyll-seo-tag (~> 2.0)
jekyll-theme-leap-day (0.1.1)
jekyll (~> 3.5)
@@ -171,8 +175,8 @@ GEM
jekyll-theme-modernist (0.1.1)
jekyll (~> 3.5)
jekyll-seo-tag (~> 2.0)
jekyll-theme-primer (0.5.3)
jekyll (~> 3.5)
jekyll-theme-primer (0.5.4)
jekyll (> 3.5, < 5.0)
jekyll-github-metadata (~> 2.9)
jekyll-seo-tag (~> 2.0)
jekyll-theme-slate (0.1.1)
@@ -184,43 +188,49 @@ GEM
jekyll-theme-time-machine (0.1.1)
jekyll (~> 3.5)
jekyll-seo-tag (~> 2.0)
jekyll-titles-from-headings (0.5.1)
jekyll (~> 3.3)
jekyll-titles-from-headings (0.5.3)
jekyll (>= 3.3, < 5.0)
jekyll-watch (2.2.1)
listen (~> 3.0)
jemoji (0.10.2)
jemoji (0.12.0)
gemoji (~> 3.0)
html-pipeline (~> 2.2)
jekyll (~> 3.0)
kramdown (1.17.0)
liquid (4.0.0)
listen (3.1.5)
rb-fsevent (~> 0.9, >= 0.9.4)
rb-inotify (~> 0.9, >= 0.9.7)
ruby_dep (~> 1.2)
jekyll (>= 3.0, < 5.0)
kramdown (2.3.0)
rexml (>= 3.2.5)
kramdown-parser-gfm (1.1.0)
kramdown (>= 2.3.1)
liquid (4.0.3)
listen (3.4.0)
rb-fsevent (~> 0.10, >= 0.10.3)
rb-inotify (~> 0.9, >= 0.9.10)
mercenary (0.3.6)
mini_portile2 (2.4.0)
minima (2.5.0)
jekyll (~> 3.5)
mini_portile2 (2.5.0)
minima (2.5.1)
jekyll (>= 3.5, < 5.0)
jekyll-feed (~> 0.9)
jekyll-seo-tag (~> 2.1)
minitest (5.12.2)
minitest (5.14.3)
multipart-post (2.1.1)
nokogiri (1.10.8)
mini_portile2 (~> 2.4.0)
octokit (4.14.0)
nokogiri (>= 1.11.4)
mini_portile2 (~> 2.5.0)
racc (~> 1.4)
octokit (4.20.0)
faraday (>= 0.9)
sawyer (~> 0.8.0, >= 0.5.3)
pathutil (0.16.2)
forwardable-extended (~> 2.6)
public_suffix (3.1.1)
rb-fsevent (0.10.3)
rb-inotify (0.10.0)
racc (1.5.2)
rb-fsevent (0.10.4)
rb-inotify (0.10.1)
ffi (~> 1.0)
rouge (3.11.0)
ruby-enum (0.7.2)
rexml (3.2.4)
rouge (3.23.0)
ruby-enum (0.8.0)
i18n
ruby_dep (1.5.0)
rubyzip (2.0.0)
ruby2_keywords (0.0.2)
rubyzip (2.3.0)
safe_yaml (1.0.5)
sass (3.7.4)
sass-listen (~> 4.0.0)
@@ -230,14 +240,20 @@ GEM
sawyer (0.8.2)
addressable (>= 2.3.5)
faraday (> 0.8, < 2.0)
simpleidn (0.1.1)
unf (~> 0.1.4)
terminal-table (1.8.0)
unicode-display_width (~> 1.1, >= 1.1.1)
thread_safe (0.3.6)
typhoeus (1.3.1)
typhoeus (1.4.0)
ethon (>= 0.9.0)
tzinfo (1.2.5)
tzinfo (1.2.9)
thread_safe (~> 0.1)
unicode-display_width (1.6.0)
unf (0.1.4)
unf_ext
unf_ext (0.0.7.7)
unicode-display_width (1.7.0)
zeitwerk (2.4.2)
PLATFORMS
ruby
@@ -247,4 +263,4 @@ DEPENDENCIES
jekyll-sitemap
BUNDLED WITH
1.17.2
2.2.5

View File

@@ -1,6 +1,7 @@
---
title: kube-hunter
description: Kube-hunter hunts for security weaknesses in Kubernetes clusters
logo: https://raw.githubusercontent.com/aquasecurity/kube-hunter/master/kube-hunter.png
logo: https://raw.githubusercontent.com/aquasecurity/kube-hunter/main/kube-hunter.png
show_downloads: false
google_analytics: UA-63272154-1
theme: jekyll-theme-minimal
@@ -10,7 +11,7 @@ collections:
defaults:
  -
    scope:
      path: "" # an empty string here means all files in the project
      path: ""  # an empty string here means all files in the project
    values:
      layout: "default"

View File

@@ -2,6 +2,7 @@
vid: KHV002
title: Kubernetes version disclosure
categories: [Information Disclosure]
severity: high
---
# {{ page.vid }} - {{ page.title }}

View File

@@ -2,6 +2,7 @@
vid: KHV003
title: Azure Metadata Exposure
categories: [Information Disclosure]
severity: high
---
# {{ page.vid }} - {{ page.title }}
@@ -12,7 +13,10 @@ Microsoft Azure provides an internal HTTP endpoint that exposes information from
## Remediation
Consider using AAD Pod Identity. A Microsoft project that allows scoping the identity of workloads to Kubernetes Pods instead of VMs (instances).
Starting in the 2020.10.15 Azure VHD release, AKS restricts pod CIDR access to that internal HTTP endpoint.
[CVE-2021-27075](https://github.com/Azure/AKS/issues/2168)
## References

View File

@@ -2,6 +2,7 @@
vid: KHV004
title: Azure SPN Exposure
categories: [Identity Theft]
severity: medium
---
# {{ page.vid }} - {{ page.title }}

View File

@@ -2,6 +2,7 @@
vid: KHV005
title: Access to Kubernetes API
categories: [Information Disclosure, Unauthenticated Access]
severity: high
---
# {{ page.vid }} - {{ page.title }}
@@ -12,7 +13,7 @@ Kubernetes API was accessed with Pod Service Account or without Authentication (
## Remediation
Secure acess to your Kubernetes API.
Secure access to your Kubernetes API.
It is recommended to explicitly specify a Service Account for all of your workloads (`serviceAccountName` in `Pod.Spec`), and manage their permissions according to the least privilege principle.
@@ -21,4 +22,4 @@ Consider opting out automatic mounting of SA token using `automountServiceAccoun
## References
- [Configure Service Accounts for Pods](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/)
- [Configure Service Accounts for Pods](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/)

View File

@@ -2,6 +2,7 @@
vid: KHV006
title: Insecure (HTTP) access to Kubernetes API
categories: [Unauthenticated Access]
severity: high
---
# {{ page.vid }} - {{ page.title }}

View File

@@ -2,6 +2,7 @@
vid: KHV007
title: Specific Access to Kubernetes API
categories: [Access Risk]
severity: high
---
# {{ page.vid }} - {{ page.title }}

View File

@@ -2,6 +2,7 @@
vid: KHV020
title: Possible Arp Spoof
categories: [IdentityTheft]
severity: high
---
# {{ page.vid }} - {{ page.title }}

View File

@@ -2,6 +2,7 @@
vid: KHV021
title: Certificate Includes Email Address
categories: [Information Disclosure]
severity: low
---
# {{ page.vid }} - {{ page.title }}

View File

@@ -2,6 +2,7 @@
vid: KHV022
title: Critical Privilege Escalation CVE
categories: [Privilege Escalation]
severity: critical
---
# {{ page.vid }} - {{ page.title }}

View File

@@ -2,6 +2,7 @@
vid: KHV023
title: Denial of Service to Kubernetes API Server
categories: [Denial Of Service]
severity: medium
---
# {{ page.vid }} - {{ page.title }}

View File

@@ -2,6 +2,7 @@
vid: KHV024
title: Possible Ping Flood Attack
categories: [Denial Of Service]
severity: medium
---
# {{ page.vid }} - {{ page.title }}

View File

@@ -2,6 +2,7 @@
vid: KHV025
title: Possible Reset Flood Attack
categories: [Denial Of Service]
severity: medium
---
# {{ page.vid }} - {{ page.title }}

View File

@@ -2,6 +2,7 @@
vid: KHV026
title: Arbitrary Access To Cluster Scoped Resources
categories: [PrivilegeEscalation]
severity: high
---
# {{ page.vid }} - {{ page.title }}

View File

@@ -2,6 +2,7 @@
vid: KHV027
title: Kubectl Vulnerable To CVE-2019-11246
categories: [Remote Code Execution]
severity: medium
---
# {{ page.vid }} - {{ page.title }}

View File

@@ -2,6 +2,7 @@
vid: KHV028
title: Kubectl Vulnerable To CVE-2019-1002101
categories: [Remote Code Execution]
severity: medium
---
# {{ page.vid }} - {{ page.title }}

View File

@@ -2,6 +2,7 @@
vid: KHV029
title: Dashboard Exposed
categories: [Remote Code Execution]
severity: critical
---
# {{ page.vid }} - {{ page.title }}
@@ -12,4 +13,5 @@ An open Kubernetes Dashboard was detected. The Kubernetes Dashboard can be used
## Remediation
Do not leave the Dashboard insecured.
Do not leave the Dashboard insecured.

View File

@@ -2,6 +2,7 @@
vid: KHV030
title: Possible DNS Spoof
categories: [Identity Theft]
severity: high
---
# {{ page.vid }} - {{ page.title }}

View File

@@ -2,6 +2,7 @@
vid: KHV031
title: Etcd Remote Write Access Event
categories: [Remote Code Execution]
severity: critical
---
# {{ page.vid }} - {{ page.title }}

View File

@@ -2,6 +2,7 @@
vid: KHV032
title: Etcd Remote Read Access Event
categories: [Access Risk]
severity: critical
---
# {{ page.vid }} - {{ page.title }}

View File

@@ -2,6 +2,7 @@
vid: KHV033
title: Etcd Remote version disclosure
categories: [Information Disclosure]
severity: medium
---
# {{ page.vid }} - {{ page.title }}

View File

@@ -2,6 +2,7 @@
vid: KHV034
title: Etcd is accessible using insecure connection (HTTP)
categories: [Unauthenticated Access]
severity: high
---
# {{ page.vid }} - {{ page.title }}

View File

@@ -2,6 +2,7 @@
vid: KHV036
title: Anonymous Authentication
categories: [Remote Code Execution]
severity: high
---
# {{ page.vid }} - {{ page.title }}

View File

@@ -2,6 +2,7 @@
vid: KHV037
title: Exposed Container Logs
categories: [Information Disclosure]
severity: high
---
# {{ page.vid }} - {{ page.title }}

View File

@@ -2,6 +2,7 @@
vid: KHV038
title: Exposed Running Pods
categories: [Information Disclosure]
severity: high
---
# {{ page.vid }} - {{ page.title }}

View File

@@ -2,6 +2,7 @@
vid: KHV039
title: Exposed Exec On Container
categories: [Remote Code Execution]
severity: high
---
# {{ page.vid }} - {{ page.title }}

View File

@@ -2,6 +2,7 @@
vid: KHV040
title: Exposed Run Inside Container
categories: [Remote Code Execution]
severity: high
---
# {{ page.vid }} - {{ page.title }}

View File

@@ -2,6 +2,7 @@
vid: KHV041
title: Exposed Port Forward
categories: [Remote Code Execution]
severity: high
---
# {{ page.vid }} - {{ page.title }}

View File

@@ -2,6 +2,7 @@
vid: KHV042
title: Exposed Attaching To Container
categories: [Remote Code Execution]
severity: high
---
# {{ page.vid }} - {{ page.title }}

View File

@@ -2,6 +2,7 @@
vid: KHV043
title: Cluster Health Disclosure
categories: [Information Disclosure]
severity: low
---
# {{ page.vid }} - {{ page.title }}

View File

@@ -2,6 +2,7 @@
vid: KHV044
title: Privileged Container
categories: [Access Risk]
severity: high
---
# {{ page.vid }} - {{ page.title }}

View File

@@ -2,6 +2,7 @@
vid: KHV045
title: Exposed System Logs
categories: [Information Disclosure]
severity: high
---
# {{ page.vid }} - {{ page.title }}

View File

@@ -2,6 +2,7 @@
vid: KHV046
title: Exposed Kubelet Cmdline
categories: [Information Disclosure]
severity: high
---
# {{ page.vid }} - {{ page.title }}

View File

@@ -2,6 +2,7 @@
vid: KHV047
title: Pod With Mount To /var/log
categories: [Privilege Escalation]
severity: high
---
# {{ page.vid }} - {{ page.title }}

View File

@@ -2,6 +2,7 @@
vid: KHV049
title: kubectl proxy Exposed
categories: [Information Disclosure]
severity: high
---
# {{ page.vid }} - {{ page.title }}

View File

@@ -2,6 +2,7 @@
vid: KHV050
title: Read access to Pod service account token
categories: [Access Risk]
severity: medium
---
# {{ page.vid }} - {{ page.title }}

View File

@@ -2,6 +2,7 @@
vid: KHV051
title: Exposed Existing Privileged Containers Via Secure Kubelet Port
categories: [Access Risk]
severity: high
---
# {{ page.vid }} - {{ page.title }}

24
docs/_kb/KHV052.md Normal file
View File

@@ -0,0 +1,24 @@
---
vid: KHV052
title: Exposed Pods
categories: [Information Disclosure]
severity: medium
---
# {{ page.vid }} - {{ page.title }}
## Issue description
An attacker could view sensitive information about pods that are bound to a Node using the exposed /pods endpoint.
This can be done either by accessing the read-only port (default 10255), or via the secure kubelet port (10250)
## Remediation
Ensure kubelet is protected using `--anonymous-auth=false` kubelet flag. Allow only legitimate users using `--client-ca-file` or `--authentication-token-webhook` kubelet flags. This is usually done by the installer or cloud provider.
Disable the readonly port by using `--read-only-port=0` kubelet flag.
## References
- [Kubelet configuration](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/)
- [Kubelet authentication/authorization](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-authentication-authorization/)

25
docs/_kb/KHV053.md Normal file
View File

@@ -0,0 +1,25 @@
---
vid: KHV053
title: AWS Metadata Exposure
categories: [Information Disclosure]
severity: high
---
# {{ page.vid }} - {{ page.title }}
## Issue description
AWS EC2 provides an internal HTTP endpoint that exposes information from the cloud platform to workloads running in an instance. The endpoint is accessible to every workload running in the instance. An attacker that is able to execute a pod in the cluster may be able to query the metadata service and discover additional information about the environment.
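As a rough illustration of the probe described above (a sketch, not kube-hunter's implementation; 169.254.169.254 is the AWS-defined metadata endpoint):

```python
# From inside a pod, check whether the EC2 instance metadata service is reachable.
import requests

try:
    resp = requests.get("http://169.254.169.254/latest/meta-data/", timeout=2)
    if resp.status_code == 200:
        print("metadata service exposed; top-level keys:", resp.text.splitlines())
except requests.RequestException:
    print("metadata service not reachable")
```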
## Remediation
* Limit access to the instance metadata service. Consider using a local firewall such as `iptables` to disable access from some or all processes/users to the instance metadata service.
* Disable the metadata service (via instance metadata options or IAM), or at a minimum enforce the use of IMDSv2 on an instance to require token-based access to the service.
* Modify the HTTP PUT response hop limit on the instance to 1. This will only allow access to the service from the instance itself rather than from within a pod.
## References
- [AWS Instance Metadata service](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html)
- [EC2 Instance Profiles](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2_instance-profiles.html)

View File

@@ -1,14 +1,17 @@
---
apiVersion: batch/v1
kind: Job
metadata:
  name: kube-hunter
spec:
  template:
    metadata:
      labels:
        app: kube-hunter
    spec:
      containers:
        - name: kube-hunter
          image: aquasec/kube-hunter
          command: ["python", "kube-hunter.py"]
          args: ["--pod"]
        - name: kube-hunter
          image: aquasec/kube-hunter:0.6.8
          command: ["kube-hunter"]
          args: ["--pod"]
      restartPolicy: Never
  backoffLimit: 4

Binary file not shown.

Before

Width:  |  Height:  |  Size: 144 KiB

After

Width:  |  Height:  |  Size: 230 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 27 KiB

After

Width:  |  Height:  |  Size: 25 KiB

View File

@@ -76,7 +76,7 @@ in order to prevent circular dependency bug.
Following the above example, let's figure out the imports:
```python
from kube_hunter.core.types import Hunter
from kube_hunter.core.events import handler
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import OpenPortEvent
@@ -206,7 +206,7 @@ __Make sure to return the event from the execute method, or the event will not g
For example, if you don't want to hunt services found on a localhost IP, you can create the following module, in the `kube_hunter/modules/report/`
```python
from kube_hunter.core.events import handler
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import Service, EventFilterBase
@handler.subscribe(Service)
@@ -222,7 +222,7 @@ That means other Hunters that are subscribed to this Service will not get trigge
That opens up a wide variety of possible operations, as this not only can __filter out__ events, but you can actually __change event attributes__, for example:
```python
from kube_hunter.core.events import handler
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.types import InformationDisclosure
from kube_hunter.core.events.types import Vulnerability, EventFilterBase
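Putting the new import path together with the filter pattern shown above, a minimal filter module might look like the following sketch (the class name and host check are illustrative; the subscribe/execute structure follows the snippets in this diff):

```python
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import Service, EventFilterBase


@handler.subscribe(Service)
class FilterLocalhostServices(EventFilterBase):
    """Illustrative filter: drop services discovered on a localhost IP."""

    def __init__(self, event):
        self.event = event

    def execute(self):
        # Returning None filters the event out of the hunt; returning the
        # (possibly modified) event passes it on to subscribed hunters.
        if str(getattr(self.event, "host", "")) == "127.0.0.1":
            return None
        return self.event
```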

View File

@@ -1,6 +1,7 @@
#!/usr/bin/env python3
# flake8: noqa: E402
from functools import partial
import logging
import threading
@@ -18,20 +19,27 @@ config = Config(
    cidr=args.cidr,
    include_patched_versions=args.include_patched_versions,
    interface=args.interface,
    log_file=args.log_file,
    mapping=args.mapping,
    network_timeout=args.network_timeout,
    num_worker_threads=args.num_worker_threads,
    pod=args.pod,
    quick=args.quick,
    remote=args.remote,
    statistics=args.statistics,
    k8s_auto_discover_nodes=args.k8s_auto_discover_nodes,
    service_account_token=args.service_account_token,
    kubeconfig=args.kubeconfig,
    enable_cve_hunting=args.enable_cve_hunting,
    custom=args.custom,
)
setup_logger(args.log)
setup_logger(args.log, args.log_file)
set_config(config)
# Running all other registered plugins before execution
pm.hook.load_plugin(args=args)
from kube_hunter.core.events import handler
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import HuntFinished, HuntStarted
from kube_hunter.modules.discovery.hosts import RunningAsPodEvent, HostScanEvent
from kube_hunter.modules.report import get_reporter, get_dispatcher
@@ -68,17 +76,21 @@ def interactive_set_config():
    return True
def list_hunters():
def list_hunters(class_names=False):
    print("\nPassive Hunters:\n----------------")
    for hunter, docs in handler.passive_hunters.items():
        name, doc = hunter.parse_docs(docs)
        print("* {}\n {}\n".format(name, doc))
        if class_names:
            name = hunter.__name__
        print(f"* {name}\n {doc}\n")
    if config.active:
        print("\n\nActive Hunters:\n---------------")
        for hunter, docs in handler.active_hunters.items():
            name, doc = hunter.parse_docs(docs)
            print("* {}\n {}\n".format(name, doc))
            if class_names:
                name = hunter.__name__
            print(f"* {name}\n {doc}\n")
hunt_started_lock = threading.Lock()
@@ -87,10 +99,13 @@ hunt_started = False
def main():
    global hunt_started
    scan_options = [config.pod, config.cidr, config.remote, config.interface]
    scan_options = [config.pod, config.cidr, config.remote, config.interface, config.k8s_auto_discover_nodes]
    try:
        if args.list:
            list_hunters()
            if args.raw_hunter_names:
                list_hunters(class_names=True)
            else:
                list_hunters()
            return
        if not any(scan_options):

View File

@@ -1,10 +1,14 @@
from dataclasses import dataclass
from dataclasses import dataclass, field
from typing import Any, Optional
def get_default_core_hunters():
    return ["FromPodHostDiscovery", "HostDiscovery", "PortDiscovery", "SendFullReport", "Collector", "StartedInfo"]
@dataclass
class Config:
    """ Config is a configuration container.
    """Config is a configuration container.
    It contains the following fields:
    - active: Enable active hunters
    - cidr: Network subnets to scan
@@ -13,13 +17,16 @@ class Config:
    - interface: Interface scanning mode
    - list_hunters: Print a list of existing hunters
    - log_level: Log level
    - log_file: Log File path
    - mapping: Report only found components
    - network_timeout: Timeout for network operations
    - num_worker_threads: Add a flag --threads to change the default 800 thread count of the event handler
    - pod: From pod scanning mode
    - quick: Quick scanning mode
    - remote: Hosts to scan
    - report: Output format
    - statistics: Include hunters statistics
    - enable_cve_hunting: enables cve hunting, shows cve results
    """
    active: bool = False
@@ -27,13 +34,22 @@ class Config:
dispatcher: Optional[Any] = None
include_patched_versions: bool = False
interface: bool = False
log_file: Optional[str] = None
mapping: bool = False
network_timeout: float = 5.0
num_worker_threads: int = 800
pod: bool = False
quick: bool = False
remote: Optional[str] = None
reporter: Optional[Any] = None
statistics: bool = False
k8s_auto_discover_nodes: bool = False
service_account_token: Optional[str] = None
kubeconfig: Optional[str] = None
enable_cve_hunting: bool = False
custom: Optional[list] = None
raw_hunter_names: bool = False
core_hunters: list = field(default_factory=get_default_core_hunters)
_config: Optional[Config] = None
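To illustrate the new fields, a minimal programmatic setup might look like this (a sketch; the hunter name passed to `custom` is hypothetical, and `set_config`/`get_config` are the accessors used throughout this diff):

```python
from kube_hunter.conf import Config, set_config, get_config

# hypothetical setup mirroring the new CLI flags
set_config(
    Config(
        log_file="/tmp/kube_hunter.log",
        k8s_auto_discover_nodes=True,
        kubeconfig="/path/to/kubeconfig",
        num_worker_threads=200,  # lower the 800 default if the process crashes
        custom=["SomeHunterClassName"],  # hypothetical hunter class name
    )
)
assert get_config().num_worker_threads == 200
```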

View File

@@ -1,16 +1,11 @@
import logging
DEFAULT_LEVEL = logging.INFO
DEFAULT_LEVEL_NAME = logging.getLevelName(DEFAULT_LEVEL)
LOG_FORMAT = "%(asctime)s %(levelname)s %(name)s %(message)s"
# Suppress logging from scapy
logging.getLogger("scapy.runtime").setLevel(logging.CRITICAL)
logging.getLogger("scapy.loading").setLevel(logging.CRITICAL)
def setup_logger(level_name):
def setup_logger(level_name, logfile):
# Remove any existing handlers
# Unnecessary in Python 3.8 since `logging.basicConfig` has `force` parameter
for h in logging.getLogger().handlers[:]:
@@ -22,6 +17,9 @@ def setup_logger(level_name):
else:
log_level = getattr(logging, level_name.upper(), None)
log_level = log_level if isinstance(log_level, int) else None
logging.basicConfig(level=log_level or DEFAULT_LEVEL, format=LOG_FORMAT)
if logfile is None:
logging.basicConfig(level=log_level or DEFAULT_LEVEL, format=LOG_FORMAT)
else:
logging.basicConfig(filename=logfile, level=log_level or DEFAULT_LEVEL, format=LOG_FORMAT)
if not log_level:
logging.warning(f"Unknown log level '{level_name}', using {DEFAULT_LEVEL_NAME}")

View File

@@ -9,7 +9,9 @@ def parser_add_arguments(parser):
Contains initialization for all default arguments
"""
parser.add_argument(
"--list", action="store_true", help="Displays all tests in kubehunter (add --active flag to see active tests)",
"--list",
action="store_true",
help="Displays all tests in kubehunter (add --active flag to see active tests)",
)
parser.add_argument("--interface", action="store_true", help="Set hunting on all network interfaces")
@@ -19,7 +21,9 @@ def parser_add_arguments(parser):
parser.add_argument("--quick", action="store_true", help="Prefer quick scan (subnet 24)")
parser.add_argument(
"--include-patched-versions", action="store_true", help="Don't skip patched versions when scanning",
"--include-patched-versions",
action="store_true",
help="Don't skip patched versions when scanning",
)
parser.add_argument(
@@ -29,15 +33,71 @@ def parser_add_arguments(parser):
)
parser.add_argument(
"--mapping", action="store_true", help="Outputs only a mapping of the cluster's nodes",
"--mapping",
action="store_true",
help="Outputs only a mapping of the cluster's nodes",
)
parser.add_argument(
"--remote", nargs="+", metavar="HOST", default=list(), help="One or more remote ip/dns to hunt",
"--remote",
nargs="+",
metavar="HOST",
default=list(),
help="One or more remote ip/dns to hunt",
)
parser.add_argument(
"-c",
"--custom",
nargs="+",
metavar="HUNTERS",
default=list(),
help="Custom hunting. Only given hunter names will register in the hunt."
"for a list of options run `--list --raw-hunter-names`",
)
parser.add_argument(
"--raw-hunter-names",
action="store_true",
help="Use in combination with `--list` to display hunter class names to pass for custom hunting flag",
)
parser.add_argument(
"--k8s-auto-discover-nodes",
action="store_true",
help="Enables automatic detection of all nodes in a Kubernetes cluster "
"by quering the Kubernetes API server. "
"It supports both in-cluster config (when running as a pod), "
"and a specific kubectl config file (use --kubeconfig to set this). "
"By default, when this flag is set, it will use in-cluster config. "
"NOTE: this is automatically switched on in --pod mode.",
)
parser.add_argument(
"--service-account-token",
type=str,
metavar="JWT_TOKEN",
help="Manually specify the service account jwt token to use for authenticating in the hunting process "
"NOTE: This overrides the loading of the pod's bounded authentication when running in --pod mode",
)
parser.add_argument(
"--kubeconfig",
type=str,
metavar="KUBECONFIG",
default=None,
help="Specify the kubeconfig file to use for Kubernetes nodes auto discovery "
" (to be used in conjuction with the --k8s-auto-discover-nodes flag.",
)
parser.add_argument("--active", action="store_true", help="Enables active hunting")
parser.add_argument(
"--enable-cve-hunting",
action="store_true",
help="Show cluster CVEs based on discovered version (Depending on different vendors, may result in False Positives)",
)
parser.add_argument(
"--log",
type=str,
@@ -47,7 +107,17 @@ def parser_add_arguments(parser):
)
parser.add_argument(
"--report", type=str, default="plain", help="Set report type, options are: plain, yaml, json",
"--log-file",
type=str,
default=None,
help="Path to a log file to output all logs to",
)
parser.add_argument(
"--report",
type=str,
default="plain",
help="Set report type, options are: plain, yaml, json",
)
parser.add_argument(
@@ -63,6 +133,14 @@ def parser_add_arguments(parser):
parser.add_argument("--network-timeout", type=float, default=5.0, help="network operations timeout")
parser.add_argument(
"--num-worker-threads",
type=int,
default=800,
help="In some environments the default thread count (800) can cause the process to crash. "
"In the case of a crash try lowering the thread count",
)
def parse_args(add_args_hook):
"""

View File

@@ -1,3 +1,2 @@
# flake8: noqa: E402
from .handler import EventQueue, handler
from . import types

View File

@@ -0,0 +1,370 @@
import logging
import time
from collections import defaultdict
from queue import Queue
from threading import Thread
from kube_hunter.conf import get_config
from kube_hunter.core.types import ActiveHunter, HunterBase
from kube_hunter.core.events.types import Vulnerability, EventFilterBase, MultipleEventsContainer
logger = logging.getLogger(__name__)
# Inherits Queue object, handles events asynchronously
class EventQueue(Queue):
def __init__(self, num_worker=10):
super().__init__()
self.passive_hunters = dict()
self.active_hunters = dict()
self.all_hunters = dict()
self.running = True
self.workers = list()
# -- Regular Subscription --
# Structure: key: Event Class, value: tuple(Registered Hunter, Predicate Function)
self.hooks = defaultdict(list)
self.filters = defaultdict(list)
# --------------------------
# -- Multiple Subscription --
# Structure: key: Event Class, value: tuple(Registered Hunter, Predicate Function)
self.multi_hooks = defaultdict(list)
# When subscribing to multiple events, this gets populated with required event classes
# Structure: key: Hunter Class, value: set(RequiredEventClass1, RequiredEventClass2)
self.hook_dependencies = defaultdict(set)
# To keep track of fulfilled dependencies, we need a structure that saves historically instantiated
# events mapped to a registered hunter.
# We use a two-dimensional dictionary in order to fulfill two demands:
# * correctly count published required events
# * save historical events fired, easily sorted by their type
#
# Structure: hook_fulfilled_deps[hunter_class] -> fulfilled_events_for_hunter[event_class] -> [EventObject, EventObject2]
self.hook_fulfilled_deps = defaultdict(lambda: defaultdict(list))
# ---------------------------
for _ in range(num_worker):
t = Thread(target=self.worker)
t.daemon = True
t.start()
self.workers.append(t)
t = Thread(target=self.notifier)
t.daemon = True
t.start()
"""
######################################################
+ ----------------- Public Methods ----------------- +
######################################################
"""
def subscribe(self, event, hook=None, predicate=None, is_register=True):
"""
The Subscribe Decorator - For Regular Registration
Use this to register for one event only. Your hunter will execute each time this event is published
@param event - Event class to subscribe to
@param predicate - Optional: Function that will be called with the published event as a parameter before triggering.
If its return value is False, the hunter will not run (default=None).
@param hook - Hunter class to register for (ignore when using as a decorator)
"""
def wrapper(hook):
self.subscribe_event(event, hook=hook, predicate=predicate, is_register=is_register)
return hook
return wrapper
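For instance, a single-event registration might look like the following sketch (`OpenPortEvent` and `Hunter` exist in the codebase; the hunter itself is hypothetical):

```python
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import OpenPortEvent
from kube_hunter.core.types import Hunter


@handler.subscribe(OpenPortEvent, predicate=lambda event: event.port == 8080)
class HypotheticalInsecurePortHunter(Hunter):
    """Hypothetical Insecure Port Hunter
    Executes each time an OpenPortEvent for port 8080 is published"""

    def __init__(self, event):
        self.event = event

    def execute(self):
        # hunting logic goes here; findings are emitted via self.publish_event(...)
        pass
```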
def subscribe_many(self, events, hook=None, predicates=None, is_register=True):
"""
The Subscribe Many Decorator - For Multiple Registration,
When your attack needs several prerequisites to exist in the cluster, you need to register for multiple events.
Your hunter will execute once for every new combination of required events.
For example:
1. event A was published 3 times
2. event B was published once.
3. event B was published again
Your hunter will execute 2 times:
* (on step 2) with the newest version of A
* (on step 3) with the newest version of A and newest version of B
@param events - List of event classes to subscribe to
@param predicates - Optional: List of functions that will be called with the published events before triggering.
If a predicate's return value is False, the hunter will not run (default=None).
@param hook - Hunter class to register for (ignore when using as a decorator)
"""
def wrapper(hook):
self.subscribe_events(events, hook=hook, predicates=predicates, is_register=is_register)
return hook
return wrapper
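A sketch of a multi-event registration (hypothetical hunter; per the `MultipleEventsContainer` class later in this diff, the hook receives a container and can fetch the latest event of each class with `get_by_class`):

```python
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import OpenPortEvent
from kube_hunter.modules.discovery.hosts import RunningAsPodEvent
from kube_hunter.core.types import Hunter


@handler.subscribe_many([RunningAsPodEvent, OpenPortEvent])
class HypotheticalPodPortHunter(Hunter):
    """Hypothetical Pod Port Hunter
    Runs once both a RunningAsPodEvent and an OpenPortEvent have been published"""

    def __init__(self, events):
        # events is a MultipleEventsContainer holding the newest event of each class
        self.pod = events.get_by_class(RunningAsPodEvent)
        self.open_port = events.get_by_class(OpenPortEvent)

    def execute(self):
        pass
```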
def subscribe_once(self, event, hook=None, predicate=None, is_register=True):
"""
The Subscribe Once Decorator - For Single Trigger Registration,
Use this when you want your hunter to execute only once in the entire program run.
Wraps the subscribe_event method.
@param event - Event class to subscribe to
@param predicate - Optional: Function that will be called with the published event as a parameter before triggering.
If its return value is False, the hunter will not run (default=None).
@param hook - Hunter class to register for (ignore when using as a decorator)
"""
def wrapper(hook):
# installing a __new__ magic method on the hunter
# which will remove the hunter from the list upon creation
def __new__unsubscribe_self(self, cls):
handler.hooks[event].remove((hook, predicate))
return object.__new__(self)
hook.__new__ = __new__unsubscribe_self
self.subscribe_event(event, hook=hook, predicate=predicate, is_register=is_register)
return hook
return wrapper
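A sketch of a one-shot registration, reusing the imports from the earlier sketch (hypothetical hunter): the installed `__new__` removes the hook from the subscription list on first instantiation, so the hunter fires at most once:

```python
@handler.subscribe_once(OpenPortEvent, predicate=lambda event: event.port == 6443)
class HypotheticalApiServerPinger(Hunter):
    """Hypothetical API Server Pinger
    Executes only for the first API-server-like open port seen in the run"""

    def __init__(self, event):
        self.event = event

    def execute(self):
        pass
```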
def publish_event(self, event, caller=None):
"""
The Publish Event Method - For Publishing Events To Kube-Hunter's Queue
"""
# Document that the hunter published a vulnerability (if it's indeed a vulnerability)
# For statistics options
self._increase_vuln_count(event, caller)
# sets the event's parent to be its publishing hunter.
self._set_event_chain(event, caller)
# applying filters on the event, before publishing it to subscribers.
# if filter returned None, not proceeding to publish
event = self.apply_filters(event)
if event:
# If event was rewritten, make sure it's linked again
self._set_event_chain(event, caller)
# Regular Hunter registrations - publish logic
# Here we iterate over all the registered-to events:
for hooked_event in self.hooks.keys():
# We check if the event being published inherits from the currently iterated registered event
# Meaning - if this is a relevant event:
if hooked_event in event.__class__.__mro__:
# If so, we publish to all registered hunters.
for hook, predicate in self.hooks[hooked_event]:
if predicate and not predicate(event):
continue
self.put(hook(event))
logger.debug(f"Event {event.__class__} got published to hunter - {hook} with {event}")
# Multiple Hunter registrations - publish logic
# Here we iterate over all the registered-to events:
for hooked_event in self.multi_hooks.keys():
# We check if the event being published inherits from the currently iterated registered event
# Meaning - if this is a relevant event:
if hooked_event in event.__class__.__mro__:
# now we iterate over the corresponding registered hunters.
for hook, predicate in self.multi_hooks[hooked_event]:
if predicate and not predicate(event):
continue
self._update_multi_hooks(hook, event)
if self._is_all_fulfilled_for_hunter(hook):
events_container = MultipleEventsContainer(self._get_latest_events_from_multi_hooks(hook))
self.put(hook(events_container))
logger.debug(
f"Multiple subscription requirements were met for hunter {hook}. events container was \
published with {self.hook_fulfilled_deps[hook].keys()}"
)
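Because the relevance check walks `event.__class__.__mro__`, a hook registered on a base event class also receives every derived event. A tiny sketch of that behavior (hypothetical event classes, assuming `Event` from kube_hunter.core.events.types):

```python
class HypotheticalServiceEvent(Event):
    pass


class HypotheticalDashboardEvent(HypotheticalServiceEvent):
    pass


# a hunter subscribed to HypotheticalServiceEvent is also triggered by this publish,
# since HypotheticalServiceEvent appears in HypotheticalDashboardEvent.__mro__
handler.publish_event(HypotheticalDashboardEvent())
```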
"""
######################################################
+ ---------------- Private Methods ----------------- +
+ ---------------- (Backend Logic) ----------------- +
######################################################
"""
def _get_latest_events_from_multi_hooks(self, hook):
"""
Iterates over the hunter's fulfilled deps and fetches the latest appended event of each class from history
"""
latest_events = list()
for event_class in self.hook_fulfilled_deps[hook].keys():
latest_events.append(self.hook_fulfilled_deps[hook][event_class][-1])
return latest_events
def _update_multi_hooks(self, hook, event):
"""
Updates published events in the multi hooks fulfilled store.
"""
self.hook_fulfilled_deps[hook][event.__class__].append(event)
def _is_all_fulfilled_for_hunter(self, hook):
"""
Returns True if all multi-hook dependencies are fulfilled for the hunter, False otherwise
"""
# Check if the first dimension already contains all necessary event classes
return len(self.hook_fulfilled_deps[hook].keys()) == len(self.hook_dependencies[hook])
def _set_event_chain(self, event, caller):
"""
Sets the event's attribute chain.
Here we link the event with its publisher (Hunter),
so the next hunter that catches this event can access the previous one's attributes.
@param event: the event object to be chained
@param caller: the Hunter object that published this event.
"""
if caller:
event.previous = caller.event
event.hunter = caller.__class__
def _register_hunters(self, hook=None):
"""
This method is called when a hunter registers itself to the handler.
This is done in order to track and correctly configure the current run of the program.
Populates: passive_hunters, active_hunters, all_hunters
"""
config = get_config()
if ActiveHunter in hook.__mro__:
if not config.active:
return False
else:
self.active_hunters[hook] = hook.__doc__
elif HunterBase in hook.__mro__:
self.passive_hunters[hook] = hook.__doc__
if HunterBase in hook.__mro__:
self.all_hunters[hook] = hook.__doc__
return True
def _register_filter(self, event, hook=None, predicate=None):
if hook not in self.filters[event]:
self.filters[event].append((hook, predicate))
logging.debug("{} filter subscribed to {}".format(hook, event))
def _register_hook(self, event, hook=None, predicate=None):
if hook not in self.hooks[event]:
self.hooks[event].append((hook, predicate))
logging.debug("{} subscribed to {}".format(hook, event))
def allowed_for_custom_registration(self, target_hunter):
"""
Checks whether the custom hunters list contains the hunter we are about to register for events.
If the hunter is considered a core hunter, as specified in `config.core_hunters`, we allow it anyway.
Returns True if:
1. custom hunting is disabled
2. custom hunting is enabled and the hunter is a core hunter class
3. custom hunting is enabled and the hunter is specified in config.custom
@param target_hunter: hunter class for registration check
"""
config = get_config()
if not config.custom:
return True
hunter_class_name = target_hunter.__name__
if hunter_class_name in config.core_hunters or hunter_class_name in config.custom:
return True
return False
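In CLI terms this is what `--custom` drives; programmatically, the gate behaves roughly like this sketch (hypothetical hunter names):

```python
from kube_hunter.conf import Config, set_config

# only these hunters (plus the core hunters) will be allowed to register
set_config(Config(custom=["HypotheticalPodPortHunter", "HypotheticalInsecurePortHunter"]))

# handler.subscribe_event(...) now silently skips any hunter whose class name
# is in neither config.core_hunters nor config.custom
```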
def subscribe_event(self, event, hook=None, predicate=None, is_register=True):
if not is_register:
return
if not self.allowed_for_custom_registration(hook):
return
if not self._register_hunters(hook):
return
# registering filters
if EventFilterBase in hook.__mro__:
self._register_filter(event, hook, predicate)
# registering hunters
else:
self._register_hook(event, hook, predicate)
def subscribe_events(self, events, hook=None, predicates=None, is_register=True):
if not is_register:
return
if not self.allowed_for_custom_registration(hook):
return
if not self._register_hunters(hook):
return
if predicates is None:
predicates = [None] * len(events)
# registering filters.
if EventFilterBase in hook.__mro__:
for event, predicate in zip(events, predicates):
self._register_filter(event, hook, predicate)
# registering hunters.
else:
for event, predicate in zip(events, predicates):
self.multi_hooks[event].append((hook, predicate))
self.hook_dependencies[hook] = frozenset(events)
def apply_filters(self, event):
# if filters are subscribed, apply them on the event
for hooked_event in self.filters.keys():
if hooked_event in event.__class__.__mro__:
for filter_hook, predicate in self.filters[hooked_event]:
if predicate and not predicate(event):
continue
logger.debug(f"Event {event.__class__} filtered with {filter_hook}")
event = filter_hook(event).execute()
# if filter decided to remove event, returning None
if not event:
return None
return event
def _increase_vuln_count(self, event, caller):
config = get_config()
if config.statistics and caller:
if Vulnerability in event.__class__.__mro__:
caller.__class__.publishedVulnerabilities += 1
# executes callbacks on dedicated thread as a daemon
def worker(self):
while self.running:
try:
hook = self.get()
logger.debug(f"Executing {hook.__class__} with {hook.event.__dict__}")
hook.execute()
except Exception as ex:
logger.debug(ex, exc_info=True)
finally:
self.task_done()
logger.debug("closing thread...")
def notifier(self):
time.sleep(2)
# should consider locking on unfinished_tasks
while self.unfinished_tasks > 0:
logger.debug(f"{self.unfinished_tasks} tasks left")
time.sleep(3)
if self.unfinished_tasks == 1:
logger.debug("final hook is hanging")
# stops execution of all daemons
def free(self):
self.running = False
with self.mutex:
self.queue.clear()
config = get_config()
handler = EventQueue(config.num_worker_threads)

View File

@@ -1,160 +0,0 @@
import logging
import time
from collections import defaultdict
from queue import Queue
from threading import Thread
from kube_hunter.conf import get_config
from kube_hunter.core.types import ActiveHunter, HunterBase
from kube_hunter.core.events.types import Vulnerability, EventFilterBase
logger = logging.getLogger(__name__)
# Inherits Queue object, handles events asynchronously
class EventQueue(Queue):
def __init__(self, num_worker=10):
super(EventQueue, self).__init__()
self.passive_hunters = dict()
self.active_hunters = dict()
self.all_hunters = dict()
self.hooks = defaultdict(list)
self.filters = defaultdict(list)
self.running = True
self.workers = list()
for _ in range(num_worker):
t = Thread(target=self.worker)
t.daemon = True
t.start()
self.workers.append(t)
t = Thread(target=self.notifier)
t.daemon = True
t.start()
# decorator wrapping for easy subscription
def subscribe(self, event, hook=None, predicate=None):
def wrapper(hook):
self.subscribe_event(event, hook=hook, predicate=predicate)
return hook
return wrapper
# wrapper takes care of the subscribe once mechanism
def subscribe_once(self, event, hook=None, predicate=None):
def wrapper(hook):
# installing a __new__ magic method on the hunter
# which will remove the hunter from the list upon creation
def __new__unsubscribe_self(self, cls):
handler.hooks[event].remove((hook, predicate))
return object.__new__(self)
hook.__new__ = __new__unsubscribe_self
self.subscribe_event(event, hook=hook, predicate=predicate)
return hook
return wrapper
# getting uninstantiated event object
def subscribe_event(self, event, hook=None, predicate=None):
config = get_config()
if ActiveHunter in hook.__mro__:
if not config.active:
return
self.active_hunters[hook] = hook.__doc__
elif HunterBase in hook.__mro__:
self.passive_hunters[hook] = hook.__doc__
if HunterBase in hook.__mro__:
self.all_hunters[hook] = hook.__doc__
# registering filters
if EventFilterBase in hook.__mro__:
if hook not in self.filters[event]:
self.filters[event].append((hook, predicate))
logger.debug(f"{hook} filter subscribed to {event}")
# registering hunters
elif hook not in self.hooks[event]:
self.hooks[event].append((hook, predicate))
logger.debug(f"{hook} subscribed to {event}")
def apply_filters(self, event):
# if filters are subscribed, apply them on the event
for hooked_event in self.filters.keys():
if hooked_event in event.__class__.__mro__:
for filter_hook, predicate in self.filters[hooked_event]:
if predicate and not predicate(event):
continue
logger.debug(f"Event {event.__class__} filtered with {filter_hook}")
event = filter_hook(event).execute()
# if filter decided to remove event, returning None
if not event:
return None
return event
# getting instantiated event object
def publish_event(self, event, caller=None):
config = get_config()
# setting event chain
if caller:
event.previous = caller.event
event.hunter = caller.__class__
# applying filters on the event, before publishing it to subscribers.
# if filter returned None, not proceeding to publish
event = self.apply_filters(event)
if event:
# If event was rewritten, make sure it's linked to its parent ('previous') event
if caller:
event.previous = caller.event
event.hunter = caller.__class__
for hooked_event in self.hooks.keys():
if hooked_event in event.__class__.__mro__:
for hook, predicate in self.hooks[hooked_event]:
if predicate and not predicate(event):
continue
if config.statistics and caller:
if Vulnerability in event.__class__.__mro__:
caller.__class__.publishedVulnerabilities += 1
logger.debug(f"Event {event.__class__} got published with {event}")
self.put(hook(event))
# executes callbacks on dedicated thread as a daemon
def worker(self):
while self.running:
try:
hook = self.get()
logger.debug(f"Executing {hook.__class__} with {hook.event.__dict__}")
hook.execute()
except Exception as ex:
logger.debug(ex, exc_info=True)
finally:
self.task_done()
logger.debug("closing thread...")
def notifier(self):
time.sleep(2)
# should consider locking on unfinished_tasks
while self.unfinished_tasks > 0:
logger.debug(f"{self.unfinished_tasks} tasks left")
time.sleep(3)
if self.unfinished_tasks == 1:
logger.debug("final hook is hanging")
# stops execution of all daemons
def free(self):
self.running = False
with self.mutex:
self.queue.clear()
handler = EventQueue(800)

View File

@@ -3,15 +3,32 @@ import threading
import requests
from kube_hunter.conf import get_config
from kube_hunter.core.types import (
InformationDisclosure,
DenialOfService,
RemoteCodeExec,
IdentityTheft,
PrivilegeEscalation,
AccessRisk,
UnauthenticatedAccess,
KubernetesCluster,
from kube_hunter.core.types import KubernetesCluster
from kube_hunter.core.types.vulnerabilities import (
GeneralSensitiveInformationTechnique,
ExposedSensitiveInterfacesTechnique,
MountServicePrincipalTechnique,
ListK8sSecretsTechnique,
AccessContainerServiceAccountTechnique,
AccessK8sApiServerTechnique,
AccessKubeletAPITechnique,
AccessK8sDashboardTechnique,
InstanceMetadataApiTechnique,
ExecIntoContainerTechnique,
SidecarInjectionTechnique,
NewContainerTechnique,
GeneralPersistenceTechnique,
HostPathMountPrivilegeEscalationTechnique,
PrivilegedContainerTechnique,
ClusterAdminBindingTechnique,
ARPPoisoningTechnique,
CoreDNSPoisoningTechnique,
DataDestructionTechnique,
GeneralDefenseEvasionTechnique,
ConnectFromProxyServerTechnique,
CVERemoteCodeExecutionCategory,
CVEPrivilegeEscalationCategory,
CVEDenialOfServiceTechnique,
)
logger = logging.getLogger(__name__)
@@ -62,6 +79,20 @@ class Event:
return history
class MultipleEventsContainer(Event):
"""
This is the class of the object a hunter receives when it is registered to multiple events.
"""
def __init__(self, events):
self.events = events
def get_by_class(self, event_class):
for event in self.events:
if event.__class__ == event_class:
return event
class Service:
def __init__(self, name, path="", secure=True):
self.name = name
@@ -69,6 +100,12 @@ class Service:
self.path = path
self.role = "Node"
# if a service account token was specified, we load it into the Service class
# We load it here because generally all kubernetes services can be authenticated with the token
config = get_config()
if config.service_account_token:
self.auth_token = config.service_account_token
def get_name(self):
return self.name
@@ -82,13 +119,30 @@ class Service:
class Vulnerability:
severity = dict(
{
InformationDisclosure: "medium",
DenialOfService: "medium",
RemoteCodeExec: "high",
IdentityTheft: "high",
PrivilegeEscalation: "high",
AccessRisk: "low",
UnauthenticatedAccess: "low",
GeneralSensitiveInformationTechnique: "low",
ExposedSensitiveInterfacesTechnique: "high",
MountServicePrincipalTechnique: "high",
ListK8sSecretsTechnique: "high",
AccessContainerServiceAccountTechnique: "low",
AccessK8sApiServerTechnique: "medium",
AccessKubeletAPITechnique: "medium",
AccessK8sDashboardTechnique: "medium",
InstanceMetadataApiTechnique: "high",
ExecIntoContainerTechnique: "high",
SidecarInjectionTechnique: "high",
NewContainerTechnique: "high",
GeneralPersistenceTechnique: "high",
HostPathMountPrivilegeEscalationTechnique: "high",
PrivilegedContainerTechnique: "high",
ClusterAdminBindingTechnique: "high",
ARPPoisoningTechnique: "medium",
CoreDNSPoisoningTechnique: "high",
DataDestructionTechnique: "high",
GeneralDefenseEvasionTechnique: "high",
ConnectFromProxyServerTechnique: "low",
CVERemoteCodeExecutionCategory: "high",
CVEPrivilegeEscalationCategory: "high",
CVEDenialOfServiceTechnique: "medium",
}
)
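With the mapping above, severity resolution reduces to a dictionary lookup on the vulnerability's category class, e.g. (a sketch, assuming the category classes imported at the top of this file):

```python
# a finding categorized as an exposed sensitive interface is reported as high severity
assert Vulnerability.severity[ExposedSensitiveInterfacesTechnique] == "high"
assert Vulnerability.severity[AccessKubeletAPITechnique] == "medium"
```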
@@ -144,7 +198,8 @@ class NewHostEvent(Event):
logger.debug("Checking whether the cluster is deployed on azure's cloud")
# Leverage 3rd tool https://github.com/blrchen/AzureSpeed for Azure cloud ip detection
result = requests.get(
f"https://api.azurespeed.com/api/region?ipOrUrl={self.host}", timeout=config.network_timeout,
f"https://api.azurespeed.com/api/region?ipOrUrl={self.host}",
timeout=config.network_timeout,
).json()
return result["cloud"] or "NoCloud"
except requests.ConnectionError:
@@ -190,16 +245,23 @@ class ReportDispatched(Event):
class K8sVersionDisclosure(Vulnerability, Event):
"""The kubernetes version could be obtained from the {} endpoint """
"""The kubernetes version could be obtained from the {} endpoint"""
def __init__(self, version, from_endpoint, extra_info=""):
def __init__(self, version, from_endpoint, extra_info="", category=None):
Vulnerability.__init__(
self, KubernetesCluster, "K8s Version Disclosure", category=InformationDisclosure, vid="KHV002",
self,
KubernetesCluster,
"K8s Version Disclosure",
category=ExposedSensitiveInterfacesTechnique,
vid="KHV002",
)
self.version = version
self.from_endpoint = from_endpoint
self.extra_info = extra_info
self.evidence = version
# depending on where the version came from, we might want to override the category
if category:
self.category = category
def explain(self):
return self.__doc__.format(self.from_endpoint) + self.extra_info
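The new optional `category` parameter lets publishers override the default, e.g. when the version leaks from the kubelet rather than the API server (a sketch; the endpoint values are illustrative):

```python
# default category: ExposedSensitiveInterfacesTechnique
vuln = K8sVersionDisclosure(version="v1.22.1", from_endpoint="/version")

# override the category when the disclosure source warrants it
kubelet_vuln = K8sVersionDisclosure(
    version="v1.22.1",
    from_endpoint="/metrics",
    category=AccessKubeletAPITechnique,
)
```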

View File

@@ -1,88 +0,0 @@
class HunterBase:
publishedVulnerabilities = 0
@staticmethod
def parse_docs(docs):
"""returns tuple of (name, docs)"""
if not docs:
return __name__, "<no documentation>"
docs = docs.strip().split("\n")
for i, line in enumerate(docs):
docs[i] = line.strip()
return docs[0], " ".join(docs[1:]) if len(docs[1:]) else "<no documentation>"
@classmethod
def get_name(cls):
name, _ = cls.parse_docs(cls.__doc__)
return name
def publish_event(self, event):
handler.publish_event(event, caller=self)
class ActiveHunter(HunterBase):
pass
class Hunter(HunterBase):
pass
class Discovery(HunterBase):
pass
class KubernetesCluster:
"""Kubernetes Cluster"""
name = "Kubernetes Cluster"
class KubectlClient:
"""The kubectl client binary is used by the user to interact with the cluster"""
name = "Kubectl Client"
class Kubelet(KubernetesCluster):
"""The kubelet is the primary "node agent" that runs on each node"""
name = "Kubelet"
class Azure(KubernetesCluster):
"""Azure Cluster"""
name = "Azure"
class InformationDisclosure:
name = "Information Disclosure"
class RemoteCodeExec:
name = "Remote Code Execution"
class IdentityTheft:
name = "Identity Theft"
class UnauthenticatedAccess:
name = "Unauthenticated Access"
class AccessRisk:
name = "Access Risk"
class PrivilegeEscalation(KubernetesCluster):
name = "Privilege Escalation"
class DenialOfService:
name = "Denial of Service"
# import is in the bottom to break import loops
from .events import handler # noqa

View File

@@ -0,0 +1,4 @@
# flake8: noqa: E402
from .hunters import *
from .components import *
from .vulnerabilities import *

View File

@@ -0,0 +1,28 @@
class KubernetesCluster:
"""Kubernetes Cluster"""
name = "Kubernetes Cluster"
class KubectlClient:
"""The kubectl client binary is used by the user to interact with the cluster"""
name = "Kubectl Client"
class Kubelet(KubernetesCluster):
"""The kubelet is the primary "node agent" that runs on each node"""
name = "Kubelet"
class AWS(KubernetesCluster):
"""AWS Cluster"""
name = "AWS"
class Azure(KubernetesCluster):
"""Azure Cluster"""
name = "Azure"

View File

@@ -0,0 +1,36 @@
class HunterBase:
publishedVulnerabilities = 0
@staticmethod
def parse_docs(docs):
"""returns tuple of (name, docs)"""
if not docs:
return __name__, "<no documentation>"
docs = docs.strip().split("\n")
for i, line in enumerate(docs):
docs[i] = line.strip()
return docs[0], " ".join(docs[1:]) if len(docs[1:]) else "<no documentation>"
@classmethod
def get_name(cls):
name, _ = cls.parse_docs(cls.__doc__)
return name
def publish_event(self, event):
# Import here to avoid circular import from events package.
# imports are cached in python so this should not affect runtime
from ..events.event_handler import handler # noqa
handler.publish_event(event, caller=self)
class ActiveHunter(HunterBase):
pass
class Hunter(HunterBase):
pass
class Discovery(HunterBase):
pass

View File

@@ -0,0 +1,188 @@
"""
Vulnerabilities are divided into 2 main categories.
MITRE Category
--------------
Vulnerability that correlates to a technique in the official MITRE ATT&CK matrix for Kubernetes
CVE Category
-------------
"General" category definition. The category is usually determined by the severity of the CVE
"""
class MITRECategory:
@classmethod
def get_name(cls):
"""
Returns the full name of MITRE technique: <MITRE CATEGORY> // <MITRE TECHNIQUE>
Should only be used on a direct technique class at the end of the MITRE inheritance chain.
Example inheritance:
MITRECategory -> InitialAccessCategory -> ExposedSensitiveInterfacesTechnique
"""
inheritance_chain = cls.__mro__
if len(inheritance_chain) >= 4:
# -3 == index of the MITRE category class in the MRO (object class is last)
mitre_category_class = inheritance_chain[-3]
return f"{mitre_category_class.name} // {cls.name}"
class CVECategory:
@classmethod
def get_name(cls):
"""
Returns the full name of the category: CVE // <CVE Category name>
"""
return f"CVE // {cls.name}"
"""
MITRE ATT&CK Technique Categories
"""
class InitialAccessCategory(MITRECategory):
name = "Initial Access"
class ExecutionCategory(MITRECategory):
name = "Execution"
class PersistenceCategory(MITRECategory):
name = "Persistence"
class PrivilegeEscalationCategory(MITRECategory):
name = "Privilege Escalation"
class DefenseEvasionCategory(MITRECategory):
name = "Defense Evasion"
class CredentialAccessCategory(MITRECategory):
name = "Credential Access"
class DiscoveryCategory(MITRECategory):
name = "Discovery"
class LateralMovementCategory(MITRECategory):
name = "Lateral Movement"
class CollectionCategory(MITRECategory):
name = "Collection"
class ImpactCategory(MITRECategory):
name = "Impact"
"""
MITRE ATT&CK Techniques
"""
class GeneralSensitiveInformationTechnique(InitialAccessCategory):
name = "General Sensitive Information"
class ExposedSensitiveInterfacesTechnique(InitialAccessCategory):
name = "Exposed sensitive interfaces"
class MountServicePrincipalTechnique(CredentialAccessCategory):
name = "Mount service principal"
class ListK8sSecretsTechnique(CredentialAccessCategory):
name = "List K8S secrets"
class AccessContainerServiceAccountTechnique(CredentialAccessCategory):
name = "Access container service account"
class AccessK8sApiServerTechnique(DiscoveryCategory):
name = "Access the K8S API Server"
class AccessKubeletAPITechnique(DiscoveryCategory):
name = "Access Kubelet API"
class AccessK8sDashboardTechnique(DiscoveryCategory):
name = "Access Kubernetes Dashboard"
class InstanceMetadataApiTechnique(DiscoveryCategory):
name = "Instance Metadata API"
class ExecIntoContainerTechnique(ExecutionCategory):
name = "Exec into container"
class SidecarInjectionTechnique(ExecutionCategory):
name = "Sidecar injection"
class NewContainerTechnique(ExecutionCategory):
name = "New container"
class GeneralPersistenceTechnique(PersistenceCategory):
name = "General Peristence"
class HostPathMountPrivilegeEscalationTechnique(PrivilegeEscalationCategory):
name = "hostPath mount"
class PrivilegedContainerTechnique(PrivilegeEscalationCategory):
name = "Privileged container"
class ClusterAdminBindingTechnique(PrivilegeEscalationCategory):
name = "Cluser-admin binding"
class ARPPoisoningTechnique(LateralMovementCategory):
name = "ARP poisoning and IP spoofing"
class CoreDNSPoisoningTechnique(LateralMovementCategory):
name = "CoreDNS poisoning"
class DataDestructionTechnique(ImpactCategory):
name = "Data Destruction"
class GeneralDefenseEvasionTechnique(DefenseEvasionCategory):
name = "General Defense Evasion"
class ConnectFromProxyServerTechnique(DefenseEvasionCategory):
name = "Connect from Proxy server"
"""
CVE Categories
"""
class CVERemoteCodeExecutionCategory(CVECategory):
name = "Remote Code Execution (CVE)"
class CVEPrivilegeEscalationCategory(CVECategory):
name = "Privilege Escalation (CVE)"
class CVEDenialOfServiceTechnique(CVECategory):
name = "Denial Of Service (CVE)"

View File

@@ -2,7 +2,7 @@ import logging
import requests
from kube_hunter.core.types import Discovery
from kube_hunter.core.events import handler
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import OpenPortEvent, Service, Event, EventFilterBase
from kube_hunter.conf import get_config

View File

@@ -3,7 +3,7 @@ import logging
import requests
from kube_hunter.conf import get_config
from kube_hunter.core.events import handler
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import Event, OpenPortEvent, Service
from kube_hunter.core.types import Discovery

View File

@@ -1,4 +1,4 @@
from kube_hunter.core.events import handler
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import Event, OpenPortEvent, Service
from kube_hunter.core.types import Discovery

View File

@@ -1,17 +1,19 @@
import json
import os
import sys
import socket
import logging
import itertools
import requests
from enum import Enum
from netaddr import IPNetwork, IPAddress, AddrFormatError
from netifaces import AF_INET, ifaddresses, interfaces
from scapy.all import ICMP, IP, Ether, srp1
from kube_hunter.conf import get_config
from kube_hunter.core.events import handler
from kube_hunter.modules.discovery.kubernetes_client import list_all_k8s_cluster_nodes
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import Event, NewHostEvent, Vulnerability
from kube_hunter.core.types import Discovery, InformationDisclosure, Azure
from kube_hunter.core.types import Discovery, AWS, Azure, InstanceMetadataApiTechnique
logger = logging.getLogger(__name__)
@@ -19,11 +21,17 @@ logger = logging.getLogger(__name__)
class RunningAsPodEvent(Event):
def __init__(self):
self.name = "Running from within a pod"
self.auth_token = self.get_service_account_file("token")
self.client_cert = self.get_service_account_file("ca.crt")
self.namespace = self.get_service_account_file("namespace")
self.kubeservicehost = os.environ.get("KUBERNETES_SERVICE_HOST", None)
# if service account token was manually specified, we don't load the token file
config = get_config()
if config.service_account_token:
self.auth_token = config.service_account_token
else:
self.auth_token = self.get_service_account_file("token")
# Event's logical location to be used mainly for reports.
def location(self):
location = "Local to Pod"
@@ -37,19 +45,38 @@ class RunningAsPodEvent(Event):
try:
with open(f"/var/run/secrets/kubernetes.io/serviceaccount/{file}") as f:
return f.read()
except IOError:
except OSError:
pass
class AWSMetadataApi(Vulnerability, Event):
"""Access to the AWS Metadata API exposes information about the machines associated with the cluster"""
def __init__(self, cidr):
Vulnerability.__init__(
self,
AWS,
"AWS Metadata Exposure",
category=InstanceMetadataApiTechnique,
vid="KHV053",
)
self.cidr = cidr
self.evidence = f"cidr: {cidr}"
class AzureMetadataApi(Vulnerability, Event):
"""Access to the Azure Metadata API exposes information about the machines associated with the cluster"""
def __init__(self, cidr):
Vulnerability.__init__(
self, Azure, "Azure Metadata Exposure", category=InformationDisclosure, vid="KHV003",
self,
Azure,
"Azure Metadata Exposure",
category=InstanceMetadataApiTechnique,
vid="KHV003",
)
self.cidr = cidr
self.evidence = "cidr: {}".format(cidr)
self.evidence = f"cidr: {cidr}"
class HostScanEvent(Event):
@@ -96,16 +123,25 @@ class FromPodHostDiscovery(Discovery):
def execute(self):
config = get_config()
# Attempt to read all hosts from the Kubernetes API
for host in list_all_k8s_cluster_nodes(config.kubeconfig):
self.publish_event(NewHostEvent(host=host))
# Scan any hosts that the user specified
if config.remote or config.cidr:
self.publish_event(HostScanEvent())
else:
# Discover cluster subnets, we'll scan all these hosts
cloud = None
cloud, subnets = None, list()
if self.is_azure_pod():
subnets, cloud = self.azure_metadata_discovery()
else:
subnets = self.traceroute_discovery()
elif self.is_aws_pod_v1():
subnets, cloud = self.aws_metadata_v1_discovery()
elif self.is_aws_pod_v2():
subnets, cloud = self.aws_metadata_v2_discovery()
gateway_subnet = self.gateway_discovery()
if gateway_subnet:
subnets.append(gateway_subnet)
should_scan_apiserver = False
if self.event.kubeservicehost:
@@ -119,6 +155,50 @@ class FromPodHostDiscovery(Discovery):
if should_scan_apiserver:
self.publish_event(NewHostEvent(host=IPAddress(self.event.kubeservicehost), cloud=cloud))
def is_aws_pod_v1(self):
config = get_config()
try:
# Instance Metadata Service v1
logger.debug("From pod attempting to access AWS Metadata v1 API")
if (
requests.get(
"http://169.254.169.254/latest/meta-data/",
timeout=config.network_timeout,
).status_code
== 200
):
return True
except requests.exceptions.ConnectionError:
logger.debug("Failed to connect AWS metadata server v1")
except Exception:
logger.debug("Unknown error when trying to connect to AWS metadata v1 API")
return False
def is_aws_pod_v2(self):
config = get_config()
try:
# Instance Metadata Service v2
logger.debug("From pod attempting to access AWS Metadata v2 API")
token = requests.put(
"http://169.254.169.254/latest/api/token/",
headers={"X-aws-ec2-metatadata-token-ttl-seconds": "21600"},
timeout=config.network_timeout,
).text
if (
requests.get(
"http://169.254.169.254/latest/meta-data/",
headers={"X-aws-ec2-metatadata-token": token},
timeout=config.network_timeout,
).status_code
== 200
):
return True
except requests.exceptions.ConnectionError:
logger.debug("Failed to connect AWS metadata server v2")
except Exception:
logger.debug("Unknown error when trying to connect to AWS metadata v2 API")
return False
def is_azure_pod(self):
config = get_config()
try:
@@ -134,15 +214,97 @@ class FromPodHostDiscovery(Discovery):
return True
except requests.exceptions.ConnectionError:
logger.debug("Failed to connect Azure metadata server")
return False
except Exception:
logger.debug("Unknown error when trying to connect to Azure metadata server")
return False
# for pod scanning
def traceroute_discovery(self):
def gateway_discovery(self):
"""Retrieving default gateway of pod, which is usually also a contact point with the host"""
# read the default gateway via pyroute2
# netifaces currently does not have a maintainer, so we use pyroute2 with Linux-only support for now.
# TODO: implement WMI queries for windows support
# https://stackoverflow.com/a/6556951
if sys.platform in ["linux", "linux2"]:
ipdb = None
try:
from pyroute2 import IPDB
ipdb = IPDB()
gateway_ip = ipdb.routes["default"]["gateway"]
return [gateway_ip, "24"]
except Exception as x:
logging.debug(f"Exception while fetching default gateway from container - {x}")
finally:
# guard the release: the handle does not exist if the import or constructor failed
if ipdb:
ipdb.release()
else:
logging.debug("Not running in a linux env, will not scan default subnet")
return False
# querying AWS's interface metadata api v1 | works only from a pod
def aws_metadata_v1_discovery(self):
config = get_config()
node_internal_ip = srp1(
Ether() / IP(dst="1.1.1.1", ttl=1) / ICMP(), verbose=0, timeout=config.network_timeout,
)[IP].src
return [[node_internal_ip, "24"]]
logger.debug("From pod attempting to access aws's metadata v1")
mac_address = requests.get(
"http://169.254.169.254/latest/meta-data/mac",
timeout=config.network_timeout,
).text
logger.debug(f"Extracted mac from aws's metadata v1: {mac_address}")
cidr = requests.get(
f"http://169.254.169.254/latest/meta-data/network/interfaces/macs/{mac_address}/subnet-ipv4-cidr-block",
timeout=config.network_timeout,
).text
logger.debug(f"Trying to extract cidr from aws's metadata v1: {cidr}")
try:
cidr = cidr.split("/")
address, subnet = (cidr[0], cidr[1])
subnet = subnet if not config.quick else "24"
cidr = f"{address}/{subnet}"
logger.debug(f"From pod discovered subnet {cidr}")
self.publish_event(AWSMetadataApi(cidr=cidr))
return [(address, subnet)], "AWS"
except Exception as x:
logger.debug(f"ERROR: could not parse cidr from aws metadata api: {cidr} - {x}")
return [], "AWS"
# querying AWS's interface metadata api v2 | works only from a pod
def aws_metadata_v2_discovery(self):
config = get_config()
logger.debug("From pod attempting to access aws's metadata v2")
token = requests.put(
"http://169.254.169.254/latest/api/token",
headers={"X-aws-ec2-metadata-token-ttl-seconds": "21600"},
timeout=config.network_timeout,
).text
mac_address = requests.get(
"http://169.254.169.254/latest/meta-data/mac",
headers={"X-aws-ec2-metatadata-token": token},
timeout=config.network_timeout,
).text
cidr = requests.get(
f"http://169.254.169.254/latest/meta-data/network/interfaces/macs/{mac_address}/subnet-ipv4-cidr-block",
headers={"X-aws-ec2-metatadata-token": token},
timeout=config.network_timeout,
).text.split("/")
try:
address, subnet = (cidr[0], cidr[1])
subnet = subnet if not config.quick else "24"
cidr = f"{address}/{subnet}"
logger.debug(f"From pod discovered subnet {cidr}")
self.publish_event(AWSMetadataApi(cidr=cidr))
return [(address, subnet)], "AWS"
except Exception as x:
logger.debug(f"ERROR: could not parse cidr from aws metadata api: {cidr} - {x}")
return [], "AWS"
# querying azure's interface metadata api | works only from a pod
def azure_metadata_discovery(self):
@@ -188,6 +350,9 @@ class HostDiscovery(Discovery):
elif len(config.remote) > 0:
for host in config.remote:
self.publish_event(NewHostEvent(host=host))
elif config.k8s_auto_discover_nodes:
for host in list_all_k8s_cluster_nodes(config.kubeconfig):
self.publish_event(NewHostEvent(host=host))
# for normal scanning
def scan_interfaces(self):
@@ -196,13 +361,62 @@ class HostDiscovery(Discovery):
# generate all subnets from all internal network interfaces
def generate_interfaces_subnet(self, sn="24"):
for ifaceName in interfaces():
for ip in [i["addr"] for i in ifaddresses(ifaceName).setdefault(AF_INET, [])]:
if not self.event.localhost and InterfaceTypes.LOCALHOST.value in ip.__str__():
if sys.platform == "win32":
return self.generate_interfaces_subnet_windows()
elif sys.platform in ["linux", "linux2"]:
return self.generate_interfaces_subnet_linux()
def generate_interfaces_subnet_linux(self, sn="24"):
ipr = None
try:
from pyroute2 import IPRoute
ipr = IPRoute()
for i in ipr.get_addr():
# whitelist only ipv4 ips
if i["family"] == socket.AF_INET:
ipaddress = i[0].get_attr("IFA_ADDRESS")
# TODO: use i['prefixlen'] instead of the hardcoded /24 subnet (add a flag for a full scan option)
# unless specified explicitly with the localhost scan flag, skip localhost ip addresses
if not self.event.localhost and ipaddress.startswith(InterfaceTypes.LOCALHOST.value):
continue
ip_network = IPNetwork(f"{ipaddress}/{sn}")
for ip in ip_network:
yield ip
except Exception as x:
logging.debug(f"Exception while generating subnet scan from local interfaces: {x}")
finally:
# the loop variable no longer shadows the IPRoute handle, so the release is safe
if ipr:
ipr.release()
def generate_interfaces_subnet_windows(self, sn="24"):
from subprocess import check_output
local_subnets = (
check_output(
"powershell -NoLogo -NoProfile -NonInteractive -ExecutionPolicy bypass -Command "
' "& {'
"Get-NetIPConfiguration | Get-NetIPAddress | Where-Object {$_.AddressFamily -eq 'IPv4'}"
" | Select-Object -Property IPAddress, PrefixLength | ConvertTo-Json "
' "}',
shell=True,
)
.decode()
.strip()
)
try:
subnets = json.loads(local_subnets)
for subnet in subnets:
if not self.event.localhost and subnet["IPAddress"].startswith(InterfaceTypes.LOCALHOST.value):
continue
for ip in IPNetwork(f"{ip}/{sn}"):
ip_network = IPNetwork(f"{subnet['IPAddress']}/{sn}")
for ip in ip_network:
yield ip
except Exception as x:
logging.debug(f"ERROR: Could not extract interface information using powershell - {x}")
# for comparing prefixes
class InterfaceTypes(Enum):

View File

@@ -2,7 +2,7 @@ import logging
import subprocess
from kube_hunter.core.types import Discovery
from kube_hunter.core.events import handler
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import HuntStarted, Event
logger = logging.getLogger(__name__)

View File

@@ -5,7 +5,7 @@ from enum import Enum
from kube_hunter.conf import get_config
from kube_hunter.core.types import Discovery
from kube_hunter.core.events import handler
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import OpenPortEvent, Event, Service
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

View File

@@ -0,0 +1,27 @@
import logging
import kubernetes
def list_all_k8s_cluster_nodes(kube_config=None, client=None):
logger = logging.getLogger(__name__)
try:
if kube_config:
logger.debug("Attempting to use kubeconfig file: %s", kube_config)
kubernetes.config.load_kube_config(config_file=kube_config)
else:
logger.debug("Attempting to use in cluster Kubernetes config")
kubernetes.config.load_incluster_config()
except kubernetes.config.config_exception.ConfigException as ex:
logger.debug(f"Failed to initiate Kubernetes client: {ex}")
return
try:
if client is None:
client = kubernetes.client.CoreV1Api()
ret = client.list_node(watch=False)
logger.info("Listed %d nodes in the cluster" % len(ret.items))
for item in ret.items:
for addr in item.status.addresses:
yield addr.address
except Exception as ex:
logger.debug(f"Failed to list nodes from Kubernetes: {ex}")

View File

@@ -2,7 +2,7 @@ import logging
from socket import socket
from kube_hunter.core.types import Discovery
from kube_hunter.core.events import handler
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import NewHostEvent, OpenPortEvent
logger = logging.getLogger(__name__)

View File

@@ -3,7 +3,7 @@ import requests
from kube_hunter.conf import get_config
from kube_hunter.core.types import Discovery
from kube_hunter.core.events import handler
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import Service, Event, OpenPortEvent
logger = logging.getLogger(__name__)

View File

@@ -2,12 +2,10 @@
from . import (
aks,
apiserver,
arp,
capabilities,
certificates,
cves,
dashboard,
dns,
etcd,
kubelet,
mounts,

View File

@@ -1,12 +1,13 @@
import os
import json
import logging
import requests
from kube_hunter.conf import get_config
from kube_hunter.modules.hunting.kubelet import ExposedRunHandler
from kube_hunter.core.events import handler
from kube_hunter.modules.hunting.kubelet import ExposedPodsHandler, SecureKubeletPortHunter
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import Event, Vulnerability
from kube_hunter.core.types import Hunter, ActiveHunter, IdentityTheft, Azure
from kube_hunter.core.types import Hunter, ActiveHunter, MountServicePrincipalTechnique, Azure
logger = logging.getLogger(__name__)
@@ -14,14 +15,19 @@ logger = logging.getLogger(__name__)
class AzureSpnExposure(Vulnerability, Event):
"""The SPN is exposed, potentially allowing an attacker to gain access to the Azure subscription"""
def __init__(self, container):
def __init__(self, container, evidence=""):
Vulnerability.__init__(
self, Azure, "Azure SPN Exposure", category=IdentityTheft, vid="KHV004",
self,
Azure,
"Azure SPN Exposure",
category=MountServicePrincipalTechnique,
vid="KHV004",
)
self.container = container
self.evidence = evidence
@handler.subscribe(ExposedRunHandler, predicate=lambda x: x.cloud == "Azure")
@handler.subscribe(ExposedPodsHandler, predicate=lambda x: x.cloud_type == "Azure")
class AzureSpnHunter(Hunter):
"""AKS Hunting
Hunting Azure cluster deployments using specific known configurations
@@ -33,30 +39,33 @@ class AzureSpnHunter(Hunter):
# getting a container that has access to the azure.json file
def get_key_container(self):
config = get_config()
endpoint = f"{self.base_url}/pods"
logger.debug("Trying to find container with access to azure.json file")
try:
r = requests.get(endpoint, verify=False, timeout=config.network_timeout)
except requests.Timeout:
logger.debug("failed getting pod info")
else:
pods_data = r.json().get("items", [])
for pod_data in pods_data:
for container in pod_data["spec"]["containers"]:
for mount in container["volumeMounts"]:
path = mount["mountPath"]
if "/etc/kubernetes/azure.json".startswith(path):
return {
"name": container["name"],
"pod": pod_data["metadata"]["name"],
"namespace": pod_data["metadata"]["namespace"],
}
# pods are saved in the previous event object
pods_data = self.event.pods
suspicious_volume_names = []
for pod_data in pods_data:
for volume in pod_data["spec"].get("volumes", []):
if volume.get("hostPath"):
path = volume["hostPath"]["path"]
if "/etc/kubernetes/azure.json".startswith(path):
suspicious_volume_names.append(volume["name"])
for container in pod_data["spec"]["containers"]:
for mount in container.get("volumeMounts", []):
if mount["name"] in suspicious_volume_names:
return {
"name": container["name"],
"pod": pod_data["metadata"]["name"],
"namespace": pod_data["metadata"]["namespace"],
"mount": mount,
}
def execute(self):
container = self.get_key_container()
if container:
self.publish_event(AzureSpnExposure(container=container))
evidence = f"pod: {container['pod']}, namespace: {container['namespace']}"
self.publish_event(AzureSpnExposure(container=container, evidence=evidence))
@handler.subscribe(AzureSpnExposure)
@@ -69,14 +78,42 @@ class ProveAzureSpnExposure(ActiveHunter):
self.event = event
self.base_url = f"https://{self.event.host}:{self.event.port}"
def test_run_capability(self):
"""
Uses SecureKubeletPortHunter to test the /run handler
TODO: when multiple event subscription is implemented, use this here to make sure /run is accessible
"""
debug_handlers = SecureKubeletPortHunter.DebugHandlers(path=self.base_url, session=self.event.session, pod=None)
return debug_handlers.test_run_container()
def run(self, command, container):
config = get_config()
run_url = "/".join(self.base_url, "run", container["namespace"], container["pod"], container["name"])
return requests.post(run_url, verify=False, params={"cmd": command}, timeout=config.network_timeout)
run_url = f"{self.base_url}/run/{container['namespace']}/{container['pod']}/{container['name']}"
return self.event.session.post(run_url, verify=False, params={"cmd": command}, timeout=config.network_timeout)
def get_full_path_to_azure_file(self):
"""
Returns the full path to /etc/kubernetes/azure.json,
taking into consideration that the mount may map it to a different folder inside the container.
TODO: implement the edge case where the mount is of the parent /etc folder.
"""
azure_file_path = self.event.container["mount"]["mountPath"]
# taking care of cases where a subPath is added to map the specific file
if not azure_file_path.endswith("azure.json"):
azure_file_path = os.path.join(azure_file_path, "azure.json")
return azure_file_path
def execute(self):
if not self.test_run_capability():
logger.debug("Not proving AzureSpnExposure because /run debug handler is disabled")
return
try:
subscription = self.run("cat /etc/kubernetes/azure.json", container=self.event.container).json()
azure_file_path = self.get_full_path_to_azure_file()
logger.debug(f"trying to access the azure.json at the resolved path: {azure_file_path}")
subscription = self.run(f"cat {azure_file_path}", container=self.event.container).json()
except requests.Timeout:
logger.debug("failed to run command in container", exc_info=True)
except json.decoder.JSONDecodeError:

View File

@@ -5,13 +5,18 @@ import requests
from kube_hunter.conf import get_config
from kube_hunter.modules.discovery.apiserver import ApiServer
from kube_hunter.core.events import handler
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import Vulnerability, Event, K8sVersionDisclosure
from kube_hunter.core.types import Hunter, ActiveHunter, KubernetesCluster
from kube_hunter.core.types import (
AccessRisk,
InformationDisclosure,
UnauthenticatedAccess,
from kube_hunter.core.types.vulnerabilities import (
AccessK8sApiServerTechnique,
ExposedSensitiveInterfacesTechnique,
GeneralDefenseEvasionTechnique,
DataDestructionTechnique,
ClusterAdminBindingTechnique,
NewContainerTechnique,
PrivilegedContainerTechnique,
SidecarInjectionTechnique,
)
logger = logging.getLogger(__name__)
@@ -24,12 +29,16 @@ class ServerApiAccess(Vulnerability, Event):
def __init__(self, evidence, using_token):
if using_token:
name = "Access to API using service account token"
category = InformationDisclosure
category = AccessK8sApiServerTechnique
else:
name = "Unauthenticated access to API"
category = UnauthenticatedAccess
category = ExposedSensitiveInterfacesTechnique
Vulnerability.__init__(
self, KubernetesCluster, name=name, category=category, vid="KHV005",
self,
KubernetesCluster,
name=name,
category=category,
vid="KHV005",
)
self.evidence = evidence
@@ -40,48 +49,59 @@ class ServerApiHTTPAccess(Vulnerability, Event):
def __init__(self, evidence):
name = "Insecure (HTTP) access to API"
category = UnauthenticatedAccess
category = ExposedSensitiveInterfacesTechnique
Vulnerability.__init__(
self, KubernetesCluster, name=name, category=category, vid="KHV006",
self,
KubernetesCluster,
name=name,
category=category,
vid="KHV006",
)
self.evidence = evidence
class ApiInfoDisclosure(Vulnerability, Event):
"""Information Disclosure depending upon RBAC permissions and Kube-Cluster Setup"""
def __init__(self, evidence, using_token, name):
category = AccessK8sApiServerTechnique
if using_token:
name += " using service account token"
name += " using default service account token"
else:
name += " as anonymous user"
Vulnerability.__init__(
self, KubernetesCluster, name=name, category=InformationDisclosure, vid="KHV007",
self,
KubernetesCluster,
name=name,
category=category,
vid="KHV007",
)
self.evidence = evidence
class ListPodsAndNamespaces(ApiInfoDisclosure):
""" Accessing pods might give an attacker valuable information"""
"""Accessing pods might give an attacker valuable information"""
def __init__(self, evidence, using_token):
ApiInfoDisclosure.__init__(self, evidence, using_token, "Listing pods")
class ListNamespaces(ApiInfoDisclosure):
""" Accessing namespaces might give an attacker valuable information """
"""Accessing namespaces might give an attacker valuable information"""
def __init__(self, evidence, using_token):
ApiInfoDisclosure.__init__(self, evidence, using_token, "Listing namespaces")
class ListRoles(ApiInfoDisclosure):
""" Accessing roles might give an attacker valuable information """
"""Accessing roles might give an attacker valuable information"""
def __init__(self, evidence, using_token):
ApiInfoDisclosure.__init__(self, evidence, using_token, "Listing roles")
class ListClusterRoles(ApiInfoDisclosure):
""" Accessing cluster roles might give an attacker valuable information """
"""Accessing cluster roles might give an attacker valuable information"""
def __init__(self, evidence, using_token):
ApiInfoDisclosure.__init__(self, evidence, using_token, "Listing cluster roles")
@@ -89,129 +109,161 @@ class ListClusterRoles(ApiInfoDisclosure):
class CreateANamespace(Vulnerability, Event):
""" Creating a namespace might give an attacker an area with default (exploitable) permissions to run pods in.
"""
"""Creating a namespace might give an attacker an area with default (exploitable) permissions to run pods in."""
def __init__(self, evidence):
Vulnerability.__init__(
self, KubernetesCluster, name="Created a namespace", category=AccessRisk,
self,
KubernetesCluster,
name="Created a namespace",
category=GeneralDefenseEvasionTechnique,
)
self.evidence = evidence
class DeleteANamespace(Vulnerability, Event):
""" Deleting a namespace might give an attacker the option to affect application behavior """
"""Deleting a namespace might give an attacker the option to affect application behavior"""
def __init__(self, evidence):
Vulnerability.__init__(
self, KubernetesCluster, name="Delete a namespace", category=AccessRisk,
self,
KubernetesCluster,
name="Delete a namespace",
category=DataDestructionTechnique,
)
self.evidence = evidence
class CreateARole(Vulnerability, Event):
""" Creating a role might give an attacker the option to harm the normal behavior of newly created pods
within the specified namespaces.
"""Creating a role might give an attacker the option to harm the normal behavior of newly created pods
within the specified namespaces.
"""
def __init__(self, evidence):
Vulnerability.__init__(self, KubernetesCluster, name="Created a role", category=AccessRisk)
Vulnerability.__init__(self, KubernetesCluster, name="Created a role", category=GeneralDefenseEvasionTechnique)
self.evidence = evidence
class CreateAClusterRole(Vulnerability, Event):
""" Creating a cluster role might give an attacker the option to harm the normal behavior of newly created pods
across the whole cluster
"""Creating a cluster role might give an attacker the option to harm the normal behavior of newly created pods
across the whole cluster
"""
def __init__(self, evidence):
Vulnerability.__init__(
self, KubernetesCluster, name="Created a cluster role", category=AccessRisk,
self,
KubernetesCluster,
name="Created a cluster role",
category=ClusterAdminBindingTechnique,
)
self.evidence = evidence
class PatchARole(Vulnerability, Event):
""" Patching a role might give an attacker the option to create new pods with custom roles within the
"""Patching a role might give an attacker the option to create new pods with custom roles within the
specific role's namespace scope
"""
def __init__(self, evidence):
Vulnerability.__init__(
self, KubernetesCluster, name="Patched a role", category=AccessRisk,
self,
KubernetesCluster,
name="Patched a role",
category=ClusterAdminBindingTechnique,
)
self.evidence = evidence
class PatchAClusterRole(Vulnerability, Event):
""" Patching a cluster role might give an attacker the option to create new pods with custom roles within the whole
"""Patching a cluster role might give an attacker the option to create new pods with custom roles within the whole
cluster scope.
"""
def __init__(self, evidence):
Vulnerability.__init__(
self, KubernetesCluster, name="Patched a cluster role", category=AccessRisk,
self,
KubernetesCluster,
name="Patched a cluster role",
category=ClusterAdminBindingTechnique,
)
self.evidence = evidence
class DeleteARole(Vulnerability, Event):
""" Deleting a role might allow an attacker to affect access to resources in the namespace"""
"""Deleting a role might allow an attacker to affect access to resources in the namespace"""
def __init__(self, evidence):
Vulnerability.__init__(
self, KubernetesCluster, name="Deleted a role", category=AccessRisk,
self,
KubernetesCluster,
name="Deleted a role",
category=DataDestructionTechnique,
)
self.evidence = evidence
class DeleteAClusterRole(Vulnerability, Event):
""" Deleting a cluster role might allow an attacker to affect access to resources in the cluster"""
"""Deleting a cluster role might allow an attacker to affect access to resources in the cluster"""
def __init__(self, evidence):
Vulnerability.__init__(
self, KubernetesCluster, name="Deleted a cluster role", category=AccessRisk,
self,
KubernetesCluster,
name="Deleted a cluster role",
category=DataDestructionTechnique,
)
self.evidence = evidence
class CreateAPod(Vulnerability, Event):
""" Creating a new pod allows an attacker to run custom code"""
"""Creating a new pod allows an attacker to run custom code"""
def __init__(self, evidence):
Vulnerability.__init__(
self, KubernetesCluster, name="Created A Pod", category=AccessRisk,
self,
KubernetesCluster,
name="Created A Pod",
category=NewContainerTechnique,
)
self.evidence = evidence
class CreateAPrivilegedPod(Vulnerability, Event):
""" Creating a new PRIVILEGED pod would gain an attacker FULL CONTROL over the cluster"""
"""Creating a new PRIVILEGED pod would gain an attacker FULL CONTROL over the cluster"""
def __init__(self, evidence):
Vulnerability.__init__(
self, KubernetesCluster, name="Created A PRIVILEGED Pod", category=AccessRisk,
self,
KubernetesCluster,
name="Created A PRIVILEGED Pod",
category=PrivilegedContainerTechnique,
)
self.evidence = evidence
class PatchAPod(Vulnerability, Event):
""" Patching a pod allows an attacker to compromise and control it """
"""Patching a pod allows an attacker to compromise and control it"""
def __init__(self, evidence):
Vulnerability.__init__(
self, KubernetesCluster, name="Patched A Pod", category=AccessRisk,
self,
KubernetesCluster,
name="Patched A Pod",
category=SidecarInjectionTechnique,
)
self.evidence = evidence
class DeleteAPod(Vulnerability, Event):
""" Deleting a pod allows an attacker to disturb applications on the cluster """
"""Deleting a pod allows an attacker to disturb applications on the cluster"""
def __init__(self, evidence):
Vulnerability.__init__(
self, KubernetesCluster, name="Deleted A Pod", category=AccessRisk,
self,
KubernetesCluster,
name="Deleted A Pod",
category=DataDestructionTechnique,
)
self.evidence = evidence
@@ -225,7 +277,7 @@ class ApiServerPassiveHunterFinished(Event):
# If we have a service account token we'll also trigger AccessApiServerWithToken below
@handler.subscribe(ApiServer)
class AccessApiServer(Hunter):
""" API Server Hunter
"""API Server Hunter
Checks if API server is accessible
"""
@@ -268,7 +320,10 @@ class AccessApiServer(Hunter):
try:
if not namespace:
r = requests.get(
f"{self.path}/api/v1/pods", headers=self.headers, verify=False, timeout=config.network_timeout,
f"{self.path}/api/v1/pods",
headers=self.headers,
verify=False,
timeout=config.network_timeout,
)
else:
r = requests.get(
@@ -296,7 +351,7 @@ class AccessApiServer(Hunter):
else:
self.publish_event(ServerApiAccess(api, self.with_token))
namespaces = self.get_items("{path}/api/v1/namespaces".format(path=self.path))
namespaces = self.get_items(f"{self.path}/api/v1/namespaces")
if namespaces:
self.publish_event(ListNamespaces(namespaces, self.with_token))
@@ -319,15 +374,15 @@ class AccessApiServer(Hunter):
@handler.subscribe(ApiServer, predicate=lambda x: x.auth_token)
class AccessApiServerWithToken(AccessApiServer):
""" API Server Hunter
"""API Server Hunter
Accessing the API server using the service account token obtained from a compromised pod
"""
def __init__(self, event):
super(AccessApiServerWithToken, self).__init__(event)
super().__init__(event)
assert self.event.auth_token
self.headers = {"Authorization": f"Bearer {self.event.auth_token}"}
self.category = InformationDisclosure
self.category = AccessK8sApiServerTechnique
self.with_token = True
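# The predicate on the subscription above means this hunter is only triggered when
# the ApiServer event carries a service account token, e.g. when kube-hunter itself
# runs as a pod and can read the mounted token.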
@@ -411,7 +466,8 @@ class AccessApiServerActive(ActiveHunter):
def patch_a_pod(self, namespace, pod_name):
data = [{"op": "add", "path": "/hello", "value": ["world"]}]
return self.patch_item(
path=f"{self.path}/api/v1/namespaces/{namespace}/pods/{pod_name}", data=json.dumps(data),
path=f"{self.path}/api/v1/namespaces/{namespace}/pods/{pod_name}",
data=json.dumps(data),
)
def create_namespace(self):
@@ -438,7 +494,8 @@ class AccessApiServerActive(ActiveHunter):
"rules": [{"apiGroups": [""], "resources": ["pods"], "verbs": ["get", "watch", "list"]}],
}
return self.create_item(
path=f"{self.path}/apis/rbac.authorization.k8s.io/v1/namespaces/{namespace}/roles", data=json.dumps(role),
path=f"{self.path}/apis/rbac.authorization.k8s.io/v1/namespaces/{namespace}/roles",
data=json.dumps(role),
)
def create_a_cluster_role(self):
@@ -450,7 +507,8 @@ class AccessApiServerActive(ActiveHunter):
"rules": [{"apiGroups": [""], "resources": ["pods"], "verbs": ["get", "watch", "list"]}],
}
return self.create_item(
path=f"{self.path}/apis/rbac.authorization.k8s.io/v1/clusterroles", data=json.dumps(cluster_role),
path=f"{self.path}/apis/rbac.authorization.k8s.io/v1/clusterroles",
data=json.dumps(cluster_role),
)
def delete_a_role(self, namespace, name):
@@ -477,7 +535,8 @@ class AccessApiServerActive(ActiveHunter):
def patch_a_cluster_role(self, cluster_role):
data = [{"op": "add", "path": "/hello", "value": ["world"]}]
return self.patch_item(
path=f"{self.path}/apis/rbac.authorization.k8s.io/v1/clusterroles/{cluster_role}", data=json.dumps(data),
path=f"{self.path}/apis/rbac.authorization.k8s.io/v1/clusterroles/{cluster_role}",
data=json.dumps(data),
)
def execute(self):

View File

@@ -1,65 +0,0 @@
import logging
from scapy.all import ARP, IP, ICMP, Ether, sr1, srp
from kube_hunter.conf import get_config
from kube_hunter.core.events import handler
from kube_hunter.core.events.types import Event, Vulnerability
from kube_hunter.core.types import ActiveHunter, KubernetesCluster, IdentityTheft
from kube_hunter.modules.hunting.capabilities import CapNetRawEnabled
logger = logging.getLogger(__name__)
class PossibleArpSpoofing(Vulnerability, Event):
"""A malicious pod running on the cluster could potentially run an ARP Spoof attack
and perform a MITM between pods on the node."""
def __init__(self):
Vulnerability.__init__(
self, KubernetesCluster, "Possible Arp Spoof", category=IdentityTheft, vid="KHV020",
)
@handler.subscribe(CapNetRawEnabled)
class ArpSpoofHunter(ActiveHunter):
"""Arp Spoof Hunter
Checks for the possibility of running an ARP spoof
attack from within a pod (results are based on the running node)
"""
def __init__(self, event):
self.event = event
def try_getting_mac(self, ip):
config = get_config()
ans = sr1(ARP(op=1, pdst=ip), timeout=config.network_timeout, verbose=0)
return ans[ARP].hwsrc if ans else None
def detect_l3_on_host(self, arp_responses):
""" returns True for an existence of an L3 network plugin """
logger.debug("Attempting to detect L3 network plugin using ARP")
unique_macs = list(set(response[ARP].hwsrc for _, response in arp_responses))
# if LAN addresses not unique
if len(unique_macs) == 1:
# if an ip outside the subnets gets a mac address
outside_mac = self.try_getting_mac("1.1.1.1")
# outside mac is the same as lan macs
if outside_mac == unique_macs[0]:
return True
# only one mac address for whole LAN and outside
return False
def execute(self):
config = get_config()
self_ip = sr1(IP(dst="1.1.1.1", ttl=1) / ICMP(), verbose=0, timeout=config.network_timeout)[IP].dst
arp_responses, _ = srp(
Ether(dst="ff:ff:ff:ff:ff:ff") / ARP(op=1, pdst=f"{self_ip}/24"), timeout=config.network_timeout, verbose=0,
)
# arp enabled on cluster and more than one pod on node
if len(arp_responses) > 1:
# L3 plugin not installed
if not self.detect_l3_on_host(arp_responses):
self.publish_event(PossibleArpSpoofing())

View File

@@ -2,9 +2,9 @@ import socket
import logging
from kube_hunter.modules.discovery.hosts import RunningAsPodEvent
from kube_hunter.core.events import handler
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import Event, Vulnerability
from kube_hunter.core.types import Hunter, AccessRisk, KubernetesCluster
from kube_hunter.core.types import Hunter, ARPPoisoningTechnique, KubernetesCluster
logger = logging.getLogger(__name__)
@@ -17,7 +17,10 @@ class CapNetRawEnabled(Event, Vulnerability):
def __init__(self):
Vulnerability.__init__(
self, KubernetesCluster, name="CAP_NET_RAW Enabled", category=AccessRisk,
self,
KubernetesCluster,
name="CAP_NET_RAW Enabled",
category=ARPPoisoningTechnique,
)

View File

@@ -3,23 +3,29 @@ import logging
import base64
import re
from kube_hunter.core.types import Hunter, KubernetesCluster, InformationDisclosure
from kube_hunter.core.events import handler
from kube_hunter.core.types import Hunter, KubernetesCluster, GeneralSensitiveInformationTechnique
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import Vulnerability, Event, Service
logger = logging.getLogger(__name__)
email_pattern = re.compile(rb"([a-z0-9]+@[a-z0-9]+\.[a-z0-9]+)")
email_pattern = re.compile(rb"([a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+)")
class CertificateEmail(Vulnerability, Event):
"""Certificate includes an email address"""
"""The Kubernetes API Server advertises a public certificate for TLS.
This certificate includes an email address, that may provide additional information for an attacker on your
organization, or be abused for further email based attacks."""
def __init__(self, email):
Vulnerability.__init__(
self, KubernetesCluster, "Certificate Includes Email Address", category=InformationDisclosure, vid="KHV021",
self,
KubernetesCluster,
"Certificate Includes Email Address",
category=GeneralSensitiveInformationTechnique,
vid="KHV021",
)
self.email = email
self.evidence = "email: {}".format(self.email)
self.evidence = f"email: {self.email}"
@handler.subscribe(Service)
@@ -42,7 +48,7 @@ class CertificateDiscovery(Hunter):
self.examine_certificate(cert)
def examine_certificate(self, cert):
c = cert.strip(ssl.PEM_HEADER).strip(ssl.PEM_FOOTER)
c = cert.strip(ssl.PEM_HEADER).strip("\n").strip(ssl.PEM_FOOTER).strip("\n")
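# str.strip(chars) treats its argument as a set of characters rather than a
# prefix/suffix and stops at the first character outside that set, so the
# newlines around the base64 body previously kept the PEM armor from being
# fully stripped; leftover armor letters are valid base64 symbols and corrupted
# the decode. Interleaving .strip("\n") lets both armor lines be removed cleanly.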
certdata = base64.b64decode(c)
emails = re.findall(email_pattern, certdata)
for email in emails:

View File

@@ -2,19 +2,21 @@ import logging
from packaging import version
from kube_hunter.conf import get_config
from kube_hunter.core.events import handler
from kube_hunter.core.events.types import Vulnerability, Event, K8sVersionDisclosure
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import K8sVersionDisclosure, Vulnerability, Event
from kube_hunter.core.types import (
Hunter,
KubernetesCluster,
RemoteCodeExec,
PrivilegeEscalation,
DenialOfService,
KubectlClient,
KubernetesCluster,
CVERemoteCodeExecutionCategory,
CVEPrivilegeEscalationCategory,
CVEDenialOfServiceTechnique,
)
from kube_hunter.modules.discovery.kubectl import KubectlClientEvent
logger = logging.getLogger(__name__)
config = get_config()
class ServerApiVersionEndPointAccessPE(Vulnerability, Event):
@@ -25,7 +27,7 @@ class ServerApiVersionEndPointAccessPE(Vulnerability, Event):
self,
KubernetesCluster,
name="Critical Privilege Escalation CVE",
category=PrivilegeEscalation,
category=CVEPrivilegeEscalationCategory,
vid="KHV022",
)
self.evidence = evidence
@@ -33,14 +35,14 @@ class ServerApiVersionEndPointAccessPE(Vulnerability, Event):
class ServerApiVersionEndPointAccessDos(Vulnerability, Event):
"""Node not patched for CVE-2019-1002100. Depending on your RBAC settings,
a crafted json-patch could cause a Denial of Service."""
a crafted json-patch could cause a Denial of Service."""
def __init__(self, evidence):
Vulnerability.__init__(
self,
KubernetesCluster,
name="Denial of Service to Kubernetes API Server",
category=DenialOfService,
category=CVEDenialOfServiceTechnique,
vid="KHV023",
)
self.evidence = evidence
@@ -52,7 +54,11 @@ class PingFloodHttp2Implementation(Vulnerability, Event):
def __init__(self, evidence):
Vulnerability.__init__(
self, KubernetesCluster, name="Possible Ping Flood Attack", category=DenialOfService, vid="KHV024",
self,
KubernetesCluster,
name="Possible Ping Flood Attack",
category=CVEDenialOfServiceTechnique,
vid="KHV024",
)
self.evidence = evidence
@@ -63,7 +69,11 @@ class ResetFloodHttp2Implementation(Vulnerability, Event):
def __init__(self, evidence):
Vulnerability.__init__(
self, KubernetesCluster, name="Possible Reset Flood Attack", category=DenialOfService, vid="KHV025",
self,
KubernetesCluster,
name="Possible Reset Flood Attack",
category=CVEDenialOfServiceTechnique,
vid="KHV025",
)
self.evidence = evidence
@@ -77,7 +87,7 @@ class ServerApiClusterScopedResourcesAccess(Vulnerability, Event):
self,
KubernetesCluster,
name="Arbitrary Access To Cluster Scoped Resources",
category=PrivilegeEscalation,
category=CVEPrivilegeEscalationCategory,
vid="KHV026",
)
self.evidence = evidence
@@ -89,10 +99,14 @@ class IncompleteFixToKubectlCpVulnerability(Vulnerability, Event):
def __init__(self, binary_version):
Vulnerability.__init__(
self, KubectlClient, "Kubectl Vulnerable To CVE-2019-11246", category=RemoteCodeExec, vid="KHV027",
self,
KubectlClient,
"Kubectl Vulnerable To CVE-2019-11246",
category=CVERemoteCodeExecutionCategory,
vid="KHV027",
)
self.binary_version = binary_version
self.evidence = "kubectl version: {}".format(self.binary_version)
self.evidence = f"kubectl version: {self.binary_version}"
class KubectlCpVulnerability(Vulnerability, Event):
@@ -101,10 +115,14 @@ class KubectlCpVulnerability(Vulnerability, Event):
def __init__(self, binary_version):
Vulnerability.__init__(
self, KubectlClient, "Kubectl Vulnerable To CVE-2019-1002101", category=RemoteCodeExec, vid="KHV028",
self,
KubectlClient,
"Kubectl Vulnerable To CVE-2019-1002101",
category=CVERemoteCodeExecutionCategory,
vid="KHV028",
)
self.binary_version = binary_version
self.evidence = "kubectl version: {}".format(self.binary_version)
self.evidence = f"kubectl version: {self.binary_version}"
class CveUtils:
@@ -183,7 +201,7 @@ class CveUtils:
return vulnerable
@handler.subscribe_once(K8sVersionDisclosure)
@handler.subscribe_once(K8sVersionDisclosure, is_register=config.enable_cve_hunting)
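# With is_register=config.enable_cve_hunting, this hunter is only registered when
# CVE hunting is explicitly enabled; version-based CVE detection is unreliable for
# vendor-patched Kubernetes builds, so it is opt-in. A minimal sketch, assuming
# the config field shown above:
#
#   from kube_hunter.conf import Config, set_config
#   set_config(Config(enable_cve_hunting=True))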
class K8sClusterCveHunter(Hunter):
"""K8s CVE Hunter
Checks if Node is running a Kubernetes version vulnerable to
@@ -208,6 +226,7 @@ class K8sClusterCveHunter(Hunter):
self.publish_event(vulnerability(self.event.version))
# Removed due to incomplete implementation for multiple vendors revisions of kubernetes
@handler.subscribe(KubectlClientEvent)
class KubectlCVEHunter(Hunter):
"""Kubectl CVE Hunter

View File

@@ -3,8 +3,8 @@ import json
import requests
from kube_hunter.conf import get_config
from kube_hunter.core.types import Hunter, RemoteCodeExec, KubernetesCluster
from kube_hunter.core.events import handler
from kube_hunter.core.types import Hunter, AccessK8sDashboardTechnique, KubernetesCluster
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import Vulnerability, Event
from kube_hunter.modules.discovery.dashboard import KubeDashboardEvent
@@ -16,7 +16,11 @@ class DashboardExposed(Vulnerability, Event):
def __init__(self, nodes):
Vulnerability.__init__(
self, KubernetesCluster, "Dashboard Exposed", category=RemoteCodeExec, vid="KHV029",
self,
KubernetesCluster,
"Dashboard Exposed",
category=AccessK8sDashboardTechnique,
vid="KHV029",
)
self.evidence = "nodes: {}".format(" ".join(nodes)) if nodes else None

View File

@@ -1,84 +0,0 @@
import re
import logging
from scapy.all import IP, ICMP, UDP, DNS, DNSQR, ARP, Ether, sr1, srp1, srp
from kube_hunter.conf import get_config
from kube_hunter.core.events import handler
from kube_hunter.core.events.types import Event, Vulnerability
from kube_hunter.core.types import ActiveHunter, KubernetesCluster, IdentityTheft
from kube_hunter.modules.hunting.arp import PossibleArpSpoofing
logger = logging.getLogger(__name__)
class PossibleDnsSpoofing(Vulnerability, Event):
"""A malicious pod running on the cluster could potentially run a DNS Spoof attack
and perform a MITM attack on applications running in the cluster."""
def __init__(self, kubedns_pod_ip):
Vulnerability.__init__(
self, KubernetesCluster, "Possible DNS Spoof", category=IdentityTheft, vid="KHV030",
)
self.kubedns_pod_ip = kubedns_pod_ip
self.evidence = "kube-dns at: {}".format(self.kubedns_pod_ip)
# Only triggered with RunningAsPod base event
@handler.subscribe(PossibleArpSpoofing)
class DnsSpoofHunter(ActiveHunter):
"""DNS Spoof Hunter
Checks for the possibility for a malicious pod to compromise DNS requests of the cluster
(results are based on the running node)
"""
def __init__(self, event):
self.event = event
def get_cbr0_ip_mac(self):
config = get_config()
res = srp1(Ether() / IP(dst="1.1.1.1", ttl=1) / ICMP(), verbose=0, timeout=config.network_timeout)
return res[IP].src, res.src
def extract_nameserver_ip(self):
with open("/etc/resolv.conf") as f:
# finds first nameserver in /etc/resolv.conf
match = re.search(r"nameserver (\d+.\d+.\d+.\d+)", f.read())
if match:
return match.group(1)
def get_kube_dns_ip_mac(self):
config = get_config()
kubedns_svc_ip = self.extract_nameserver_ip()
# getting actual pod ip of kube-dns service, by comparing the src mac of a dns response and arp scanning.
dns_info_res = srp1(
Ether() / IP(dst=kubedns_svc_ip) / UDP(dport=53) / DNS(rd=1, qd=DNSQR()),
verbose=0,
timeout=config.network_timeout,
)
kubedns_pod_mac = dns_info_res.src
self_ip = dns_info_res[IP].dst
arp_responses, _ = srp(
Ether(dst="ff:ff:ff:ff:ff:ff") / ARP(op=1, pdst=f"{self_ip}/24"), timeout=config.network_timeout, verbose=0,
)
for _, response in arp_responses:
if response[Ether].src == kubedns_pod_mac:
return response[ARP].psrc, response.src
def execute(self):
config = get_config()
logger.debug("Attempting to get kube-dns pod ip")
self_ip = sr1(IP(dst="1.1.1.1", ttl=1) / ICMP(), verbose=0, timeout=config.netork_timeout)[IP].dst
cbr0_ip, cbr0_mac = self.get_cbr0_ip_mac()
kubedns = self.get_kube_dns_ip_mac()
if kubedns:
kubedns_ip, kubedns_mac = kubedns
logger.debug(f"ip={self_ip} kubednsip={kubedns_ip} cbr0ip={cbr0_ip}")
if kubedns_mac != cbr0_mac:
# if self pod in the same subnet as kube-dns pod
self.publish_event(PossibleDnsSpoofing(kubedns_pod_ip=kubedns_ip))
else:
logger.debug("Could not get kubedns identity")

View File

@@ -2,16 +2,16 @@ import logging
import requests
from kube_hunter.conf import get_config
from kube_hunter.core.events import handler
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import Vulnerability, Event, OpenPortEvent
from kube_hunter.core.types import (
ActiveHunter,
Hunter,
KubernetesCluster,
InformationDisclosure,
RemoteCodeExec,
UnauthenticatedAccess,
AccessRisk,
GeneralSensitiveInformationTechnique,
GeneralPersistenceTechnique,
ListK8sSecretsTechnique,
ExposedSensitiveInterfacesTechnique,
)
logger = logging.getLogger(__name__)
@@ -26,7 +26,11 @@ class EtcdRemoteWriteAccessEvent(Vulnerability, Event):
def __init__(self, write_res):
Vulnerability.__init__(
self, KubernetesCluster, name="Etcd Remote Write Access Event", category=RemoteCodeExec, vid="KHV031",
self,
KubernetesCluster,
name="Etcd Remote Write Access Event",
category=GeneralPersistenceTechnique,
vid="KHV031",
)
self.evidence = write_res
@@ -36,7 +40,11 @@ class EtcdRemoteReadAccessEvent(Vulnerability, Event):
def __init__(self, keys):
Vulnerability.__init__(
self, KubernetesCluster, name="Etcd Remote Read Access Event", category=AccessRisk, vid="KHV032",
self,
KubernetesCluster,
name="Etcd Remote Read Access Event",
category=ListK8sSecretsTechnique,
vid="KHV032",
)
self.evidence = keys
@@ -50,7 +58,7 @@ class EtcdRemoteVersionDisclosureEvent(Vulnerability, Event):
self,
KubernetesCluster,
name="Etcd Remote version disclosure",
category=InformationDisclosure,
category=GeneralSensitiveInformationTechnique,
vid="KHV033",
)
self.evidence = version
@@ -66,7 +74,7 @@ class EtcdAccessEnabledWithoutAuthEvent(Vulnerability, Event):
self,
KubernetesCluster,
name="Etcd is accessible using insecure connection (HTTP)",
category=UnauthenticatedAccess,
category=ExposedSensitiveInterfacesTechnique,
vid="KHV034",
)
self.evidence = version
@@ -135,7 +143,7 @@ class EtcdRemoteAccess(Hunter):
logger.debug(f"Trying to check etcd version remotely at {self.event.host}")
try:
r = requests.get(
f"{self.protocol}://{self.event.host}:{ETCD_PORT}/version",
f"{self.event.protocol}://{self.event.host}:{ETCD_PORT}/version",
verify=False,
timeout=config.network_timeout,
)
@@ -149,7 +157,9 @@ class EtcdRemoteAccess(Hunter):
logger.debug(f"Trying to access etcd insecurely at {self.event.host}")
try:
r = requests.get(
f"http://{self.event.host}:{ETCD_PORT}/version", verify=False, timeout=config.network_timeout,
f"http://{self.event.host}:{ETCD_PORT}/version",
verify=False,
timeout=config.network_timeout,
)
return r.content if r.status_code == 200 and r.content else False
except requests.exceptions.ConnectionError:
@@ -157,10 +167,10 @@ class EtcdRemoteAccess(Hunter):
def execute(self):
if self.insecure_access(): # make a decision between http and https protocol
self.protocol = "http"
self.event.protocol = "http"
if self.version_disclosure():
self.publish_event(EtcdRemoteVersionDisclosureEvent(self.version_evidence))
if self.protocol == "http":
if self.event.protocol == "http":
self.publish_event(EtcdAccessEnabledWithoutAuthEvent(self.version_evidence))
if self.db_keys_disclosure():
self.publish_event(EtcdRemoteReadAccessEvent(self.keys_evidence))

View File

@@ -9,16 +9,19 @@ import urllib3
import uuid
from kube_hunter.conf import get_config
from kube_hunter.core.events import handler
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import Vulnerability, Event, K8sVersionDisclosure
from kube_hunter.core.types import (
Hunter,
ActiveHunter,
KubernetesCluster,
Kubelet,
InformationDisclosure,
RemoteCodeExec,
AccessRisk,
ExposedSensitiveInterfacesTechnique,
ExecIntoContainerTechnique,
GeneralDefenseEvasionTechnique,
GeneralSensitiveInformationTechnique,
PrivilegedContainerTechnique,
AccessKubeletAPITechnique,
)
from kube_hunter.modules.discovery.kubelet import (
ReadOnlyKubeletEvent,
@@ -35,7 +38,7 @@ class ExposedPodsHandler(Vulnerability, Event):
def __init__(self, pods):
Vulnerability.__init__(
self, component=Kubelet, name="Exposed Pods", category=InformationDisclosure,
self, component=Kubelet, name="Exposed Pods", category=AccessKubeletAPITechnique, vid="KHV052"
)
self.pods = pods
self.evidence = f"count: {len(self.pods)}"
@@ -47,7 +50,11 @@ class AnonymousAuthEnabled(Vulnerability, Event):
def __init__(self):
Vulnerability.__init__(
self, component=Kubelet, name="Anonymous Authentication", category=RemoteCodeExec, vid="KHV036",
self,
component=Kubelet,
name="Anonymous Authentication",
category=ExposedSensitiveInterfacesTechnique,
vid="KHV036",
)
@@ -56,7 +63,11 @@ class ExposedContainerLogsHandler(Vulnerability, Event):
def __init__(self):
Vulnerability.__init__(
self, component=Kubelet, name="Exposed Container Logs", category=InformationDisclosure, vid="KHV037",
self,
component=Kubelet,
name="Exposed Container Logs",
category=AccessKubeletAPITechnique,
vid="KHV037",
)
@@ -66,10 +77,14 @@ class ExposedRunningPodsHandler(Vulnerability, Event):
def __init__(self, count):
Vulnerability.__init__(
self, component=Kubelet, name="Exposed Running Pods", category=InformationDisclosure, vid="KHV038",
self,
component=Kubelet,
name="Exposed Running Pods",
category=AccessKubeletAPITechnique,
vid="KHV038",
)
self.count = count
self.evidence = "{} running pods".format(self.count)
self.evidence = f"{self.count} running pods"
class ExposedExecHandler(Vulnerability, Event):
@@ -77,7 +92,11 @@ class ExposedExecHandler(Vulnerability, Event):
def __init__(self):
Vulnerability.__init__(
self, component=Kubelet, name="Exposed Exec On Container", category=RemoteCodeExec, vid="KHV039",
self,
component=Kubelet,
name="Exposed Exec On Container",
category=ExecIntoContainerTechnique,
vid="KHV039",
)
@@ -86,7 +105,11 @@ class ExposedRunHandler(Vulnerability, Event):
def __init__(self):
Vulnerability.__init__(
self, component=Kubelet, name="Exposed Run Inside Container", category=RemoteCodeExec, vid="KHV040",
self,
component=Kubelet,
name="Exposed Run Inside Container",
category=ExecIntoContainerTechnique,
vid="KHV040",
)
@@ -95,7 +118,11 @@ class ExposedPortForwardHandler(Vulnerability, Event):
def __init__(self):
Vulnerability.__init__(
self, component=Kubelet, name="Exposed Port Forward", category=RemoteCodeExec, vid="KHV041",
self,
component=Kubelet,
name="Exposed Port Forward",
category=GeneralDefenseEvasionTechnique,
vid="KHV041",
)
@@ -105,7 +132,11 @@ class ExposedAttachHandler(Vulnerability, Event):
def __init__(self):
Vulnerability.__init__(
self, component=Kubelet, name="Exposed Attaching To Container", category=RemoteCodeExec, vid="KHV042",
self,
component=Kubelet,
name="Exposed Attaching To Container",
category=ExecIntoContainerTechnique,
vid="KHV042",
)
@@ -115,7 +146,11 @@ class ExposedHealthzHandler(Vulnerability, Event):
def __init__(self, status):
Vulnerability.__init__(
self, component=Kubelet, name="Cluster Health Disclosure", category=InformationDisclosure, vid="KHV043",
self,
component=Kubelet,
name="Cluster Health Disclosure",
category=GeneralSensitiveInformationTechnique,
vid="KHV043",
)
self.status = status
self.evidence = f"status: {self.status}"
@@ -131,7 +166,7 @@ the whole cluster"""
self,
component=KubernetesCluster,
name="Exposed Existing Privileged Container(s) Via Secure Kubelet Port",
category=AccessRisk,
category=PrivilegedContainerTechnique,
vid="KHV051",
)
self.exposed_existing_privileged_containers = exposed_existing_privileged_containers
@@ -143,7 +178,11 @@ class PrivilegedContainers(Vulnerability, Event):
def __init__(self, containers):
Vulnerability.__init__(
self, component=KubernetesCluster, name="Privileged Container", category=AccessRisk, vid="KHV044",
self,
component=KubernetesCluster,
name="Privileged Container",
category=PrivilegedContainerTechnique,
vid="KHV044",
)
self.containers = containers
self.evidence = f"pod: {containers[0][0]}, " f"container: {containers[0][1]}, " f"count: {len(containers)}"
@@ -154,7 +193,11 @@ class ExposedSystemLogs(Vulnerability, Event):
def __init__(self):
Vulnerability.__init__(
self, component=Kubelet, name="Exposed System Logs", category=InformationDisclosure, vid="KHV045",
self,
component=Kubelet,
name="Exposed System Logs",
category=AccessKubeletAPITechnique,
vid="KHV045",
)
@@ -163,7 +206,11 @@ class ExposedKubeletCmdline(Vulnerability, Event):
def __init__(self, cmdline):
Vulnerability.__init__(
self, component=Kubelet, name="Exposed Kubelet Cmdline", category=InformationDisclosure, vid="KHV046",
self,
component=Kubelet,
name="Exposed Kubelet Cmdline",
category=AccessKubeletAPITechnique,
vid="KHV046",
)
self.cmdline = cmdline
self.evidence = f"cmdline: {self.cmdline}"
@@ -259,7 +306,7 @@ class SecureKubeletPortHunter(Hunter):
"""
class DebugHandlers:
""" all methods will return the handler name if successful """
"""all methods will return the handler name if successful"""
def __init__(self, path, pod, session=None):
self.path = path + ("/" if not path.endswith("/") else "")
@@ -270,7 +317,9 @@ class SecureKubeletPortHunter(Hunter):
def test_container_logs(self):
config = get_config()
logs_url = self.path + KubeletHandlers.CONTAINERLOGS.value.format(
pod_namespace=self.pod["namespace"], pod_id=self.pod["name"], container_name=self.pod["container"],
pod_namespace=self.pod["namespace"],
pod_id=self.pod["name"],
container_name=self.pod["container"],
)
return self.session.get(logs_url, verify=False, timeout=config.network_timeout).status_code == 200
@@ -288,36 +337,46 @@ class SecureKubeletPortHunter(Hunter):
return (
"/cri/exec/"
in self.session.get(
exec_url, headers=headers, allow_redirects=False, verify=False, timeout=config.network_timeout,
exec_url,
headers=headers,
allow_redirects=False,
verify=False,
timeout=config.network_timeout,
).text
)
# need further investigation on websockets protocol for further implementation
def test_port_forward(self):
config = get_config()
headers = {
"Upgrade": "websocket",
"Connection": "Upgrade",
"Sec-Websocket-Key": "s",
"Sec-Websocket-Version": "13",
"Sec-Websocket-Protocol": "SPDY",
}
pf_url = self.path + KubeletHandlers.PORTFORWARD.value.format(
pod_namespace=self.pod["namespace"], pod_id=self.pod["name"], port=80,
)
self.session.get(
pf_url, headers=headers, verify=False, stream=True, timeout=config.network_timeout,
).status_code == 200
pass
# TODO: what to return?
# Example starting code:
#
# config = get_config()
# headers = {
# "Upgrade": "websocket",
# "Connection": "Upgrade",
# "Sec-Websocket-Key": "s",
# "Sec-Websocket-Version": "13",
# "Sec-Websocket-Protocol": "SPDY",
# }
# pf_url = self.path + KubeletHandlers.PORTFORWARD.value.format(
# pod_namespace=self.pod["namespace"],
# pod_id=self.pod["name"],
# port=80,
# )
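# One possible continuation (a sketch only; the kubelet's SPDY/websocket
# handshake semantics would need verification first):
#
# response = self.session.get(
#     pf_url, headers=headers, verify=False, stream=True,
#     timeout=config.network_timeout,
# )
# # 101 Switching Protocols would suggest the handshake was accepted
# return response.status_code == 101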
# executes one command and returns output
def test_run_container(self):
config = get_config()
run_url = self.path + KubeletHandlers.RUN.value.format(
pod_namespace="test", pod_id="test", container_name="test", cmd="",
pod_namespace="test",
pod_id="test",
container_name="test",
cmd="",
)
# if we get a Method Not Allowed, we know we passed Authentication and Authorization.
return self.session.get(run_url, verify=False, timeout=config.network_timeout).status_code == 405
# a NOT_FOUND here means we passed Authentication and Authorization, and that the endpoint is enabled
# (the probe pod named "test" simply does not exist).
status_code = self.session.post(run_url, verify=False, timeout=config.network_timeout).status_code
return status_code == requests.codes.NOT_FOUND
# returns list of currently running pods
def test_running_pods(self):
@@ -339,7 +398,10 @@ class SecureKubeletPortHunter(Hunter):
return (
"/cri/attach/"
in self.session.get(
attach_url, allow_redirects=False, verify=False, timeout=config.network_timeout,
attach_url,
allow_redirects=False,
verify=False,
timeout=config.network_timeout,
).text
)
@@ -347,7 +409,8 @@ class SecureKubeletPortHunter(Hunter):
def test_logs_endpoint(self):
config = get_config()
logs_url = self.session.get(
self.path + KubeletHandlers.LOGS.value.format(path=""), timeout=config.network_timeout,
self.path + KubeletHandlers.LOGS.value.format(path=""),
timeout=config.network_timeout,
).text
return "<pre>" in logs_url
@@ -355,7 +418,9 @@ class SecureKubeletPortHunter(Hunter):
def test_pprof_cmdline(self):
config = get_config()
cmd = self.session.get(
self.path + KubeletHandlers.PPROF_CMDLINE.value, verify=False, timeout=config.network_timeout,
self.path + KubeletHandlers.PPROF_CMDLINE.value,
verify=False,
timeout=config.network_timeout,
)
return cmd.text if cmd.status_code == 200 else None
@@ -464,7 +529,7 @@ class ProveAnonymousAuth(ActiveHunter):
def __init__(self, event):
self.event = event
self.base_url = "https://{host}:10250/".format(host=self.event.host)
self.base_url = f"https://{self.event.host}:10250/"
def get_request(self, url, verify=False):
config = get_config()
@@ -503,7 +568,7 @@ class ProveAnonymousAuth(ActiveHunter):
return ProveAnonymousAuth.has_no_error(result) and ProveAnonymousAuth.has_no_exception(result)
def cat_command(self, run_request_url, full_file_path):
return self.post_request(run_request_url, {"cmd": "cat {}".format(full_file_path)})
return self.post_request(run_request_url, {"cmd": f"cat {full_file_path}"})
def process_container(self, run_request_url):
service_account_token = self.cat_command(run_request_url, "/var/run/secrets/kubernetes.io/serviceaccount/token")
@@ -540,7 +605,7 @@ class ProveAnonymousAuth(ActiveHunter):
for container_data in pod_data["spec"]["containers"]:
container_name = container_data["name"]
run_request_url = self.base_url + "run/{}/{}/{}".format(pod_namespace, pod_id, container_name)
run_request_url = self.base_url + f"run/{pod_namespace}/{pod_id}/{container_name}"
extracted_data = self.process_container(run_request_url)
@@ -549,11 +614,11 @@ class ProveAnonymousAuth(ActiveHunter):
environment_variables = extracted_data["environment_variables"]
temp_message += (
"\n\nPod namespace: {}".format(pod_namespace)
+ "\n\nPod ID: {}".format(pod_id)
+ "\n\nContainer name: {}".format(container_name)
+ "\n\nService account token: {}".format(service_account_token)
+ "\nEnvironment variables: {}".format(environment_variables)
f"\n\nPod namespace: {pod_namespace}"
+ f"\n\nPod ID: {pod_id}"
+ f"\n\nContainer name: {container_name}"
+ f"\n\nService account token: {service_account_token}"
+ f"\nEnvironment variables: {environment_variables}"
)
first_check = container_data.get("securityContext", {}).get("privileged")
@@ -578,7 +643,7 @@ class ProveAnonymousAuth(ActiveHunter):
if temp_message:
message = "The following containers have been successfully breached." + temp_message
self.event.evidence = "{}".format(message)
self.event.evidence = f"{message}"
if exposed_existing_privileged_containers:
self.publish_event(
@@ -598,7 +663,7 @@ class MaliciousIntentViaSecureKubeletPort(ActiveHunter):
def __init__(self, event, seconds_to_wait_for_os_command=1):
self.event = event
self.base_url = "https://{host}:10250/".format(host=self.event.host)
self.base_url = f"https://{self.event.host}:10250/"
self.seconds_to_wait_for_os_command = seconds_to_wait_for_os_command
self.number_of_rm_attempts = 5
self.number_of_rmdir_attempts = 5
@@ -617,7 +682,7 @@ class MaliciousIntentViaSecureKubeletPort(ActiveHunter):
return "Exception: " + str(ex)
def cat_command(self, run_request_url, full_file_path):
return self.post_request(run_request_url, {"cmd": "cat {}".format(full_file_path)})
return self.post_request(run_request_url, {"cmd": f"cat {full_file_path}"})
def clean_attacked_exposed_existing_privileged_container(
self,
@@ -633,7 +698,7 @@ class MaliciousIntentViaSecureKubeletPort(ActiveHunter):
self.rm_command(
run_request_url,
"{}/etc/cron.daily/{}".format(directory_created, file_created),
f"{directory_created}/etc/cron.daily/{file_created}",
number_of_rm_attempts,
seconds_to_wait_for_os_command,
)
@@ -647,7 +712,10 @@ class MaliciousIntentViaSecureKubeletPort(ActiveHunter):
)
self.rmdir_command(
run_request_url, directory_created, number_of_rmdir_attempts, seconds_to_wait_for_os_command,
run_request_url,
directory_created,
number_of_rmdir_attempts,
seconds_to_wait_for_os_command,
)
def check_file_exists(self, run_request_url, file):
@@ -658,9 +726,7 @@ class MaliciousIntentViaSecureKubeletPort(ActiveHunter):
def rm_command(self, run_request_url, file_to_remove, number_of_rm_attempts, seconds_to_wait_for_os_command):
if self.check_file_exists(run_request_url, file_to_remove):
for _ in range(number_of_rm_attempts):
command_execution_outcome = self.post_request(
run_request_url, {"cmd": "rm -f {}".format(file_to_remove)}
)
command_execution_outcome = self.post_request(run_request_url, {"cmd": f"rm -f {file_to_remove}"})
if seconds_to_wait_for_os_command:
time.sleep(seconds_to_wait_for_os_command)
@@ -687,10 +753,10 @@ class MaliciousIntentViaSecureKubeletPort(ActiveHunter):
return False
def chmod_command(self, run_request_url, permissions, file):
return self.post_request(run_request_url, {"cmd": "chmod {} {}".format(permissions, file)})
return self.post_request(run_request_url, {"cmd": f"chmod {permissions} {file}"})
def touch_command(self, run_request_url, file_to_create):
return self.post_request(run_request_url, {"cmd": "touch {}".format(file_to_create)})
return self.post_request(run_request_url, {"cmd": f"touch {file_to_create}"})
def attack_exposed_existing_privileged_container(
self, run_request_url, directory_created, number_of_rm_attempts, seconds_to_wait_for_os_command, file_name=None
@@ -698,7 +764,7 @@ class MaliciousIntentViaSecureKubeletPort(ActiveHunter):
if file_name is None:
file_name = "kube-hunter" + str(uuid.uuid1())
file_name_with_path = "{}/etc/cron.daily/{}".format(directory_created, file_name)
file_name_with_path = f"{directory_created}/etc/cron.daily/{file_name}"
file_created = self.touch_command(run_request_url, file_name_with_path)
@@ -718,13 +784,15 @@ class MaliciousIntentViaSecureKubeletPort(ActiveHunter):
return ProveAnonymousAuth.has_no_error_nor_exception(directory_exists)
def rmdir_command(
self, run_request_url, directory_to_remove, number_of_rmdir_attempts, seconds_to_wait_for_os_command,
self,
run_request_url,
directory_to_remove,
number_of_rmdir_attempts,
seconds_to_wait_for_os_command,
):
if self.check_directory_exists(run_request_url, directory_to_remove):
for _ in range(number_of_rmdir_attempts):
command_execution_outcome = self.post_request(
run_request_url, {"cmd": "rmdir {}".format(directory_to_remove)}
)
command_execution_outcome = self.post_request(run_request_url, {"cmd": f"rmdir {directory_to_remove}"})
if seconds_to_wait_for_os_command:
time.sleep(seconds_to_wait_for_os_command)
@@ -751,7 +819,7 @@ class MaliciousIntentViaSecureKubeletPort(ActiveHunter):
return False
def ls_command(self, run_request_url, file_or_directory):
return self.post_request(run_request_url, {"cmd": "ls {}".format(file_or_directory)})
return self.post_request(run_request_url, {"cmd": f"ls {file_or_directory}"})
def umount_command(
self,
@@ -769,7 +837,7 @@ class MaliciousIntentViaSecureKubeletPort(ActiveHunter):
for _ in range(number_of_umount_attempts):
# Ref: http://man7.org/linux/man-pages/man2/umount.2.html
command_execution_outcome = self.post_request(
run_request_url, {"cmd": "umount {} {}".format(file_system_or_partition, directory)}
run_request_url, {"cmd": f"umount {file_system_or_partition} {directory}"}
)
if seconds_to_wait_for_os_command:
@@ -800,16 +868,16 @@ class MaliciousIntentViaSecureKubeletPort(ActiveHunter):
def mount_command(self, run_request_url, file_system_or_partition, directory):
# Ref: http://man7.org/linux/man-pages/man1/mkdir.1.html
return self.post_request(run_request_url, {"cmd": "mount {} {}".format(file_system_or_partition, directory)})
return self.post_request(run_request_url, {"cmd": f"mount {file_system_or_partition} {directory}"})
def mkdir_command(self, run_request_url, directory_to_create):
# Ref: http://man7.org/linux/man-pages/man1/mkdir.1.html
return self.post_request(run_request_url, {"cmd": "mkdir {}".format(directory_to_create)})
return self.post_request(run_request_url, {"cmd": f"mkdir {directory_to_create}"})
def findfs_command(self, run_request_url, file_system_or_partition_type, file_system_or_partition):
# Ref: http://man7.org/linux/man-pages/man8/findfs.8.html
return self.post_request(
run_request_url, {"cmd": "findfs {}{}".format(file_system_or_partition_type, file_system_or_partition)}
run_request_url, {"cmd": f"findfs {file_system_or_partition_type}{file_system_or_partition}"}
)
def get_root_values(self, command_line):
@@ -868,9 +936,7 @@ class MaliciousIntentViaSecureKubeletPort(ActiveHunter):
)
if ProveAnonymousAuth.has_no_error_nor_exception(mounted_file_system_or_partition):
host_name = self.cat_command(
run_request_url, "{}/etc/hostname".format(directory_created)
)
host_name = self.cat_command(run_request_url, f"{directory_created}/etc/hostname")
if ProveAnonymousAuth.has_no_error_nor_exception(host_name):
return {
@@ -904,7 +970,7 @@ class MaliciousIntentViaSecureKubeletPort(ActiveHunter):
pod_id = exposed_existing_privileged_containers["pod_id"]
container_name = exposed_existing_privileged_containers["container_name"]
run_request_url = self.base_url + "run/{}/{}/{}".format(pod_namespace, pod_id, container_name)
run_request_url = self.base_url + f"run/{pod_namespace}/{pod_id}/{container_name}"
is_exposed_existing_privileged_container_privileged = self.process_exposed_existing_privileged_container(
run_request_url,
@@ -954,7 +1020,7 @@ class MaliciousIntentViaSecureKubeletPort(ActiveHunter):
+ temp_message
)
self.event.evidence = "{}".format(message)
self.event.evidence = f"{message}"
else:
message = (
"The following exposed existing privileged containers"
@@ -963,7 +1029,7 @@ class MaliciousIntentViaSecureKubeletPort(ActiveHunter):
+ temp_message
)
self.event.evidence = "{}".format(message)
self.event.evidence = f"{message}"
@handler.subscribe(ExposedRunHandler)
@@ -985,13 +1051,17 @@ class ProveRunHandler(ActiveHunter):
cmd=command,
)
return self.event.session.post(
f"{self.base_path}/{run_url}", verify=False, timeout=config.network_timeout,
f"{self.base_path}/{run_url}",
verify=False,
timeout=config.network_timeout,
).text
def execute(self):
config = get_config()
r = self.event.session.get(
f"{self.base_path}/" + KubeletHandlers.PODS.value, verify=False, timeout=config.network_timeout,
f"{self.base_path}/" + KubeletHandlers.PODS.value,
verify=False,
timeout=config.network_timeout,
)
if "items" in r.text:
pods_data = r.json()["items"]
@@ -1025,7 +1095,9 @@ class ProveContainerLogsHandler(ActiveHunter):
def execute(self):
config = get_config()
pods_raw = self.event.session.get(
self.base_url + KubeletHandlers.PODS.value, verify=False, timeout=config.network_timeout,
self.base_url + KubeletHandlers.PODS.value,
verify=False,
timeout=config.network_timeout,
).text
if "items" in pods_raw:
pods_data = json.loads(pods_raw)["items"]
@@ -1064,11 +1136,16 @@ class ProveSystemLogs(ActiveHunter):
f"{self.base_url}/" + KubeletHandlers.LOGS.value.format(path="audit/audit.log"),
verify=False,
timeout=config.network_timeout,
).text
logger.debug(f"Audit log of host {self.event.host}: {audit_logs[:10]}")
# iterating over proctitles and converting them into readable strings
proctitles = []
for proctitle in re.findall(r"proctitle=(\w+)", audit_logs):
proctitles.append(bytes.fromhex(proctitle).decode("utf-8").replace("\x00", " "))
self.event.proctitles = proctitles
self.event.evidence = f"audit log: {proctitles}"
)
# TODO: add more methods for proving system logs
if audit_logs.status_code == requests.status_codes.codes.OK:
logger.debug(f"Audit log of host {self.event.host}: {audit_logs.text[:10]}")
# iterating over proctitles and converting them into readable strings
proctitles = []
for proctitle in re.findall(r"proctitle=(\w+)", audit_logs.text):
proctitles.append(bytes.fromhex(proctitle).decode("utf-8").replace("\x00", " "))
self.event.proctitles = proctitles
self.event.evidence = f"audit log: {proctitles}"
else:
self.event.evidence = "Could not parse system logs"

View File

@@ -3,14 +3,9 @@ import re
import uuid
from kube_hunter.conf import get_config
from kube_hunter.core.events import handler
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import Event, Vulnerability
from kube_hunter.core.types import (
ActiveHunter,
Hunter,
KubernetesCluster,
PrivilegeEscalation,
)
from kube_hunter.core.types import ActiveHunter, Hunter, KubernetesCluster, HostPathMountPrivilegeEscalationTechnique
from kube_hunter.modules.hunting.kubelet import (
ExposedPodsHandler,
ExposedRunHandler,
@@ -25,10 +20,14 @@ class WriteMountToVarLog(Vulnerability, Event):
def __init__(self, pods):
Vulnerability.__init__(
self, KubernetesCluster, "Pod With Mount To /var/log", category=PrivilegeEscalation, vid="KHV047",
self,
KubernetesCluster,
"Pod With Mount To /var/log",
category=HostPathMountPrivilegeEscalationTechnique,
vid="KHV047",
)
self.pods = pods
self.evidence = "pods: {}".format(", ".join((pod["metadata"]["name"] for pod in self.pods)))
self.evidence = "pods: {}".format(", ".join(pod["metadata"]["name"] for pod in self.pods))
class DirectoryTraversalWithKubelet(Vulnerability, Event):
@@ -37,10 +36,13 @@ class DirectoryTraversalWithKubelet(Vulnerability, Event):
def __init__(self, output):
Vulnerability.__init__(
self, KubernetesCluster, "Root Traversal Read On The Kubelet", category=PrivilegeEscalation,
self,
KubernetesCluster,
"Root Traversal Read On The Kubelet",
category=HostPathMountPrivilegeEscalationTechnique,
)
self.output = output
self.evidence = "output: {}".format(self.output)
self.evidence = f"output: {self.output}"
@handler.subscribe(ExposedPodsHandler)
@@ -70,34 +72,27 @@ class VarLogMountHunter(Hunter):
self.publish_event(WriteMountToVarLog(pods=pe_pods))
@handler.subscribe(ExposedRunHandler)
@handler.subscribe_many([ExposedRunHandler, WriteMountToVarLog])
class ProveVarLogMount(ActiveHunter):
"""Prove /var/log Mount Hunter
Tries to read /etc/shadow on the host by running commands inside a pod with host mount to /var/log
"""
def __init__(self, event):
self.event = event
self.base_path = f"https://{self.event.host}:{self.event.port}"
self.write_mount_event = self.event.get_by_class(WriteMountToVarLog)
self.event = self.write_mount_event
self.base_path = f"https://{self.write_mount_event.host}:{self.write_mount_event.port}"
def run(self, command, container):
run_url = KubeletHandlers.RUN.value.format(
podNamespace=container["namespace"], podID=container["pod"], containerName=container["name"], cmd=command,
podNamespace=container["namespace"],
podID=container["pod"],
containerName=container["name"],
cmd=command,
)
return self.event.session.post(f"{self.base_path}/{run_url}", verify=False).text
# TODO: replace with multiple subscription to WriteMountToVarLog as well
def get_varlog_mounters(self):
config = get_config()
logger.debug("accessing /pods manually on ProveVarLogMount")
pods = self.event.session.get(
f"{self.base_path}/" + KubeletHandlers.PODS.value, verify=False, timeout=config.network_timeout,
).json()["items"]
for pod in pods:
volume = VarLogMountHunter(ExposedPodsHandler(pods=pods)).has_write_mount_to(pod, "/var/log")
if volume:
yield pod, volume
def mount_path_from_mountname(self, pod, mount_name):
"""returns container name, and container mount path correlated to mount_name"""
for container in pod["spec"]["containers"]:
@@ -117,14 +112,16 @@ class ProveVarLogMount(ActiveHunter):
path=re.sub(r"^/var/log", "", host_path) + symlink_name
)
content = self.event.session.get(
f"{self.base_path}/{path_in_logs_endpoint}", verify=False, timeout=config.network_timeout,
f"{self.base_path}/{path_in_logs_endpoint}",
verify=False,
timeout=config.network_timeout,
).text
# removing symlink
self.run(f"rm {mount_path}/{symlink_name}", container=container)
return content
def execute(self):
for pod, volume in self.get_varlog_mounters():
for pod, volume in self.write_mount_event.pe_pods():
for container, mount_path in self.mount_path_from_mountname(pod, volume["name"]):
logger.debug("Correlated container to mount_name")
cont = {
@@ -134,7 +131,10 @@ class ProveVarLogMount(ActiveHunter):
}
try:
output = self.traverse_read(
"/etc/shadow", container=cont, mount_path=mount_path, host_path=volume["hostPath"]["path"],
"/etc/shadow",
container=cont,
mount_path=mount_path,
host_path=volume["hostPath"]["path"],
)
self.publish_event(DirectoryTraversalWithKubelet(output=output))
except Exception:

View File

@@ -4,13 +4,13 @@ import requests
from enum import Enum
from kube_hunter.conf import get_config
from kube_hunter.core.events import handler
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import Event, Vulnerability, K8sVersionDisclosure
from kube_hunter.core.types import (
ActiveHunter,
Hunter,
KubernetesCluster,
InformationDisclosure,
ConnectFromProxyServerTechnique,
)
from kube_hunter.modules.discovery.dashboard import KubeDashboardEvent
from kube_hunter.modules.discovery.proxy import KubeProxyEvent
@@ -23,7 +23,11 @@ class KubeProxyExposed(Vulnerability, Event):
def __init__(self):
Vulnerability.__init__(
self, KubernetesCluster, "Proxy Exposed", category=InformationDisclosure, vid="KHV049",
self,
KubernetesCluster,
"Proxy Exposed",
category=ConnectFromProxyServerTechnique,
vid="KHV049",
)
@@ -89,7 +93,9 @@ class ProveProxyExposed(ActiveHunter):
def execute(self):
config = get_config()
version_metadata = requests.get(
f"http://{self.event.host}:{self.event.port}/version", verify=False, timeout=config.network_timeout,
f"http://{self.event.host}:{self.event.port}/version",
verify=False,
timeout=config.network_timeout,
).json()
if "buildDate" in version_metadata:
self.event.evidence = "build date: {}".format(version_metadata["buildDate"])
@@ -107,11 +113,16 @@ class K8sVersionDisclosureProve(ActiveHunter):
def execute(self):
config = get_config()
version_metadata = requests.get(
f"http://{self.event.host}:{self.event.port}/version", verify=False, timeout=config.network_timeout,
f"http://{self.event.host}:{self.event.port}/version",
verify=False,
timeout=config.network_timeout,
).json()
if "gitVersion" in version_metadata:
self.publish_event(
K8sVersionDisclosure(
version=version_metadata["gitVersion"], from_endpoint="/version", extra_info="on kube-proxy",
version=version_metadata["gitVersion"],
from_endpoint="/version",
extra_info="on kube-proxy",
category=ConnectFromProxyServerTechnique,
)
)

View File

@@ -1,34 +1,37 @@
import logging
import os
from kube_hunter.core.events import handler
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import Vulnerability, Event
from kube_hunter.core.types import Hunter, KubernetesCluster, AccessRisk
from kube_hunter.core.types import Hunter, KubernetesCluster, AccessContainerServiceAccountTechnique
from kube_hunter.modules.discovery.hosts import RunningAsPodEvent
logger = logging.getLogger(__name__)
class ServiceAccountTokenAccess(Vulnerability, Event):
""" Accessing the pod service account token gives an attacker the option to use the server API """
"""Accessing the pod service account token gives an attacker the option to use the server API"""
def __init__(self, evidence):
Vulnerability.__init__(
self,
KubernetesCluster,
name="Read access to pod's service account token",
category=AccessRisk,
category=AccessContainerServiceAccountTechnique,
vid="KHV050",
)
self.evidence = evidence
class SecretsAccess(Vulnerability, Event):
""" Accessing the pod's secrets within a compromised pod might disclose valuable data to a potential attacker"""
"""Accessing the pod's secrets within a compromised pod might disclose valuable data to a potential attacker"""
def __init__(self, evidence):
Vulnerability.__init__(
self, component=KubernetesCluster, name="Access to pod's secrets", category=AccessRisk,
self,
component=KubernetesCluster,
name="Access to pod's secrets",
category=AccessContainerServiceAccountTechnique,
)
self.evidence = evidence

View File

@@ -7,6 +7,9 @@ from kube_hunter.modules.report.collector import (
vulnerabilities_lock,
)
BASE_KB_LINK = "https://avd.aquasec.com/"
FULL_KB_LINK = "https://avd.aquasec.com/kube-hunter/{vid}/"
class BaseReporter:
def get_nodes(self):
@@ -33,11 +36,12 @@ class BaseReporter:
{
"location": vuln.location(),
"vid": vuln.get_vid(),
"category": vuln.category.name,
"category": vuln.category.get_name(),
"severity": vuln.get_severity(),
"vulnerability": vuln.get_name(),
"description": vuln.explain(),
"evidence": str(vuln.evidence),
"avd_reference": FULL_KB_LINK.format(vid=vuln.get_vid().lower()),
"hunter": vuln.hunter.get_name(),
}
for vuln in vulnerabilities
@@ -63,6 +67,4 @@ class BaseReporter:
if statistics:
report["hunter_statistics"] = self.get_hunter_statistics()
report["kburl"] = "https://aquasecurity.github.io/kube-hunter/kb/{vid}"
return report
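# For example, a finding with vid "KHV005" is reported with
# avd_reference == "https://avd.aquasec.com/kube-hunter/khv005/"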

View File

@@ -2,7 +2,7 @@ import logging
import threading
from kube_hunter.conf import get_config
from kube_hunter.core.events import handler
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import (
Event,
Service,

View File

@@ -12,7 +12,7 @@ class HTTPDispatcher:
dispatch_url = os.environ.get("KUBEHUNTER_HTTP_DISPATCH_URL", "https://localhost/")
try:
r = requests.request(
dispatch_method, dispatch_url, json=report, headers={"Content-Type": "application/json"},
dispatch_method, dispatch_url, json=report, headers={"Content-Type": "application/json"}, verify=False
)
r.raise_for_status()
logger.info(f"Report was dispatched to: {dispatch_url}")

View File

@@ -1,6 +1,6 @@
from prettytable import ALL, PrettyTable
from kube_hunter.modules.report.base import BaseReporter
from kube_hunter.modules.report.base import BaseReporter, BASE_KB_LINK
from kube_hunter.modules.report.collector import (
services,
vulnerabilities,
@@ -9,9 +9,8 @@ from kube_hunter.modules.report.collector import (
vulnerabilities_lock,
)
EVIDENCE_PREVIEW = 40
EVIDENCE_PREVIEW = 100
MAX_TABLE_WIDTH = 20
KB_LINK = "https://github.com/aquasecurity/kube-hunter/tree/master/docs/_kb"
class PlainReporter(BaseReporter):
@@ -60,7 +59,7 @@ class PlainReporter(BaseReporter):
if service.event_id not in id_memory:
nodes_table.add_row(["Node/Master", service.host])
id_memory.add(service.event_id)
nodes_ret = "\nNodes\n{}\n".format(nodes_table)
nodes_ret = f"\nNodes\n{nodes_table}\n"
services_lock.release()
return nodes_ret
@@ -84,7 +83,7 @@ class PlainReporter(BaseReporter):
column_names = [
"ID",
"Location",
"Category",
"MITRE Category",
"Vulnerability",
"Description",
"Evidence",
@@ -92,7 +91,7 @@ class PlainReporter(BaseReporter):
vuln_table = PrettyTable(column_names, hrules=ALL)
vuln_table.align = "l"
vuln_table.max_width = MAX_TABLE_WIDTH
vuln_table.sortby = "Category"
vuln_table.sortby = "MITRE Category"
vuln_table.reversesort = True
vuln_table.padding_width = 1
vuln_table.header_style = "upper"
@@ -102,10 +101,11 @@ class PlainReporter(BaseReporter):
evidence = str(vuln.evidence)
if len(evidence) > EVIDENCE_PREVIEW:
evidence = evidence[:EVIDENCE_PREVIEW] + "..."
row = [
vuln.get_vid(),
vuln.location(),
vuln.category.name,
vuln.category.get_name(),
vuln.get_name(),
vuln.explain(),
evidence,
@@ -114,7 +114,7 @@ class PlainReporter(BaseReporter):
return (
"\nVulnerabilities\n"
"For further information about a vulnerability, search its ID in: \n"
f"{KB_LINK}\n{vuln_table}\n"
f"{BASE_KB_LINK}\n{vuln_table}\n"
)
def hunters_table(self):

View File

@@ -0,0 +1,3 @@
from PyInstaller.utils.hooks import collect_all
datas, binaries, hiddenimports = collect_all("prettytable")
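# datas/binaries/hiddenimports are the variable names PyInstaller reads from a
# hook-<package>.py file; collect_all("prettytable") gathers the package's data
# files, dynamic libraries, and submodules so they end up in the bundled binary.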

Some files were not shown because too many files have changed in this diff.