Compare commits

...

16 Commits

b6b3dbc0f2  Daniel Sagi          2020-06-19 13:44:09 +03:00
    black reformat
b16f993687  Daniel Sagi          2020-06-19 13:40:25 +03:00
    removed blank line space
ac1a022433  Daniel Sagi          2020-06-19 13:36:32 +03:00
    added tests for the plugins hooks
eb45ded19d  Daniel Sagi          2020-06-19 13:35:56 +03:00
    moved import of parser to right before the register call, to avoid circular imports
5592a3ea81  Daniel Sagi          2020-06-19 12:15:31 +03:00
    restored main link file from master
8df27f7d27  Daniel Sagi          2020-06-19 11:39:20 +03:00
    fixed formatting using black
98a47d0500  Daniel Sagi          2020-06-12 17:40:16 +03:00
    removed unecessary add_config import
075998367e  Daniel Sagi          2020-06-12 17:18:33 +03:00
    added pluggy to required deps
ca091e1b0f  Daniel Sagi          2020-06-12 17:17:59 +03:00
    changed previous parsing function to not create the ArgumentParser, and implemented it as a hook for the parsing mechanism
394b83d04e  Daniel Sagi          2020-06-12 17:15:58 +03:00
    implemented plugins application on main file, changed mechanism for argument parsing
fba0ca374a  Daniel Sagi          2020-06-12 17:11:33 +03:00
    added plugins submodule, created two hookspecs, one for adding arguments, one for running code after the argument parsing
78e16729e0  Konstantin Weddige   2020-06-08 13:47:40 +01:00
    Fix typo (#354)
    This fixes #353
78c0133d9d  danielsagi           2020-06-08 15:04:29 +03:00
    removed an unnecessary f-string on an info logging (#355)
4484ad734f  Liz Rice             2020-05-11 10:42:31 +01:00
    Fix CertificateDiscovery hunter for Python3 (#350)
    * update base64 decode for python3
    * chore: remove lint error about imports
a0127659b7  Yehuda Chikvashvili  2020-04-26 19:37:16 +03:00
    Decouple config and argument parsing (#342)
    * Make config initialized explicitly
    * Add mypy linting
    * Make tests run individually
    Resolve #341
f034c8c7a1  Yehuda Chikvashvili  2020-04-23 02:31:07 +03:00
    Removed unused imports (#338)
    * Update snippets in README.md
      The README file had deprecated code snippets
    * Remove unnecessary imports
    * Complete tests for hunters registration
    Resolves #334
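Taken together, the plugin commits above (fba0ca374a, ca091e1b0f, 394b83d04e) introduce a pluggy-based hook mechanism with two hookspecs. As a rough sketch of what a third-party hook implementation could look like under those hookspecs — the module and the `--my-option` flag are hypothetical, only `hookimpl` and the hook names come from this changeset:

```python
# Hypothetical plugin module implementing the two hookspecs added here:
# parser_add_arguments and load_plugin.
from kube_hunter.plugins import hookimpl


@hookimpl
def parser_add_arguments(parser):
    # Called before parsing, so the plugin can extend kube-hunter's CLI
    parser.add_argument("--my-option", action="store_true", help="hypothetical plugin flag")


@hookimpl
def load_plugin(args):
    # Called after argument parsing, before the hunt starts
    if args.my_option:
        print("my-option enabled")
```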
59 changed files with 463 additions and 201 deletions


@@ -1,5 +1,6 @@
[flake8]
ignore = E203, E266, E501, W503, B903
ignore = E203, E266, E501, W503, B903, T499
max-line-length = 120
max-complexity = 18
select = B,C,E,F,W,B9
select = B,C,E,F,W,B9,T4
mypy_config=mypy.ini

.gitignore (vendored)

@@ -29,3 +29,4 @@ var/
.DS_Store
thumbs.db
__pycache__
.mypy_cache


@@ -63,5 +63,5 @@ publish:
.PHONY: clean
clean:
rm -rf build/ dist/ *.egg-info/ .eggs/ .pytest_cache/ .coverage *.spec
rm -rf build/ dist/ *.egg-info/ .eggs/ .pytest_cache/ .mypy_cache .coverage *.spec
find . -type d -name __pycache__ -exec rm -rf '{}' +


@@ -75,10 +75,10 @@ in order to prevent circular dependency bug.
Following the above example, let's figure out the imports:
```python
from ...core.types import Hunter
from ...core.events import handler
from kube_hunter.core.types import Hunter
from kube_hunter.core.events import handler
from ...core.events.types import OpenPortEvent
from kube_hunter.core.events.types import OpenPortEvent
@handler.subscribe(OpenPortEvent, predicate=lambda event: event.port == 30000)
class KubeDashboardDiscovery(Hunter):
@@ -90,13 +90,13 @@ class KubeDashboardDiscovery(Hunter):
As you can see, all of the types here come from the `core` module.
### Core Imports
relative import: `...core.events`
Absolute import: `kube_hunter.core.events`
|Name|Description|
|---|---|
|handler|Core object for using events, every module should import this object|
relative import `...core.events.types`
Absolute import `kube_hunter.core.events.types`
|Name|Description|
|---|---|
@@ -104,7 +104,7 @@ relative import `...core.events.types`
|Vulnerability|Base class for defining a new vulnerability|
|OpenPortEvent|Published when a new port is discovered. The open port is assigned to the `port` attribute|
relative import: `...core.types`
Absolute import: `kube_hunter.core.types`
|Type|Description|
|---|---|


@@ -1,4 +0,0 @@
from . import core
from . import modules
__all__ = [core, modules]


@@ -1,19 +1,44 @@
#!/usr/bin/env python3
# flake8: noqa: E402
import logging
import threading
from kube_hunter.conf import config
from kube_hunter.conf import Config, set_config
from kube_hunter.conf.parser import parse_args
from kube_hunter.conf.logging import setup_logger
from kube_hunter.plugins import initialize_plugin_manager
pm = initialize_plugin_manager()
# Using a plugin hook for adding arguments before parsing
args = parse_args(add_args_hook=pm.hook.parser_add_arguments)
config = Config(
active=args.active,
cidr=args.cidr,
include_patched_versions=args.include_patched_versions,
interface=args.interface,
mapping=args.mapping,
network_timeout=args.network_timeout,
pod=args.pod,
quick=args.quick,
remote=args.remote,
statistics=args.statistics,
)
setup_logger(args.log)
set_config(config)
# Running all other registered plugins before execution
pm.hook.load_plugin(args=args)
from kube_hunter.core.events import handler
from kube_hunter.core.events.types import HuntFinished, HuntStarted
from kube_hunter.modules.discovery.hosts import RunningAsPodEvent, HostScanEvent
from kube_hunter.modules.report import get_reporter, get_dispatcher
config.reporter = get_reporter(config.report)
config.dispatcher = get_dispatcher(config.dispatch)
logger = logging.getLogger(__name__)
import kube_hunter # noqa
config.dispatcher = get_dispatcher(args.dispatch)
config.reporter = get_reporter(args.report)
def interactive_set_config():
@@ -56,7 +81,6 @@ def list_hunters():
print("* {}\n {}\n".format(name, doc))
global hunt_started_lock
hunt_started_lock = threading.Lock()
hunt_started = False
@@ -65,7 +89,7 @@ def main():
global hunt_started
scan_options = [config.pod, config.cidr, config.remote, config.interface]
try:
if config.list:
if args.list:
list_hunters()
return


@@ -1,8 +1,50 @@
from kube_hunter.conf.parser import parse_args
from kube_hunter.conf.logging import setup_logger
from dataclasses import dataclass
from typing import Any, Optional
config = parse_args()
setup_logger(config.log)
@dataclass
class Config:
""" Config is a configuration container.
It contains the following fields:
- active: Enable active hunters
- cidr: Network subnets to scan
- dispatcher: Dispatcher object
- include_patched_versions: Include patched versions in the version comparison
- interface: Interface scanning mode
- list_hunters: Print a list of existing hunters
- log_level: Log level
- mapping: Report only found components
- network_timeout: Timeout for network operations
- pod: From pod scanning mode
- quick: Quick scanning mode
- remote: Hosts to scan
- report: Output format
- statistics: Include hunters statistics
"""
__all__ = [config]
active: bool = False
cidr: Optional[str] = None
dispatcher: Optional[Any] = None
include_patched_versions: bool = False
interface: bool = False
mapping: bool = False
network_timeout: float = 5.0
pod: bool = False
quick: bool = False
remote: Optional[str] = None
reporter: Optional[Any] = None
statistics: bool = False
_config: Optional[Config] = None
def get_config() -> Config:
if not _config:
raise ValueError("Configuration is not initialized")
return _config
def set_config(new_config: Config) -> None:
global _config
_config = new_config
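A minimal sketch of how the new container is meant to be used, mirroring what `__main__.py` now does at startup (the field values below are arbitrary):

```python
from kube_hunter.conf import Config, get_config, set_config

# set_config() must run once at startup; until then get_config()
# raises ValueError("Configuration is not initialized").
set_config(Config(active=True, network_timeout=10.0))

# Hunters then read the shared configuration at call time.
config = get_config()
assert config.active and config.network_timeout == 10.0
```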


@@ -21,7 +21,7 @@ def setup_logger(level_name):
logging.disable(logging.CRITICAL)
else:
log_level = getattr(logging, level_name.upper(), None)
log_level = log_level if type(log_level) is int else None
log_level = log_level if isinstance(log_level, int) else None
logging.basicConfig(level=log_level or DEFAULT_LEVEL, format=LOG_FORMAT)
if not log_level:
logging.warning(f"Unknown log level '{level_name}', using {DEFAULT_LEVEL_NAME}")


@@ -1,9 +1,13 @@
from argparse import ArgumentParser
from kube_hunter.plugins import hookimpl
def parse_args():
parser = ArgumentParser(description="kube-hunter - hunt for security weaknesses in Kubernetes clusters")
@hookimpl
def parser_add_arguments(parser):
"""
This is the default hook implementation for parser_add_arguments
Contains initialization for all default arguments
"""
parser.add_argument(
"--list", action="store_true", help="Displays all tests in kubehunter (add --active flag to see active tests)",
)
@@ -59,6 +63,18 @@ def parse_args():
parser.add_argument("--network-timeout", type=float, default=5.0, help="network operations timeout")
def parse_args(add_args_hook):
"""
Handles all argument parsing
@param add_args_hook: hook for adding arguments to its given ArgumentParser parameter
@return: parsed arguments dict
"""
parser = ArgumentParser(description="kube-hunter - hunt for security weaknesses in Kubernetes clusters")
# adding all arguments to the parser
add_args_hook(parser=parser)
args = parser.parse_args()
if args.cidr:
args.cidr = args.cidr.replace(" ", "").split(",")
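With this change `parse_args` no longer builds the argument list itself; the caller supplies the hook, as `__main__.py` does above. A minimal sketch of the new call pattern:

```python
from kube_hunter.conf.parser import parse_args
from kube_hunter.plugins import initialize_plugin_manager

pm = initialize_plugin_manager()
# Every registered parser_add_arguments implementation (the default one in
# kube_hunter.conf.parser plus any plugins) populates the parser before parsing.
args = parse_args(add_args_hook=pm.hook.parser_add_arguments)
```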


@@ -1,4 +1,3 @@
# flake8: noqa: E402
from . import types
from . import events
__all__ = [types, events]


@@ -1,4 +1,3 @@
# flake8: noqa: E402
from .handler import EventQueue, handler
from . import types
__all__ = [EventQueue, handler, types]


@@ -4,7 +4,7 @@ from collections import defaultdict
from queue import Queue
from threading import Thread
from kube_hunter.conf import config
from kube_hunter.conf import get_config
from kube_hunter.core.types import ActiveHunter, HunterBase
from kube_hunter.core.events.types import Vulnerability, EventFilterBase
@@ -12,7 +12,7 @@ logger = logging.getLogger(__name__)
# Inherits Queue object, handles events asynchronously
class EventQueue(Queue, object):
class EventQueue(Queue):
def __init__(self, num_worker=10):
super(EventQueue, self).__init__()
self.passive_hunters = dict()
@@ -60,6 +60,7 @@ class EventQueue(Queue, object):
# getting uninstantiated event object
def subscribe_event(self, event, hook=None, predicate=None):
config = get_config()
if ActiveHunter in hook.__mro__:
if not config.active:
return
@@ -98,6 +99,8 @@ class EventQueue(Queue, object):
# getting instantiated event object
def publish_event(self, event, caller=None):
config = get_config()
# setting event chain
if caller:
event.previous = caller.event
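The `config = get_config()` call at the top of each method is the pattern repeated throughout the hunks below: configuration used to be a module-level object created when `kube_hunter.conf` was first imported, and is now resolved at call time, after `set_config` has run in `__main__.py`. A sketch of the pattern, with a hypothetical function name:

```python
from kube_hunter.conf import get_config


def current_timeout() -> float:
    # Resolve the configuration when called, after set_config() has run,
    # instead of binding a module-level `config` at import time.
    return get_config().network_timeout
```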


@@ -1,8 +1,8 @@
import logging
import threading
import requests
import logging
from kube_hunter.conf import config
from kube_hunter.conf import get_config
from kube_hunter.core.types import (
InformationDisclosure,
DenialOfService,
@@ -17,7 +17,7 @@ from kube_hunter.core.types import (
logger = logging.getLogger(__name__)
class EventFilterBase(object):
class EventFilterBase:
def __init__(self, event):
self.event = event
@@ -28,7 +28,7 @@ class EventFilterBase(object):
return self.event
class Event(object):
class Event:
def __init__(self):
self.previous = None
self.hunter = None
@@ -62,7 +62,7 @@ class Event(object):
return history
class Service(object):
class Service:
def __init__(self, name, path="", secure=True):
self.name = name
self.secure = secure
@@ -79,7 +79,7 @@ class Service(object):
return self.__doc__
class Vulnerability(object):
class Vulnerability:
severity = dict(
{
InformationDisclosure: "medium",
@@ -118,7 +118,6 @@ class Vulnerability(object):
return self.severity.get(self.category, "low")
global event_id_count_lock
event_id_count_lock = threading.Lock()
event_id_count = 0
@@ -140,6 +139,7 @@ class NewHostEvent(Event):
return self.cloud_type
def get_cloud(self):
config = get_config()
try:
logger.debug("Checking whether the cluster is deployed on azure's cloud")
# Leverage 3rd tool https://github.com/blrchen/AzureSpeed for Azure cloud ip detection
@@ -148,7 +148,7 @@ class NewHostEvent(Event):
).json()
return result["cloud"] or "NoCloud"
except requests.ConnectionError:
logger.info(f"Failed to connect cloud type service", exc_info=True)
logger.info("Failed to connect cloud type service", exc_info=True)
except Exception:
logger.warning(f"Unable to check cloud of {self.host}", exc_info=True)
return "NoCloud"


@@ -1,4 +1,4 @@
class HunterBase(object):
class HunterBase:
publishedVulnerabilities = 0
@staticmethod


@@ -1,5 +1,4 @@
# flake8: noqa: E402
from . import report
from . import discovery
from . import hunting
__all__ = [report, discovery, hunting]


@@ -1,3 +1,4 @@
# flake8: noqa: E402
from . import (
apiserver,
dashboard,
@@ -8,14 +9,3 @@ from . import (
ports,
proxy,
)
__all__ = [
apiserver,
dashboard,
etcd,
hosts,
kubectl,
kubelet,
ports,
proxy,
]


@@ -1,11 +1,11 @@
import requests
import logging
import requests
from kube_hunter.core.types import Discovery
from kube_hunter.core.events import handler
from kube_hunter.core.events.types import OpenPortEvent, Service, Event, EventFilterBase
from kube_hunter.conf import config
from kube_hunter.conf import get_config
KNOWN_API_PORTS = [443, 6443, 8080]
@@ -57,6 +57,7 @@ class ApiServiceDiscovery(Discovery):
self.publish_event(K8sApiService(protocol))
def has_api_behaviour(self, protocol):
config = get_config()
try:
r = self.session.get(f"{protocol}://{self.event.host}:{self.event.port}", timeout=config.network_timeout)
if ("k8s" in r.text) or ('"code"' in r.text and r.status_code != 200):
@@ -93,6 +94,7 @@ class ApiServiceClassify(EventFilterBase):
def classify_using_version_endpoint(self):
"""Tries to classify by accessing /version. if could not access succeded, returns"""
config = get_config()
try:
endpoint = f"{self.event.protocol}://{self.event.host}:{self.event.port}/version"
versions = self.session.get(endpoint, timeout=config.network_timeout).json()


@@ -2,7 +2,7 @@ import json
import logging
import requests
from kube_hunter.conf import config
from kube_hunter.conf import get_config
from kube_hunter.core.events import handler
from kube_hunter.core.events.types import Event, OpenPortEvent, Service
from kube_hunter.core.types import Discovery
@@ -28,6 +28,7 @@ class KubeDashboard(Discovery):
@property
def secure(self):
config = get_config()
endpoint = f"http://{self.event.host}:{self.event.port}/api/v1/service/default"
logger.debug("Attempting to discover an Api server to access dashboard")
try:


@@ -1,14 +1,14 @@
import os
import logging
import requests
import itertools
import requests
from enum import Enum
from netaddr import IPNetwork, IPAddress, AddrFormatError
from netifaces import AF_INET, ifaddresses, interfaces
from scapy.all import ICMP, IP, Ether, srp1
from kube_hunter.conf import config
from kube_hunter.conf import get_config
from kube_hunter.core.events import handler
from kube_hunter.core.events.types import Event, NewHostEvent, Vulnerability
from kube_hunter.core.types import Discovery, InformationDisclosure, Azure
@@ -95,6 +95,7 @@ class FromPodHostDiscovery(Discovery):
self.event = event
def execute(self):
config = get_config()
# Scan any hosts that the user specified
if config.remote or config.cidr:
self.publish_event(HostScanEvent())
@@ -104,7 +105,7 @@ class FromPodHostDiscovery(Discovery):
if self.is_azure_pod():
subnets, cloud = self.azure_metadata_discovery()
else:
subnets, ext_ip = self.traceroute_discovery()
subnets = self.traceroute_discovery()
should_scan_apiserver = False
if self.event.kubeservicehost:
@@ -119,6 +120,7 @@ class FromPodHostDiscovery(Discovery):
self.publish_event(NewHostEvent(host=IPAddress(self.event.kubeservicehost), cloud=cloud))
def is_azure_pod(self):
config = get_config()
try:
logger.debug("From pod attempting to access Azure Metadata API")
if (
@@ -136,16 +138,15 @@ class FromPodHostDiscovery(Discovery):
# for pod scanning
def traceroute_discovery(self):
# getting external ip, to determine if cloud cluster
external_ip = requests.get("https://canhazip.com", timeout=config.network_timeout).text
config = get_config()
node_internal_ip = srp1(
Ether() / IP(dst="1.1.1.1", ttl=1) / ICMP(), verbose=0, timeout=config.network_timeout,
)[IP].src
return [[node_internal_ip, "24"]], external_ip
return [[node_internal_ip, "24"]]
# querying azure's interface metadata api | works only from a pod
def azure_metadata_discovery(self):
config = get_config()
logger.debug("From pod attempting to access azure's metadata")
machine_metadata = requests.get(
"http://169.254.169.254/metadata/instance?api-version=2017-08-01",
@@ -178,6 +179,7 @@ class HostDiscovery(Discovery):
self.event = event
def execute(self):
config = get_config()
if config.cidr:
for ip in HostDiscoveryHelpers.generate_hosts(config.cidr):
self.publish_event(NewHostEvent(host=ip))


@@ -3,7 +3,7 @@ import requests
import urllib3
from enum import Enum
from kube_hunter.conf import config
from kube_hunter.conf import get_config
from kube_hunter.core.types import Discovery
from kube_hunter.core.events import handler
from kube_hunter.core.events.types import OpenPortEvent, Event, Service
@@ -47,6 +47,7 @@ class KubeletDiscovery(Discovery):
self.event = event
def get_read_only_access(self):
config = get_config()
endpoint = f"http://{self.event.host}:{self.event.port}/pods"
logger.debug(f"Trying to get kubelet read access at {endpoint}")
r = requests.get(endpoint, timeout=config.network_timeout)
@@ -64,6 +65,7 @@ class KubeletDiscovery(Discovery):
self.publish_event(SecureKubeletEvent(secure=True, anonymous_auth=False))
def ping_kubelet(self):
config = get_config()
endpoint = f"https://{self.event.host}:{self.event.port}/pods"
logger.debug("Attempting to get pods info from kubelet")
try:


@@ -1,7 +1,7 @@
import logging
import requests
from kube_hunter.conf import config
from kube_hunter.conf import get_config
from kube_hunter.core.types import Discovery
from kube_hunter.core.events import handler
from kube_hunter.core.events.types import Service, Event, OpenPortEvent
@@ -29,6 +29,7 @@ class KubeProxy(Discovery):
@property
def accesible(self):
config = get_config()
endpoint = f"http://{self.host}:{self.port}/api/v1"
logger.debug("Attempting to discover a proxy service")
try:


@@ -1,3 +1,4 @@
# flake8: noqa: E402
from . import (
aks,
apiserver,
@@ -13,19 +14,3 @@ from . import (
proxy,
secrets,
)
__all__ = [
aks,
apiserver,
arp,
capabilities,
certificates,
cves,
dashboard,
dns,
etcd,
kubelet,
mounts,
proxy,
secrets,
]


@@ -2,7 +2,7 @@ import json
import logging
import requests
from kube_hunter.conf import config
from kube_hunter.conf import get_config
from kube_hunter.modules.hunting.kubelet import ExposedRunHandler
from kube_hunter.core.events import handler
from kube_hunter.core.events.types import Event, Vulnerability
@@ -33,6 +33,7 @@ class AzureSpnHunter(Hunter):
# getting a container that has access to the azure.json file
def get_key_container(self):
config = get_config()
endpoint = f"{self.base_url}/pods"
logger.debug("Trying to find container with access to azure.json file")
try:
@@ -69,6 +70,7 @@ class ProveAzureSpnExposure(ActiveHunter):
self.base_url = f"https://{self.event.host}:{self.event.port}"
def run(self, command, container):
config = get_config()
run_url = "/".join(self.base_url, "run", container["namespace"], container["pod"], container["name"])
return requests.post(run_url, verify=False, params={"cmd": command}, timeout=config.network_timeout)


@@ -1,9 +1,9 @@
import logging
import json
import requests
import uuid
import requests
from kube_hunter.conf import config
from kube_hunter.conf import get_config
from kube_hunter.modules.discovery.apiserver import ApiServer
from kube_hunter.core.events import handler
from kube_hunter.core.events.types import Vulnerability, Event, K8sVersionDisclosure
@@ -236,6 +236,7 @@ class AccessApiServer(Hunter):
self.with_token = False
def access_api_server(self):
config = get_config()
logger.debug(f"Passive Hunter is attempting to access the API at {self.path}")
try:
r = requests.get(f"{self.path}/api", headers=self.headers, verify=False, timeout=config.network_timeout)
@@ -246,6 +247,7 @@ class AccessApiServer(Hunter):
return False
def get_items(self, path):
config = get_config()
try:
items = []
r = requests.get(path, headers=self.headers, verify=False, timeout=config.network_timeout)
@@ -261,6 +263,7 @@ class AccessApiServer(Hunter):
return None
def get_pods(self, namespace=None):
config = get_config()
pods = []
try:
if not namespace:
@@ -340,6 +343,7 @@ class AccessApiServerActive(ActiveHunter):
self.path = f"{self.event.protocol}://{self.event.host}:{self.event.port}"
def create_item(self, path, data):
config = get_config()
headers = {"Content-Type": "application/json"}
if self.event.auth_token:
headers["Authorization"] = f"Bearer {self.event.auth_token}"
@@ -354,6 +358,7 @@ class AccessApiServerActive(ActiveHunter):
return None
def patch_item(self, path, data):
config = get_config()
headers = {"Content-Type": "application/json-patch+json"}
if self.event.auth_token:
headers["Authorization"] = f"Bearer {self.event.auth_token}"
@@ -369,6 +374,7 @@ class AccessApiServerActive(ActiveHunter):
return None
def delete_item(self, path):
config = get_config()
headers = {}
if self.event.auth_token:
headers["Authorization"] = f"Bearer {self.event.auth_token}"
@@ -570,6 +576,7 @@ class ApiVersionHunter(Hunter):
self.session.headers.update({"Authorization": f"Bearer {self.event.auth_token}"})
def execute(self):
config = get_config()
if self.event.auth_token:
logger.debug(
"Trying to access the API server version endpoint using pod's"


@@ -2,7 +2,7 @@ import logging
from scapy.all import ARP, IP, ICMP, Ether, sr1, srp
from kube_hunter.conf import config
from kube_hunter.conf import get_config
from kube_hunter.core.events import handler
from kube_hunter.core.events.types import Event, Vulnerability
from kube_hunter.core.types import ActiveHunter, KubernetesCluster, IdentityTheft
@@ -32,6 +32,7 @@ class ArpSpoofHunter(ActiveHunter):
self.event = event
def try_getting_mac(self, ip):
config = get_config()
ans = sr1(ARP(op=1, pdst=ip), timeout=config.network_timeout, verbose=0)
return ans[ARP].hwsrc if ans else None
@@ -51,9 +52,10 @@ class ArpSpoofHunter(ActiveHunter):
return False
def execute(self):
config = get_config()
self_ip = sr1(IP(dst="1.1.1.1", ttl=1) / ICMP(), verbose=0, timeout=config.network_timeout)[IP].dst
arp_responses, _ = srp(
Ether(dst="ff:ff:ff:ff:ff:ff") / ARP(op=1, pdst=f"{self_ip}/24"), timeout=config.netork_timeout, verbose=0,
Ether(dst="ff:ff:ff:ff:ff:ff") / ARP(op=1, pdst=f"{self_ip}/24"), timeout=config.network_timeout, verbose=0,
)
# arp enabled on cluster and more than one pod on node


@@ -8,7 +8,7 @@ from kube_hunter.core.events import handler
from kube_hunter.core.events.types import Vulnerability, Event, Service
logger = logging.getLogger(__name__)
email_pattern = re.compile(r"([a-z0-9]+@[a-z0-9]+\.[a-z0-9]+)")
email_pattern = re.compile(rb"([a-z0-9]+@[a-z0-9]+\.[a-z0-9]+)")
class CertificateEmail(Vulnerability, Event):
@@ -16,7 +16,7 @@ class CertificateEmail(Vulnerability, Event):
def __init__(self, email):
Vulnerability.__init__(
self, KubernetesCluster, "Certificate Includes Email Address", category=InformationDisclosure, khv="KHV021",
self, KubernetesCluster, "Certificate Includes Email Address", category=InformationDisclosure, vid="KHV021",
)
self.email = email
self.evidence = "email: {}".format(self.email)
@@ -39,8 +39,11 @@ class CertificateDiscovery(Hunter):
except ssl.SSLError:
# If the server doesn't offer SSL on this port we won't get a certificate
return
self.examine_certificate(cert)
def examine_certificate(self, cert):
c = cert.strip(ssl.PEM_HEADER).strip(ssl.PEM_FOOTER)
certdata = base64.decodebytes(c)
certdata = base64.b64decode(c)
emails = re.findall(email_pattern, certdata)
for email in emails:
self.publish_event(CertificateEmail(email=email))
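The fix from #350: under Python 3 the decoded certificate body is `bytes`, so the module switches to `base64.b64decode` and a bytes regex (`rb"..."`). A small self-contained illustration — the sample input is just `user@example.org` base64-encoded, not taken from the changeset:

```python
import base64
import re

email_pattern = re.compile(rb"([a-z0-9]+@[a-z0-9]+\.[a-z0-9]+)")

# b64decode returns bytes, so the pattern must be a bytes pattern too.
certdata = base64.b64decode("dXNlckBleGFtcGxlLm9yZw==")
assert email_pattern.findall(certdata) == [b"user@example.org"]
```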


@@ -1,7 +1,7 @@
import logging
from packaging import version
from kube_hunter.conf import config
from kube_hunter.conf import get_config
from kube_hunter.core.events import handler
from kube_hunter.core.events.types import Vulnerability, Event, K8sVersionDisclosure
from kube_hunter.core.types import (
@@ -111,7 +111,7 @@ class CveUtils:
@staticmethod
def get_base_release(full_ver):
# if LegacyVersion, converting manually to a base version
if type(full_ver) == version.LegacyVersion:
if isinstance(full_ver, version.LegacyVersion):
return version.parse(".".join(full_ver._version.split(".")[:2]))
return version.parse(".".join(map(str, full_ver._version.release[:2])))
@@ -122,7 +122,7 @@ class CveUtils:
@staticmethod
def to_raw_version(v):
if type(v) != version.LegacyVersion:
if not isinstance(v, version.LegacyVersion):
return ".".join(map(str, v._version.release))
return v._version
@@ -159,7 +159,7 @@ class CveUtils:
# default to classic compare, unless the check_version is legacy.
version_compare_func = CveUtils.basic_compare
if type(check_v) == version.LegacyVersion:
if isinstance(check_v, version.LegacyVersion):
version_compare_func = CveUtils.version_compare
if check_version not in fix_versions:
@@ -170,7 +170,7 @@ class CveUtils:
# if the check version and the current fix has the same base release
if base_check_v == base_fix_v:
# when check_version is legacy, we use a custom compare func, to handle differences between versions.
# when check_version is legacy, we use a custom compare func, to handle differences between versions
if version_compare_func(check_v, fix_v) == -1:
# determine vulnerable if smaller and with same base version
vulnerable = True
@@ -194,6 +194,7 @@ class K8sClusterCveHunter(Hunter):
self.event = event
def execute(self):
config = get_config()
logger.debug(f"Checking known CVEs for k8s API version: {self.event.version}")
cve_mapping = {
ServerApiVersionEndPointAccessPE: ["1.10.11", "1.11.5", "1.12.3"],
@@ -217,6 +218,7 @@ class KubectlCVEHunter(Hunter):
self.event = event
def execute(self):
config = get_config()
cve_mapping = {
KubectlCpVulnerability: ["1.11.9", "1.12.7", "1.13.5", "1.14.0"],
IncompleteFixToKubectlCpVulnerability: ["1.12.9", "1.13.6", "1.14.2"],
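The `type(x) == cls` checks here become `isinstance`, the idiomatic form (it also accepts subclasses). With the `packaging` releases contemporary to this change, `version.parse` returns `LegacyVersion` for non-PEP 440 strings, which is what these branches distinguish; a quick sketch (note `LegacyVersion` was removed in later `packaging` releases):

```python
from packaging import version

v = version.parse("1.12.3")
# PEP 440 strings parse to Version; malformed ones parse to LegacyVersion
# (in the packaging releases this code targeted).
assert isinstance(v, version.Version)
assert not isinstance(v, version.LegacyVersion)
```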


@@ -2,7 +2,7 @@ import logging
import json
import requests
from kube_hunter.conf import config
from kube_hunter.conf import get_config
from kube_hunter.core.types import Hunter, RemoteCodeExec, KubernetesCluster
from kube_hunter.core.events import handler
from kube_hunter.core.events.types import Vulnerability, Event
@@ -31,6 +31,7 @@ class KubeDashboard(Hunter):
self.event = event
def get_nodes(self):
config = get_config()
logger.debug("Passive hunter is attempting to get nodes types of the cluster")
r = requests.get(f"http://{self.event.host}:{self.event.port}/api/v1/node", timeout=config.network_timeout)
if r.status_code == 200 and "nodes" in r.text:


@@ -3,7 +3,7 @@ import logging
from scapy.all import IP, ICMP, UDP, DNS, DNSQR, ARP, Ether, sr1, srp1, srp
from kube_hunter.conf import config
from kube_hunter.conf import get_config
from kube_hunter.core.events import handler
from kube_hunter.core.events.types import Event, Vulnerability
from kube_hunter.core.types import ActiveHunter, KubernetesCluster, IdentityTheft
@@ -36,6 +36,7 @@ class DnsSpoofHunter(ActiveHunter):
self.event = event
def get_cbr0_ip_mac(self):
config = get_config()
res = srp1(Ether() / IP(dst="1.1.1.1", ttl=1) / ICMP(), verbose=0, timeout=config.network_timeout)
return res[IP].src, res.src
@@ -47,6 +48,7 @@ class DnsSpoofHunter(ActiveHunter):
return match.group(1)
def get_kube_dns_ip_mac(self):
config = get_config()
kubedns_svc_ip = self.extract_nameserver_ip()
# getting actual pod ip of kube-dns service, by comparing the src mac of a dns response and arp scanning.
@@ -66,6 +68,7 @@ class DnsSpoofHunter(ActiveHunter):
return response[ARP].psrc, response.src
def execute(self):
config = get_config()
logger.debug("Attempting to get kube-dns pod ip")
self_ip = sr1(IP(dst="1.1.1.1", ttl=1) / ICMP(), verbose=0, timeout=config.netork_timeout)[IP].dst
cbr0_ip, cbr0_mac = self.get_cbr0_ip_mac()


@@ -1,7 +1,7 @@
import logging
import requests
from kube_hunter.conf import config
from kube_hunter.conf import get_config
from kube_hunter.core.events import handler
from kube_hunter.core.events.types import Vulnerability, Event, OpenPortEvent
from kube_hunter.core.types import (
@@ -83,6 +83,7 @@ class EtcdRemoteAccessActive(ActiveHunter):
self.write_evidence = ""
def db_keys_write_access(self):
config = get_config()
logger.debug(f"Trying to write keys remotely on host {self.event.host}")
data = {"value": "remotely written data"}
try:
@@ -115,6 +116,7 @@ class EtcdRemoteAccess(Hunter):
self.protocol = "https"
def db_keys_disclosure(self):
config = get_config()
logger.debug(f"{self.event.host} Passive hunter is attempting to read etcd keys remotely")
try:
r = requests.get(
@@ -126,6 +128,7 @@ class EtcdRemoteAccess(Hunter):
return False
def version_disclosure(self):
config = get_config()
logger.debug(f"Trying to check etcd version remotely at {self.event.host}")
try:
r = requests.get(
@@ -139,6 +142,7 @@ class EtcdRemoteAccess(Hunter):
return False
def insecure_access(self):
config = get_config()
logger.debug(f"Trying to access etcd insecurely at {self.event.host}")
try:
r = requests.get(


@@ -6,7 +6,7 @@ import re
import requests
import urllib3
from kube_hunter.conf import config
from kube_hunter.conf import get_config
from kube_hunter.core.events import handler
from kube_hunter.core.events.types import Vulnerability, Event, K8sVersionDisclosure
from kube_hunter.core.types import (
@@ -27,9 +27,6 @@ logger = logging.getLogger(__name__)
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
""" Vulnerabilities """
class ExposedPodsHandler(Vulnerability, Event):
"""An attacker could view sensitive information about pods that are
bound to a Node using the /pods endpoint"""
@@ -187,6 +184,7 @@ class ReadOnlyKubeletPortHunter(Hunter):
self.pods_endpoint_data = ""
def get_k8s_version(self):
config = get_config()
logger.debug("Passive hunter is attempting to find kubernetes version")
metrics = requests.get(f"{self.path}/metrics", timeout=config.network_timeout).text
for line in metrics.split("\n"):
@@ -208,12 +206,14 @@ class ReadOnlyKubeletPortHunter(Hunter):
return privileged_containers if len(privileged_containers) > 0 else None
def get_pods_endpoint(self):
config = get_config()
logger.debug("Attempting to find pods endpoints")
response = requests.get(f"{self.path}/pods", timeout=config.network_timeout)
if "items" in response.text:
return response.json()
def check_healthz_endpoint(self):
config = get_config()
r = requests.get(f"{self.path}/healthz", verify=False, timeout=config.network_timeout)
return r.text if r.status_code == 200 else False
@@ -240,7 +240,7 @@ class SecureKubeletPortHunter(Hunter):
Hunts specific endpoints on an open secured Kubelet
"""
class DebugHandlers(object):
class DebugHandlers:
""" all methods will return the handler name if successful """
def __init__(self, path, pod, session=None):
@@ -250,6 +250,7 @@ class SecureKubeletPortHunter(Hunter):
# outputs logs from a specific container
def test_container_logs(self):
config = get_config()
logs_url = self.path + KubeletHandlers.CONTAINERLOGS.value.format(
pod_namespace=self.pod["namespace"], pod_id=self.pod["name"], container_name=self.pod["container"],
)
@@ -257,6 +258,7 @@ class SecureKubeletPortHunter(Hunter):
# need further investigation on websockets protocol for further implementation
def test_exec_container(self):
config = get_config()
# opens a stream to connect to using a web socket
headers = {"X-Stream-Protocol-Version": "v2.channel.k8s.io"}
exec_url = self.path + KubeletHandlers.EXEC.value.format(
@@ -274,6 +276,7 @@ class SecureKubeletPortHunter(Hunter):
# need further investigation on websockets protocol for further implementation
def test_port_forward(self):
config = get_config()
headers = {
"Upgrade": "websocket",
"Connection": "Upgrade",
@@ -291,6 +294,7 @@ class SecureKubeletPortHunter(Hunter):
# executes one command and returns output
def test_run_container(self):
config = get_config()
run_url = self.path + KubeletHandlers.RUN.value.format(
pod_namespace="test", pod_id="test", container_name="test", cmd="",
)
@@ -299,12 +303,14 @@ class SecureKubeletPortHunter(Hunter):
# returns list of currently running pods
def test_running_pods(self):
config = get_config()
pods_url = self.path + KubeletHandlers.RUNNINGPODS.value
r = self.session.get(pods_url, verify=False, timeout=config.network_timeout)
return r.json() if r.status_code == 200 else False
# need further investigation on the differences between attach and exec
def test_attach_container(self):
config = get_config()
# headers={"X-Stream-Protocol-Version": "v2.channel.k8s.io"}
attach_url = self.path + KubeletHandlers.ATTACH.value.format(
pod_namespace=self.pod["namespace"],
@@ -321,6 +327,7 @@ class SecureKubeletPortHunter(Hunter):
# checks access to logs endpoint
def test_logs_endpoint(self):
config = get_config()
logs_url = self.session.get(
self.path + KubeletHandlers.LOGS.value.format(path=""), timeout=config.network_timeout,
).text
@@ -328,6 +335,7 @@ class SecureKubeletPortHunter(Hunter):
# returns the cmd line used to run the kubelet
def test_pprof_cmdline(self):
config = get_config()
cmd = self.session.get(
self.path + KubeletHandlers.PPROF_CMDLINE.value, verify=False, timeout=config.network_timeout,
)
@@ -350,11 +358,13 @@ class SecureKubeletPortHunter(Hunter):
self.pods_endpoint_data = ""
def get_pods_endpoint(self):
config = get_config()
response = self.session.get(f"{self.path}/pods", verify=False, timeout=config.network_timeout)
if "items" in response.text:
return response.json()
def check_healthz_endpoint(self):
config = get_config()
r = requests.get(f"{self.path}/healthz", verify=False, timeout=config.network_timeout)
return r.text if r.status_code == 200 else False
@@ -371,6 +381,7 @@ class SecureKubeletPortHunter(Hunter):
self.test_handlers()
def test_handlers(self):
config = get_config()
# if kube-hunter runs in a pod, we test with kube-hunter's pod
pod = self.kubehunter_pod if config.pod else self.get_random_pod()
if pod:
@@ -434,6 +445,7 @@ class ProveRunHandler(ActiveHunter):
self.base_path = f"https://{self.event.host}:{self.event.port}"
def run(self, command, container):
config = get_config()
run_url = KubeletHandlers.RUN.value.format(
pod_namespace=container["namespace"],
pod_id=container["pod"],
@@ -445,6 +457,7 @@ class ProveRunHandler(ActiveHunter):
).text
def execute(self):
config = get_config()
r = self.event.session.get(
self.base_path + KubeletHandlers.PODS.value, verify=False, timeout=config.network_timeout,
)
@@ -478,6 +491,7 @@ class ProveContainerLogsHandler(ActiveHunter):
self.base_url = f"{protocol}://{self.event.host}:{self.event.port}/"
def execute(self):
config = get_config()
pods_raw = self.event.session.get(
self.base_url + KubeletHandlers.PODS.value, verify=False, timeout=config.network_timeout,
).text
@@ -513,6 +527,7 @@ class ProveSystemLogs(ActiveHunter):
self.base_url = f"https://{self.event.host}:{self.event.port}"
def execute(self):
config = get_config()
audit_logs = self.event.session.get(
f"{self.base_url}/" + KubeletHandlers.LOGS.value.format(path="audit/audit.log"),
verify=False,


@@ -2,7 +2,7 @@ import logging
import re
import uuid
from kube_hunter.conf import config
from kube_hunter.conf import get_config
from kube_hunter.core.events import handler
from kube_hunter.core.events.types import Event, Vulnerability
from kube_hunter.core.types import (
@@ -88,6 +88,7 @@ class ProveVarLogMount(ActiveHunter):
# TODO: replace with multiple subscription to WriteMountToVarLog as well
def get_varlog_mounters(self):
config = get_config()
logger.debug("accessing /pods manually on ProveVarLogMount")
pods = self.event.session.get(
f"{self.base_path}/" + KubeletHandlers.PODS.value, verify=False, timeout=config.network_timeout,
@@ -107,6 +108,7 @@ class ProveVarLogMount(ActiveHunter):
def traverse_read(self, host_file, container, mount_path, host_path):
"""Returns content of file on the host, and cleans trails"""
config = get_config()
symlink_name = str(uuid.uuid4())
# creating symlink to file
self.run(f"ln -s {host_file} {mount_path}/{symlink_name}", container)


@@ -3,7 +3,7 @@ import requests
from enum import Enum
from kube_hunter.conf import config
from kube_hunter.conf import get_config
from kube_hunter.core.events import handler
from kube_hunter.core.events.types import Event, Vulnerability, K8sVersionDisclosure
from kube_hunter.core.types import (
@@ -53,11 +53,13 @@ class KubeProxy(Hunter):
@property
def namespaces(self):
config = get_config()
resource_json = requests.get(f"{self.api_url}/namespaces", timeout=config.network_timeout).json()
return self.extract_names(resource_json)
@property
def services(self):
config = get_config()
# map between namespaces and service names
services = dict()
for namespace in self.namespaces:
@@ -85,6 +87,7 @@ class ProveProxyExposed(ActiveHunter):
self.event = event
def execute(self):
config = get_config()
version_metadata = requests.get(
f"http://{self.event.host}:{self.event.port}/version", verify=False, timeout=config.network_timeout,
).json()
@@ -102,6 +105,7 @@ class K8sVersionDisclosureProve(ActiveHunter):
self.event = event
def execute(self):
config = get_config()
version_metadata = requests.get(
f"http://{self.event.host}:{self.event.port}/version", verify=False, timeout=config.network_timeout,
).json()


@@ -50,7 +50,7 @@ class AccessSecrets(Hunter):
for dirname, _, files in os.walk("/var/run/secrets/"):
for f in files:
self.secrets_evidence.append(os.path.join(dirname, f))
return True if (len(self.secrets_evidence) > 0) else False
return len(self.secrets_evidence) > 0
def execute(self):
if self.event.auth_token is not None:


@@ -1,3 +1,2 @@
# flake8: noqa: E402
from kube_hunter.modules.report.factory import get_reporter, get_dispatcher
__all__ = [get_reporter, get_dispatcher]


@@ -8,7 +8,7 @@ from kube_hunter.modules.report.collector import (
)
class BaseReporter(object):
class BaseReporter:
def get_nodes(self):
nodes = list()
node_locations = set()


@@ -1,7 +1,7 @@
import logging
import threading
from kube_hunter.conf import config
from kube_hunter.conf import get_config
from kube_hunter.core.events import handler
from kube_hunter.core.events.types import (
Event,
@@ -14,20 +14,16 @@ from kube_hunter.core.events.types import (
logger = logging.getLogger(__name__)
global services_lock
services_lock = threading.Lock()
services = list()
global vulnerabilities_lock
vulnerabilities_lock = threading.Lock()
vulnerabilities = list()
hunters = handler.all_hunters
@handler.subscribe(Service)
@handler.subscribe(Vulnerability)
class Collector(object):
class Collector:
def __init__(self, event=None):
self.event = event
@@ -51,11 +47,12 @@ class TablesPrinted(Event):
@handler.subscribe(HuntFinished)
class SendFullReport(object):
class SendFullReport:
def __init__(self, event):
self.event = event
def execute(self):
config = get_config()
report = config.reporter.get_report(statistics=config.statistics, mapping=config.mapping)
config.dispatcher.dispatch(report)
handler.publish_event(ReportDispatched())
@@ -63,7 +60,7 @@ class SendFullReport(object):
@handler.subscribe(HuntStarted)
class StartedInfo(object):
class StartedInfo:
def __init__(self, event):
self.event = event


@@ -5,7 +5,7 @@ import requests
logger = logging.getLogger(__name__)
class HTTPDispatcher(object):
class HTTPDispatcher:
def dispatch(self, report):
logger.debug("Dispatching report via HTTP")
dispatch_method = os.environ.get("KUBEHUNTER_HTTP_DISPATCH_METHOD", "POST").upper()
@@ -24,7 +24,7 @@ class HTTPDispatcher(object):
logger.exception(f"Could not dispatch report to {dispatch_url}")
class STDOUTDispatcher(object):
class STDOUTDispatcher:
def dispatch(self, report):
logger.debug("Dispatching report via stdout")
print(report)


@@ -1,10 +1,10 @@
import logging
from kube_hunter.modules.report.json import JSONReporter
from kube_hunter.modules.report.yaml import YAMLReporter
from kube_hunter.modules.report.plain import PlainReporter
from kube_hunter.modules.report.dispatchers import STDOUTDispatcher, HTTPDispatcher
import logging
logger = logging.getLogger(__name__)
DEFAULT_REPORTER = "plain"


@@ -1,4 +1,5 @@
import json
from kube_hunter.modules.report.base import BaseReporter


@@ -1,5 +1,3 @@
from __future__ import print_function
from prettytable import ALL, PrettyTable
from kube_hunter.modules.report.base import BaseReporter
@@ -46,7 +44,6 @@ class PlainReporter(BaseReporter):
if vulnerabilities_len:
output += self.vulns_table()
output += "\nKube Hunter couldn't find any clusters"
# print("\nKube Hunter couldn't find any clusters. {}".format("Maybe try with --active?" if not config.active else ""))
return output
def nodes_table(self):


@@ -0,0 +1,23 @@
import pluggy
from kube_hunter.plugins import hookspecs
hookimpl = pluggy.HookimplMarker("kube-hunter")
def initialize_plugin_manager():
"""
Initializes the plugin manager and loads all default and setuptools-registered plugin implementations
@return: initialized plugin manager
"""
pm = pluggy.PluginManager("kube-hunter")
pm.add_hookspecs(hookspecs)
pm.load_setuptools_entrypoints("kube_hunter")
# default registration of builtin implemented plugins
from kube_hunter.conf import parser
pm.register(parser)
return pm
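`load_setuptools_entrypoints("kube_hunter")` means an external package can register hooks without touching this code, by exposing an entry point in the `kube_hunter` group. A hypothetical `setup.py` for such a plugin package (all names below are invented for illustration):

```python
from setuptools import setup

setup(
    name="kube-hunter-example-plugin",  # hypothetical package
    packages=["example_plugin"],
    entry_points={
        # Group name matches load_setuptools_entrypoints("kube_hunter");
        # the target module should contain @hookimpl functions.
        "kube_hunter": ["example = example_plugin.hooks"],
    },
)
```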


@@ -0,0 +1,24 @@
import pluggy
from argparse import ArgumentParser
hookspec = pluggy.HookspecMarker("kube-hunter")
@hookspec
def parser_add_arguments(parser: ArgumentParser):
"""Add arguments to the ArgumentParser.
If a plugin requires an additional argument, it should implement this hook
and add the argument to the given ArgumentParser
@param parser: an ArgumentParser; implementations call parser.add_argument on it
"""
@hookspec
def load_plugin(args):
"""Plugins that wish to execute code after the argument parsing
should implement this hook.
@param args: all parsed arguments passed to kube-hunter
"""

mypy.ini (new file)

@@ -0,0 +1,2 @@
[mypy]
ignore_missing_imports = True


@@ -13,3 +13,5 @@ staticx
black
pre-commit
flake8-bugbear
flake8-mypy
pluggy


@@ -37,6 +37,8 @@ install_requires =
ruamel.yaml
future
packaging
dataclasses
pluggy
setup_requires =
setuptools>=30.3.0
setuptools_scm


@@ -1,6 +1,7 @@
from subprocess import check_call
from pkg_resources import parse_requirements
from configparser import ConfigParser
from pkg_resources import parse_requirements
from subprocess import check_call
from typing import Any, List
from setuptools import setup, Command
@@ -8,7 +9,7 @@ class ListDependenciesCommand(Command):
"""A custom command to list dependencies"""
description = "list package dependencies"
user_options = []
user_options: List[Any] = []
def initialize_options(self):
pass
@@ -27,7 +28,7 @@ class PyInstallerCommand(Command):
"""A custom command to run PyInstaller to build standalone executable."""
description = "run PyInstaller on kube-hunter entrypoint"
user_options = []
user_options: List[Any] = []
def initialize_options(self):
pass


@@ -1,12 +1,16 @@
import requests_mock
import json
from kube_hunter.conf import Config, set_config
from kube_hunter.core.events.types import NewHostEvent
set_config(Config())
# Testing if it doesn't try to run get_cloud if the cloud type is already set.
# get_cloud(1.2.3.4) will result with an error
def test_presetcloud():
""" Testing if it doesn't try to run get_cloud if the cloud type is already set.
get_cloud(1.2.3.4) will result with an error
"""
expcted = "AWS"
hostEvent = NewHostEvent(host="1.2.3.4", cloud=expcted)
assert expcted == hostEvent.cloud


@@ -1,3 +1,9 @@
# flake8: noqa: E402
from kube_hunter.conf import Config, set_config
set_config(Config(active=True))
from kube_hunter.core.events.handler import handler
from kube_hunter.modules.discovery.apiserver import ApiServiceDiscovery
from kube_hunter.modules.discovery.dashboard import KubeDashboard as KubeDashboardDiscovery
@@ -90,23 +96,20 @@ def test_passive_hunters_registered():
assert expected_odd == actual_odd, "Unexpected passive hunters are registered"
# TODO (#334): Active hunters registration cannot be tested since it requires `config.active` to be set
# def test_active_hunters_registered():
# expected_missing = set()
# expected_odd = set()
#
# registered_active = remove_test_hunters(handler.active_hunters.keys())
# actual_missing = ACTIVE_HUNTERS - registered_active
# actual_odd = registered_active - ACTIVE_HUNTERS
#
# assert expected_missing == actual_missing, "Active hunters are missing"
# assert expected_odd == actual_odd, "Unexpected active hunters are registered"
def test_active_hunters_registered():
expected_missing = set()
expected_odd = set()
registered_active = remove_test_hunters(handler.active_hunters.keys())
actual_missing = ACTIVE_HUNTERS - registered_active
actual_odd = registered_active - ACTIVE_HUNTERS
assert expected_missing == actual_missing, "Active hunters are missing"
assert expected_odd == actual_odd, "Unexpected active hunters are registered"
def test_all_hunters_registered():
# TODO: Enable active hunting mode in testing
# expected = PASSIVE_HUNTERS | ACTIVE_HUNTERS
expected = PASSIVE_HUNTERS
expected = PASSIVE_HUNTERS | ACTIVE_HUNTERS
actual = remove_test_hunters(handler.all_hunters.keys())
assert expected == actual


@@ -1,10 +1,12 @@
import time
from kube_hunter.conf import Config, set_config
from kube_hunter.core.types import Hunter
from kube_hunter.core.events.types import Event, Service
from kube_hunter.core.events import handler
counter = 0
set_config(Config())
class OnceOnlyEvent(Service, Event):


@@ -1,6 +1,11 @@
# flake8: noqa: E402
import requests_mock
import time
from kube_hunter.conf import Config, set_config
set_config(Config())
from kube_hunter.modules.discovery.apiserver import ApiServer, ApiServiceDiscovery
from kube_hunter.core.events.types import Event
from kube_hunter.core.events import handler


@@ -1,78 +1,79 @@
# flake8: noqa: E402
import json
import requests_mock
import pytest
from netaddr import IPNetwork, IPAddress
from typing import List
from kube_hunter.conf import Config, get_config, set_config
set_config(Config())
from kube_hunter.core.events import handler
from kube_hunter.core.types import Hunter
from kube_hunter.modules.discovery.hosts import (
FromPodHostDiscovery,
RunningAsPodEvent,
HostScanEvent,
AzureMetadataApi,
HostDiscoveryHelpers,
)
from kube_hunter.core.events.types import NewHostEvent
from kube_hunter.core.events import handler
from kube_hunter.conf import config
def test_FromPodHostDiscovery():
with requests_mock.Mocker() as m:
e = RunningAsPodEvent()
config.azure = False
config.remote = None
config.cidr = None
m.get(
"http://169.254.169.254/metadata/instance?api-version=2017-08-01", status_code=404,
class TestFromPodHostDiscovery:
@staticmethod
def _make_response(*subnets: List[tuple]) -> str:
return json.dumps(
{
"network": {
"interface": [
{"ipv4": {"subnet": [{"address": address, "prefix": prefix} for address, prefix in subnets]}}
]
}
}
)
f = FromPodHostDiscovery(e)
assert not f.is_azure_pod()
# TODO For now we don't test the traceroute discovery version
# f.execute()
# Test that we generate NewHostEvent for the addresses reported by the Azure Metadata API
config.azure = True
m.get(
"http://169.254.169.254/metadata/instance?api-version=2017-08-01",
text='{"network":{"interface":[{"ipv4":{"subnet":[{"address": "3.4.5.6", "prefix": "255.255.255.252"}]}}]}}',
)
assert f.is_azure_pod()
def test_is_azure_pod_request_fail(self):
f = FromPodHostDiscovery(RunningAsPodEvent())
with requests_mock.Mocker() as m:
m.get("http://169.254.169.254/metadata/instance?api-version=2017-08-01", status_code=404)
result = f.is_azure_pod()
assert not result
def test_is_azure_pod_success(self):
f = FromPodHostDiscovery(RunningAsPodEvent())
with requests_mock.Mocker() as m:
m.get(
"http://169.254.169.254/metadata/instance?api-version=2017-08-01",
text=TestFromPodHostDiscovery._make_response(("3.4.5.6", "255.255.255.252")),
)
result = f.is_azure_pod()
assert result
def test_execute_scan_cidr(self):
set_config(Config(cidr="1.2.3.4/30"))
f = FromPodHostDiscovery(RunningAsPodEvent())
f.execute()
# Test that we don't trigger a HostScanEvent unless either config.remote or config.cidr are configured
config.remote = "1.2.3.4"
f.execute()
config.azure = False
config.remote = None
config.cidr = "1.2.3.4/24"
def test_execute_scan_remote(self):
set_config(Config(remote="1.2.3.4"))
f = FromPodHostDiscovery(RunningAsPodEvent())
f.execute()
# In this set of tests we should only trigger HostScanEvent when remote or cidr are set
@handler.subscribe(HostScanEvent)
class testHostDiscovery(object):
class HunterTestHostDiscovery(Hunter):
"""TestHostDiscovery
In this set of tests we should only trigger HostScanEvent when remote or cidr are set
"""
def __init__(self, event):
config = get_config()
assert config.remote is not None or config.cidr is not None
assert config.remote == "1.2.3.4" or config.cidr == "1.2.3.4/24"
# In this set of tests we should only get as far as finding a host if it's Azure
# because we're not running the code that would normally be triggered by a HostScanEvent
@handler.subscribe(NewHostEvent)
class testHostDiscoveryEvent(object):
def __init__(self, event):
assert config.azure
assert str(event.host).startswith("3.4.5.")
assert config.remote is None
assert config.cidr is None
# Test that we only report this event for Azure hosts
@handler.subscribe(AzureMetadataApi)
class testAzureMetadataApi(object):
def __init__(self, event):
assert config.azure
assert config.remote == "1.2.3.4" or config.cidr == "1.2.3.4/30"
class TestDiscoveryUtils:


@@ -1,6 +1,11 @@
# flake8: noqa: E402
import requests_mock
import time
from kube_hunter.conf import Config, set_config
set_config(Config())
from kube_hunter.modules.hunting.apiserver import (
AccessApiServer,
AccessApiServerWithToken,


@@ -0,0 +1,42 @@
# flake8: noqa: E402
from kube_hunter.conf import Config, set_config
set_config(Config())
from kube_hunter.core.events.types import Event
from kube_hunter.modules.hunting.certificates import CertificateDiscovery, CertificateEmail
from kube_hunter.core.events import handler
def test_CertificateDiscovery():
cert = """
-----BEGIN CERTIFICATE-----
MIIDZDCCAkwCCQCAzfCLqrJvuTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
UzELMAkGA1UECAwCQ0ExEDAOBgNVBAoMB05vZGUuanMxETAPBgNVBAsMCG5vZGUt
Z3lwMRIwEAYDVQQDDAlsb2NhbGhvc3QxHzAdBgkqhkiG9w0BCQEWEGJ1aWxkQG5v
ZGVqcy5vcmcwHhcNMTkwNjIyMDYyMjMzWhcNMjIwNDExMDYyMjMzWjB0MQswCQYD
VQQGEwJVUzELMAkGA1UECAwCQ0ExEDAOBgNVBAoMB05vZGUuanMxETAPBgNVBAsM
CG5vZGUtZ3lwMRIwEAYDVQQDDAlsb2NhbGhvc3QxHzAdBgkqhkiG9w0BCQEWEGJ1
aWxkQG5vZGVqcy5vcmcwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDS
CHjvtVW4HdbbUwZ/ZV9s6U4x0KSoyNQrsCZjB8kRpFPe50DS5mfmu2SNBGYKRgzk
4QEEwFB9N2o8YTWsCefSRl6ti4ToPZqulU4hhRKYrEGtMJcRzi3IN7s200JaO3UH
01Su8ruO0NESb5zEU1Ykfh8Lub8TGEAINmgI61d/5d5Aq3kDjUHQJt1Ekw03Ylnu
juQyCGZxLxnngu0mIvwzyL/UeeUgsfQLzvppUk6In7tC1zzMjSPWo0c8qu6KvrW4
bKYnkZkzdQifzbpO5ERMEsh5HWq0uHa6+dgcVHFvlhdqF4Uat87ygNplVf0txsZB
MNVqbz1k6xkZYMnzDoydAgMBAAEwDQYJKoZIhvcNAQELBQADggEBADspZGtKpWxy
J1W3FA1aeQhMvequQTcMRz4avkm4K4HfTdV1iVD4CbvdezBphouBlyLVLDFJP7RZ
m7dBJVgBwnxufoFLne8cR2MGqDRoySbFT1AtDJdxabE6Fg+QGUpgOQfeBJ6ANlSB
+qJ+HG4QA+Ouh5hxz9mgYwkIsMUABHiwENdZ/kT8Edw4xKgd3uH0YP4iiePMD66c
rzW3uXH5J1jnKgBlpxtog4P6dHCcoq+PZJ17W5bdXNyqC1LPzQqniZ2BNcEZ4ix3
slAZAOWD1zLLGJhBPMV1fa0sHNBWc6oicr3YK/IDb0cp9kiLvnUu1pHy+LWQGqtC
rceJuGsnJEQ=
-----END CERTIFICATE-----
"""
c = CertificateDiscovery(Event())
c.examine_certificate(cert)
@handler.subscribe(CertificateEmail)
class test_CertificateEmail(object):
def __init__(self, event):
assert event.email == b"build@nodejs.org0"


@@ -1,5 +1,10 @@
# flake8: noqa: E402
import time
from kube_hunter.conf import Config, set_config
set_config(Config())
from kube_hunter.core.events import handler
from kube_hunter.core.events.types import K8sVersionDisclosure
from kube_hunter.modules.hunting.cves import (


@@ -2,7 +2,11 @@ import json
from types import SimpleNamespace
from requests_mock import Mocker
from kube_hunter.modules.hunting.dashboard import KubeDashboard
from kube_hunter.conf import Config, set_config
set_config(Config())
from kube_hunter.modules.hunting.dashboard import KubeDashboard # noqa: E402
class TestKubeDashboard:


@@ -1,3 +1,8 @@
# flake8: noqa: E402
from kube_hunter.conf import Config, set_config
set_config(Config())
from kube_hunter.modules.report import get_reporter, get_dispatcher
from kube_hunter.modules.report.factory import (
YAMLReporter,


@@ -0,0 +1,13 @@
from kube_hunter.plugins import hookimpl
return_string = "return_string"
@hookimpl
def parser_add_arguments(parser):
return return_string
@hookimpl
def load_plugin(args):
return return_string


@@ -0,0 +1,17 @@
from argparse import ArgumentParser
from tests.plugins import test_hooks
from kube_hunter.plugins import initialize_plugin_manager
def test_all_plugin_hooks():
pm = initialize_plugin_manager()
pm.register(test_hooks)
# Testing parser_add_arguments
parser = ArgumentParser("Test Argument Parser")
results = pm.hook.parser_add_arguments(parser=parser)
assert test_hooks.return_string in results
# Testing load_plugin
results = pm.hook.load_plugin(args=[])
assert test_hooks.return_string in results