Mirror of https://github.com/aquasecurity/kube-hunter.git
Synced 2026-02-16 02:49:58 +00:00

Compare commits: add_plugin...fix-pretty (2 commits)

| Author | SHA1 | Date |
|---|---|---|
| | 3687d8fbd1 | |
| | a7f700c96c | |
.flake8 (5 changes)

@@ -1,6 +1,5 @@
[flake8]
ignore = E203, E266, E501, W503, B903, T499
ignore = E203, E266, E501, W503, B903
max-line-length = 120
max-complexity = 18
select = B,C,E,F,W,B9,T4
mypy_config=mypy.ini
select = B,C,E,F,W,B9
.gitignore (vendored, 1 change)

@@ -29,4 +29,3 @@ var/
.DS_Store
thumbs.db
__pycache__
.mypy_cache
Makefile (2 changes)

@@ -63,5 +63,5 @@ publish:

.PHONY: clean
clean:
rm -rf build/ dist/ *.egg-info/ .eggs/ .pytest_cache/ .mypy_cache .coverage *.spec
rm -rf build/ dist/ *.egg-info/ .eggs/ .pytest_cache/ .coverage *.spec
find . -type d -name __pycache__ -exec rm -rf '{}' +
@@ -75,10 +75,10 @@ in order to prevent circular dependency bug.

Following the above example, let's figure out the imports:
```python
from kube_hunter.core.types import Hunter
from kube_hunter.core.events import handler
from ...core.types import Hunter
from ...core.events import handler

from kube_hunter.core.events.types import OpenPortEvent
from ...core.events.types import OpenPortEvent

@handler.subscribe(OpenPortEvent, predicate=lambda event: event.port == 30000)
class KubeDashboardDiscovery(Hunter):
```

@@ -90,13 +90,13 @@ class KubeDashboardDiscovery(Hunter):
As you can see, all of the types here come from the `core` module.

### Core Imports
Absolute import: `kube_hunter.core.events`
relative import: `...core.events`

|Name|Description|
|---|---|
|handler|Core object for using events, every module should import this object|

Absolute import `kube_hunter.core.events.types`
relative import `...core.events.types`

|Name|Description|
|---|---|

@@ -104,7 +104,7 @@ Absolute import `kube_hunter.core.events.types`
|Vulnerability|Base class for defining a new vulnerability|
|OpenPortEvent|Published when a new port is discovered. open port is assigned to the `port ` attribute|

Absolute import: `kube_hunter.core.types`
relative import: `...core.types`

|Type|Description|
|---|---|
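As an editorial aside: the hunk above shows the same imports in both the absolute (`kube_hunter.core.*`) and relative (`...core.*`) styles this guide discusses. A minimal, self-contained sketch of a hunter in the absolute-import style — the class name and port predicate here are illustrative, not taken from this diff:

```python
from kube_hunter.core.types import Hunter
from kube_hunter.core.events import handler
from kube_hunter.core.events.types import OpenPortEvent


@handler.subscribe(OpenPortEvent, predicate=lambda event: event.port == 30000)
class ExampleDashboardDiscovery(Hunter):
    """Illustrative discovery hunter, triggered by an open port 30000"""

    def __init__(self, event):
        self.event = event

    def execute(self):
        # a real hunter would probe self.event.host here and report
        # findings with self.publish_event(...)
        pass
```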
kube_hunter/__init__.py

@@ -0,0 +1,4 @@
from . import core
from . import modules

__all__ = [core, modules]
kube_hunter/__main__.py

@@ -1,44 +1,19 @@
#!/usr/bin/env python3
# flake8: noqa: E402

import logging
import threading

from kube_hunter.conf import Config, set_config
from kube_hunter.conf.parser import parse_args
from kube_hunter.conf.logging import setup_logger

from kube_hunter.plugins import initialize_plugin_manager

pm = initialize_plugin_manager()
# Using a plugin hook for adding arguments before parsing
args = parse_args(add_args_hook=pm.hook.parser_add_arguments)
config = Config(
active=args.active,
cidr=args.cidr,
include_patched_versions=args.include_patched_versions,
interface=args.interface,
mapping=args.mapping,
network_timeout=args.network_timeout,
pod=args.pod,
quick=args.quick,
remote=args.remote,
statistics=args.statistics,
)
setup_logger(args.log)
set_config(config)

# Running all other registered plugins before execution
pm.hook.load_plugin(args=args)

from kube_hunter.conf import config
from kube_hunter.core.events import handler
from kube_hunter.core.events.types import HuntFinished, HuntStarted
from kube_hunter.modules.discovery.hosts import RunningAsPodEvent, HostScanEvent
from kube_hunter.modules.report import get_reporter, get_dispatcher

config.reporter = get_reporter(config.report)
config.dispatcher = get_dispatcher(config.dispatch)
logger = logging.getLogger(__name__)
config.dispatcher = get_dispatcher(args.dispatch)
config.reporter = get_reporter(args.report)

import kube_hunter  # noqa

def interactive_set_config():

@@ -81,6 +56,7 @@ def list_hunters():
print("* {}\n {}\n".format(name, doc))

global hunt_started_lock
hunt_started_lock = threading.Lock()
hunt_started = False

@@ -89,7 +65,7 @@ def main():
global hunt_started
scan_options = [config.pod, config.cidr, config.remote, config.interface]
try:
if args.list:
if config.list:
list_hunters()
return
kube_hunter/conf/__init__.py

@@ -1,50 +1,8 @@
from dataclasses import dataclass
from typing import Any, Optional
from kube_hunter.conf.parser import parse_args
from kube_hunter.conf.logging import setup_logger


@dataclass
class Config:
""" Config is a configuration container.
It contains the following fields:
- active: Enable active hunters
- cidr: Network subnets to scan
- dispatcher: Dispatcher object
- include_patched_version: Include patches in version comparison
- interface: Interface scanning mode
- list_hunters: Print a list of existing hunters
- log_level: Log level
- mapping: Report only found components
- network_timeout: Timeout for network operations
- pod: From pod scanning mode
- quick: Quick scanning mode
- remote: Hosts to scan
- report: Output format
- statistics: Include hunters statistics
"""
config = parse_args()
setup_logger(config.log)

active: bool = False
cidr: Optional[str] = None
dispatcher: Optional[Any] = None
include_patched_versions: bool = False
interface: bool = False
mapping: bool = False
network_timeout: float = 5.0
pod: bool = False
quick: bool = False
remote: Optional[str] = None
reporter: Optional[Any] = None
statistics: bool = False


_config: Optional[Config] = None


def get_config() -> Config:
if not _config:
raise ValueError("Configuration is not initialized")
return _config


def set_config(new_config: Config) -> None:
global _config
_config = new_config
__all__ = [config]
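One side of this hunk replaces the import-time `config = parse_args()` global with an explicit dataclass plus accessors. A short sketch of that accessor pattern, assuming the `Config`/`set_config`/`get_config` API shown above (field values are illustrative):

```python
from kube_hunter.conf import Config, get_config, set_config

# Initialize the global configuration once, early in startup
set_config(Config(active=False, network_timeout=5.0))

# Any later code fetches the same instance instead of importing a module global
config = get_config()
assert config.network_timeout == 5.0
```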
kube_hunter/conf/logging.py

@@ -21,7 +21,7 @@ def setup_logger(level_name):
logging.disable(logging.CRITICAL)
else:
log_level = getattr(logging, level_name.upper(), None)
log_level = log_level if isinstance(log_level, int) else None
log_level = log_level if type(log_level) is int else None
logging.basicConfig(level=log_level or DEFAULT_LEVEL, format=LOG_FORMAT)
if not log_level:
logging.warning(f"Unknown log level '{level_name}', using {DEFAULT_LEVEL_NAME}")
kube_hunter/conf/parser.py

@@ -1,13 +1,9 @@
from argparse import ArgumentParser
from kube_hunter.plugins import hookimpl


@hookimpl
def parser_add_arguments(parser):
"""
This is the default hook implementation for parse_add_argument
Contains initialization for all default arguments
"""
def parse_args():
parser = ArgumentParser(description="kube-hunter - hunt for security weaknesses in Kubernetes clusters")

parser.add_argument(
"--list", action="store_true", help="Displays all tests in kubehunter (add --active flag to see active tests)",
)

@@ -63,18 +59,6 @@ def parser_add_arguments(parser):

parser.add_argument("--network-timeout", type=float, default=5.0, help="network operations timeout")


def parse_args(add_args_hook):
"""
Function handles all argument parsing

@param add_arguments: hook for adding arguments to it's given ArgumentParser parameter
@return: parsed arguments dict
"""
parser = ArgumentParser(description="kube-hunter - hunt for security weaknesses in Kubernetes clusters")
# adding all arguments to the parser
add_args_hook(parser=parser)

args = parser.parse_args()
if args.cidr:
args.cidr = args.cidr.replace(" ", "").split(",")
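The two `parse_args` variants above differ in who owns the argument definitions. A runnable sketch of the hook-driven variant, with a plain function standing in for `pm.hook.parser_add_arguments`:

```python
from argparse import ArgumentParser


def add_arguments_stub(parser):
    # stand-in for the parser_add_arguments plugin hook
    parser.add_argument("--cidr", type=str, help="network subnets to scan")


def parse_args(add_args_hook):
    parser = ArgumentParser(description="kube-hunter - hunt for security weaknesses in Kubernetes clusters")
    add_args_hook(parser=parser)
    args = parser.parse_args()
    # normalize a comma-separated cidr list, as the hunk above does
    if args.cidr:
        args.cidr = args.cidr.replace(" ", "").split(",")
    return args


# usage: args = parse_args(add_args_hook=add_arguments_stub)
```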
kube_hunter/core/__init__.py

@@ -1,3 +1,4 @@
# flake8: noqa: E402
from . import types
from . import events

__all__ = [types, events]
kube_hunter/core/events/__init__.py

@@ -1,3 +1,4 @@
# flake8: noqa: E402
from .handler import EventQueue, handler
from . import types

__all__ = [EventQueue, handler, types]
kube_hunter/core/events/handler.py

@@ -4,7 +4,7 @@ from collections import defaultdict
from queue import Queue
from threading import Thread

from kube_hunter.conf import get_config
from kube_hunter.conf import config
from kube_hunter.core.types import ActiveHunter, HunterBase
from kube_hunter.core.events.types import Vulnerability, EventFilterBase

@@ -12,7 +12,7 @@ logger = logging.getLogger(__name__)


# Inherits Queue object, handles events asynchronously
class EventQueue(Queue):
class EventQueue(Queue, object):
def __init__(self, num_worker=10):
super(EventQueue, self).__init__()
self.passive_hunters = dict()

@@ -60,7 +60,6 @@ class EventQueue(Queue):

# getting uninstantiated event object
def subscribe_event(self, event, hook=None, predicate=None):
config = get_config()
if ActiveHunter in hook.__mro__:
if not config.active:
return

@@ -99,8 +98,6 @@ class EventQueue(Queue):

# getting instantiated event object
def publish_event(self, event, caller=None):
config = get_config()

# setting event chain
if caller:
event.previous = caller.event
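For context, the subscribe/publish flow that `EventQueue` drives, sketched with an illustrative event and hunter (the names are not from this diff):

```python
from kube_hunter.core.types import Hunter
from kube_hunter.core.events import handler
from kube_hunter.core.events.types import Event, Service


class ExampleServiceFound(Service, Event):
    """Illustrative service event"""

    def __init__(self):
        Service.__init__(self, name="Example Service")


@handler.subscribe(ExampleServiceFound)
class ExampleHunter(Hunter):
    """Instantiated and executed whenever ExampleServiceFound is published"""

    def __init__(self, event):
        self.event = event

    def execute(self):
        pass


# a discovery component would trigger it with:
# handler.publish_event(ExampleServiceFound())
```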
kube_hunter/core/events/types.py

@@ -1,8 +1,8 @@
import logging
import threading
import requests
import logging

from kube_hunter.conf import get_config
from kube_hunter.conf import config
from kube_hunter.core.types import (
InformationDisclosure,
DenialOfService,

@@ -17,7 +17,7 @@ from kube_hunter.core.types import (
logger = logging.getLogger(__name__)


class EventFilterBase:
class EventFilterBase(object):
def __init__(self, event):
self.event = event

@@ -28,7 +28,7 @@ class EventFilterBase:
return self.event


class Event:
class Event(object):
def __init__(self):
self.previous = None
self.hunter = None

@@ -62,7 +62,7 @@ class Event:
return history


class Service:
class Service(object):
def __init__(self, name, path="", secure=True):
self.name = name
self.secure = secure

@@ -79,7 +79,7 @@ class Service:
return self.__doc__


class Vulnerability:
class Vulnerability(object):
severity = dict(
{
InformationDisclosure: "medium",

@@ -118,6 +118,7 @@ class Vulnerability:
return self.severity.get(self.category, "low")


global event_id_count_lock
event_id_count_lock = threading.Lock()
event_id_count = 0

@@ -139,7 +140,6 @@ class NewHostEvent(Event):
return self.cloud_type

def get_cloud(self):
config = get_config()
try:
logger.debug("Checking whether the cluster is deployed on azure's cloud")
# Leverage 3rd tool https://github.com/blrchen/AzureSpeed for Azure cloud ip detection

@@ -148,7 +148,7 @@ class NewHostEvent(Event):
).json()
return result["cloud"] or "NoCloud"
except requests.ConnectionError:
logger.info("Failed to connect cloud type service", exc_info=True)
logger.info(f"Failed to connect cloud type service", exc_info=True)
except Exception:
logger.warning(f"Unable to check cloud of {self.host}", exc_info=True)
return "NoCloud"
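A sketch of declaring a new finding with the `Vulnerability` base shown above; the component/category imports follow the pattern used throughout these hunks, while the class itself is illustrative:

```python
from kube_hunter.core.events.types import Event, Vulnerability
from kube_hunter.core.types import InformationDisclosure, KubernetesCluster


class ExampleDisclosure(Vulnerability, Event):
    """Illustrative vulnerability; severity resolves via Vulnerability.severity"""

    def __init__(self, evidence):
        Vulnerability.__init__(
            self, KubernetesCluster, "Example Information Disclosure", category=InformationDisclosure,
        )
        self.evidence = evidence
```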
kube_hunter/core/types.py

@@ -1,4 +1,4 @@
class HunterBase:
class HunterBase(object):
publishedVulnerabilities = 0

@staticmethod
kube_hunter/modules/__init__.py

@@ -1,4 +1,5 @@
# flake8: noqa: E402
from . import report
from . import discovery
from . import hunting

__all__ = [report, discovery, hunting]
kube_hunter/modules/discovery/__init__.py

@@ -1,4 +1,3 @@
# flake8: noqa: E402
from . import (
apiserver,
dashboard,

@@ -9,3 +8,14 @@ from . import (
ports,
proxy,
)

__all__ = [
apiserver,
dashboard,
etcd,
hosts,
kubectl,
kubelet,
ports,
proxy,
]
kube_hunter/modules/discovery/apiserver.py

@@ -1,11 +1,11 @@
import logging
import requests
import logging

from kube_hunter.core.types import Discovery
from kube_hunter.core.events import handler
from kube_hunter.core.events.types import OpenPortEvent, Service, Event, EventFilterBase

from kube_hunter.conf import get_config
from kube_hunter.conf import config

KNOWN_API_PORTS = [443, 6443, 8080]

@@ -57,7 +57,6 @@ class ApiServiceDiscovery(Discovery):
self.publish_event(K8sApiService(protocol))

def has_api_behaviour(self, protocol):
config = get_config()
try:
r = self.session.get(f"{protocol}://{self.event.host}:{self.event.port}", timeout=config.network_timeout)
if ("k8s" in r.text) or ('"code"' in r.text and r.status_code != 200):

@@ -94,7 +93,6 @@ class ApiServiceClassify(EventFilterBase):

def classify_using_version_endpoint(self):
"""Tries to classify by accessing /version. if could not access succeded, returns"""
config = get_config()
try:
endpoint = f"{self.event.protocol}://{self.event.host}:{self.event.port}/version"
versions = self.session.get(endpoint, timeout=config.network_timeout).json()
kube_hunter/modules/discovery/dashboard.py

@@ -2,7 +2,7 @@ import json
import logging
import requests

from kube_hunter.conf import get_config
from kube_hunter.conf import config
from kube_hunter.core.events import handler
from kube_hunter.core.events.types import Event, OpenPortEvent, Service
from kube_hunter.core.types import Discovery

@@ -28,7 +28,6 @@ class KubeDashboard(Discovery):

@property
def secure(self):
config = get_config()
endpoint = f"http://{self.event.host}:{self.event.port}/api/v1/service/default"
logger.debug("Attempting to discover an Api server to access dashboard")
try:
kube_hunter/modules/discovery/hosts.py

@@ -1,14 +1,14 @@
import os
import logging
import itertools
import requests
import itertools

from enum import Enum
from netaddr import IPNetwork, IPAddress, AddrFormatError
from netifaces import AF_INET, ifaddresses, interfaces
from scapy.all import ICMP, IP, Ether, srp1

from kube_hunter.conf import get_config
from kube_hunter.conf import config
from kube_hunter.core.events import handler
from kube_hunter.core.events.types import Event, NewHostEvent, Vulnerability
from kube_hunter.core.types import Discovery, InformationDisclosure, Azure

@@ -95,7 +95,6 @@ class FromPodHostDiscovery(Discovery):
self.event = event

def execute(self):
config = get_config()
# Scan any hosts that the user specified
if config.remote or config.cidr:
self.publish_event(HostScanEvent())

@@ -105,7 +104,7 @@ class FromPodHostDiscovery(Discovery):
if self.is_azure_pod():
subnets, cloud = self.azure_metadata_discovery()
else:
subnets = self.traceroute_discovery()
subnets, ext_ip = self.traceroute_discovery()

should_scan_apiserver = False
if self.event.kubeservicehost:

@@ -120,7 +119,6 @@ class FromPodHostDiscovery(Discovery):
self.publish_event(NewHostEvent(host=IPAddress(self.event.kubeservicehost), cloud=cloud))

def is_azure_pod(self):
config = get_config()
try:
logger.debug("From pod attempting to access Azure Metadata API")
if (

@@ -138,15 +136,16 @@ class FromPodHostDiscovery(Discovery):

# for pod scanning
def traceroute_discovery(self):
config = get_config()
# getting external ip, to determine if cloud cluster
external_ip = requests.get("https://canhazip.com", timeout=config.network_timeout).text

node_internal_ip = srp1(
Ether() / IP(dst="1.1.1.1", ttl=1) / ICMP(), verbose=0, timeout=config.network_timeout,
)[IP].src
return [[node_internal_ip, "24"]]
return [[node_internal_ip, "24"]], external_ip

# querying azure's interface metadata api | works only from a pod
def azure_metadata_discovery(self):
config = get_config()
logger.debug("From pod attempting to access azure's metadata")
machine_metadata = requests.get(
"http://169.254.169.254/metadata/instance?api-version=2017-08-01",

@@ -179,7 +178,6 @@ class HostDiscovery(Discovery):
self.event = event

def execute(self):
config = get_config()
if config.cidr:
for ip in HostDiscoveryHelpers.generate_hosts(config.cidr):
self.publish_event(NewHostEvent(host=ip))
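`HostDiscoveryHelpers.generate_hosts(config.cidr)` above expands the configured subnets into individual scan targets. A simplified sketch of that expansion with netaddr — the real helper does more (e.g. input validation), so treat this as the idea only:

```python
from netaddr import IPNetwork


def generate_hosts(cidrs):
    # simplified: yield every address of every configured subnet
    for cidr in cidrs:
        for ip in IPNetwork(cidr):
            yield ip


print(list(generate_hosts(["1.2.3.4/30"])))  # 4 addresses
```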
kube_hunter/modules/discovery/kubelet.py

@@ -3,7 +3,7 @@ import requests
import urllib3
from enum import Enum

from kube_hunter.conf import get_config
from kube_hunter.conf import config
from kube_hunter.core.types import Discovery
from kube_hunter.core.events import handler
from kube_hunter.core.events.types import OpenPortEvent, Event, Service

@@ -47,7 +47,6 @@ class KubeletDiscovery(Discovery):
self.event = event

def get_read_only_access(self):
config = get_config()
endpoint = f"http://{self.event.host}:{self.event.port}/pods"
logger.debug(f"Trying to get kubelet read access at {endpoint}")
r = requests.get(endpoint, timeout=config.network_timeout)

@@ -65,7 +64,6 @@ class KubeletDiscovery(Discovery):
self.publish_event(SecureKubeletEvent(secure=True, anonymous_auth=False))

def ping_kubelet(self):
config = get_config()
endpoint = f"https://{self.event.host}:{self.event.port}/pods"
logger.debug("Attempting to get pods info from kubelet")
try:
kube_hunter/modules/discovery/proxy.py

@@ -1,7 +1,7 @@
import logging
import requests

from kube_hunter.conf import get_config
from kube_hunter.conf import config
from kube_hunter.core.types import Discovery
from kube_hunter.core.events import handler
from kube_hunter.core.events.types import Service, Event, OpenPortEvent

@@ -29,7 +29,6 @@ class KubeProxy(Discovery):

@property
def accesible(self):
config = get_config()
endpoint = f"http://{self.host}:{self.port}/api/v1"
logger.debug("Attempting to discover a proxy service")
try:
kube_hunter/modules/hunting/__init__.py

@@ -1,4 +1,3 @@
# flake8: noqa: E402
from . import (
aks,
apiserver,

@@ -14,3 +13,19 @@ from . import (
proxy,
secrets,
)

__all__ = [
aks,
apiserver,
arp,
capabilities,
certificates,
cves,
dashboard,
dns,
etcd,
kubelet,
mounts,
proxy,
secrets,
]
kube_hunter/modules/hunting/aks.py

@@ -2,7 +2,7 @@ import json
import logging
import requests

from kube_hunter.conf import get_config
from kube_hunter.conf import config
from kube_hunter.modules.hunting.kubelet import ExposedRunHandler
from kube_hunter.core.events import handler
from kube_hunter.core.events.types import Event, Vulnerability

@@ -33,7 +33,6 @@ class AzureSpnHunter(Hunter):

# getting a container that has access to the azure.json file
def get_key_container(self):
config = get_config()
endpoint = f"{self.base_url}/pods"
logger.debug("Trying to find container with access to azure.json file")
try:

@@ -70,7 +69,6 @@ class ProveAzureSpnExposure(ActiveHunter):
self.base_url = f"https://{self.event.host}:{self.event.port}"

def run(self, command, container):
config = get_config()
run_url = "/".join(self.base_url, "run", container["namespace"], container["pod"], container["name"])
return requests.post(run_url, verify=False, params={"cmd": command}, timeout=config.network_timeout)
kube_hunter/modules/hunting/apiserver.py

@@ -1,9 +1,9 @@
import logging
import json
import uuid
import requests
import uuid

from kube_hunter.conf import get_config
from kube_hunter.conf import config
from kube_hunter.modules.discovery.apiserver import ApiServer
from kube_hunter.core.events import handler
from kube_hunter.core.events.types import Vulnerability, Event, K8sVersionDisclosure

@@ -236,7 +236,6 @@ class AccessApiServer(Hunter):
self.with_token = False

def access_api_server(self):
config = get_config()
logger.debug(f"Passive Hunter is attempting to access the API at {self.path}")
try:
r = requests.get(f"{self.path}/api", headers=self.headers, verify=False, timeout=config.network_timeout)

@@ -247,7 +246,6 @@ class AccessApiServer(Hunter):
return False

def get_items(self, path):
config = get_config()
try:
items = []
r = requests.get(path, headers=self.headers, verify=False, timeout=config.network_timeout)

@@ -263,7 +261,6 @@ class AccessApiServer(Hunter):
return None

def get_pods(self, namespace=None):
config = get_config()
pods = []
try:
if not namespace:

@@ -343,7 +340,6 @@ class AccessApiServerActive(ActiveHunter):
self.path = f"{self.event.protocol}://{self.event.host}:{self.event.port}"

def create_item(self, path, data):
config = get_config()
headers = {"Content-Type": "application/json"}
if self.event.auth_token:
headers["Authorization"] = f"Bearer {self.event.auth_token}"

@@ -358,7 +354,6 @@ class AccessApiServerActive(ActiveHunter):
return None

def patch_item(self, path, data):
config = get_config()
headers = {"Content-Type": "application/json-patch+json"}
if self.event.auth_token:
headers["Authorization"] = f"Bearer {self.event.auth_token}"

@@ -374,7 +369,6 @@ class AccessApiServerActive(ActiveHunter):
return None

def delete_item(self, path):
config = get_config()
headers = {}
if self.event.auth_token:
headers["Authorization"] = f"Bearer {self.event.auth_token}"

@@ -576,7 +570,6 @@ class ApiVersionHunter(Hunter):
self.session.headers.update({"Authorization": f"Bearer {self.event.auth_token}"})

def execute(self):
config = get_config()
if self.event.auth_token:
logger.debug(
"Trying to access the API server version endpoint using pod's"
kube_hunter/modules/hunting/arp.py

@@ -2,7 +2,7 @@ import logging

from scapy.all import ARP, IP, ICMP, Ether, sr1, srp

from kube_hunter.conf import get_config
from kube_hunter.conf import config
from kube_hunter.core.events import handler
from kube_hunter.core.events.types import Event, Vulnerability
from kube_hunter.core.types import ActiveHunter, KubernetesCluster, IdentityTheft

@@ -32,7 +32,6 @@ class ArpSpoofHunter(ActiveHunter):
self.event = event

def try_getting_mac(self, ip):
config = get_config()
ans = sr1(ARP(op=1, pdst=ip), timeout=config.network_timeout, verbose=0)
return ans[ARP].hwsrc if ans else None

@@ -52,10 +51,9 @@ class ArpSpoofHunter(ActiveHunter):
return False

def execute(self):
config = get_config()
self_ip = sr1(IP(dst="1.1.1.1", ttl=1) / ICMP(), verbose=0, timeout=config.network_timeout)[IP].dst
arp_responses, _ = srp(
Ether(dst="ff:ff:ff:ff:ff:ff") / ARP(op=1, pdst=f"{self_ip}/24"), timeout=config.network_timeout, verbose=0,
Ether(dst="ff:ff:ff:ff:ff:ff") / ARP(op=1, pdst=f"{self_ip}/24"), timeout=config.netork_timeout, verbose=0,
)

# arp enabled on cluster and more than one pod on node
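`try_getting_mac` above is a one-packet ARP lookup. Extracted as a standalone sketch (requires scapy and raw-socket privileges; the timeout default here is illustrative):

```python
from scapy.all import ARP, sr1


def try_getting_mac(ip, timeout=5.0):
    # who-has request for a single IP; returns the source MAC of the reply, if any
    ans = sr1(ARP(op=1, pdst=ip), timeout=timeout, verbose=0)
    return ans[ARP].hwsrc if ans else None
```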
kube_hunter/modules/hunting/certificates.py

@@ -8,7 +8,7 @@ from kube_hunter.core.events import handler
from kube_hunter.core.events.types import Vulnerability, Event, Service

logger = logging.getLogger(__name__)
email_pattern = re.compile(rb"([a-z0-9]+@[a-z0-9]+\.[a-z0-9]+)")
email_pattern = re.compile(r"([a-z0-9]+@[a-z0-9]+\.[a-z0-9]+)")


class CertificateEmail(Vulnerability, Event):

@@ -16,7 +16,7 @@ class CertificateEmail(Vulnerability, Event):

def __init__(self, email):
Vulnerability.__init__(
self, KubernetesCluster, "Certificate Includes Email Address", category=InformationDisclosure, vid="KHV021",
self, KubernetesCluster, "Certificate Includes Email Address", category=InformationDisclosure, khv="KHV021",
)
self.email = email
self.evidence = "email: {}".format(self.email)

@@ -39,11 +39,8 @@ class CertificateDiscovery(Hunter):
except ssl.SSLError:
# If the server doesn't offer SSL on this port we won't get a certificate
return
self.examine_certificate(cert)

def examine_certificate(self, cert):
c = cert.strip(ssl.PEM_HEADER).strip(ssl.PEM_FOOTER)
certdata = base64.b64decode(c)
certdata = base64.decodebytes(c)
emails = re.findall(email_pattern, certdata)
for email in emails:
self.publish_event(CertificateEmail(email=email))
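The `examine_certificate` flow above, as a standalone sketch: strip the PEM armor, decode the base64 body, and grep the raw DER bytes with the bytes pattern (`rb"..."`) from this hunk. `decodebytes` needs bytes input, hence the `encode()` here:

```python
import base64
import re
import ssl

email_pattern = re.compile(rb"([a-z0-9]+@[a-z0-9]+\.[a-z0-9]+)")


def extract_certificate_emails(cert: str):
    c = cert.strip(ssl.PEM_HEADER).strip(ssl.PEM_FOOTER)
    certdata = base64.decodebytes(c.encode())
    return re.findall(email_pattern, certdata)
```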
kube_hunter/modules/hunting/cves.py

@@ -1,7 +1,7 @@
import logging
from packaging import version

from kube_hunter.conf import get_config
from kube_hunter.conf import config
from kube_hunter.core.events import handler
from kube_hunter.core.events.types import Vulnerability, Event, K8sVersionDisclosure
from kube_hunter.core.types import (

@@ -111,7 +111,7 @@ class CveUtils:
@staticmethod
def get_base_release(full_ver):
# if LegacyVersion, converting manually to a base version
if isinstance(full_ver, version.LegacyVersion):
if type(full_ver) == version.LegacyVersion:
return version.parse(".".join(full_ver._version.split(".")[:2]))
return version.parse(".".join(map(str, full_ver._version.release[:2])))

@@ -122,7 +122,7 @@ class CveUtils:

@staticmethod
def to_raw_version(v):
if not isinstance(v, version.LegacyVersion):
if type(v) != version.LegacyVersion:
return ".".join(map(str, v._version.release))
return v._version

@@ -159,7 +159,7 @@ class CveUtils:

# default to classic compare, unless the check_version is legacy.
version_compare_func = CveUtils.basic_compare
if isinstance(check_v, version.LegacyVersion):
if type(check_v) == version.LegacyVersion:
version_compare_func = CveUtils.version_compare

if check_version not in fix_versions:

@@ -170,7 +170,7 @@ class CveUtils:

# if the check version and the current fix has the same base release
if base_check_v == base_fix_v:
# when check_version is legacy, we use a custom compare func, to handle differences between versions
# when check_version is legacy, we use a custom compare func, to handle differences between versions.
if version_compare_func(check_v, fix_v) == -1:
# determine vulnerable if smaller and with same base version
vulnerable = True

@@ -194,7 +194,6 @@ class K8sClusterCveHunter(Hunter):
self.event = event

def execute(self):
config = get_config()
logger.debug(f"Checking known CVEs for k8s API version: {self.event.version}")
cve_mapping = {
ServerApiVersionEndPointAccessPE: ["1.10.11", "1.11.5", "1.12.3"],

@@ -218,7 +217,6 @@ class KubectlCVEHunter(Hunter):
self.event = event

def execute(self):
config = get_config()
cve_mapping = {
KubectlCpVulnerability: ["1.11.9", "1.12.7", "1.13.5", "1.14.0"],
IncompleteFixToKubectlCpVulnerability: ["1.12.9", "1.13.6", "1.14.2"],
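The `isinstance`/`type` toggles above guard the same logic: version strings that aren't PEP 440-compliant parse to `LegacyVersion` on the `packaging` releases contemporary with this code, and need custom handling. A simplified, public-API sketch of the fix-version comparison `CveUtils` performs (using `Version.release` rather than the private `_version` the source touches):

```python
from packaging import version

check_v = version.parse("1.12.2")
fix_versions = [version.parse(v) for v in ["1.10.11", "1.11.5", "1.12.3"]]

# vulnerable if strictly older than a fix release sharing the same base release
vulnerable = any(
    check_v < fix_v and check_v.release[:2] == fix_v.release[:2]
    for fix_v in fix_versions
)
print(vulnerable)  # True: 1.12.2 < 1.12.3
```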
kube_hunter/modules/hunting/dashboard.py

@@ -2,7 +2,7 @@ import logging
import json
import requests

from kube_hunter.conf import get_config
from kube_hunter.conf import config
from kube_hunter.core.types import Hunter, RemoteCodeExec, KubernetesCluster
from kube_hunter.core.events import handler
from kube_hunter.core.events.types import Vulnerability, Event

@@ -31,7 +31,6 @@ class KubeDashboard(Hunter):
self.event = event

def get_nodes(self):
config = get_config()
logger.debug("Passive hunter is attempting to get nodes types of the cluster")
r = requests.get(f"http://{self.event.host}:{self.event.port}/api/v1/node", timeout=config.network_timeout)
if r.status_code == 200 and "nodes" in r.text:
kube_hunter/modules/hunting/dns.py

@@ -3,7 +3,7 @@ import logging

from scapy.all import IP, ICMP, UDP, DNS, DNSQR, ARP, Ether, sr1, srp1, srp

from kube_hunter.conf import get_config
from kube_hunter.conf import config
from kube_hunter.core.events import handler
from kube_hunter.core.events.types import Event, Vulnerability
from kube_hunter.core.types import ActiveHunter, KubernetesCluster, IdentityTheft

@@ -36,7 +36,6 @@ class DnsSpoofHunter(ActiveHunter):
self.event = event

def get_cbr0_ip_mac(self):
config = get_config()
res = srp1(Ether() / IP(dst="1.1.1.1", ttl=1) / ICMP(), verbose=0, timeout=config.network_timeout)
return res[IP].src, res.src

@@ -48,7 +47,6 @@ class DnsSpoofHunter(ActiveHunter):
return match.group(1)

def get_kube_dns_ip_mac(self):
config = get_config()
kubedns_svc_ip = self.extract_nameserver_ip()

# getting actual pod ip of kube-dns service, by comparing the src mac of a dns response and arp scanning.

@@ -68,7 +66,6 @@ class DnsSpoofHunter(ActiveHunter):
return response[ARP].psrc, response.src

def execute(self):
config = get_config()
logger.debug("Attempting to get kube-dns pod ip")
self_ip = sr1(IP(dst="1.1.1.1", ttl=1) / ICMP(), verbose=0, timeout=config.netork_timeout)[IP].dst
cbr0_ip, cbr0_mac = self.get_cbr0_ip_mac()
kube_hunter/modules/hunting/etcd.py

@@ -1,7 +1,7 @@
import logging
import requests

from kube_hunter.conf import get_config
from kube_hunter.conf import config
from kube_hunter.core.events import handler
from kube_hunter.core.events.types import Vulnerability, Event, OpenPortEvent
from kube_hunter.core.types import (

@@ -83,7 +83,6 @@ class EtcdRemoteAccessActive(ActiveHunter):
self.write_evidence = ""

def db_keys_write_access(self):
config = get_config()
logger.debug(f"Trying to write keys remotely on host {self.event.host}")
data = {"value": "remotely written data"}
try:

@@ -116,7 +115,6 @@ class EtcdRemoteAccess(Hunter):
self.protocol = "https"

def db_keys_disclosure(self):
config = get_config()
logger.debug(f"{self.event.host} Passive hunter is attempting to read etcd keys remotely")
try:
r = requests.get(

@@ -128,7 +126,6 @@ class EtcdRemoteAccess(Hunter):
return False

def version_disclosure(self):
config = get_config()
logger.debug(f"Trying to check etcd version remotely at {self.event.host}")
try:
r = requests.get(

@@ -142,7 +139,6 @@ class EtcdRemoteAccess(Hunter):
return False

def insecure_access(self):
config = get_config()
logger.debug(f"Trying to access etcd insecurely at {self.event.host}")
try:
r = requests.get(
kube_hunter/modules/hunting/kubelet.py

@@ -6,7 +6,7 @@ import re
import requests
import urllib3

from kube_hunter.conf import get_config
from kube_hunter.conf import config
from kube_hunter.core.events import handler
from kube_hunter.core.events.types import Vulnerability, Event, K8sVersionDisclosure
from kube_hunter.core.types import (

@@ -27,6 +27,9 @@ logger = logging.getLogger(__name__)
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)


""" Vulnerabilities """


class ExposedPodsHandler(Vulnerability, Event):
"""An attacker could view sensitive information about pods that are
bound to a Node using the /pods endpoint"""

@@ -184,7 +187,6 @@ class ReadOnlyKubeletPortHunter(Hunter):
self.pods_endpoint_data = ""

def get_k8s_version(self):
config = get_config()
logger.debug("Passive hunter is attempting to find kubernetes version")
metrics = requests.get(f"{self.path}/metrics", timeout=config.network_timeout).text
for line in metrics.split("\n"):

@@ -206,14 +208,12 @@ class ReadOnlyKubeletPortHunter(Hunter):
return privileged_containers if len(privileged_containers) > 0 else None

def get_pods_endpoint(self):
config = get_config()
logger.debug("Attempting to find pods endpoints")
response = requests.get(f"{self.path}/pods", timeout=config.network_timeout)
if "items" in response.text:
return response.json()

def check_healthz_endpoint(self):
config = get_config()
r = requests.get(f"{self.path}/healthz", verify=False, timeout=config.network_timeout)
return r.text if r.status_code == 200 else False

@@ -240,7 +240,7 @@ class SecureKubeletPortHunter(Hunter):
Hunts specific endpoints on an open secured Kubelet
"""

class DebugHandlers:
class DebugHandlers(object):
""" all methods will return the handler name if successful """

def __init__(self, path, pod, session=None):

@@ -250,7 +250,6 @@ class SecureKubeletPortHunter(Hunter):

# outputs logs from a specific container
def test_container_logs(self):
config = get_config()
logs_url = self.path + KubeletHandlers.CONTAINERLOGS.value.format(
pod_namespace=self.pod["namespace"], pod_id=self.pod["name"], container_name=self.pod["container"],
)

@@ -258,7 +257,6 @@ class SecureKubeletPortHunter(Hunter):

# need further investigation on websockets protocol for further implementation
def test_exec_container(self):
config = get_config()
# opens a stream to connect to using a web socket
headers = {"X-Stream-Protocol-Version": "v2.channel.k8s.io"}
exec_url = self.path + KubeletHandlers.EXEC.value.format(

@@ -276,7 +274,6 @@ class SecureKubeletPortHunter(Hunter):

# need further investigation on websockets protocol for further implementation
def test_port_forward(self):
config = get_config()
headers = {
"Upgrade": "websocket",
"Connection": "Upgrade",

@@ -294,7 +291,6 @@ class SecureKubeletPortHunter(Hunter):

# executes one command and returns output
def test_run_container(self):
config = get_config()
run_url = self.path + KubeletHandlers.RUN.value.format(
pod_namespace="test", pod_id="test", container_name="test", cmd="",
)

@@ -303,14 +299,12 @@ class SecureKubeletPortHunter(Hunter):

# returns list of currently running pods
def test_running_pods(self):
config = get_config()
pods_url = self.path + KubeletHandlers.RUNNINGPODS.value
r = self.session.get(pods_url, verify=False, timeout=config.network_timeout)
return r.json() if r.status_code == 200 else False

# need further investigation on the differences between attach and exec
def test_attach_container(self):
config = get_config()
# headers={"X-Stream-Protocol-Version": "v2.channel.k8s.io"}
attach_url = self.path + KubeletHandlers.ATTACH.value.format(
pod_namespace=self.pod["namespace"],

@@ -327,7 +321,6 @@ class SecureKubeletPortHunter(Hunter):

# checks access to logs endpoint
def test_logs_endpoint(self):
config = get_config()
logs_url = self.session.get(
self.path + KubeletHandlers.LOGS.value.format(path=""), timeout=config.network_timeout,
).text

@@ -335,7 +328,6 @@ class SecureKubeletPortHunter(Hunter):

# returns the cmd line used to run the kubelet
def test_pprof_cmdline(self):
config = get_config()
cmd = self.session.get(
self.path + KubeletHandlers.PPROF_CMDLINE.value, verify=False, timeout=config.network_timeout,
)

@@ -358,13 -350,11 @@ class SecureKubeletPortHunter(Hunter):
self.pods_endpoint_data = ""

def get_pods_endpoint(self):
config = get_config()
response = self.session.get(f"{self.path}/pods", verify=False, timeout=config.network_timeout)
if "items" in response.text:
return response.json()

def check_healthz_endpoint(self):
config = get_config()
r = requests.get(f"{self.path}/healthz", verify=False, timeout=config.network_timeout)
return r.text if r.status_code == 200 else False

@@ -381,7 +371,6 @@ class SecureKubeletPortHunter(Hunter):
self.test_handlers()

def test_handlers(self):
config = get_config()
# if kube-hunter runs in a pod, we test with kube-hunter's pod
pod = self.kubehunter_pod if config.pod else self.get_random_pod()
if pod:

@@ -445,7 +434,6 @@ class ProveRunHandler(ActiveHunter):
self.base_path = f"https://{self.event.host}:{self.event.port}"

def run(self, command, container):
config = get_config()
run_url = KubeletHandlers.RUN.value.format(
pod_namespace=container["namespace"],
pod_id=container["pod"],

@@ -457,7 +445,6 @@ class ProveRunHandler(ActiveHunter):
).text

def execute(self):
config = get_config()
r = self.event.session.get(
self.base_path + KubeletHandlers.PODS.value, verify=False, timeout=config.network_timeout,
)

@@ -491,7 +478,6 @@ class ProveContainerLogsHandler(ActiveHunter):
self.base_url = f"{protocol}://{self.event.host}:{self.event.port}/"

def execute(self):
config = get_config()
pods_raw = self.event.session.get(
self.base_url + KubeletHandlers.PODS.value, verify=False, timeout=config.network_timeout,
).text

@@ -527,7 +513,6 @@ class ProveSystemLogs(ActiveHunter):
self.base_url = f"https://{self.event.host}:{self.event.port}"

def execute(self):
config = get_config()
audit_logs = self.event.session.get(
f"{self.base_url}/" + KubeletHandlers.LOGS.value.format(path="audit/audit.log"),
verify=False,
kube_hunter/modules/hunting/mounts.py

@@ -2,7 +2,7 @@ import logging
import re
import uuid

from kube_hunter.conf import get_config
from kube_hunter.conf import config
from kube_hunter.core.events import handler
from kube_hunter.core.events.types import Event, Vulnerability
from kube_hunter.core.types import (

@@ -88,7 +88,6 @@ class ProveVarLogMount(ActiveHunter):

# TODO: replace with multiple subscription to WriteMountToVarLog as well
def get_varlog_mounters(self):
config = get_config()
logger.debug("accessing /pods manually on ProveVarLogMount")
pods = self.event.session.get(
f"{self.base_path}/" + KubeletHandlers.PODS.value, verify=False, timeout=config.network_timeout,

@@ -108,7 +107,6 @@ class ProveVarLogMount(ActiveHunter):

def traverse_read(self, host_file, container, mount_path, host_path):
"""Returns content of file on the host, and cleans trails"""
config = get_config()
symlink_name = str(uuid.uuid4())
# creating symlink to file
self.run(f"ln -s {host_file} {mount_path}/{symlink_name}", container)
kube_hunter/modules/hunting/proxy.py

@@ -3,7 +3,7 @@ import requests

from enum import Enum

from kube_hunter.conf import get_config
from kube_hunter.conf import config
from kube_hunter.core.events import handler
from kube_hunter.core.events.types import Event, Vulnerability, K8sVersionDisclosure
from kube_hunter.core.types import (

@@ -53,13 +53,11 @@ class KubeProxy(Hunter):

@property
def namespaces(self):
config = get_config()
resource_json = requests.get(f"{self.api_url}/namespaces", timeout=config.network_timeout).json()
return self.extract_names(resource_json)

@property
def services(self):
config = get_config()
# map between namespaces and service names
services = dict()
for namespace in self.namespaces:

@@ -87,7 +85,6 @@ class ProveProxyExposed(ActiveHunter):
self.event = event

def execute(self):
config = get_config()
version_metadata = requests.get(
f"http://{self.event.host}:{self.event.port}/version", verify=False, timeout=config.network_timeout,
).json()

@@ -105,7 +102,6 @@ class K8sVersionDisclosureProve(ActiveHunter):
self.event = event

def execute(self):
config = get_config()
version_metadata = requests.get(
f"http://{self.event.host}:{self.event.port}/version", verify=False, timeout=config.network_timeout,
).json()
kube_hunter/modules/hunting/secrets.py

@@ -50,7 +50,7 @@ class AccessSecrets(Hunter):
for dirname, _, files in os.walk("/var/run/secrets/"):
for f in files:
self.secrets_evidence.append(os.path.join(dirname, f))
return len(self.secrets_evidence) > 0
return True if (len(self.secrets_evidence) > 0) else False

def execute(self):
if self.event.auth_token is not None:
kube_hunter/modules/report/__init__.py

@@ -1,2 +1,3 @@
# flake8: noqa: E402
from kube_hunter.modules.report.factory import get_reporter, get_dispatcher

__all__ = [get_reporter, get_dispatcher]
kube_hunter/modules/report/base.py

@@ -8,7 +8,7 @@ from kube_hunter.modules.report.collector import (
)


class BaseReporter:
class BaseReporter(object):
def get_nodes(self):
nodes = list()
node_locations = set()
kube_hunter/modules/report/collector.py

@@ -1,7 +1,7 @@
import logging
import threading

from kube_hunter.conf import get_config
from kube_hunter.conf import config
from kube_hunter.core.events import handler
from kube_hunter.core.events.types import (
Event,

@@ -14,16 +14,20 @@ from kube_hunter.core.events.types import (

logger = logging.getLogger(__name__)

global services_lock
services_lock = threading.Lock()
services = list()

global vulnerabilities_lock
vulnerabilities_lock = threading.Lock()
vulnerabilities = list()

hunters = handler.all_hunters


@handler.subscribe(Service)
@handler.subscribe(Vulnerability)
class Collector:
class Collector(object):
def __init__(self, event=None):
self.event = event

@@ -47,12 +51,11 @@ class TablesPrinted(Event):


@handler.subscribe(HuntFinished)
class SendFullReport:
class SendFullReport(object):
def __init__(self, event):
self.event = event

def execute(self):
config = get_config()
report = config.reporter.get_report(statistics=config.statistics, mapping=config.mapping)
config.dispatcher.dispatch(report)
handler.publish_event(ReportDispatched())

@@ -60,7 +63,7 @@ class SendFullReport:


@handler.subscribe(HuntStarted)
class StartedInfo:
class StartedInfo(object):
def __init__(self, event):
self.event = event
kube_hunter/modules/report/dispatchers.py

@@ -5,7 +5,7 @@ import requests

logger = logging.getLogger(__name__)


class HTTPDispatcher:
class HTTPDispatcher(object):
def dispatch(self, report):
logger.debug("Dispatching report via HTTP")
dispatch_method = os.environ.get("KUBEHUNTER_HTTP_DISPATCH_METHOD", "POST").upper()

@@ -24,7 +24,7 @@ class HTTPDispatcher:
logger.exception(f"Could not dispatch report to {dispatch_url}")


class STDOUTDispatcher:
class STDOUTDispatcher(object):
def dispatch(self, report):
logger.debug("Dispatching report via stdout")
print(report)
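A sketch of the `HTTPDispatcher` contract visible above: the HTTP method comes from `KUBEHUNTER_HTTP_DISPATCH_METHOD` (default POST), and the target from a dispatch-URL variable — its exact name, `KUBEHUNTER_HTTP_DISPATCH_URL` here, is an assumption based on the `dispatch_url` reference in the hunk:

```python
import os

import requests


def dispatch(report):
    dispatch_method = os.environ.get("KUBEHUNTER_HTTP_DISPATCH_METHOD", "POST").upper()
    # env var name assumed; the hunk only shows the value logged as dispatch_url
    dispatch_url = os.environ.get("KUBEHUNTER_HTTP_DISPATCH_URL", "https://localhost/")
    return requests.request(dispatch_method, dispatch_url, json=report, timeout=5.0)
```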
kube_hunter/modules/report/factory.py

@@ -1,10 +1,10 @@
import logging

from kube_hunter.modules.report.json import JSONReporter
from kube_hunter.modules.report.yaml import YAMLReporter
from kube_hunter.modules.report.plain import PlainReporter
from kube_hunter.modules.report.dispatchers import STDOUTDispatcher, HTTPDispatcher

import logging

logger = logging.getLogger(__name__)

DEFAULT_REPORTER = "plain"
kube_hunter/modules/report/json.py

@@ -1,5 +1,4 @@
import json

from kube_hunter.modules.report.base import BaseReporter
kube_hunter/modules/report/plain.py

@@ -1,3 +1,5 @@
from __future__ import print_function

from prettytable import ALL, PrettyTable

from kube_hunter.modules.report.base import BaseReporter

@@ -44,6 +46,7 @@ class PlainReporter(BaseReporter):
if vulnerabilities_len:
output += self.vulns_table()
output += "\nKube Hunter couldn't find any clusters"
# print("\nKube Hunter couldn't find any clusters. {}".format("Maybe try with --active?" if not config.active else ""))
return output

def nodes_table(self):
kube_hunter/plugins/__init__.py (deleted)

@@ -1,23 +0,0 @@
import pluggy

from kube_hunter.plugins import hookspecs

hookimpl = pluggy.HookimplMarker("kube-hunter")


def initialize_plugin_manager():
"""
Initializes and loads all default and setup implementations for registered plugins

@return: initialized plugin manager
"""
pm = pluggy.PluginManager("kube-hunter")
pm.add_hookspecs(hookspecs)
pm.load_setuptools_entrypoints("kube_hunter")

# default registration of builtin implemented plugins
from kube_hunter.conf import parser

pm.register(parser)

return pm
kube_hunter/plugins/hookspecs.py (deleted)

@@ -1,24 +0,0 @@
import pluggy
from argparse import ArgumentParser

hookspec = pluggy.HookspecMarker("kube-hunter")


@hookspec
def parser_add_arguments(parser: ArgumentParser):
"""Add arguments to the ArgumentParser.

If a plugin requires an aditional argument, it should implement this hook
and add the argument to the Argument Parser

@param parser: an ArgumentParser, calls parser.add_argument on it
"""


@hookspec
def load_plugin(args):
"""Plugins that wish to execute code after the argument parsing
should implement this hook.

@param args: all parsed arguments passed to kube-hunter
"""
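Together, these two files define the plugin surface: `hookspecs` declares the hooks, and `initialize_plugin_manager` loads implementations registered under the `kube_hunter` setuptools entry point. A sketch of what a third-party plugin module would look like (the flag name is illustrative):

```python
import pluggy

hookimpl = pluggy.HookimplMarker("kube-hunter")


@hookimpl
def parser_add_arguments(parser):
    # add plugin-specific CLI flags before parsing happens
    parser.add_argument("--example-flag", action="store_true", help="example plugin flag")


@hookimpl
def load_plugin(args):
    # runs after argument parsing, before the hunt starts
    if args.example_flag:
        print("example plugin loaded")
```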
@@ -13,5 +13,4 @@ staticx
black
pre-commit
flake8-bugbear
flake8-mypy
pluggy
PrettyTable == 0.7.2
setup.cfg

@@ -32,13 +32,11 @@ install_requires =
netifaces
scapy>=2.4.3
requests
PrettyTable
PrettyTable==0.7.2
urllib3>=1.24.3
ruamel.yaml
future
packaging
dataclasses
pluggy
setup_requires =
setuptools>=30.3.0
setuptools_scm
setup.py (9 changes)

@@ -1,7 +1,6 @@
from configparser import ConfigParser
from pkg_resources import parse_requirements
from subprocess import check_call
from typing import Any, List
from pkg_resources import parse_requirements
from configparser import ConfigParser
from setuptools import setup, Command

@@ -9,7 +8,7 @@ class ListDependenciesCommand(Command):
"""A custom command to list dependencies"""

description = "list package dependencies"
user_options: List[Any] = []
user_options = []

def initialize_options(self):
pass

@@ -28,7 +27,7 @@ class PyInstallerCommand(Command):
"""A custom command to run PyInstaller to build standalone executable."""

description = "run PyInstaller on kube-hunter entrypoint"
user_options: List[Any] = []
user_options = []

def initialize_options(self):
pass
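Both custom commands in this setup.py share the same minimal `Command` shape; the `user_options: List[Any]` annotation is the only thing this diff changes. The skeleton, for reference:

```python
from setuptools import Command


class ExampleCommand(Command):
    """Minimal custom setuptools command skeleton"""

    description = "example command"
    user_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        print("running example command")
```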
@@ -1,16 +1,12 @@
import requests_mock
import json

from kube_hunter.conf import Config, set_config
from kube_hunter.core.events.types import NewHostEvent

set_config(Config())


# Testing if it doesn't try to run get_cloud if the cloud type is already set.
# get_cloud(1.2.3.4) will result with an error
def test_presetcloud():
""" Testing if it doesn't try to run get_cloud if the cloud type is already set.
get_cloud(1.2.3.4) will result with an error
"""
expcted = "AWS"
hostEvent = NewHostEvent(host="1.2.3.4", cloud=expcted)
assert expcted == hostEvent.cloud
@@ -1,9 +1,3 @@
# flake8: noqa: E402

from kube_hunter.conf import Config, set_config

set_config(Config(active=True))

from kube_hunter.core.events.handler import handler
from kube_hunter.modules.discovery.apiserver import ApiServiceDiscovery
from kube_hunter.modules.discovery.dashboard import KubeDashboard as KubeDashboardDiscovery

@@ -96,20 +90,23 @@ def test_passive_hunters_registered():
assert expected_odd == actual_odd, "Unexpected passive hunters are registered"


def test_active_hunters_registered():
expected_missing = set()
expected_odd = set()

registered_active = remove_test_hunters(handler.active_hunters.keys())
actual_missing = ACTIVE_HUNTERS - registered_active
actual_odd = registered_active - ACTIVE_HUNTERS

assert expected_missing == actual_missing, "Active hunters are missing"
assert expected_odd == actual_odd, "Unexpected active hunters are registered"
# TODO (#334): Active hunters registration cannot be tested since it requires `config.active` to be set
# def test_active_hunters_registered():
#     expected_missing = set()
#     expected_odd = set()
#
#     registered_active = remove_test_hunters(handler.active_hunters.keys())
#     actual_missing = ACTIVE_HUNTERS - registered_active
#     actual_odd = registered_active - ACTIVE_HUNTERS
#
#     assert expected_missing == actual_missing, "Active hunters are missing"
#     assert expected_odd == actual_odd, "Unexpected active hunters are registered"


def test_all_hunters_registered():
expected = PASSIVE_HUNTERS | ACTIVE_HUNTERS
# TODO: Enable active hunting mode in testing
#     expected = PASSIVE_HUNTERS | ACTIVE_HUNTERS
expected = PASSIVE_HUNTERS
actual = remove_test_hunters(handler.all_hunters.keys())

assert expected == actual
@@ -1,12 +1,10 @@
import time

from kube_hunter.conf import Config, set_config
from kube_hunter.core.types import Hunter
from kube_hunter.core.events.types import Event, Service
from kube_hunter.core.events import handler

counter = 0
set_config(Config())


class OnceOnlyEvent(Service, Event):
@@ -1,11 +1,6 @@
# flake8: noqa: E402
import requests_mock
import time

from kube_hunter.conf import Config, set_config

set_config(Config())

from kube_hunter.modules.discovery.apiserver import ApiServer, ApiServiceDiscovery
from kube_hunter.core.events.types import Event
from kube_hunter.core.events import handler
@@ -1,79 +1,78 @@
# flake8: noqa: E402
import json
import requests_mock
import pytest

from netaddr import IPNetwork, IPAddress
from typing import List
from kube_hunter.conf import Config, get_config, set_config

set_config(Config())

from kube_hunter.core.events import handler
from kube_hunter.core.types import Hunter
from kube_hunter.modules.discovery.hosts import (
FromPodHostDiscovery,
RunningAsPodEvent,
HostScanEvent,
AzureMetadataApi,
HostDiscoveryHelpers,
)
from kube_hunter.core.events.types import NewHostEvent
from kube_hunter.core.events import handler
from kube_hunter.conf import config


class TestFromPodHostDiscovery:
@staticmethod
def _make_response(*subnets: List[tuple]) -> str:
return json.dumps(
{
"network": {
"interface": [
{"ipv4": {"subnet": [{"address": address, "prefix": prefix} for address, prefix in subnets]}}
]
}
}
def test_FromPodHostDiscovery():

with requests_mock.Mocker() as m:
e = RunningAsPodEvent()

config.azure = False
config.remote = None
config.cidr = None
m.get(
"http://169.254.169.254/metadata/instance?api-version=2017-08-01", status_code=404,
)
f = FromPodHostDiscovery(e)
assert not f.is_azure_pod()
# TODO For now we don't test the traceroute discovery version
# f.execute()

def test_is_azure_pod_request_fail(self):
f = FromPodHostDiscovery(RunningAsPodEvent())

with requests_mock.Mocker() as m:
m.get("http://169.254.169.254/metadata/instance?api-version=2017-08-01", status_code=404)
result = f.is_azure_pod()

assert not result

def test_is_azure_pod_success(self):
f = FromPodHostDiscovery(RunningAsPodEvent())

with requests_mock.Mocker() as m:
m.get(
"http://169.254.169.254/metadata/instance?api-version=2017-08-01",
text=TestFromPodHostDiscovery._make_response(("3.4.5.6", "255.255.255.252")),
)
result = f.is_azure_pod()

assert result

def test_execute_scan_cidr(self):
set_config(Config(cidr="1.2.3.4/30"))
f = FromPodHostDiscovery(RunningAsPodEvent())
# Test that we generate NewHostEvent for the addresses reported by the Azure Metadata API
config.azure = True
m.get(
"http://169.254.169.254/metadata/instance?api-version=2017-08-01",
text='{"network":{"interface":[{"ipv4":{"subnet":[{"address": "3.4.5.6", "prefix": "255.255.255.252"}]}}]}}',
)
assert f.is_azure_pod()
f.execute()

def test_execute_scan_remote(self):
set_config(Config(remote="1.2.3.4"))
f = FromPodHostDiscovery(RunningAsPodEvent())
# Test that we don't trigger a HostScanEvent unless either config.remote or config.cidr are configured
config.remote = "1.2.3.4"
f.execute()

config.azure = False
config.remote = None
config.cidr = "1.2.3.4/24"
f.execute()


# In this set of tests we should only trigger HostScanEvent when remote or cidr are set
@handler.subscribe(HostScanEvent)
class HunterTestHostDiscovery(Hunter):
"""TestHostDiscovery
In this set of tests we should only trigger HostScanEvent when remote or cidr are set
"""

class testHostDiscovery(object):
def __init__(self, event):
config = get_config()
assert config.remote is not None or config.cidr is not None
assert config.remote == "1.2.3.4" or config.cidr == "1.2.3.4/30"
assert config.remote == "1.2.3.4" or config.cidr == "1.2.3.4/24"


# In this set of tests we should only get as far as finding a host if it's Azure
# because we're not running the code that would normally be triggered by a HostScanEvent
@handler.subscribe(NewHostEvent)
class testHostDiscoveryEvent(object):
def __init__(self, event):
assert config.azure
assert str(event.host).startswith("3.4.5.")
assert config.remote is None
assert config.cidr is None


# Test that we only report this event for Azure hosts
@handler.subscribe(AzureMetadataApi)
class testAzureMetadataApi(object):
def __init__(self, event):
assert config.azure


class TestDiscoveryUtils:
@@ -1,11 +1,6 @@
# flake8: noqa: E402
import requests_mock
import time

from kube_hunter.conf import Config, set_config

set_config(Config())

from kube_hunter.modules.hunting.apiserver import (
AccessApiServer,
AccessApiServerWithToken,
@@ -1,42 +0,0 @@
# flake8: noqa: E402
from kube_hunter.conf import Config, set_config

set_config(Config())

from kube_hunter.core.events.types import Event
from kube_hunter.modules.hunting.certificates import CertificateDiscovery, CertificateEmail
from kube_hunter.core.events import handler


def test_CertificateDiscovery():
cert = """
-----BEGIN CERTIFICATE-----
MIIDZDCCAkwCCQCAzfCLqrJvuTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
UzELMAkGA1UECAwCQ0ExEDAOBgNVBAoMB05vZGUuanMxETAPBgNVBAsMCG5vZGUt
Z3lwMRIwEAYDVQQDDAlsb2NhbGhvc3QxHzAdBgkqhkiG9w0BCQEWEGJ1aWxkQG5v
ZGVqcy5vcmcwHhcNMTkwNjIyMDYyMjMzWhcNMjIwNDExMDYyMjMzWjB0MQswCQYD
VQQGEwJVUzELMAkGA1UECAwCQ0ExEDAOBgNVBAoMB05vZGUuanMxETAPBgNVBAsM
CG5vZGUtZ3lwMRIwEAYDVQQDDAlsb2NhbGhvc3QxHzAdBgkqhkiG9w0BCQEWEGJ1
aWxkQG5vZGVqcy5vcmcwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDS
CHjvtVW4HdbbUwZ/ZV9s6U4x0KSoyNQrsCZjB8kRpFPe50DS5mfmu2SNBGYKRgzk
4QEEwFB9N2o8YTWsCefSRl6ti4ToPZqulU4hhRKYrEGtMJcRzi3IN7s200JaO3UH
01Su8ruO0NESb5zEU1Ykfh8Lub8TGEAINmgI61d/5d5Aq3kDjUHQJt1Ekw03Ylnu
juQyCGZxLxnngu0mIvwzyL/UeeUgsfQLzvppUk6In7tC1zzMjSPWo0c8qu6KvrW4
bKYnkZkzdQifzbpO5ERMEsh5HWq0uHa6+dgcVHFvlhdqF4Uat87ygNplVf0txsZB
MNVqbz1k6xkZYMnzDoydAgMBAAEwDQYJKoZIhvcNAQELBQADggEBADspZGtKpWxy
J1W3FA1aeQhMvequQTcMRz4avkm4K4HfTdV1iVD4CbvdezBphouBlyLVLDFJP7RZ
m7dBJVgBwnxufoFLne8cR2MGqDRoySbFT1AtDJdxabE6Fg+QGUpgOQfeBJ6ANlSB
+qJ+HG4QA+Ouh5hxz9mgYwkIsMUABHiwENdZ/kT8Edw4xKgd3uH0YP4iiePMD66c
rzW3uXH5J1jnKgBlpxtog4P6dHCcoq+PZJ17W5bdXNyqC1LPzQqniZ2BNcEZ4ix3
slAZAOWD1zLLGJhBPMV1fa0sHNBWc6oicr3YK/IDb0cp9kiLvnUu1pHy+LWQGqtC
rceJuGsnJEQ=
-----END CERTIFICATE-----
"""
c = CertificateDiscovery(Event())
c.examine_certificate(cert)


@handler.subscribe(CertificateEmail)
class test_CertificateEmail(object):
def __init__(self, event):
assert event.email == b"build@nodejs.org0"
@@ -1,10 +1,5 @@
# flake8: noqa: E402
import time

from kube_hunter.conf import Config, set_config

set_config(Config())

from kube_hunter.core.events import handler
from kube_hunter.core.events.types import K8sVersionDisclosure
from kube_hunter.modules.hunting.cves import (
@@ -2,11 +2,7 @@ import json

from types import SimpleNamespace
from requests_mock import Mocker
from kube_hunter.conf import Config, set_config

set_config(Config())

from kube_hunter.modules.hunting.dashboard import KubeDashboard  # noqa: E402
from kube_hunter.modules.hunting.dashboard import KubeDashboard


class TestKubeDashboard:
@@ -1,8 +1,3 @@
# flake8: noqa: E402
from kube_hunter.conf import Config, set_config

set_config(Config())

from kube_hunter.modules.report import get_reporter, get_dispatcher
from kube_hunter.modules.report.factory import (
YAMLReporter,
tests/plugins/test_hooks.py (deleted)

@@ -1,13 +0,0 @@
from kube_hunter.plugins import hookimpl

return_string = "return_string"


@hookimpl
def parser_add_arguments(parser):
return return_string


@hookimpl
def load_plugin(args):
return return_string
@@ -1,17 +0,0 @@
from argparse import ArgumentParser
from tests.plugins import test_hooks
from kube_hunter.plugins import initialize_plugin_manager


def test_all_plugin_hooks():
pm = initialize_plugin_manager()
pm.register(test_hooks)

# Testing parser_add_arguments
parser = ArgumentParser("Test Argument Parser")
results = pm.hook.parser_add_arguments(parser=parser)
assert test_hooks.return_string in results

# Testing load_plugin
results = pm.hook.load_plugin(args=[])
assert test_hooks.return_string in results