mirror of
https://github.com/aquasecurity/kube-hunter.git
synced 2026-02-28 08:43:54 +00:00
Compare commits
7 Commits
v0.5.0
...
bugfix/kub
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
078db779ae | ||
|
|
c96312b91e | ||
|
|
a7d26452fb | ||
|
|
e63efddf9f | ||
|
|
6689005544 | ||
|
|
0b90e0e43d | ||
|
|
65eefed721 |
58
README.md
58
README.md
@@ -27,21 +27,27 @@ kube-hunter hunts for security weaknesses in Kubernetes clusters. The tool was d
|
||||
Table of Contents
|
||||
=================
|
||||
|
||||
* [Hunting](#hunting)
|
||||
* [Where should I run kube-hunter?](#where-should-i-run-kube-hunter)
|
||||
* [Scanning options](#scanning-options)
|
||||
* [Active Hunting](#active-hunting)
|
||||
* [List of tests](#list-of-tests)
|
||||
* [Nodes Mapping](#nodes-mapping)
|
||||
* [Output](#output)
|
||||
* [Dispatching](#dispatching)
|
||||
* [Advanced Usage](#advanced-usage)
|
||||
* [Deployment](#deployment)
|
||||
* [On Machine](#on-machine)
|
||||
* [Prerequisites](#prerequisites)
|
||||
* [Container](#container)
|
||||
* [Pod](#pod)
|
||||
* [Contribution](#contribution)
|
||||
- [Table of Contents](#table-of-contents)
|
||||
- [Hunting](#hunting)
|
||||
- [Where should I run kube-hunter?](#where-should-i-run-kube-hunter)
|
||||
- [Scanning options](#scanning-options)
|
||||
- [Authentication](#authentication)
|
||||
- [Active Hunting](#active-hunting)
|
||||
- [List of tests](#list-of-tests)
|
||||
- [Nodes Mapping](#nodes-mapping)
|
||||
- [Output](#output)
|
||||
- [Dispatching](#dispatching)
|
||||
- [Advanced Usage](#advanced-usage)
|
||||
- [Azure Quick Scanning](#azure-quick-scanning)
|
||||
- [Deployment](#deployment)
|
||||
- [On Machine](#on-machine)
|
||||
- [Prerequisites](#prerequisites)
|
||||
- [Install with pip](#install-with-pip)
|
||||
- [Run from source](#run-from-source)
|
||||
- [Container](#container)
|
||||
- [Pod](#pod)
|
||||
- [Contribution](#contribution)
|
||||
- [License](#license)
|
||||
|
||||
## Hunting
|
||||
|
||||
@@ -53,7 +59,7 @@ Run kube-hunter on any machine (including your laptop), select Remote scanning a
|
||||
|
||||
You can run kube-hunter directly on a machine in the cluster, and select the option to probe all the local network interfaces.
|
||||
|
||||
You can also run kube-hunter in a pod within the cluster. This indicates how exposed your cluster would be if one of your application pods is compromised (through a software vulnerability, for example).
|
||||
You can also run kube-hunter in a pod within the cluster. This indicates how exposed your cluster would be if one of your application pods is compromised (through a software vulnerability, for example). (_`--pod` flag_)
|
||||
|
||||
### Scanning options
|
||||
|
||||
@@ -76,6 +82,26 @@ To specify interface scanning, you can use the `--interface` option (this will s
|
||||
To specify a specific CIDR to scan, use the `--cidr` option. Example:
|
||||
`kube-hunter --cidr 192.168.0.0/24`
|
||||
|
||||
4. **Kubernetes node auto-discovery**
|
||||
|
||||
Set `--k8s-auto-discover-nodes` flag to query Kubernetes for all nodes in the cluster, and then attempt to scan them all. By default, it will use [in-cluster config](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) to connect to the Kubernetes API. If you'd like to use an explicit kubeconfig file, set `--kubeconfig /location/of/kubeconfig/file`.
|
||||
|
||||
Also note, that this is always done when using `--pod` mode.
|
||||
|
||||
### Authentication
|
||||
In order to mimic an attacker in its early stages, kube-hunter requires no authentication for the hunt.
|
||||
|
||||
* **Impersonate** - You can provide kube-hunter with a specific service account token to use when hunting by manually passing the JWT Bearer token of the service-account secret with the `--service-account-token` flag.
|
||||
|
||||
Example:
|
||||
```bash
|
||||
$ kube-hunter --active --service-account-token eyJhbGciOiJSUzI1Ni...
|
||||
```
|
||||
|
||||
* When running with the `--pod` flag, kube-hunter uses the service account token [mounted inside the pod](https://kubernetes.io/docs/reference/access-authn-authz/service-accounts-admin/) to authenticate to services it finds during the hunt.
|
||||
* if specified, `--service-account-token` flag takes priority when running as a pod
|
||||
|
||||
|
||||
### Active Hunting
|
||||
|
||||
Active hunting is an option in which kube-hunter will exploit vulnerabilities it finds, to explore for further vulnerabilities.
|
||||
|
||||
@@ -197,9 +197,9 @@ GEM
|
||||
html-pipeline (~> 2.2)
|
||||
jekyll (>= 3.0, < 5.0)
|
||||
kramdown (2.3.0)
|
||||
rexml
|
||||
rexml (>= 3.2.5)
|
||||
kramdown-parser-gfm (1.1.0)
|
||||
kramdown (~> 2.0)
|
||||
kramdown (>= 2.3.1)
|
||||
liquid (4.0.3)
|
||||
listen (3.4.0)
|
||||
rb-fsevent (~> 0.10, >= 0.10.3)
|
||||
@@ -212,7 +212,7 @@ GEM
|
||||
jekyll-seo-tag (~> 2.1)
|
||||
minitest (5.14.3)
|
||||
multipart-post (2.1.1)
|
||||
nokogiri (1.11.1)
|
||||
nokogiri (>= 1.11.4)
|
||||
mini_portile2 (~> 2.5.0)
|
||||
racc (~> 1.4)
|
||||
octokit (4.20.0)
|
||||
|
||||
@@ -25,6 +25,9 @@ config = Config(
|
||||
quick=args.quick,
|
||||
remote=args.remote,
|
||||
statistics=args.statistics,
|
||||
k8s_auto_discover_nodes=args.k8s_auto_discover_nodes,
|
||||
service_account_token=args.service_account_token,
|
||||
kubeconfig=args.kubeconfig,
|
||||
)
|
||||
setup_logger(args.log, args.log_file)
|
||||
set_config(config)
|
||||
@@ -88,7 +91,7 @@ hunt_started = False
|
||||
|
||||
def main():
|
||||
global hunt_started
|
||||
scan_options = [config.pod, config.cidr, config.remote, config.interface]
|
||||
scan_options = [config.pod, config.cidr, config.remote, config.interface, config.k8s_auto_discover_nodes]
|
||||
try:
|
||||
if args.list:
|
||||
list_hunters()
|
||||
|
||||
@@ -36,6 +36,9 @@ class Config:
|
||||
remote: Optional[str] = None
|
||||
reporter: Optional[Any] = None
|
||||
statistics: bool = False
|
||||
k8s_auto_discover_nodes: bool = False
|
||||
service_account_token: Optional[str] = None
|
||||
kubeconfig: Optional[str] = None
|
||||
|
||||
|
||||
_config: Optional[Config] = None
|
||||
|
||||
@@ -46,6 +46,34 @@ def parser_add_arguments(parser):
|
||||
help="One or more remote ip/dns to hunt",
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
"--k8s-auto-discover-nodes",
|
||||
action="store_true",
|
||||
help="Enables automatic detection of all nodes in a Kubernetes cluster "
|
||||
"by quering the Kubernetes API server. "
|
||||
"It supports both in-cluster config (when running as a pod), "
|
||||
"and a specific kubectl config file (use --kubeconfig to set this). "
|
||||
"By default, when this flag is set, it will use in-cluster config. "
|
||||
"NOTE: this is automatically switched on in --pod mode.",
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
"--service-account-token",
|
||||
type=str,
|
||||
metavar="JWT_TOKEN",
|
||||
help="Manually specify the service account jwt token to use for authenticating in the hunting process "
|
||||
"NOTE: This overrides the loading of the pod's bounded authentication when running in --pod mode",
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
"--kubeconfig",
|
||||
type=str,
|
||||
metavar="KUBECONFIG",
|
||||
default=None,
|
||||
help="Specify the kubeconfig file to use for Kubernetes nodes auto discovery "
|
||||
" (to be used in conjuction with the --k8s-auto-discover-nodes flag.",
|
||||
)
|
||||
|
||||
parser.add_argument("--active", action="store_true", help="Enables active hunting")
|
||||
|
||||
parser.add_argument(
|
||||
|
||||
@@ -6,7 +6,7 @@ from threading import Thread
|
||||
|
||||
from kube_hunter.conf import get_config
|
||||
from kube_hunter.core.types import ActiveHunter, HunterBase
|
||||
from kube_hunter.core.events.types import Vulnerability, EventFilterBase
|
||||
from kube_hunter.core.events.types import Vulnerability, EventFilterBase, MultipleEventsContainer
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -19,11 +19,33 @@ class EventQueue(Queue):
|
||||
self.active_hunters = dict()
|
||||
self.all_hunters = dict()
|
||||
|
||||
self.hooks = defaultdict(list)
|
||||
self.filters = defaultdict(list)
|
||||
self.running = True
|
||||
self.workers = list()
|
||||
|
||||
# -- Regular Subscription --
|
||||
# Structure: key: Event Class, value: tuple(Registered Hunter, Predicate Function)
|
||||
self.hooks = defaultdict(list)
|
||||
self.filters = defaultdict(list)
|
||||
# --------------------------
|
||||
|
||||
# -- Multiple Subscription --
|
||||
# Structure: key: Event Class, value: tuple(Registered Hunter, Predicate Function)
|
||||
self.multi_hooks = defaultdict(list)
|
||||
|
||||
# When subscribing to multiple events, this gets populated with required event classes
|
||||
# Structure: key: Hunter Class, value: set(RequiredEventClass1, RequiredEventClass2)
|
||||
self.hook_dependencies = defaultdict(set)
|
||||
|
||||
# To keep track of fulfilled dependencies. we need to have a structure which saves historical instanciated
|
||||
# events mapped to a registered hunter.
|
||||
# We used a 2 dimensional dictionary in order to fulfill two demands:
|
||||
# * correctly count published required events
|
||||
# * save historical events fired, easily sorted by their type
|
||||
#
|
||||
# Structure: hook_fulfilled_deps[hunter_class] -> fulfilled_events_for_hunter[event_class] -> [EventObject, EventObject2]
|
||||
self.hook_fulfilled_deps = defaultdict(lambda: defaultdict(list))
|
||||
# ---------------------------
|
||||
|
||||
for _ in range(num_worker):
|
||||
t = Thread(target=self.worker)
|
||||
t.daemon = True
|
||||
@@ -34,16 +56,66 @@ class EventQueue(Queue):
|
||||
t.daemon = True
|
||||
t.start()
|
||||
|
||||
# decorator wrapping for easy subscription
|
||||
"""
|
||||
######################################################
|
||||
+ ----------------- Public Methods ----------------- +
|
||||
######################################################
|
||||
"""
|
||||
|
||||
def subscribe(self, event, hook=None, predicate=None):
    """
    The Subscribe Decorator - For Regular Registration
    Use this to register for one event only. Your hunter will execute each time this event is published.

    @param event - Event class to subscribe to
    @param predicate - Optional: Function that will be called with the published event as a parameter before trigger.
                       If its return value is False, the Hunter will not run (default=None).
    @param hook - Hunter class to register for (ignore when using as a decorator)
    """

    # Decorator form: the decorated hunter class arrives here as `hook`.
    def wrapper(hook):
        self.subscribe_event(event, hook=hook, predicate=predicate)
        return hook

    return wrapper
|
||||
|
||||
# wrapper takes care of the subscribe once mechanism
|
||||
def subscribe_many(self, events, hook=None, predicates=None):
    """
    The Subscribe Many Decorator - For Multiple Registration,
    When your attack needs several prerequisites to exist in the cluster, You need to register for multiple events.
    Your hunter will execute once for every new combination of required events.
    For example:
    1. event A was published 3 times
    2. event B was published once.
    3. event B was published again
    Your hunter will execute 2 times:
    * (on step 2) with the newest version of A
    * (on step 3) with the newest version of A and newest version of B

    @param events - List of event classes to subscribe to
    @param predicates - Optional: List of functions that will be called with the published event as a parameter before trigger.
                        If its return value is False, the Hunter will not run (default=None).
    @param hook - Hunter class to register for (ignore when using as a decorator)
    """

    # Decorator form: the decorated hunter class arrives here as `hook`.
    def wrapper(hook):
        self.subscribe_events(events, hook=hook, predicates=predicates)
        return hook

    return wrapper
|
||||
|
||||
def subscribe_once(self, event, hook=None, predicate=None):
|
||||
"""
|
||||
The Subscribe Once Decorator - For Single Trigger Registration,
|
||||
Use this when you want your hunter to execute only in your entire program run
|
||||
wraps subscribe_event method
|
||||
|
||||
@param events - List of event classes to subscribe to
|
||||
@param predicates - Optional: List of function that will be called with the published event as a parameter before trigger.
|
||||
If it's return value is False, the Hunter will not run (default=None).
|
||||
@param hook - Hunter class to register for (ignore when using as a decorator)
|
||||
"""
|
||||
|
||||
def wrapper(hook):
|
||||
# installing a __new__ magic method on the hunter
|
||||
# which will remove the hunter from the list upon creation
|
||||
@@ -58,29 +130,160 @@ class EventQueue(Queue):
|
||||
|
||||
return wrapper
|
||||
|
||||
# getting uninstantiated event object
|
||||
def subscribe_event(self, event, hook=None, predicate=None):
|
||||
def publish_event(self, event, caller=None):
    """
    The Publish Event Method - For Publishing Events To Kube-Hunter's Queue

    @param event: instantiated event object to publish
    @param caller: Optional: the Hunter object that publishes this event;
                   used for event chaining and vulnerability statistics.
    """
    # Document that the hunter published a vulnerability (if it's indeed a vulnerability)
    # For statistics options
    self._increase_vuln_count(event, caller)

    # sets the event's parent to be its publisher hunter.
    self._set_event_chain(event, caller)

    # applying filters on the event, before publishing it to subscribers.
    # if filter returned None, not proceeding to publish
    event = self.apply_filters(event)
    if event:
        # If event was rewritten by a filter, make sure it's linked again
        self._set_event_chain(event, caller)

        # Regular Hunter registrations - publish logic
        # Here we iterate over all the registered-to events:
        for hooked_event in self.hooks.keys():
            # We check if the event we want to publish is an inherited class of the current registered-to iterated event
            # Meaning - if this is a relevant event:
            if hooked_event in event.__class__.__mro__:
                # If so, we want to publish to all registered hunters.
                for hook, predicate in self.hooks[hooked_event]:
                    if predicate and not predicate(event):
                        continue

                    self.put(hook(event))
                    logger.debug(f"Event {event.__class__} got published to hunter - {hook} with {event}")

        # Multiple Hunter registrations - publish logic
        # Here we iterate over all the registered-to events:
        for hooked_event in self.multi_hooks.keys():
            # We check if the event we want to publish is an inherited class of the current registered-to iterated event
            # Meaning - if this is a relevant event:
            if hooked_event in event.__class__.__mro__:
                # now we iterate over the corresponding registered hunters.
                for hook, predicate in self.multi_hooks[hooked_event]:
                    if predicate and not predicate(event):
                        continue

                    # record this event in the hunter's fulfilled-dependencies store
                    self._update_multi_hooks(hook, event)

                    # only trigger the hunter once every required event class was seen
                    if self._is_all_fulfilled_for_hunter(hook):
                        events_container = MultipleEventsContainer(self._get_latest_events_from_multi_hooks(hook))
                        self.put(hook(events_container))
                        logger.debug(
                            f"Multiple subscription requirements were met for hunter {hook}. events container was \
published with {self.hook_fulfilled_deps[hook].keys()}"
                        )
||||
|
||||
"""
|
||||
######################################################
|
||||
+ ---------------- Private Methods ----------------- +
|
||||
+ ---------------- (Backend Logic) ----------------- +
|
||||
######################################################
|
||||
"""
|
||||
|
||||
def _get_latest_events_from_multi_hooks(self, hook):
|
||||
"""
|
||||
Iterates over fulfilled deps for the hunter, and fetching the latest appended events from history
|
||||
"""
|
||||
latest_events = list()
|
||||
for event_class in self.hook_fulfilled_deps[hook].keys():
|
||||
latest_events.append(self.hook_fulfilled_deps[hook][event_class][-1])
|
||||
return latest_events
|
||||
|
||||
def _update_multi_hooks(self, hook, event):
    """
    Updates published events in the multi hooks fulfilled store.

    Appends `event` to the per-hunter history list keyed by the event's class.

    @param hook: the registered Hunter class
    @param event: the published event instance
    """
    self.hook_fulfilled_deps[hook][event.__class__].append(event)
|
||||
|
||||
def _is_all_fulfilled_for_hunter(self, hook):
    """
    Returns True when the multiple-event subscription of `hook` is fulfilled,
    i.e. every required event class has been published at least once; False otherwise.
    """
    # Check if the first dimension already contains all necessary event classes
    return len(self.hook_fulfilled_deps[hook].keys()) == len(self.hook_dependencies[hook])
|
||||
|
||||
def _set_event_chain(self, event, caller):
|
||||
"""
|
||||
Sets' events attribute chain.
|
||||
In here we link the event with it's publisher (Hunter),
|
||||
so in the next hunter that catches this event, we could access the previous one's attributes.
|
||||
|
||||
@param event: the event object to be chained
|
||||
@param caller: the Hunter object that published this event.
|
||||
"""
|
||||
if caller:
|
||||
event.previous = caller.event
|
||||
event.hunter = caller.__class__
|
||||
|
||||
def _register_hunters(self, hook=None):
|
||||
"""
|
||||
This method is called when a Hunter registers itself to the handler.
|
||||
this is done in order to track and correctly configure the current run of the program.
|
||||
|
||||
passive_hunters, active_hunters, all_hunters
|
||||
"""
|
||||
config = get_config()
|
||||
if ActiveHunter in hook.__mro__:
|
||||
if not config.active:
|
||||
return
|
||||
self.active_hunters[hook] = hook.__doc__
|
||||
return False
|
||||
else:
|
||||
self.active_hunters[hook] = hook.__doc__
|
||||
elif HunterBase in hook.__mro__:
|
||||
self.passive_hunters[hook] = hook.__doc__
|
||||
|
||||
if HunterBase in hook.__mro__:
|
||||
self.all_hunters[hook] = hook.__doc__
|
||||
|
||||
return True
|
||||
|
||||
def _register_filter(self, event, hook=None, predicate=None):
    """
    Register a filter class for an event type, skipping duplicate registrations.

    @param event: Event class the filter applies to
    @param hook: the EventFilterBase subclass being registered
    @param predicate: Optional gating callable (see subscribe)
    """
    # The original membership test compared `hook` against (hook, predicate)
    # tuples stored in the list, so it could never match; compare against the
    # registered hook itself so duplicates are actually skipped.
    if not any(registered == hook for registered, _ in self.filters[event]):
        self.filters[event].append((hook, predicate))
        # Use the module-level logger (not the root logger via logging.debug)
        # so records carry this module's name, consistent with the rest of the file.
        logger.debug(f"{hook} filter subscribed to {event}")
|
||||
|
||||
def _register_hook(self, event, hook=None, predicate=None):
    """
    Register a hunter class for an event type, skipping duplicate registrations.

    @param event: Event class the hunter subscribes to
    @param hook: the Hunter class being registered
    @param predicate: Optional gating callable (see subscribe)
    """
    # The original membership test compared `hook` against (hook, predicate)
    # tuples stored in the list, so it could never match; compare against the
    # registered hook itself so duplicates are actually skipped.
    if not any(registered == hook for registered, _ in self.hooks[event]):
        self.hooks[event].append((hook, predicate))
        # Module-level logger keeps the log record's module name, matching the
        # f-string logger.debug style used elsewhere in this file.
        logger.debug(f"{hook} subscribed to {event}")
|
||||
|
||||
def subscribe_event(self, event, hook=None, predicate=None):
|
||||
if not self._register_hunters(hook):
|
||||
return
|
||||
|
||||
# registering filters
|
||||
if EventFilterBase in hook.__mro__:
|
||||
if hook not in self.filters[event]:
|
||||
self.filters[event].append((hook, predicate))
|
||||
logger.debug(f"{hook} filter subscribed to {event}")
|
||||
|
||||
self._register_filter(event, hook, predicate)
|
||||
# registering hunters
|
||||
elif hook not in self.hooks[event]:
|
||||
self.hooks[event].append((hook, predicate))
|
||||
logger.debug(f"{hook} subscribed to {event}")
|
||||
else:
|
||||
self._register_hook(event, hook, predicate)
|
||||
|
||||
def subscribe_events(self, events, hook=None, predicates=None):
    """
    Non-decorator entry point backing subscribe_many: registers `hook`
    for every event class in `events`.

    @param events: list of Event classes the hunter requires
    @param hook: the Hunter (or EventFilterBase) class being registered
    @param predicates: Optional: list of gating callables, one per event
                       (default=None means no predicate for any event)
    """
    # Registration is skipped entirely when _register_hunters declines the hook
    # (e.g. an ActiveHunter while active hunting is disabled).
    if not self._register_hunters(hook):
        return False

    if predicates is None:
        predicates = [None] * len(events)

    # registering filters.
    if EventFilterBase in hook.__mro__:
        for event, predicate in zip(events, predicates):
            self._register_filter(event, hook, predicate)
    # registering hunters.
    else:
        for event, predicate in zip(events, predicates):
            self.multi_hooks[event].append((hook, predicate))

        # remember the full required-event set so publish_event can tell when
        # the multi-subscription is fulfilled
        self.hook_dependencies[hook] = frozenset(events)
|
||||
|
||||
def apply_filters(self, event):
|
||||
# if filters are subscribed, apply them on the event
|
||||
@@ -97,36 +300,11 @@ class EventQueue(Queue):
|
||||
return None
|
||||
return event
|
||||
|
||||
# getting instantiated event object
|
||||
def publish_event(self, event, caller=None):
|
||||
def _increase_vuln_count(self, event, caller):
|
||||
config = get_config()
|
||||
|
||||
# setting event chain
|
||||
if caller:
|
||||
event.previous = caller.event
|
||||
event.hunter = caller.__class__
|
||||
|
||||
# applying filters on the event, before publishing it to subscribers.
|
||||
# if filter returned None, not proceeding to publish
|
||||
event = self.apply_filters(event)
|
||||
if event:
|
||||
# If event was rewritten, make sure it's linked to its parent ('previous') event
|
||||
if caller:
|
||||
event.previous = caller.event
|
||||
event.hunter = caller.__class__
|
||||
|
||||
for hooked_event in self.hooks.keys():
|
||||
if hooked_event in event.__class__.__mro__:
|
||||
for hook, predicate in self.hooks[hooked_event]:
|
||||
if predicate and not predicate(event):
|
||||
continue
|
||||
|
||||
if config.statistics and caller:
|
||||
if Vulnerability in event.__class__.__mro__:
|
||||
caller.__class__.publishedVulnerabilities += 1
|
||||
|
||||
logger.debug(f"Event {event.__class__} got published with {event}")
|
||||
self.put(hook(event))
|
||||
if config.statistics and caller:
|
||||
if Vulnerability in event.__class__.__mro__:
|
||||
caller.__class__.publishedVulnerabilities += 1
|
||||
|
||||
# executes callbacks on dedicated thread as a daemon
|
||||
def worker(self):
|
||||
|
||||
@@ -62,6 +62,20 @@ class Event:
|
||||
return history
|
||||
|
||||
|
||||
class MultipleEventsContainer(Event):
    """
    Wrapper object handed to a hunter that registered for multiple events.

    Holds one instance per required event class; the hunter retrieves the
    event it needs with get_by_class().
    """

    def __init__(self, events):
        self.events = events

    def get_by_class(self, event_class):
        """Return the wrapped event whose exact class is `event_class`, or None if absent."""
        return next((candidate for candidate in self.events if candidate.__class__ == event_class), None)
||||
|
||||
|
||||
class Service:
|
||||
def __init__(self, name, path="", secure=True):
|
||||
self.name = name
|
||||
@@ -69,6 +83,12 @@ class Service:
|
||||
self.path = path
|
||||
self.role = "Node"
|
||||
|
||||
# if a service account token was specified, we load it to the Service class
|
||||
# We load it here because generally all kuberentes services could be authenticated with the token
|
||||
config = get_config()
|
||||
if config.service_account_token:
|
||||
self.auth_token = config.service_account_token
|
||||
|
||||
def get_name(self):
|
||||
return self.name
|
||||
|
||||
@@ -191,7 +211,7 @@ class ReportDispatched(Event):
|
||||
|
||||
|
||||
class K8sVersionDisclosure(Vulnerability, Event):
|
||||
"""The kubernetes version could be obtained from the {} endpoint """
|
||||
"""The kubernetes version could be obtained from the {} endpoint"""
|
||||
|
||||
def __init__(self, version, from_endpoint, extra_info=""):
|
||||
Vulnerability.__init__(
|
||||
|
||||
@@ -8,6 +8,7 @@ from netaddr import IPNetwork, IPAddress, AddrFormatError
|
||||
from netifaces import AF_INET, ifaddresses, interfaces, gateways
|
||||
|
||||
from kube_hunter.conf import get_config
|
||||
from kube_hunter.modules.discovery.kubernetes_client import list_all_k8s_cluster_nodes
|
||||
from kube_hunter.core.events import handler
|
||||
from kube_hunter.core.events.types import Event, NewHostEvent, Vulnerability
|
||||
from kube_hunter.core.types import Discovery, InformationDisclosure, AWS, Azure
|
||||
@@ -18,11 +19,17 @@ logger = logging.getLogger(__name__)
|
||||
class RunningAsPodEvent(Event):
|
||||
def __init__(self):
|
||||
self.name = "Running from within a pod"
|
||||
self.auth_token = self.get_service_account_file("token")
|
||||
self.client_cert = self.get_service_account_file("ca.crt")
|
||||
self.namespace = self.get_service_account_file("namespace")
|
||||
self.kubeservicehost = os.environ.get("KUBERNETES_SERVICE_HOST", None)
|
||||
|
||||
# if service account token was manually specified, we don't load the token file
|
||||
config = get_config()
|
||||
if config.service_account_token:
|
||||
self.auth_token = config.service_account_token
|
||||
else:
|
||||
self.auth_token = self.get_service_account_file("token")
|
||||
|
||||
# Event's logical location to be used mainly for reports.
|
||||
def location(self):
|
||||
location = "Local to Pod"
|
||||
@@ -114,6 +121,9 @@ class FromPodHostDiscovery(Discovery):
|
||||
|
||||
def execute(self):
|
||||
config = get_config()
|
||||
# Attempt to read all hosts from the Kubernetes API
|
||||
for host in list_all_k8s_cluster_nodes(config.kubeconfig):
|
||||
self.publish_event(NewHostEvent(host=host))
|
||||
# Scan any hosts that the user specified
|
||||
if config.remote or config.cidr:
|
||||
self.publish_event(HostScanEvent())
|
||||
@@ -200,7 +210,7 @@ class FromPodHostDiscovery(Discovery):
|
||||
|
||||
# for pod scanning
|
||||
def gateway_discovery(self):
|
||||
""" Retrieving default gateway of pod, which is usually also a contact point with the host """
|
||||
"""Retrieving default gateway of pod, which is usually also a contact point with the host"""
|
||||
return [[gateways()["default"][AF_INET][0], "24"]]
|
||||
|
||||
# querying AWS's interface metadata api v1 | works only from a pod
|
||||
@@ -223,7 +233,7 @@ class FromPodHostDiscovery(Discovery):
|
||||
|
||||
self.publish_event(AWSMetadataApi(cidr=cidr))
|
||||
|
||||
return cidr, "AWS"
|
||||
return [(address, subnet)], "AWS"
|
||||
|
||||
# querying AWS's interface metadata api v2 | works only from a pod
|
||||
def aws_metadata_v2_discovery(self):
|
||||
@@ -252,7 +262,7 @@ class FromPodHostDiscovery(Discovery):
|
||||
|
||||
self.publish_event(AWSMetadataApi(cidr=cidr))
|
||||
|
||||
return cidr, "AWS"
|
||||
return [(address, subnet)], "AWS"
|
||||
|
||||
# querying azure's interface metadata api | works only from a pod
|
||||
def azure_metadata_discovery(self):
|
||||
@@ -298,6 +308,9 @@ class HostDiscovery(Discovery):
|
||||
elif len(config.remote) > 0:
|
||||
for host in config.remote:
|
||||
self.publish_event(NewHostEvent(host=host))
|
||||
elif config.k8s_auto_discover_nodes:
|
||||
for host in list_all_k8s_cluster_nodes(config.kubeconfig):
|
||||
self.publish_event(NewHostEvent(host=host))
|
||||
|
||||
# for normal scanning
|
||||
def scan_interfaces(self):
|
||||
|
||||
27
kube_hunter/modules/discovery/kubernetes_client.py
Normal file
27
kube_hunter/modules/discovery/kubernetes_client.py
Normal file
@@ -0,0 +1,27 @@
|
||||
import logging
|
||||
import kubernetes
|
||||
|
||||
|
||||
def list_all_k8s_cluster_nodes(kube_config=None, client=None):
    """
    Yield every address of every node registered in the Kubernetes cluster.

    Note: this is a generator, so no work (including config loading) happens
    until the first iteration.

    @param kube_config: Optional path to a kubeconfig file; when None,
                        in-cluster configuration is loaded instead.
    @param client: Optional pre-built CoreV1Api-like client; when None a
                   default kubernetes.client.CoreV1Api() is created.

    On configuration or listing failure the error is logged and the generator
    simply yields nothing - callers treat node discovery as best-effort.
    """
    logger = logging.getLogger(__name__)
    try:
        if kube_config:
            logger.debug("Attempting to use kubeconfig file: %s", kube_config)
            kubernetes.config.load_kube_config(config_file=kube_config)
        else:
            logger.debug("Attempting to use in cluster Kubernetes config")
            kubernetes.config.load_incluster_config()
    except kubernetes.config.config_exception.ConfigException as ex:
        logger.debug(f"Failed to initiate Kubernetes client: {ex}")
        return

    try:
        if client is None:
            client = kubernetes.client.CoreV1Api()
        ret = client.list_node(watch=False)
        # Lazy %-style logging args (formatting deferred until the record is
        # actually emitted), consistent with the debug calls above.
        logger.info("Listed %d nodes in the cluster", len(ret.items))
        for item in ret.items:
            for addr in item.status.addresses:
                yield addr.address
    except Exception as ex:
        # Deliberate broad catch: discovery must not abort the hunt on
        # API/authorization errors; failure is logged and swallowed.
        logger.debug(f"Failed to list nodes from Kubernetes: {ex}")
|
||||
@@ -75,28 +75,28 @@ class ApiInfoDisclosure(Vulnerability, Event):
|
||||
|
||||
|
||||
class ListPodsAndNamespaces(ApiInfoDisclosure):
|
||||
""" Accessing pods might give an attacker valuable information"""
|
||||
"""Accessing pods might give an attacker valuable information"""
|
||||
|
||||
def __init__(self, evidence, using_token):
|
||||
ApiInfoDisclosure.__init__(self, evidence, using_token, "Listing pods")
|
||||
|
||||
|
||||
class ListNamespaces(ApiInfoDisclosure):
|
||||
""" Accessing namespaces might give an attacker valuable information """
|
||||
"""Accessing namespaces might give an attacker valuable information"""
|
||||
|
||||
def __init__(self, evidence, using_token):
|
||||
ApiInfoDisclosure.__init__(self, evidence, using_token, "Listing namespaces")
|
||||
|
||||
|
||||
class ListRoles(ApiInfoDisclosure):
|
||||
""" Accessing roles might give an attacker valuable information """
|
||||
"""Accessing roles might give an attacker valuable information"""
|
||||
|
||||
def __init__(self, evidence, using_token):
|
||||
ApiInfoDisclosure.__init__(self, evidence, using_token, "Listing roles")
|
||||
|
||||
|
||||
class ListClusterRoles(ApiInfoDisclosure):
|
||||
""" Accessing cluster roles might give an attacker valuable information """
|
||||
"""Accessing cluster roles might give an attacker valuable information"""
|
||||
|
||||
def __init__(self, evidence, using_token):
|
||||
ApiInfoDisclosure.__init__(self, evidence, using_token, "Listing cluster roles")
|
||||
@@ -118,7 +118,7 @@ class CreateANamespace(Vulnerability, Event):
|
||||
|
||||
class DeleteANamespace(Vulnerability, Event):
|
||||
|
||||
""" Deleting a namespace might give an attacker the option to affect application behavior """
|
||||
"""Deleting a namespace might give an attacker the option to affect application behavior"""
|
||||
|
||||
def __init__(self, evidence):
|
||||
Vulnerability.__init__(
|
||||
@@ -186,7 +186,7 @@ class PatchAClusterRole(Vulnerability, Event):
|
||||
|
||||
|
||||
class DeleteARole(Vulnerability, Event):
|
||||
""" Deleting a role might allow an attacker to affect access to resources in the namespace"""
|
||||
"""Deleting a role might allow an attacker to affect access to resources in the namespace"""
|
||||
|
||||
def __init__(self, evidence):
|
||||
Vulnerability.__init__(
|
||||
@@ -199,7 +199,7 @@ class DeleteARole(Vulnerability, Event):
|
||||
|
||||
|
||||
class DeleteAClusterRole(Vulnerability, Event):
|
||||
""" Deleting a cluster role might allow an attacker to affect access to resources in the cluster"""
|
||||
"""Deleting a cluster role might allow an attacker to affect access to resources in the cluster"""
|
||||
|
||||
def __init__(self, evidence):
|
||||
Vulnerability.__init__(
|
||||
@@ -212,7 +212,7 @@ class DeleteAClusterRole(Vulnerability, Event):
|
||||
|
||||
|
||||
class CreateAPod(Vulnerability, Event):
|
||||
""" Creating a new pod allows an attacker to run custom code"""
|
||||
"""Creating a new pod allows an attacker to run custom code"""
|
||||
|
||||
def __init__(self, evidence):
|
||||
Vulnerability.__init__(
|
||||
@@ -225,7 +225,7 @@ class CreateAPod(Vulnerability, Event):
|
||||
|
||||
|
||||
class CreateAPrivilegedPod(Vulnerability, Event):
|
||||
""" Creating a new PRIVILEGED pod would gain an attacker FULL CONTROL over the cluster"""
|
||||
"""Creating a new PRIVILEGED pod would gain an attacker FULL CONTROL over the cluster"""
|
||||
|
||||
def __init__(self, evidence):
|
||||
Vulnerability.__init__(
|
||||
@@ -238,7 +238,7 @@ class CreateAPrivilegedPod(Vulnerability, Event):
|
||||
|
||||
|
||||
class PatchAPod(Vulnerability, Event):
|
||||
""" Patching a pod allows an attacker to compromise and control it """
|
||||
"""Patching a pod allows an attacker to compromise and control it"""
|
||||
|
||||
def __init__(self, evidence):
|
||||
Vulnerability.__init__(
|
||||
@@ -251,7 +251,7 @@ class PatchAPod(Vulnerability, Event):
|
||||
|
||||
|
||||
class DeleteAPod(Vulnerability, Event):
|
||||
""" Deleting a pod allows an attacker to disturb applications on the cluster """
|
||||
"""Deleting a pod allows an attacker to disturb applications on the cluster"""
|
||||
|
||||
def __init__(self, evidence):
|
||||
Vulnerability.__init__(
|
||||
|
||||
@@ -41,7 +41,7 @@ class ArpSpoofHunter(ActiveHunter):
|
||||
return ans[ARP].hwsrc if ans else None
|
||||
|
||||
def detect_l3_on_host(self, arp_responses):
|
||||
""" returns True for an existence of an L3 network plugin """
|
||||
"""returns True for an existence of an L3 network plugin"""
|
||||
logger.debug("Attempting to detect L3 network plugin using ARP")
|
||||
unique_macs = list({response[ARP].hwsrc for _, response in arp_responses})
|
||||
|
||||
|
||||
@@ -303,7 +303,7 @@ class SecureKubeletPortHunter(Hunter):
|
||||
"""
|
||||
|
||||
class DebugHandlers:
|
||||
""" all methods will return the handler name if successful """
|
||||
"""all methods will return the handler name if successful"""
|
||||
|
||||
def __init__(self, path, pod, session=None):
|
||||
self.path = path + ("/" if not path.endswith("/") else "")
|
||||
|
||||
@@ -77,15 +77,17 @@ class VarLogMountHunter(Hunter):
|
||||
self.publish_event(WriteMountToVarLog(pods=pe_pods))
|
||||
|
||||
|
||||
@handler.subscribe(ExposedRunHandler)
|
||||
@handler.subscribe_many([ExposedRunHandler, WriteMountToVarLog])
|
||||
class ProveVarLogMount(ActiveHunter):
|
||||
"""Prove /var/log Mount Hunter
|
||||
Tries to read /etc/shadow on the host by running commands inside a pod with host mount to /var/log
|
||||
"""
|
||||
|
||||
def __init__(self, event):
|
||||
self.event = event
|
||||
self.base_path = f"https://{self.event.host}:{self.event.port}"
|
||||
self.write_mount_event = self.event.get_by_class(WriteMountToVarLog)
|
||||
self.event = self.write_mount_event
|
||||
|
||||
self.base_path = f"https://{self.write_mount_event.host}:{self.write_mount_event.port}"
|
||||
|
||||
def run(self, command, container):
|
||||
run_url = KubeletHandlers.RUN.value.format(
|
||||
@@ -96,20 +98,6 @@ class ProveVarLogMount(ActiveHunter):
|
||||
)
|
||||
return self.event.session.post(f"{self.base_path}/{run_url}", verify=False).text
|
||||
|
||||
# TODO: replace with multiple subscription to WriteMountToVarLog as well
|
||||
def get_varlog_mounters(self):
|
||||
config = get_config()
|
||||
logger.debug("accessing /pods manually on ProveVarLogMount")
|
||||
pods = self.event.session.get(
|
||||
f"{self.base_path}/" + KubeletHandlers.PODS.value,
|
||||
verify=False,
|
||||
timeout=config.network_timeout,
|
||||
).json()["items"]
|
||||
for pod in pods:
|
||||
volume = VarLogMountHunter(ExposedPodsHandler(pods=pods)).has_write_mount_to(pod, "/var/log")
|
||||
if volume:
|
||||
yield pod, volume
|
||||
|
||||
def mount_path_from_mountname(self, pod, mount_name):
|
||||
"""returns container name, and container mount path correlated to mount_name"""
|
||||
for container in pod["spec"]["containers"]:
|
||||
@@ -138,7 +126,7 @@ class ProveVarLogMount(ActiveHunter):
|
||||
return content
|
||||
|
||||
def execute(self):
|
||||
for pod, volume in self.get_varlog_mounters():
|
||||
for pod, volume in self.write_mount_event.pe_pods():
|
||||
for container, mount_path in self.mount_path_from_mountname(pod, volume["name"]):
|
||||
logger.debug("Correlated container to mount_name")
|
||||
cont = {
|
||||
|
||||
@@ -10,7 +10,7 @@ logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ServiceAccountTokenAccess(Vulnerability, Event):
|
||||
""" Accessing the pod service account token gives an attacker the option to use the server API """
|
||||
"""Accessing the pod service account token gives an attacker the option to use the server API"""
|
||||
|
||||
def __init__(self, evidence):
|
||||
Vulnerability.__init__(
|
||||
@@ -24,7 +24,7 @@ class ServiceAccountTokenAccess(Vulnerability, Event):
|
||||
|
||||
|
||||
class SecretsAccess(Vulnerability, Event):
|
||||
""" Accessing the pod's secrets within a compromised pod might disclose valuable data to a potential attacker"""
|
||||
"""Accessing the pod's secrets within a compromised pod might disclose valuable data to a potential attacker"""
|
||||
|
||||
def __init__(self, evidence):
|
||||
Vulnerability.__init__(
|
||||
|
||||
@@ -41,6 +41,7 @@ install_requires =
|
||||
packaging
|
||||
dataclasses
|
||||
pluggy
|
||||
kubernetes==12.0.1
|
||||
setup_requires =
|
||||
setuptools>=30.3.0
|
||||
setuptools_scm
|
||||
|
||||
@@ -6,6 +6,8 @@ from kube_hunter.core.events.types import Event, Service
|
||||
from kube_hunter.core.events import handler
|
||||
|
||||
counter = 0
|
||||
first_run = True
|
||||
|
||||
set_config(Config())
|
||||
|
||||
|
||||
@@ -19,6 +21,16 @@ class RegularEvent(Service, Event):
|
||||
Service.__init__(self, "Test Service")
|
||||
|
||||
|
||||
class AnotherRegularEvent(Service, Event):
|
||||
def __init__(self):
|
||||
Service.__init__(self, "Test Service (another)")
|
||||
|
||||
|
||||
class DifferentRegularEvent(Service, Event):
|
||||
def __init__(self):
|
||||
Service.__init__(self, "Test Service (different)")
|
||||
|
||||
|
||||
@handler.subscribe_once(OnceOnlyEvent)
|
||||
class OnceHunter(Hunter):
|
||||
def __init__(self, event):
|
||||
@@ -33,8 +45,36 @@ class RegularHunter(Hunter):
|
||||
counter += 1
|
||||
|
||||
|
||||
@handler.subscribe_many([DifferentRegularEvent, AnotherRegularEvent])
|
||||
class SmartHunter(Hunter):
|
||||
def __init__(self, events):
|
||||
global counter, first_run
|
||||
counter += 1
|
||||
|
||||
# we add an attribute on the second scan.
|
||||
# here we test that we get the latest event
|
||||
different_event = events.get_by_class(DifferentRegularEvent)
|
||||
if first_run:
|
||||
first_run = False
|
||||
assert not different_event.new_value
|
||||
else:
|
||||
assert different_event.new_value
|
||||
|
||||
|
||||
@handler.subscribe_many([DifferentRegularEvent, AnotherRegularEvent])
|
||||
class SmartHunter2(Hunter):
|
||||
def __init__(self, events):
|
||||
global counter
|
||||
counter += 1
|
||||
|
||||
# check if we can access the events
|
||||
assert events.get_by_class(DifferentRegularEvent).__class__ == DifferentRegularEvent
|
||||
assert events.get_by_class(AnotherRegularEvent).__class__ == AnotherRegularEvent
|
||||
|
||||
|
||||
def test_subscribe_mechanism():
|
||||
global counter
|
||||
counter = 0
|
||||
|
||||
# first test normal subscribe and publish works
|
||||
handler.publish_event(RegularEvent())
|
||||
@@ -43,13 +83,47 @@ def test_subscribe_mechanism():
|
||||
|
||||
time.sleep(0.02)
|
||||
assert counter == 3
|
||||
|
||||
|
||||
def test_subscribe_once_mechanism():
|
||||
global counter
|
||||
counter = 0
|
||||
|
||||
# testing the subscribe_once mechanism
|
||||
handler.publish_event(OnceOnlyEvent())
|
||||
handler.publish_event(OnceOnlyEvent())
|
||||
# testing the multiple subscription mechanism
|
||||
handler.publish_event(OnceOnlyEvent())
|
||||
|
||||
time.sleep(0.02)
|
||||
# should have been triggered once
|
||||
assert counter == 1
|
||||
counter = 0
|
||||
|
||||
handler.publish_event(OnceOnlyEvent())
|
||||
handler.publish_event(OnceOnlyEvent())
|
||||
handler.publish_event(OnceOnlyEvent())
|
||||
time.sleep(0.02)
|
||||
|
||||
assert counter == 0
|
||||
|
||||
|
||||
def test_subscribe_many_mechanism():
|
||||
global counter
|
||||
counter = 0
|
||||
|
||||
# testing the multiple subscription mechanism
|
||||
handler.publish_event(DifferentRegularEvent())
|
||||
handler.publish_event(DifferentRegularEvent())
|
||||
handler.publish_event(DifferentRegularEvent())
|
||||
handler.publish_event(DifferentRegularEvent())
|
||||
handler.publish_event(DifferentRegularEvent())
|
||||
handler.publish_event(AnotherRegularEvent())
|
||||
|
||||
time.sleep(0.02)
|
||||
# We expect SmartHunter and SmartHunter2 to be executed once. hence the counter should be 2
|
||||
assert counter == 2
|
||||
counter = 0
|
||||
|
||||
# Test using most recent event
|
||||
newer_version_event = DifferentRegularEvent()
|
||||
newer_version_event.new_value = True
|
||||
handler.publish_event(newer_version_event)
|
||||
|
||||
assert counter == 2
|
||||
|
||||
30
tests/discovery/test_k8s.py
Normal file
30
tests/discovery/test_k8s.py
Normal file
@@ -0,0 +1,30 @@
|
||||
from kube_hunter.conf import Config, set_config
|
||||
|
||||
from kube_hunter.modules.discovery.kubernetes_client import list_all_k8s_cluster_nodes
|
||||
from unittest.mock import MagicMock, patch
|
||||
|
||||
set_config(Config())
|
||||
|
||||
|
||||
def test_client_yields_ips():
|
||||
client = MagicMock()
|
||||
response = MagicMock()
|
||||
client.list_node.return_value = response
|
||||
response.items = [MagicMock(), MagicMock()]
|
||||
response.items[0].status.addresses = [MagicMock(), MagicMock()]
|
||||
response.items[0].status.addresses[0].address = "127.0.0.1"
|
||||
response.items[0].status.addresses[1].address = "127.0.0.2"
|
||||
response.items[1].status.addresses = [MagicMock()]
|
||||
response.items[1].status.addresses[0].address = "127.0.0.3"
|
||||
|
||||
with patch("kubernetes.config.load_incluster_config") as m:
|
||||
output = list(list_all_k8s_cluster_nodes(client=client))
|
||||
m.assert_called_once()
|
||||
|
||||
assert output == ["127.0.0.1", "127.0.0.2", "127.0.0.3"]
|
||||
|
||||
|
||||
def test_client_uses_kubeconfig():
|
||||
with patch("kubernetes.config.load_kube_config") as m:
|
||||
list(list_all_k8s_cluster_nodes(kube_config="/location", client=MagicMock()))
|
||||
m.assert_called_once_with(config_file="/location")
|
||||
Reference in New Issue
Block a user