Replace host sensor with node agent sensing (#1916)

In this change I used both Claude Code and Antigravity.

---------

Signed-off-by: Bezalel Brandwine <bez@softwine.net>
Bezbran authored on 2026-02-01 13:17:03 +02:00, committed by GitHub
parent 6ce0121a03
commit 25bd51e8b4
20 changed files with 391 additions and 1328 deletions

View File

@@ -1,74 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
labels:
app: kubescape-host-scanner
k8s-app: kubescape-host-scanner
kubernetes.io/metadata.name: kubescape-host-scanner
tier: kubescape-host-scanner-control-plane
name: kubescape
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: host-scanner
namespace: kubescape
labels:
app: host-scanner
k8s-app: kubescape-host-scanner
otel: enabled
spec:
selector:
matchLabels:
name: host-scanner
template:
metadata:
labels:
name: host-scanner
spec:
tolerations:
# this toleration is to have the DaemonSet runnable on all nodes (including masters)
# remove it if your masters can't run pods
- operator: Exists
containers:
- name: host-sensor
image: quay.io/kubescape/host-scanner:v1.0.61
securityContext:
allowPrivilegeEscalation: true
privileged: true
readOnlyRootFilesystem: true
ports:
- name: scanner # Do not change port name
containerPort: 7888
protocol: TCP
resources:
limits:
cpu: 0.1m
memory: 200Mi
requests:
cpu: 1m
memory: 200Mi
volumeMounts:
- mountPath: /host_fs
name: host-filesystem
startupProbe:
httpGet:
path: /readyz
port: 7888
failureThreshold: 30
periodSeconds: 1
livenessProbe:
httpGet:
path: /healthz
port: 7888
periodSeconds: 10
terminationGracePeriodSeconds: 120
dnsPolicy: ClusterFirstWithHostNet
automountServiceAccountToken: false
volumes:
- hostPath:
path: /
type: Directory
name: host-filesystem
hostPID: true
hostIPC: true

View File

@@ -18,6 +18,5 @@ func TestHostSensorHandlerMock(t *testing.T) {
require.Nil(t, status)
require.NoError(t, err)
- require.Empty(t, h.GetNamespace())
require.NoError(t, h.TearDown())
}

View File

@@ -0,0 +1,235 @@
package hostsensorutils
import (
"context"
stdjson "encoding/json"
"fmt"
"reflect"
"github.com/kubescape/go-logger"
"github.com/kubescape/go-logger/helpers"
k8shostsensor "github.com/kubescape/k8s-interface/hostsensor"
"github.com/kubescape/k8s-interface/k8sinterface"
"github.com/kubescape/opa-utils/objectsenvelopes/hostsensor"
"github.com/kubescape/opa-utils/reporthandling/apis"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
// getCRDResources retrieves resources from CRDs and converts them to HostSensorDataEnvelope format
func (hsh *HostSensorHandler) getCRDResources(ctx context.Context, resourceType k8shostsensor.HostSensorResource) ([]hostsensor.HostSensorDataEnvelope, error) {
pluralName := k8shostsensor.MapResourceToPlural(resourceType)
if pluralName == "" {
return nil, fmt.Errorf("unsupported resource type: %s", resourceType)
}
// List CRD resources
items, err := hsh.listCRDResources(ctx, pluralName, resourceType.String())
if err != nil {
return nil, err
}
// Convert to HostSensorDataEnvelope format
result := make([]hostsensor.HostSensorDataEnvelope, 0, len(items))
for _, item := range items {
envelope, err := hsh.convertCRDToEnvelope(item, resourceType)
if err != nil {
logger.L().Warning("Failed to convert CRD to envelope",
helpers.String("kind", resourceType.String()),
helpers.String("name", item.GetName()),
helpers.Error(err))
continue
}
result = append(result, envelope)
}
logger.L().Debug("Retrieved resources from CRDs",
helpers.String("kind", resourceType.String()),
helpers.Int("count", len(result)))
return result, nil
}
// convertCRDToEnvelope converts a CRD unstructured object to HostSensorDataEnvelope
func (hsh *HostSensorHandler) convertCRDToEnvelope(item unstructured.Unstructured, resourceType k8shostsensor.HostSensorResource) (hostsensor.HostSensorDataEnvelope, error) {
envelope := hostsensor.HostSensorDataEnvelope{}
// Set API version and kind
envelope.SetApiVersion(k8sinterface.JoinGroupVersion(hostsensor.GroupHostSensor, hostsensor.Version))
envelope.SetKind(resourceType.String())
// Set name (node name)
nodeName := item.GetName()
envelope.SetName(nodeName)
// Extract content from spec.content
content, found, err := unstructured.NestedString(item.Object, "spec", "content")
if err != nil {
return envelope, fmt.Errorf("failed to extract spec.content: %w", err)
}
if !found {
// fallback to "spec" itself
contentI, found, err := unstructured.NestedFieldNoCopy(item.Object, "spec")
if err != nil {
return envelope, fmt.Errorf("failed to extract spec: %w", err)
}
if !found {
return envelope, fmt.Errorf("spec not found in CRD")
}
contentBytes, err := stdjson.Marshal(contentI)
if err != nil {
return envelope, fmt.Errorf("failed to marshal spec: %w", err)
}
content = string(contentBytes)
}
// Set data as raw bytes
envelope.SetData([]byte(content))
return envelope, nil
}
// getOsReleaseFile returns the list of osRelease metadata from CRDs.
func (hsh *HostSensorHandler) getOsReleaseFile(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
return hsh.getCRDResources(ctx, k8shostsensor.OsReleaseFile)
}
// getKernelVersion returns the list of kernelVersion metadata from CRDs.
func (hsh *HostSensorHandler) getKernelVersion(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
return hsh.getCRDResources(ctx, k8shostsensor.KernelVersion)
}
// getLinuxSecurityHardeningStatus returns the list of LinuxSecurityHardeningStatus metadata from CRDs.
func (hsh *HostSensorHandler) getLinuxSecurityHardeningStatus(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
return hsh.getCRDResources(ctx, k8shostsensor.LinuxSecurityHardeningStatus)
}
// getOpenPortsList returns the list of open ports from CRDs.
func (hsh *HostSensorHandler) getOpenPortsList(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
return hsh.getCRDResources(ctx, k8shostsensor.OpenPortsList)
}
// getKernelVariables returns the list of Linux Kernel variables from CRDs.
func (hsh *HostSensorHandler) getKernelVariables(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
return hsh.getCRDResources(ctx, k8shostsensor.LinuxKernelVariables)
}
// getKubeletInfo returns the list of kubelet metadata from CRDs.
func (hsh *HostSensorHandler) getKubeletInfo(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
return hsh.getCRDResources(ctx, k8shostsensor.KubeletInfo)
}
// getKubeProxyInfo returns the list of kubeProxy metadata from CRDs.
func (hsh *HostSensorHandler) getKubeProxyInfo(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
return hsh.getCRDResources(ctx, k8shostsensor.KubeProxyInfo)
}
// getControlPlaneInfo returns the list of controlPlaneInfo metadata from CRDs.
func (hsh *HostSensorHandler) getControlPlaneInfo(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
return hsh.getCRDResources(ctx, k8shostsensor.ControlPlaneInfo)
}
// getCloudProviderInfo returns the list of cloudProviderInfo metadata from CRDs.
func (hsh *HostSensorHandler) getCloudProviderInfo(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
return hsh.getCRDResources(ctx, k8shostsensor.CloudProviderInfo)
}
// getCNIInfo returns the list of CNI metadata from CRDs.
func (hsh *HostSensorHandler) getCNIInfo(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
return hsh.getCRDResources(ctx, k8shostsensor.CNIInfo)
}
// hasCloudProviderInfo iterates over the []hostsensor.HostSensorDataEnvelope list to find info about the cloud provider.
//
// If information is found, it returns true; otherwise it returns false.
func hasCloudProviderInfo(cpi []hostsensor.HostSensorDataEnvelope) bool {
for index := range cpi {
if !reflect.DeepEqual(cpi[index].GetData(), stdjson.RawMessage("{}\n")) {
return true
}
}
return false
}
// CollectResources collects all required information from CRDs.
func (hsh *HostSensorHandler) CollectResources(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, map[string]apis.StatusInfo, error) {
res := make([]hostsensor.HostSensorDataEnvelope, 0)
infoMap := make(map[string]apis.StatusInfo)
logger.L().Debug("Collecting host sensor data from CRDs")
var hasCloudProvider bool
for _, toPin := range []struct {
Resource k8shostsensor.HostSensorResource
Query func(context.Context) ([]hostsensor.HostSensorDataEnvelope, error)
}{
// queries to CRDs
{
Resource: k8shostsensor.OsReleaseFile,
Query: hsh.getOsReleaseFile,
},
{
Resource: k8shostsensor.KernelVersion,
Query: hsh.getKernelVersion,
},
{
Resource: k8shostsensor.LinuxSecurityHardeningStatus,
Query: hsh.getLinuxSecurityHardeningStatus,
},
{
Resource: k8shostsensor.OpenPortsList,
Query: hsh.getOpenPortsList,
},
{
Resource: k8shostsensor.LinuxKernelVariables,
Query: hsh.getKernelVariables,
},
{
Resource: k8shostsensor.KubeletInfo,
Query: hsh.getKubeletInfo,
},
{
Resource: k8shostsensor.KubeProxyInfo,
Query: hsh.getKubeProxyInfo,
},
{
Resource: k8shostsensor.CloudProviderInfo,
Query: hsh.getCloudProviderInfo,
},
{
Resource: k8shostsensor.CNIInfo,
Query: hsh.getCNIInfo,
},
{
// ControlPlaneInfo is queried _after_ CloudProviderInfo.
Resource: k8shostsensor.ControlPlaneInfo,
Query: hsh.getControlPlaneInfo,
},
} {
k8sInfo := toPin
if k8sInfo.Resource == k8shostsensor.ControlPlaneInfo && hasCloudProvider {
// we retrieve control plane info only if we are not using a cloud provider
continue
}
kcData, err := k8sInfo.Query(ctx)
if err != nil {
addInfoToMap(k8sInfo.Resource, infoMap, err)
logger.L().Ctx(ctx).Warning("Failed to get resource from CRD",
helpers.String("resource", k8sInfo.Resource.String()),
helpers.Error(err))
}
if k8sInfo.Resource == k8shostsensor.CloudProviderInfo {
hasCloudProvider = hasCloudProviderInfo(kcData)
}
if len(kcData) > 0 {
res = append(res, kcData...)
}
}
logger.L().Debug("Done collecting information from CRDs", helpers.Int("totalResources", len(res)))
return res, infoMap, nil
}
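For orientation, here is a minimal, hypothetical sketch (my own illustration, not part of the change) of the object shape the converter above expects: an instance in the hostdata.kubescape.cloud/v1beta1 group, named after the node, whose spec.content field carries the sensed payload as a string. The node name and kernel string below are made up.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

func main() {
	// Hypothetical KernelVersion instance as the node agent would publish it:
	// the object is named after the node, and spec.content holds the raw data.
	item := unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "hostdata.kubescape.cloud/v1beta1",
		"kind":       "KernelVersion",
		"metadata":   map[string]interface{}{"name": "node-1"},
		"spec":       map[string]interface{}{"content": "6.1.0-13-amd64\n"},
	}}

	// convertCRDToEnvelope first looks for spec.content and falls back to
	// marshaling the whole spec when the field is absent.
	content, found, err := unstructured.NestedString(item.Object, "spec", "content")
	fmt.Printf("content=%q found=%v err=%v\n", content, found, err)
}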

View File

@@ -0,0 +1,122 @@
package hostsensorutils
import (
"context"
"fmt"
"github.com/kubescape/go-logger"
"github.com/kubescape/go-logger/helpers"
"github.com/kubescape/k8s-interface/k8sinterface"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic"
)
const (
// Host data CRD API group and version
hostDataGroup = "hostdata.kubescape.cloud"
hostDataVersion = "v1beta1"
)
// HostSensorHandler is a client that reads host sensor data from Kubernetes CRDs.
//
// The CRDs are created by the node-agent daemonset running on each node.
type HostSensorHandler struct {
k8sObj *k8sinterface.KubernetesApi
dynamicClient dynamic.Interface
}
// NewHostSensorHandler builds a new CRD-based host sensor handler.
func NewHostSensorHandler(k8sObj *k8sinterface.KubernetesApi, _ string) (*HostSensorHandler, error) {
if k8sObj == nil {
return nil, fmt.Errorf("nil k8s interface received")
}
config := k8sinterface.GetK8sConfig()
if config == nil {
return nil, fmt.Errorf("failed to get k8s config")
}
// prefer the protobuf content type when talking to the API server
config.AcceptContentTypes = "application/vnd.kubernetes.protobuf"
config.ContentType = "application/vnd.kubernetes.protobuf"
// Create dynamic client for CRD access
dynamicClient, err := dynamic.NewForConfig(config)
if err != nil {
return nil, fmt.Errorf("failed to create dynamic client: %w", err)
}
hsh := &HostSensorHandler{
k8sObj: k8sObj,
dynamicClient: dynamicClient,
}
// Verify we can access nodes (basic sanity check)
if nodeList, err := k8sObj.KubernetesClient.CoreV1().Nodes().List(k8sObj.Context, metav1.ListOptions{}); err != nil || len(nodeList.Items) == 0 {
if err == nil {
err = fmt.Errorf("no nodes to scan")
}
return hsh, fmt.Errorf("in NewHostSensorHandler, failed to get nodes list: %v", err)
}
return hsh, nil
}
// Init is a no-op for CRD-based implementation.
// The node-agent daemonset is expected to be already deployed and creating CRDs.
func (hsh *HostSensorHandler) Init(ctx context.Context) error {
logger.L().Info("Using CRD-based host sensor data collection (no deployment needed)")
// Verify that at least one CRD type exists
gvr := schema.GroupVersionResource{
Group: hostDataGroup,
Version: hostDataVersion,
Resource: "osreleasefiles",
}
list, err := hsh.dynamicClient.Resource(gvr).List(ctx, metav1.ListOptions{Limit: 1})
if err != nil {
logger.L().Warning("node-agent status: Failed to list OsReleaseFile CRDs - node-agent may not be deployed",
helpers.Error(err))
return fmt.Errorf("failed to verify CRD access: %w (ensure node-agent is deployed)", err)
}
if len(list.Items) == 0 {
logger.L().Warning("node-agent status: No OsReleaseFile CRDs found - node-agent may not be running or sensing yet")
} else {
logger.L().Info("node-agent status: Successfully verified CRD access", helpers.Int("osReleaseFiles", len(list.Items)))
}
return nil
}
// TearDown is a no-op for CRD-based implementation.
// CRDs are managed by the node-agent daemonset lifecycle.
func (hsh *HostSensorHandler) TearDown() error {
logger.L().Debug("CRD-based host sensor teardown (no-op)")
return nil
}
// listCRDResources is a generic function to list CRD resources and convert them to the expected format.
func (hsh *HostSensorHandler) listCRDResources(ctx context.Context, resourceName, kind string) ([]unstructured.Unstructured, error) {
gvr := schema.GroupVersionResource{
Group: hostDataGroup,
Version: hostDataVersion,
Resource: resourceName,
}
logger.L().Debug("Listing CRD resources",
helpers.String("resource", resourceName),
helpers.String("kind", kind))
list, err := hsh.dynamicClient.Resource(gvr).List(ctx, metav1.ListOptions{})
if err != nil {
return nil, fmt.Errorf("failed to list %s CRDs: %w", kind, err)
}
logger.L().Debug("Retrieved CRD resources",
helpers.String("kind", kind),
helpers.Int("count", len(list.Items)))
return list.Items, nil
}
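A rough usage sketch of the new handler follows, assuming the package lives at core/pkg/hostsensorutils and that a kubeconfig-backed k8sinterface client is available; error handling is minimal and the call sequence simply mirrors the IHostSensor interface shown further below.

package main

import (
	"context"
	"log"

	"github.com/kubescape/k8s-interface/k8sinterface"
	"github.com/kubescape/kubescape/v3/core/pkg/hostsensorutils" // assumed import path
)

func main() {
	ctx := context.Background()
	k8s := k8sinterface.NewKubernetesApi() // requires a reachable cluster / kubeconfig

	// The second argument (formerly a host-scanner YAML path) is ignored by the CRD-based handler.
	hsh, err := hostsensorutils.NewHostSensorHandler(k8s, "")
	if err != nil {
		log.Fatal(err)
	}
	// Init only verifies that the node-agent CRDs are reachable; nothing is deployed.
	if err := hsh.Init(ctx); err != nil {
		log.Fatal(err)
	}
	defer func() { _ = hsh.TearDown() }() // no-op: the CRDs are owned by the node-agent

	envelopes, statusInfo, err := hsh.CollectResources(ctx)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("collected %d host-sensor envelopes (%d resources skipped)", len(envelopes), len(statusInfo))
}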

View File

@@ -1,457 +0,0 @@
package hostsensorutils
import (
"context"
_ "embed"
"fmt"
"os"
"sync"
"time"
"github.com/kubescape/go-logger"
"github.com/kubescape/go-logger/helpers"
"github.com/kubescape/k8s-interface/k8sinterface"
"github.com/kubescape/k8s-interface/workloadinterface"
"github.com/kubescape/kubescape/v3/core/cautils"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/watch"
)
var (
//go:embed hostsensor.yaml
hostSensorYAML string
namespaceWasPresent bool
)
const portName string = "scanner"
// HostSensorHandler is a client that interacts with a host-scanner component deployed on nodes.
//
// The API exposed by the host sensor is defined here: https://github.com/kubescape/host-scanner
type HostSensorHandler struct {
hostSensorPort int32
hostSensorPodNames map[string]string //map from pod names to node names
hostSensorUnscheduledPodNames map[string]string //map from pod names to node names
k8sObj *k8sinterface.KubernetesApi
daemonSet *appsv1.DaemonSet
podListLock sync.RWMutex
gracePeriod int64
workerPool workerPool
}
// NewHostSensorHandler builds a new http client to the host-scanner API.
func NewHostSensorHandler(k8sObj *k8sinterface.KubernetesApi, hostSensorYAMLFile string) (*HostSensorHandler, error) {
if k8sObj == nil {
return nil, fmt.Errorf("nil k8s interface received")
}
if hostSensorYAMLFile != "" {
d, err := loadHostSensorFromFile(hostSensorYAMLFile)
if err != nil {
return nil, fmt.Errorf("failed to load host-scanner yaml file, reason: %w", err)
}
hostSensorYAML = d
}
hsh := &HostSensorHandler{
k8sObj: k8sObj,
hostSensorPodNames: map[string]string{},
hostSensorUnscheduledPodNames: map[string]string{},
gracePeriod: int64(15),
workerPool: newWorkerPool(),
}
// Don't deploy on a cluster with no nodes. Some cloud providers prevent the termination of K8s objects for clusters with no nodes!!!
if nodeList, err := k8sObj.KubernetesClient.CoreV1().Nodes().List(k8sObj.Context, metav1.ListOptions{}); err != nil || len(nodeList.Items) == 0 {
if err == nil {
err = fmt.Errorf("no nodes to scan")
}
return hsh, fmt.Errorf("in NewHostSensorHandler, failed to get nodes list: %v", err)
}
return hsh, nil
}
// Init deploys the host-scanner and start watching the pods on the host.
func (hsh *HostSensorHandler) Init(ctx context.Context) error {
// deploy the YAML
// store namespace + port
// store pod names
// make sure all pods are running; after X seconds, treat them as running anyway and log an error for the pods that are not running yet
logger.L().Info("Installing host scanner")
// log is used to avoid log duplication
// coming from the different host-scanner instances
log := NewLogCoupling()
cautils.StartSpinner()
defer cautils.StopSpinner()
if err := hsh.applyYAML(ctx); err != nil {
return fmt.Errorf("failed to apply host scanner YAML, reason: %v", err)
}
hsh.populatePodNamesToNodeNames(ctx, log)
if err := hsh.checkPodForEachNode(); err != nil {
return fmt.Errorf("%s: %v", failedToValidateHostSensorPodStatus, err)
}
return nil
}
// checkNamespaceWasPresent checks whether the given namespace was already present on the cluster and in the "Active" state.
// It returns true if it finds the namespace and it is active, false otherwise.
// Any error from the Kubernetes API is treated as "not present".
func (hsh *HostSensorHandler) checkNamespaceWasPresent(namespace string) bool {
ns, err := hsh.k8sObj.KubernetesClient.
CoreV1().
Namespaces().
Get(hsh.k8sObj.Context, namespace, metav1.GetOptions{})
if err != nil {
return false
}
// check also if it is in "Active" state.
if ns.Status.Phase != corev1.NamespaceActive {
return false
}
return true
}
// namespaceWasPresent returns the value of the namespaceWasPresent package variable.
func (hsh *HostSensorHandler) namespaceWasPresent() bool {
return namespaceWasPresent
}
func (hsh *HostSensorHandler) applyYAML(ctx context.Context) error {
workloads, err := cautils.ReadFile([]byte(hostSensorYAML), cautils.YAML_FILE_FORMAT)
if err != nil {
return fmt.Errorf("failed to read YAML files, reason: %v", err)
}
// Get namespace name
namespaceName := cautils.GetConfigMapNamespace()
for i := range workloads {
if workloads[i].GetKind() == "Namespace" {
namespaceName = workloads[i].GetName()
break
}
}
// check if namespace was already present on kubernetes
namespaceWasPresent = hsh.checkNamespaceWasPresent(namespaceName)
// Update workload data before applying
for i := range workloads {
w := workloadinterface.NewWorkloadObj(workloads[i].GetObject())
if w == nil {
return fmt.Errorf("invalid workload: %v", workloads[i].GetObject())
}
// set namespace in all objects
if w.GetKind() != "Namespace" {
logger.L().Debug("Setting namespace", helpers.String("kind", w.GetKind()), helpers.String("name", w.GetName()), helpers.String("namespace", namespaceName))
w.SetNamespace(namespaceName)
}
// Get container port
if w.GetKind() == "DaemonSet" {
containers, err := w.GetContainers()
if err != nil {
if erra := hsh.tearDownNamespace(namespaceName); erra != nil {
logger.L().Ctx(ctx).Warning(failedToTeardownNamespace, helpers.Error(erra))
}
return fmt.Errorf("container not found in DaemonSet: %v", err)
}
for j := range containers {
for k := range containers[j].Ports {
if containers[j].Ports[k].Name == portName {
hsh.hostSensorPort = containers[j].Ports[k].ContainerPort
}
}
}
}
// Apply workload
var newWorkload k8sinterface.IWorkload
var e error
if g, err := hsh.k8sObj.GetWorkload(w.GetNamespace(), w.GetKind(), w.GetName()); err == nil && g != nil {
newWorkload, e = hsh.k8sObj.UpdateWorkload(w)
} else {
newWorkload, e = hsh.k8sObj.CreateWorkload(w)
}
if e != nil {
if erra := hsh.tearDownNamespace(namespaceName); erra != nil {
logger.L().Ctx(ctx).Warning(failedToTeardownNamespace, helpers.Error(erra))
}
return fmt.Errorf("failed to create/update YAML, reason: %v", e)
}
// Save DaemonSet
if newWorkload.GetKind() == "DaemonSet" {
b, err := json.Marshal(newWorkload.GetObject())
if err != nil {
if erra := hsh.tearDownNamespace(namespaceName); erra != nil {
logger.L().Ctx(ctx).Warning(failedToTeardownNamespace, helpers.Error(erra))
}
return fmt.Errorf("failed to Marshal YAML of DaemonSet, reason: %v", err)
}
var ds appsv1.DaemonSet
if err := json.Unmarshal(b, &ds); err != nil {
if erra := hsh.tearDownNamespace(namespaceName); erra != nil {
logger.L().Ctx(ctx).Warning(failedToTeardownNamespace, helpers.Error(erra))
}
return fmt.Errorf("failed to Unmarshal YAML of DaemonSet, reason: %v", err)
}
hsh.daemonSet = &ds
}
}
return nil
}
func (hsh *HostSensorHandler) checkPodForEachNode() error {
deadline := time.Now().Add(time.Second * 100)
for {
nodesList, err := hsh.k8sObj.KubernetesClient.CoreV1().Nodes().List(hsh.k8sObj.Context, metav1.ListOptions{})
if err != nil {
return fmt.Errorf("in checkPodsForEveryNode, failed to get nodes list: %v", nodesList)
}
hsh.podListLock.RLock()
podsNum := len(hsh.hostSensorPodNames)
unschedPodNum := len(hsh.hostSensorUnscheduledPodNames)
hsh.podListLock.RUnlock()
if len(nodesList.Items) <= podsNum+unschedPodNum {
break
}
if time.Now().After(deadline) {
hsh.podListLock.RLock()
podsMap := hsh.hostSensorPodNames
hsh.podListLock.RUnlock()
return fmt.Errorf("host-scanner pods number (%d) differ than nodes number (%d) after deadline exceeded. Kubescape will take data only from the pods below: %v",
podsNum, len(nodesList.Items), podsMap)
}
time.Sleep(100 * time.Millisecond)
}
return nil
}
// populatePodNamesToNodeNames starts a goroutine that keeps the pod list updated
func (hsh *HostSensorHandler) populatePodNamesToNodeNames(ctx context.Context, log *LogsMap) {
go func() {
var watchRes watch.Interface
var err error
watchRes, err = hsh.k8sObj.KubernetesClient.CoreV1().Pods(hsh.daemonSet.Namespace).Watch(hsh.k8sObj.Context, metav1.ListOptions{
Watch: true,
LabelSelector: fmt.Sprintf("name=%s", hsh.daemonSet.Spec.Template.Labels["name"]),
})
if err != nil {
logger.L().Ctx(ctx).Warning(failedToWatchOverDaemonSetPods, helpers.Error(err))
}
if watchRes == nil {
logger.L().Ctx(ctx).Error("failed to watch over DaemonSet pods, will not be able to get host-scanner data")
return
}
for eve := range watchRes.ResultChan() {
pod, ok := eve.Object.(*corev1.Pod)
if !ok {
continue
}
go hsh.updatePodInListAtomic(ctx, eve.Type, pod, log)
}
}()
}
func (hsh *HostSensorHandler) updatePodInListAtomic(ctx context.Context, eventType watch.EventType, podObj *corev1.Pod, log *LogsMap) {
hsh.podListLock.Lock()
defer hsh.podListLock.Unlock()
switch eventType {
case watch.Added, watch.Modified:
if podObj.Status.Phase == corev1.PodRunning && len(podObj.Status.ContainerStatuses) > 0 &&
podObj.Status.ContainerStatuses[0].Ready {
hsh.hostSensorPodNames[podObj.ObjectMeta.Name] = podObj.Spec.NodeName
delete(hsh.hostSensorUnscheduledPodNames, podObj.ObjectMeta.Name)
} else {
if podObj.Status.Phase == corev1.PodPending && len(podObj.Status.Conditions) > 0 &&
podObj.Status.Conditions[0].Reason == corev1.PodReasonUnschedulable {
nodeName := ""
if podObj.Spec.Affinity != nil && podObj.Spec.Affinity.NodeAffinity != nil &&
podObj.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil &&
len(podObj.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms) > 0 &&
len(podObj.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchFields) > 0 &&
len(podObj.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchFields[0].Values) > 0 {
nodeName = podObj.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchFields[0].Values[0]
}
if !log.isDuplicated(oneHostSensorPodIsUnabledToSchedule) {
logger.L().Ctx(ctx).Warning(oneHostSensorPodIsUnabledToSchedule,
helpers.String("message", podObj.Status.Conditions[0].Message))
log.update(oneHostSensorPodIsUnabledToSchedule)
}
if nodeName != "" {
hsh.hostSensorUnscheduledPodNames[podObj.ObjectMeta.Name] = nodeName
}
} else {
delete(hsh.hostSensorPodNames, podObj.ObjectMeta.Name)
}
}
default:
delete(hsh.hostSensorPodNames, podObj.ObjectMeta.Name)
}
}
// tearDownHostScanner manages the host-scanner deletion.
func (hsh *HostSensorHandler) tearDownHostScanner(namespace string) error {
client := hsh.k8sObj.KubernetesClient
// delete host-scanner DaemonSet
err := client.AppsV1().
DaemonSets(namespace).
Delete(
hsh.k8sObj.Context,
hsh.daemonSet.Name,
metav1.DeleteOptions{
GracePeriodSeconds: &hsh.gracePeriod,
},
)
if err != nil {
return fmt.Errorf("failed to delete host-scanner DaemonSet: %v", err)
}
// wait for DaemonSet to be deleted
err = hsh.waitHostScannerDeleted(hsh.k8sObj.Context)
if err != nil {
return fmt.Errorf("failed to delete host-scanner DaemonSet: %v", err)
}
return nil
}
// tearDownNamespace manages the deletion of the given namespace.
// It first checks whether the namespace was already present before installing the host-scanner;
// in that case it skips the deletion.
// Otherwise, it patches the namespace to remove the finalizers,
// and finally deletes it.
func (hsh *HostSensorHandler) tearDownNamespace(namespace string) error {
// if namespace was already present on kubernetes (before installing host-scanner),
// then we shouldn't delete it.
if hsh.namespaceWasPresent() {
return nil
}
// to make it more readable we store the object client in a variable
client := hsh.k8sObj.KubernetesClient
// prepare patch json to remove finalizers from namespace
patchData := `
[
{
"op": "replace",
"path": "/metadata/finalizers",
"value": []
}
]
`
// patch namespace object removing finalizers
_, err := client.CoreV1().
Namespaces().
Patch(
hsh.k8sObj.Context,
namespace,
types.JSONPatchType,
[]byte(patchData),
metav1.PatchOptions{},
)
if err != nil {
return fmt.Errorf("failed to remove finalizers from Namespace: %v", err)
}
// delete namespace object
err = client.CoreV1().
Namespaces().
Delete(
hsh.k8sObj.Context,
namespace,
metav1.DeleteOptions{
GracePeriodSeconds: &hsh.gracePeriod,
},
)
if err != nil {
return fmt.Errorf("failed to delete %s Namespace: %v", namespace, err)
}
return nil
}
func (hsh *HostSensorHandler) TearDown() error {
namespace := hsh.GetNamespace()
// delete DaemonSet
if err := hsh.tearDownHostScanner(namespace); err != nil {
return fmt.Errorf("failed to delete host-scanner DaemonSet: %v", err)
}
// delete Namespace
if err := hsh.tearDownNamespace(namespace); err != nil {
return fmt.Errorf("failed to delete host-scanner Namespace: %v", err)
}
return nil
}
func (hsh *HostSensorHandler) GetNamespace() string {
if hsh.daemonSet == nil {
return ""
}
return hsh.daemonSet.Namespace
}
func loadHostSensorFromFile(hostSensorYAMLFile string) (string, error) {
dat, err := os.ReadFile(hostSensorYAMLFile)
if err != nil {
return "", err
}
if len(dat) == 0 {
return "", fmt.Errorf("empty file")
}
if !cautils.IsYaml(hostSensorYAMLFile) {
return "", fmt.Errorf("invalid file format")
}
return string(dat), err
}
// waitHostScannerDeleted watches for the host-scanner pods to be deleted.
// In case it fails, it returns an error.
func (hsh *HostSensorHandler) waitHostScannerDeleted(ctx context.Context) error {
labelSelector := fmt.Sprintf("name=%s", hsh.daemonSet.Name)
opts := metav1.ListOptions{
TypeMeta: metav1.TypeMeta{},
LabelSelector: labelSelector,
FieldSelector: "",
}
watcher, err := hsh.k8sObj.KubernetesClient.CoreV1().
Pods(hsh.daemonSet.Namespace).
Watch(ctx, opts)
if err != nil {
return err
}
defer watcher.Stop()
for {
select {
case event := <-watcher.ResultChan():
if event.Type == watch.Deleted {
return nil
}
case <-ctx.Done():
return nil
}
}
}

View File

@@ -1,232 +0,0 @@
package hostsensorutils
import (
"context"
"os"
"path/filepath"
"testing"
"github.com/kubescape/kubescape/v3/internal/testutils"
"github.com/kubescape/opa-utils/objectsenvelopes/hostsensor"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func TestHostSensorHandler(t *testing.T) {
t.Parallel()
ctx := context.Background()
t.Run("with default manifest", func(t *testing.T) {
t.Run("should build host sensor", func(t *testing.T) {
k8s := NewKubernetesApiMock(WithNode(mockNode1()), WithPod(mockPod1()), WithPod(mockPod2()), WithResponses(mockResponses()))
h, err := NewHostSensorHandler(k8s, "")
require.NoError(t, err)
require.NotNil(t, h)
t.Run("should initialize host sensor", func(t *testing.T) {
require.NoError(t, h.Init(ctx))
w, err := k8s.KubernetesClient.CoreV1().Pods(h.daemonSet.Namespace).Watch(ctx, metav1.ListOptions{})
require.NoError(t, err)
w.Stop()
require.Len(t, h.hostSensorPodNames, 2)
})
t.Run("should return namespace", func(t *testing.T) {
require.Equal(t, "kubescape", h.GetNamespace())
})
t.Run("should collect resources from pods - happy path", func(t *testing.T) {
envelope, status, err := h.CollectResources(ctx)
require.NoError(t, err)
require.Len(t, envelope, 9*2) // has cloud provider, no control plane requested
require.Len(t, status, 0)
foundControl, foundProvider := false, false
for _, sensed := range envelope {
if sensed.Kind == ControlPlaneInfo.String() {
foundControl = true
}
if sensed.Kind == CloudProviderInfo.String() {
foundProvider = hasCloudProviderInfo([]hostsensor.HostSensorDataEnvelope{sensed})
}
}
require.False(t, foundControl)
require.True(t, foundProvider)
})
})
t.Run("should build host sensor without cloud provider", func(t *testing.T) {
k8s := NewKubernetesApiMock(WithNode(mockNode1()), WithPod(mockPod1()), WithPod(mockPod2()), WithResponses(mockResponsesNoCloudProvider()))
h, err := NewHostSensorHandler(k8s, "")
require.NoError(t, err)
require.NotNil(t, h)
t.Run("should initialize host sensor", func(t *testing.T) {
require.NoError(t, h.Init(ctx))
w, err := k8s.KubernetesClient.CoreV1().Pods(h.daemonSet.Namespace).Watch(ctx, metav1.ListOptions{})
require.NoError(t, err)
w.Stop()
require.Len(t, h.hostSensorPodNames, 2)
})
t.Run("should get version", func(t *testing.T) {
version, err := h.getVersion()
require.NoError(t, err)
require.Equal(t, "v1.0.45", version)
})
t.Run("ForwardToPod is a stub, not implemented", func(t *testing.T) {
resp, err := h.forwardToPod("pod1", "/version")
require.Contains(t, err.Error(), "not implemented")
require.Nil(t, resp)
})
t.Run("should collect resources from pods", func(t *testing.T) {
envelope, status, err := h.CollectResources(ctx)
require.NoError(t, err)
require.Len(t, envelope, 10*2) // has empty cloud provider, has control plane info
require.Len(t, status, 0)
foundControl, foundProvider := false, false
for _, sensed := range envelope {
if sensed.Kind == ControlPlaneInfo.String() {
foundControl = true
}
if sensed.Kind == CloudProviderInfo.String() {
foundProvider = hasCloudProviderInfo([]hostsensor.HostSensorDataEnvelope{sensed})
}
}
require.True(t, foundControl)
require.False(t, foundProvider)
})
})
t.Run("should build host sensor with error in response from /version", func(t *testing.T) {
k8s := NewKubernetesApiMock(WithNode(mockNode1()),
WithPod(mockPod1()),
WithPod(mockPod2()),
WithResponses(mockResponsesNoCloudProvider()),
WithErrorResponse(RestURL{"http", "pod1", "7888", "/version"}), // this endpoint will return an error from this pod
WithErrorResponse(RestURL{"http", "pod2", "7888", "/version"}), // this endpoint will return an error from this pod
)
h, err := NewHostSensorHandler(k8s, "")
require.NoError(t, err)
require.NotNil(t, h)
t.Run("should initialize host sensor", func(t *testing.T) {
require.NoError(t, h.Init(ctx))
w, err := k8s.KubernetesClient.CoreV1().Pods(h.daemonSet.Namespace).Watch(ctx, metav1.ListOptions{})
require.NoError(t, err)
w.Stop()
require.Len(t, h.hostSensorPodNames, 2)
})
t.Run("should NOT be able to get version", func(t *testing.T) {
// NOTE: GetVersion might be successful if only one pod responds successfully.
// In order to ensure an error, we need ALL pods to error.
_, err := h.getVersion()
require.Error(t, err)
require.Contains(t, err.Error(), "mock")
})
})
t.Run("should FAIL to build host sensor because there are no nodes", func(t *testing.T) {
h, err := NewHostSensorHandler(NewKubernetesApiMock(), "")
require.Error(t, err)
require.NotNil(t, h)
require.Contains(t, err.Error(), "no nodes to scan")
})
})
t.Run("should NOT build host sensor with nil k8s API", func(t *testing.T) {
h, err := NewHostSensorHandler(nil, "")
require.Error(t, err)
require.Nil(t, h)
})
t.Run("with manifest from YAML file", func(t *testing.T) {
t.Run("should build host sensor", func(t *testing.T) {
k8s := NewKubernetesApiMock(WithNode(mockNode1()), WithPod(mockPod1()), WithPod(mockPod2()), WithResponses(mockResponses()))
h, err := NewHostSensorHandler(k8s, filepath.Join(testutils.CurrentDir(), "hostsensor.yaml"))
require.NoError(t, err)
require.NotNil(t, h)
t.Run("should initialize host sensor", func(t *testing.T) {
require.NoError(t, h.Init(ctx))
w, err := k8s.KubernetesClient.CoreV1().Pods(h.daemonSet.Namespace).Watch(ctx, metav1.ListOptions{})
require.NoError(t, err)
w.Stop()
require.Len(t, h.hostSensorPodNames, 2)
})
})
})
t.Run("with manifest from invalid YAML file", func(t *testing.T) {
t.Run("should NOT build host sensor", func(t *testing.T) {
var invalid string
t.Run("should create temp file", func(t *testing.T) {
file, err := os.CreateTemp("", "*.yaml")
require.NoError(t, err)
t.Cleanup(func() {
_ = os.Remove(file.Name())
})
_, err = file.Write([]byte(" x: 1"))
require.NoError(t, err)
invalid = file.Name()
require.NoError(t, file.Close())
})
k8s := NewKubernetesApiMock(WithNode(mockNode1()), WithPod(mockPod1()), WithPod(mockPod2()), WithResponses(mockResponses()))
_, err := NewHostSensorHandler(k8s, filepath.Join(testutils.CurrentDir(), invalid))
require.Error(t, err)
})
})
// TODO(test coverage): the following cases are not covered by tests yet.
//
// * applyYAML fails
// * checkPodForEachNode fails, or times out
// * non-active namespace
// * getPodList fails when GetVersion
// * getPodList fails when CollectResources
// * error cases that trigger a namespace tear-down
// * watch pods with a Delete event
// * explicit TearDown()
//
// Notice that the package doesn't currently pass tests with the race detector enabled.
}
func TestLoadHostSensorFromFile_NoError(t *testing.T) {
content, err := loadHostSensorFromFile("testdata/hostsensor.yaml")
assert.NotEqual(t, "", content)
assert.Nil(t, err)
}
func TestLoadHostSensorFromFile_Error(t *testing.T) {
content, err := loadHostSensorFromFile("testdata/hostsensor_invalid.yaml")
assert.Equal(t, "", content)
assert.NotNil(t, err)
content, err = loadHostSensorFromFile("testdata/empty_hostsensor.yaml")
assert.Equal(t, "", content)
assert.NotNil(t, err)
content, err = loadHostSensorFromFile("testdata/notAYamlFile.txt")
assert.Equal(t, "", content)
assert.NotNil(t, err)
}

View File

@@ -1,293 +0,0 @@
package hostsensorutils
import (
"context"
stdjson "encoding/json"
"errors"
"fmt"
"reflect"
"strings"
"sync"
"github.com/kubescape/go-logger"
"github.com/kubescape/k8s-interface/k8sinterface"
"github.com/kubescape/opa-utils/objectsenvelopes/hostsensor"
"github.com/kubescape/opa-utils/reporthandling/apis"
)
// getPodList clones the internal list of pods being watched, as a map from pod names to node names.
func (hsh *HostSensorHandler) getPodList() map[string]string {
hsh.podListLock.RLock()
res := make(map[string]string, len(hsh.hostSensorPodNames))
for k, v := range hsh.hostSensorPodNames {
res[k] = v
}
hsh.podListLock.RUnlock()
return res
}
// httpGetToPod sends the request to a pod using the HostSensorPort.
func (hsh *HostSensorHandler) httpGetToPod(podName, path string) ([]byte, error) {
restProxy := hsh.k8sObj.KubernetesClient.CoreV1().Pods(hsh.daemonSet.Namespace).ProxyGet("http", podName, fmt.Sprintf("%d", hsh.hostSensorPort), path, map[string]string{})
return restProxy.DoRaw(hsh.k8sObj.Context)
}
func (hsh *HostSensorHandler) getResourcesFromPod(podName, nodeName string, resourceKind scannerResource, path string) (hostsensor.HostSensorDataEnvelope, error) {
// send the request and pack the response as an hostSensorDataEnvelope
resBytes, err := hsh.httpGetToPod(podName, path)
if err != nil {
return hostsensor.HostSensorDataEnvelope{}, err
}
hostSensorDataEnvelope := hostsensor.HostSensorDataEnvelope{}
hostSensorDataEnvelope.SetApiVersion(k8sinterface.JoinGroupVersion(hostsensor.GroupHostSensor, hostsensor.Version))
hostSensorDataEnvelope.SetKind(resourceKind.String())
hostSensorDataEnvelope.SetName(nodeName)
hostSensorDataEnvelope.SetData(resBytes)
return hostSensorDataEnvelope, nil
}
// forwardToPod is currently not implemented.
func (hsh *HostSensorHandler) forwardToPod(podName, path string) ([]byte, error) {
// NOT IN USE:
// ---
// spawn port forwarding
// req := hsh.k8sObj.KubernetesClient.CoreV1().RESTClient().Post()
// req = req.Name(podName)
// req = req.Namespace(hsh.DaemonSet.Namespace)
// req = req.Resource("pods")
// req = req.SubResource("portforward")
// ----
// https://github.com/gianarb/kube-port-forward
// fullPath := fmt.Sprintf("/api/v1/namespaces/%s/pods/%s/portforward",
// hsh.DaemonSet.Namespace, podName)
// transport, upgrader, err := spdy.RoundTripperFor(hsh.k8sObj.KubernetesClient.config)
// if err != nil {
// return nil, err
// }
// hostIP := strings.TrimLeft(req.RestConfig.Host, "htps:/")
// dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, http.MethodPost, &url.URL{Scheme: "http", Path: path, Host: hostIP})
return nil, errors.New("not implemented")
}
// sendAllPodsHTTPGETRequest fills the raw bytes of the response and the node name in the envelope, but not the GroupVersionKind,
// so the caller is responsible for converting the raw data to structured data and adding the GroupVersionKind details.
//
// The function produces a worker-pool with a fixed number of workers.
//
// For each node the request is pushed to the jobs channel, the worker sends the request and pushes the result to the result channel.
// When all workers have finished, the function returns a list of results
func (hsh *HostSensorHandler) sendAllPodsHTTPGETRequest(ctx context.Context, path string, requestKind scannerResource) ([]hostsensor.HostSensorDataEnvelope, error) {
podList := hsh.getPodList()
res := make([]hostsensor.HostSensorDataEnvelope, 0, len(podList))
var wg sync.WaitGroup
// initialization of the channels
hsh.workerPool.init(len(podList))
// log is used to avoid log duplication
// coming from the different host-scanner instances
log := NewLogCoupling()
hsh.workerPool.hostSensorApplyJobs(podList, path, requestKind)
hsh.workerPool.hostSensorGetResults(&res)
hsh.workerPool.createWorkerPool(ctx, hsh, &wg, log)
hsh.workerPool.waitForDone(&wg)
return res, nil
}
// getVersion returns the version of the deployed host scanner.
//
// NOTE: we pick the version from the first responding pod.
func (hsh *HostSensorHandler) getVersion() (string, error) {
// loop over pods and port-forward it to each of them
podList := hsh.getPodList()
// initialization of the channels
hsh.workerPool.init(len(podList))
hsh.workerPool.hostSensorApplyJobs(podList, "/version", "version")
for job := range hsh.workerPool.jobs {
resBytes, err := hsh.httpGetToPod(job.podName, job.path)
if err != nil {
return "", err
} else {
version := strings.ReplaceAll(string(resBytes), "\"", "")
version = strings.ReplaceAll(version, "\n", "")
return version, nil
}
}
return "", nil
}
// getKernelVariables returns the list of Linux Kernel variables.
func (hsh *HostSensorHandler) getKernelVariables(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
// loop over pods and port-forward it to each of them
return hsh.sendAllPodsHTTPGETRequest(ctx, "/LinuxKernelVariables", LinuxKernelVariables)
}
// getOpenPortsList returns the list of open ports.
func (hsh *HostSensorHandler) getOpenPortsList(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
// loop over pods and port-forward it to each of them
return hsh.sendAllPodsHTTPGETRequest(ctx, "/openedPorts", OpenPortsList)
}
// getLinuxSecurityHardeningStatus returns the list of LinuxSecurityHardeningStatus metadata.
func (hsh *HostSensorHandler) getLinuxSecurityHardeningStatus(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
// loop over pods and port-forward it to each of them
return hsh.sendAllPodsHTTPGETRequest(ctx, "/linuxSecurityHardening", LinuxSecurityHardeningStatus)
}
// getKubeletInfo returns the list of kubelet metadata.
func (hsh *HostSensorHandler) getKubeletInfo(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
// loop over pods and port-forward it to each of them
return hsh.sendAllPodsHTTPGETRequest(ctx, "/kubeletInfo", KubeletInfo)
}
// getKubeProxyInfo returns the list of kubeProxy metadata.
func (hsh *HostSensorHandler) getKubeProxyInfo(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
// loop over pods and port-forward it to each of them
return hsh.sendAllPodsHTTPGETRequest(ctx, "/kubeProxyInfo", KubeProxyInfo)
}
// getControlPlaneInfo returns the list of controlPlaneInfo metadata
func (hsh *HostSensorHandler) getControlPlaneInfo(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
// loop over pods and port-forward it to each of them
return hsh.sendAllPodsHTTPGETRequest(ctx, "/controlPlaneInfo", ControlPlaneInfo)
}
// getCloudProviderInfo returns the list of cloudProviderInfo metadata.
func (hsh *HostSensorHandler) getCloudProviderInfo(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
// loop over pods and port-forward it to each of them
return hsh.sendAllPodsHTTPGETRequest(ctx, "/cloudProviderInfo", CloudProviderInfo)
}
// getCNIInfo returns the list of CNI metadata
func (hsh *HostSensorHandler) getCNIInfo(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
// loop over pods and port-forward it to each of them
return hsh.sendAllPodsHTTPGETRequest(ctx, "/CNIInfo", CNIInfo)
}
// getKernelVersion returns the list of kernelVersion metadata.
func (hsh *HostSensorHandler) getKernelVersion(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
// loop over pods and port-forward it to each of them
return hsh.sendAllPodsHTTPGETRequest(ctx, "/kernelVersion", "KernelVersion")
}
// getOsReleaseFile returns the list of osRelease metadata.
func (hsh *HostSensorHandler) getOsReleaseFile(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
// loop over pods and port-forward it to each of them
return hsh.sendAllPodsHTTPGETRequest(ctx, "/osRelease", "OsReleaseFile")
}
// hasCloudProviderInfo iterates over the []hostsensor.HostSensorDataEnvelope list to find info about the cloud provider.
//
// If information is found, it returns true; otherwise it returns false.
func hasCloudProviderInfo(cpi []hostsensor.HostSensorDataEnvelope) bool {
for index := range cpi {
if !reflect.DeepEqual(cpi[index].GetData(), stdjson.RawMessage("{}\n")) {
return true
}
}
return false
}
// CollectResources collects all required information about all the pods for this host.
func (hsh *HostSensorHandler) CollectResources(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, map[string]apis.StatusInfo, error) {
res := make([]hostsensor.HostSensorDataEnvelope, 0)
infoMap := make(map[string]apis.StatusInfo)
if hsh.daemonSet == nil {
return res, nil, nil
}
logger.L().Debug("Accessing host scanner")
version, err := hsh.getVersion()
if err != nil {
logger.L().Ctx(ctx).Warning(err.Error())
}
if len(version) > 0 {
logger.L().Info("Host scanner version : " + version)
} else {
logger.L().Info("Unknown host scanner version")
}
var hasCloudProvider bool
for _, toPin := range []struct {
Resource scannerResource
Query func(context.Context) ([]hostsensor.HostSensorDataEnvelope, error)
}{
// queries to the deployed host-scanner
{
Resource: OsReleaseFile,
Query: hsh.getOsReleaseFile,
},
{
Resource: KernelVersion,
Query: hsh.getKernelVersion,
},
{
Resource: LinuxSecurityHardeningStatus,
Query: hsh.getLinuxSecurityHardeningStatus,
},
{
Resource: OpenPortsList,
Query: hsh.getOpenPortsList,
},
{
Resource: LinuxKernelVariables,
Query: hsh.getKernelVariables,
},
{
Resource: KubeletInfo,
Query: hsh.getKubeletInfo,
},
{
Resource: KubeProxyInfo,
Query: hsh.getKubeProxyInfo,
},
{
Resource: CloudProviderInfo,
Query: hsh.getCloudProviderInfo,
},
{
Resource: CNIInfo,
Query: hsh.getCNIInfo,
},
{
// ControlPlaneInfo is queried _after_ CloudProviderInfo.
Resource: ControlPlaneInfo,
Query: hsh.getControlPlaneInfo,
},
} {
k8sInfo := toPin
if k8sInfo.Resource == ControlPlaneInfo && hasCloudProvider {
// we retrieve control plane info only if we are not using a cloud provider
continue
}
kcData, err := k8sInfo.Query(ctx)
if err != nil {
addInfoToMap(k8sInfo.Resource, infoMap, err)
logger.L().Ctx(ctx).Warning(err.Error())
}
if k8sInfo.Resource == CloudProviderInfo {
hasCloudProvider = hasCloudProviderInfo(kcData)
}
if len(kcData) > 0 {
res = append(res, kcData...)
}
}
logger.L().Debug("Done reading information from host scanner")
return res, infoMap, nil
}

View File

@@ -11,5 +11,4 @@ type IHostSensor interface {
Init(ctx context.Context) error
TearDown() error
CollectResources(context.Context) ([]hostsensor.HostSensorDataEnvelope, map[string]apis.StatusInfo, error)
- GetNamespace() string
}

View File

@@ -27,7 +27,3 @@ func (hshm *HostSensorHandlerMock) TearDown() error {
func (hshm *HostSensorHandlerMock) CollectResources(_ context.Context) ([]hostsensor.HostSensorDataEnvelope, map[string]apis.StatusInfo, error) {
return []hostsensor.HostSensorDataEnvelope{}, nil, nil
}
- func (hshm *HostSensorHandlerMock) GetNamespace() string {
- return ""
- }

View File

@@ -1,98 +0,0 @@
package hostsensorutils
import (
"context"
"sync"
"github.com/kubescape/go-logger"
"github.com/kubescape/go-logger/helpers"
"github.com/kubescape/opa-utils/objectsenvelopes/hostsensor"
)
const noOfWorkers int = 10
type job struct {
podName string
nodeName string
requestKind scannerResource
path string
}
type workerPool struct {
jobs chan job
results chan hostsensor.HostSensorDataEnvelope
done chan bool
noOfWorkers int
}
func newWorkerPool() workerPool {
wp := workerPool{}
wp.noOfWorkers = noOfWorkers
wp.init()
return wp
}
func (wp *workerPool) init(noOfPods ...int) {
if len(noOfPods) > 0 && noOfPods[0] < noOfWorkers {
wp.noOfWorkers = noOfPods[0]
}
// init the channels
wp.jobs = make(chan job, noOfWorkers)
wp.results = make(chan hostsensor.HostSensorDataEnvelope, noOfWorkers)
wp.done = make(chan bool)
}
// The worker takes a job out of the chan, executes the request, and pushes the result to the results chan
func (wp *workerPool) hostSensorWorker(ctx context.Context, hsh *HostSensorHandler, wg *sync.WaitGroup, log *LogsMap) {
defer wg.Done()
for job := range wp.jobs {
hostSensorDataEnvelope, err := hsh.getResourcesFromPod(job.podName, job.nodeName, job.requestKind, job.path)
if err != nil && !log.isDuplicated(failedToGetData) {
logger.L().Ctx(ctx).Warning(failedToGetData, helpers.String("path", job.path), helpers.Error(err))
log.update(failedToGetData)
continue
}
wp.results <- hostSensorDataEnvelope
}
}
func (wp *workerPool) createWorkerPool(ctx context.Context, hsh *HostSensorHandler, wg *sync.WaitGroup, log *LogsMap) {
for i := 0; i < noOfWorkers; i++ {
wg.Add(1)
go wp.hostSensorWorker(ctx, hsh, wg, log)
}
}
func (wp *workerPool) waitForDone(wg *sync.WaitGroup) {
// Waiting for workers to finish
wg.Wait()
close(wp.results)
// Waiting for the results to be processed
<-wp.done
}
func (wp *workerPool) hostSensorGetResults(result *[]hostsensor.HostSensorDataEnvelope) {
go func() {
for res := range wp.results {
*result = append(*result, res)
}
wp.done <- true
}()
}
func (wp *workerPool) hostSensorApplyJobs(podList map[string]string, path string, requestKind scannerResource) {
go func() {
for podName, nodeName := range podList {
thisJob := job{
podName: podName,
nodeName: nodeName,
requestKind: requestKind,
path: path,
}
wp.jobs <- thisJob
}
close(wp.jobs)
}()
}

View File

@@ -1,16 +0,0 @@
package hostsensorutils
import (
"testing"
"github.com/stretchr/testify/assert"
)
// Initializes a workerPool struct with default values and returns it
func TestNewWorkerPoolDefaultValues(t *testing.T) {
wp := newWorkerPool()
assert.Equal(t, noOfWorkers, wp.noOfWorkers)
assert.NotNil(t, wp.jobs)
assert.NotNil(t, wp.results)
assert.NotNil(t, wp.done)
}

View File

@@ -1,74 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
labels:
app: kubescape-host-scanner
k8s-app: kubescape-host-scanner
kubernetes.io/metadata.name: kubescape-host-scanner
tier: kubescape-host-scanner-control-plane
name: kubescape
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: host-scanner
namespace: kubescape
labels:
app: host-scanner
k8s-app: kubescape-host-scanner
otel: enabled
spec:
selector:
matchLabels:
name: host-scanner
template:
metadata:
labels:
name: host-scanner
spec:
tolerations:
# this toleration is to have the DaemonSet runnable on all nodes (including masters)
# remove it if your masters can't run pods
- operator: Exists
containers:
- name: host-sensor
image: quay.io/kubescape/host-scanner:v1.0.61
securityContext:
allowPrivilegeEscalation: true
privileged: true
readOnlyRootFilesystem: true
ports:
- name: scanner # Do not change port name
containerPort: 7888
protocol: TCP
resources:
limits:
cpu: 0.1m
memory: 200Mi
requests:
cpu: 1m
memory: 200Mi
volumeMounts:
- mountPath: /host_fs
name: host-filesystem
startupProbe:
httpGet:
path: /readyz
port: 7888
failureThreshold: 30
periodSeconds: 1
livenessProbe:
httpGet:
path: /healthz
port: 7888
periodSeconds: 10
terminationGracePeriodSeconds: 120
dnsPolicy: ClusterFirstWithHostNet
automountServiceAccountToken: false
volumes:
- hostPath:
path: /
type: Directory
name: host-filesystem
hostPID: true
hostIPC: true

View File

@@ -1 +0,0 @@
Kubescape is Awesome!

View File

@@ -1,57 +1,13 @@
package hostsensorutils
import (
+ "github.com/kubescape/k8s-interface/hostsensor"
"github.com/kubescape/k8s-interface/k8sinterface"
"github.com/kubescape/opa-utils/reporthandling/apis"
)
- // scannerResource is the enumerated type listing all resources from the host-scanner.
- type scannerResource string
- const (
- // host-scanner resources
- KubeletConfiguration scannerResource = "KubeletConfiguration"
- OsReleaseFile scannerResource = "OsReleaseFile"
- KernelVersion scannerResource = "KernelVersion"
- LinuxSecurityHardeningStatus scannerResource = "LinuxSecurityHardeningStatus"
- OpenPortsList scannerResource = "OpenPortsList"
- LinuxKernelVariables scannerResource = "LinuxKernelVariables"
- KubeletCommandLine scannerResource = "KubeletCommandLine"
- KubeletInfo scannerResource = "KubeletInfo"
- KubeProxyInfo scannerResource = "KubeProxyInfo"
- ControlPlaneInfo scannerResource = "ControlPlaneInfo"
- CloudProviderInfo scannerResource = "CloudProviderInfo"
- CNIInfo scannerResource = "CNIInfo"
- )
- func mapHostSensorResourceToApiGroup(r scannerResource) string {
- switch r {
- case
- KubeletConfiguration,
- OsReleaseFile,
- KubeletCommandLine,
- KernelVersion,
- LinuxSecurityHardeningStatus,
- OpenPortsList,
- LinuxKernelVariables,
- KubeletInfo,
- KubeProxyInfo,
- ControlPlaneInfo,
- CloudProviderInfo,
- CNIInfo:
- return "hostdata.kubescape.cloud/v1beta0"
- default:
- return ""
- }
- }
- func (r scannerResource) String() string {
- return string(r)
- }
- func addInfoToMap(resource scannerResource, infoMap map[string]apis.StatusInfo, err error) {
- group, version := k8sinterface.SplitApiVersion(mapHostSensorResourceToApiGroup(resource))
+ func addInfoToMap(resource hostsensor.HostSensorResource, infoMap map[string]apis.StatusInfo, err error) {
+ group, version := k8sinterface.SplitApiVersion(hostsensor.MapHostSensorResourceToApiGroup(resource))
r := k8sinterface.JoinResourceTriplets(group, version, resource.String())
infoMap[r] = apis.StatusInfo{
InnerStatus: apis.StatusSkipped,

View File

@@ -5,6 +5,7 @@ import (
"fmt" "fmt"
"testing" "testing"
"github.com/kubescape/k8s-interface/hostsensor"
"github.com/kubescape/opa-utils/reporthandling/apis" "github.com/kubescape/opa-utils/reporthandling/apis"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
@@ -20,12 +21,12 @@ func TestAddInfoToMap(t *testing.T) {
testErr := errors.New("test error")
for _, toPin := range []struct {
- Resource scannerResource
+ Resource hostsensor.HostSensorResource
Err error
Expected map[string]apis.StatusInfo
}{
{
- Resource: KubeletConfiguration,
+ Resource: hostsensor.KubeletConfiguration,
Err: testErr,
Expected: map[string]apis.StatusInfo{
"hostdata.kubescape.cloud/v1beta0/KubeletConfiguration": {
@@ -35,7 +36,7 @@ func TestAddInfoToMap(t *testing.T) {
},
},
{
- Resource: CNIInfo,
+ Resource: hostsensor.CNIInfo,
Err: testErr,
Expected: map[string]apis.StatusInfo{
"hostdata.kubescape.cloud/v1beta0/CNIInfo": {
@@ -45,7 +46,7 @@ func TestAddInfoToMap(t *testing.T) {
},
},
{
- Resource: scannerResource("invalid"),
+ Resource: hostsensor.HostSensorResource("invalid"),
Err: testErr,
Expected: map[string]apis.StatusInfo{
"//invalid": { // no group, no version
@@ -72,55 +73,55 @@ func TestMapHostSensorResourceToApiGroup(t *testing.T) {
url := "hostdata.kubescape.cloud/v1beta0" url := "hostdata.kubescape.cloud/v1beta0"
tests := []struct { tests := []struct {
resource scannerResource resource hostsensor.HostSensorResource
want string want string
}{ }{
{ {
resource: KubeletConfiguration, resource: hostsensor.KubeletConfiguration,
want: url, want: url,
}, },
{ {
resource: OsReleaseFile, resource: hostsensor.OsReleaseFile,
want: url, want: url,
}, },
{ {
resource: KubeletCommandLine, resource: hostsensor.KubeletCommandLine,
want: url, want: url,
}, },
{ {
resource: KernelVersion, resource: hostsensor.KernelVersion,
want: url, want: url,
}, },
{ {
resource: LinuxSecurityHardeningStatus, resource: hostsensor.LinuxSecurityHardeningStatus,
want: url, want: url,
}, },
{ {
resource: OpenPortsList, resource: hostsensor.OpenPortsList,
want: url, want: url,
}, },
{ {
resource: LinuxKernelVariables, resource: hostsensor.LinuxKernelVariables,
want: url, want: url,
}, },
{ {
resource: KubeletInfo, resource: hostsensor.KubeletInfo,
want: url, want: url,
}, },
{ {
resource: KubeProxyInfo, resource: hostsensor.KubeProxyInfo,
want: url, want: url,
}, },
{ {
resource: ControlPlaneInfo, resource: hostsensor.ControlPlaneInfo,
want: url, want: url,
}, },
{ {
resource: CloudProviderInfo, resource: hostsensor.CloudProviderInfo,
want: url, want: url,
}, },
{ {
resource: CNIInfo, resource: hostsensor.CNIInfo,
want: url, want: url,
}, },
{ {
@@ -131,7 +132,7 @@ func TestMapHostSensorResourceToApiGroup(t *testing.T) {
for _, tt := range tests {
t.Run(tt.want, func(t *testing.T) {
- assert.Equal(t, tt.want, mapHostSensorResourceToApiGroup(tt.resource))
+ assert.Equal(t, tt.want, hostsensor.MapHostSensorResourceToApiGroup(tt.resource))
})
}
}

go.mod (4 changed lines)
View File

@@ -9,7 +9,7 @@ require (
github.com/anchore/stereoscope v0.1.9
github.com/anchore/syft v1.32.0
github.com/anubhav06/copa-grype v1.0.3-alpha.1
- github.com/armosec/armoapi-go v0.0.562
+ github.com/armosec/armoapi-go v0.0.667
github.com/armosec/utils-go v0.0.58
github.com/armosec/utils-k8s-go v0.0.30
github.com/briandowns/spinner v1.23.2
@@ -31,7 +31,7 @@ require (
github.com/kubescape/backend v0.0.20
github.com/kubescape/go-git-url v0.0.31
github.com/kubescape/go-logger v0.0.25
- github.com/kubescape/k8s-interface v0.0.195
+ github.com/kubescape/k8s-interface v0.0.202
github.com/kubescape/opa-utils v0.0.288
github.com/kubescape/rbac-utils v0.0.21-0.20230806101615-07e36f555520
github.com/kubescape/regolibrary/v2 v2.0.1

go.sum (8 changed lines)

@@ -874,8 +874,8 @@ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj
github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
-github.com/armosec/armoapi-go v0.0.562 h1:Ks8XHfD1WgqKGriSC/XVgBKQf80TNORGQp8De2EFj5g=
-github.com/armosec/armoapi-go v0.0.562/go.mod h1:/j4fBpolPI6U2PndLDlfjBsf0aRvcp4SQwra4LMOaz4=
+github.com/armosec/armoapi-go v0.0.667 h1:LrFowKvthnL676Gx+hjhvqP4pQ2+CjykFO9SdIYDc/c=
+github.com/armosec/armoapi-go v0.0.667/go.mod h1:9jAH0g8ZsryhiBDd/aNMX4+n10bGwTx/doWCyyjSxts=
github.com/armosec/gojay v1.2.17 h1:VSkLBQzD1c2V+FMtlGFKqWXNsdNvIKygTKJI9ysY8eM=
github.com/armosec/gojay v1.2.17/go.mod h1:vuvX3DlY0nbVrJ0qCklSS733AWMoQboq3cFyuQW9ybc=
github.com/armosec/utils-go v0.0.58 h1:g9RnRkxZAmzTfPe2ruMo2OXSYLwVSegQSkSavOfmaIE=
@@ -1791,8 +1791,8 @@ github.com/kubescape/go-git-url v0.0.31 h1:VZnvtdGLVc42cQaR7llQeGZz0PnOxcs+eDig2
github.com/kubescape/go-git-url v0.0.31/go.mod h1:3ddc1HEflms1vMhD9owt/3FBES070UaYTUarcjx8jDk=
github.com/kubescape/go-logger v0.0.25 h1:Bi6F0856LOlvjrbSKD+ZtKKzbfRXDifhVCjK8s3kI6U=
github.com/kubescape/go-logger v0.0.25/go.mod h1:lk+R5/lAVJo4AgD4eYUJJfVTHf7ZChS73X1MFFbeInY=
-github.com/kubescape/k8s-interface v0.0.195 h1:pJ1PT3x3fd1WatLjyZbKAfE64PWtEbvxiFjOBKSBwuU=
-github.com/kubescape/k8s-interface v0.0.195/go.mod h1:j9snZbH+RxOaa1yG/bWgTClj90q7To0rGgQepxy4b+k=
+github.com/kubescape/k8s-interface v0.0.202 h1:yu9x+07crFQAgrBatFFU2WuuxMJfHUMHVuCzuHE9Q4M=
+github.com/kubescape/k8s-interface v0.0.202/go.mod h1:d4NVhL81bVXe8yEXlkT4ZHrt3iEppEIN39b8N1oXm5s=
github.com/kubescape/opa-utils v0.0.288 h1:X6kebUaVrM/fZt+XmeRw1mVJYkIrkyBdcbmjBs66gSc=
github.com/kubescape/opa-utils v0.0.288/go.mod h1:9ZmBd4xni0OLffuvcp4fKMmBo/glvgbwkCY5zggIKSw=
github.com/kubescape/rbac-utils v0.0.21-0.20230806101615-07e36f555520 h1:SqlwF8G+oFazeYmZQKoPczLEflBQpwpHCU8DoLLyfj8=

go.mod (second module)

@@ -5,7 +5,7 @@ go 1.25.5
replace github.com/kubescape/kubescape/v3 => ../
require (
-	github.com/armosec/armoapi-go v0.0.562
+	github.com/armosec/armoapi-go v0.0.667
	github.com/armosec/utils-go v0.0.58
	github.com/armosec/utils-k8s-go v0.0.30
	github.com/go-openapi/runtime v0.29.2
@@ -14,7 +14,7 @@ require (
	github.com/gorilla/schema v1.4.1
	github.com/kubescape/backend v0.0.20
	github.com/kubescape/go-logger v0.0.25
-	github.com/kubescape/k8s-interface v0.0.195
+	github.com/kubescape/k8s-interface v0.0.202
	github.com/kubescape/kubescape/v3 v3.0.4
	github.com/kubescape/opa-utils v0.0.288
	github.com/kubescape/storage v0.0.184

go.sum (second module)

@@ -876,8 +876,8 @@ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj
github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
-github.com/armosec/armoapi-go v0.0.562 h1:Ks8XHfD1WgqKGriSC/XVgBKQf80TNORGQp8De2EFj5g=
-github.com/armosec/armoapi-go v0.0.562/go.mod h1:/j4fBpolPI6U2PndLDlfjBsf0aRvcp4SQwra4LMOaz4=
+github.com/armosec/armoapi-go v0.0.667 h1:LrFowKvthnL676Gx+hjhvqP4pQ2+CjykFO9SdIYDc/c=
+github.com/armosec/armoapi-go v0.0.667/go.mod h1:9jAH0g8ZsryhiBDd/aNMX4+n10bGwTx/doWCyyjSxts=
github.com/armosec/gojay v1.2.17 h1:VSkLBQzD1c2V+FMtlGFKqWXNsdNvIKygTKJI9ysY8eM=
github.com/armosec/gojay v1.2.17/go.mod h1:vuvX3DlY0nbVrJ0qCklSS733AWMoQboq3cFyuQW9ybc=
github.com/armosec/utils-go v0.0.58 h1:g9RnRkxZAmzTfPe2ruMo2OXSYLwVSegQSkSavOfmaIE=
@@ -1797,8 +1797,8 @@ github.com/kubescape/go-git-url v0.0.31 h1:VZnvtdGLVc42cQaR7llQeGZz0PnOxcs+eDig2
github.com/kubescape/go-git-url v0.0.31/go.mod h1:3ddc1HEflms1vMhD9owt/3FBES070UaYTUarcjx8jDk=
github.com/kubescape/go-logger v0.0.25 h1:Bi6F0856LOlvjrbSKD+ZtKKzbfRXDifhVCjK8s3kI6U=
github.com/kubescape/go-logger v0.0.25/go.mod h1:lk+R5/lAVJo4AgD4eYUJJfVTHf7ZChS73X1MFFbeInY=
-github.com/kubescape/k8s-interface v0.0.195 h1:pJ1PT3x3fd1WatLjyZbKAfE64PWtEbvxiFjOBKSBwuU=
-github.com/kubescape/k8s-interface v0.0.195/go.mod h1:j9snZbH+RxOaa1yG/bWgTClj90q7To0rGgQepxy4b+k=
+github.com/kubescape/k8s-interface v0.0.202 h1:yu9x+07crFQAgrBatFFU2WuuxMJfHUMHVuCzuHE9Q4M=
+github.com/kubescape/k8s-interface v0.0.202/go.mod h1:d4NVhL81bVXe8yEXlkT4ZHrt3iEppEIN39b8N1oXm5s=
github.com/kubescape/opa-utils v0.0.288 h1:X6kebUaVrM/fZt+XmeRw1mVJYkIrkyBdcbmjBs66gSc=
github.com/kubescape/opa-utils v0.0.288/go.mod h1:9ZmBd4xni0OLffuvcp4fKMmBo/glvgbwkCY5zggIKSw=
github.com/kubescape/rbac-utils v0.0.21-0.20230806101615-07e36f555520 h1:SqlwF8G+oFazeYmZQKoPczLEflBQpwpHCU8DoLLyfj8=