Compare commits

...

8 Commits

Author SHA1 Message Date
Alon Girmonsky
67006e2fc7 🔖 Bump the Helm chart version to 52.3.89 2024-11-10 15:04:27 -08:00
Alon Girmonsky
d0adbc357f if no scripting source folders, that's not an error 2024-11-06 11:34:44 -08:00
Volodymyr Stoiko
8e135d570b Remove pfring leftovers from ds (#1642)
Co-authored-by: Alon Girmonsky <1990761+alongir@users.noreply.github.com>
2024-11-06 11:11:44 -08:00
Volodymyr Stoiko
f21f68a7e0 Fix frontend port (#1641)
Co-authored-by: Alon Girmonsky <1990761+alongir@users.noreply.github.com>
2024-11-06 11:09:41 -08:00
Alon Girmonsky
5f13f7d28d Added an option to provide multiple script sources. (#1640) 2024-11-05 17:00:33 -08:00
Volodymyr Stoiko
80d23d62bd Remove PF_RING references (#1638)
* Remove PF_RING references

* Update values

---------

Co-authored-by: Alon Girmonsky <1990761+alongir@users.noreply.github.com>
2024-11-05 14:13:50 -08:00
Volodymyr Stoiko
bba1bbd1fb Watch cm creation and sync scripts (#1637)
* Fix graceful shutdown

* add helpers

* Watch for configmap changes

---------

Co-authored-by: Alon Girmonsky <1990761+alongir@users.noreply.github.com>
2024-11-05 13:35:17 -08:00
Volodymyr Stoiko
4a6628a3e8 Fix helm resource requests/limits templates (#1639) 2024-11-05 13:03:21 -08:00
16 changed files with 302 additions and 395 deletions

.gitignore vendored

@@ -1,66 +0,0 @@
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
# Test binary, built with `go test -c`
*.test
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
# Dependency directories (remove the comment below to include it)
# vendor/
.idea/
build
# Mac OS
.DS_Store
.vscode/
# Ignore the scripts that are created for development
*dev.*
# Environment variables
.env
# pprof
pprof/*
# Database Files
*.db
*.gob
# Nohup Files - https://man7.org/linux/man-pages/man1/nohup.1p.html
nohup.*
# Cypress tests
cypress.env.json
*/cypress/downloads
*/cypress/fixtures
*/cypress/plugins
*/cypress/screenshots
*/cypress/videos
*/cypress/support
# UI folders to ignore
**/node_modules/**
**/dist/**
*.editorconfig
# Ignore *.log files
*.log
# Object files
*.o
# Binaries
bin
# Scripts
scripts/
# CWD config YAML
kubeshark.yaml

bin/kubeshark__ Executable file

Binary file not shown.

bin/kubeshark__.sha256 Normal file

@@ -0,0 +1 @@
1f82f0ead73917c529e84e1bde4aa706042a936f8e036a34e0c5eaa3e4740306 kubeshark__


@@ -3,7 +3,12 @@ package cmd
import (
"context"
"encoding/json"
"errors"
"os"
"os/signal"
"strings"
"sync"
"time"
"github.com/creasty/defaults"
"github.com/fsnotify/fsnotify"
@@ -11,14 +16,16 @@ import (
"github.com/kubeshark/kubeshark/config/configStructs"
"github.com/kubeshark/kubeshark/kubernetes"
"github.com/kubeshark/kubeshark/misc"
"github.com/kubeshark/kubeshark/utils"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/watch"
)
var scriptsCmd = &cobra.Command{
Use: "scripts",
Short: "Watch the `scripting.source` directory for changes and update the scripts",
Short: "Watch the `scripting.source` and/or `scripting.sources` folders for changes and update the scripts",
RunE: func(cmd *cobra.Command, args []string) error {
runScripts()
return nil
@@ -39,8 +46,8 @@ func init() {
}
func runScripts() {
if config.Config.Scripting.Source == "" {
log.Error().Msg("`scripting.source` field is empty.")
if config.Config.Scripting.Source == "" && len(config.Config.Scripting.Sources) == 0 {
log.Error().Msg("Both `scripting.source` and `scripting.sources` fields are empty.")
return
}
@@ -50,39 +57,79 @@ func runScripts() {
return
}
watchScripts(kubernetesProvider, true)
var wg sync.WaitGroup
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
signalChan := make(chan os.Signal, 1)
signal.Notify(signalChan, os.Interrupt)
wg.Add(1)
go func() {
defer wg.Done()
watchConfigMap(ctx, kubernetesProvider)
}()
wg.Add(1)
go func() {
defer wg.Done()
watchScripts(ctx, kubernetesProvider, true)
}()
go func() {
<-signalChan
log.Debug().Msg("Received interrupt, stopping watchers.")
cancel()
}()
wg.Wait()
}
func createScript(provider *kubernetes.Provider, script misc.ConfigMapScript) (index int64, err error) {
const maxRetries = 5
var scripts map[int64]misc.ConfigMapScript
scripts, err = kubernetes.ConfigGetScripts(provider)
if err != nil {
return
}
script.Active = kubernetes.IsActiveScript(provider, script.Title)
index = int64(len(scripts))
if script.Title != "New Script" {
for i, v := range scripts {
if v.Title == script.Title {
index = int64(i)
for i := 0; i < maxRetries; i++ {
scripts, err = kubernetes.ConfigGetScripts(provider)
if err != nil {
return
}
script.Active = kubernetes.IsActiveScript(provider, script.Title)
index = int64(len(scripts))
if script.Title != "New Script" {
for i, v := range scripts {
if v.Title == script.Title {
index = int64(i)
}
}
}
}
scripts[index] = script
scripts[index] = script
log.Info().Str("title", script.Title).Bool("Active", script.Active).Int64("Index", index).Msg("Creating script")
var data []byte
data, err = json.Marshal(scripts)
if err != nil {
return
log.Info().Str("title", script.Title).Bool("Active", script.Active).Int64("Index", index).Msg("Creating script")
var data []byte
data, err = json.Marshal(scripts)
if err != nil {
return
}
_, err = kubernetes.SetConfig(provider, kubernetes.CONFIG_SCRIPTING_SCRIPTS, string(data))
if err == nil {
return index, nil
}
if k8serrors.IsConflict(err) {
log.Warn().Err(err).Msg("Conflict detected, retrying update...")
time.Sleep(500 * time.Millisecond)
continue
}
return 0, err
}
_, err = kubernetes.SetConfig(provider, kubernetes.CONFIG_SCRIPTING_SCRIPTS, string(data))
if err != nil {
return
}
return
log.Error().Msg("Max retries reached for creating script due to conflicts.")
return 0, errors.New("max retries reached due to conflicts while creating script")
}
func updateScript(provider *kubernetes.Provider, index int64, script misc.ConfigMapScript) (err error) {
@@ -134,7 +181,7 @@ func deleteScript(provider *kubernetes.Provider, index int64) (err error) {
return
}
func watchScripts(provider *kubernetes.Provider, block bool) {
func watchScripts(ctx context.Context, provider *kubernetes.Provider, block bool) {
files := make(map[string]int64)
scripts, err := config.Config.Scripting.GetScripts()
@@ -162,9 +209,31 @@ func watchScripts(provider *kubernetes.Provider, block bool) {
defer watcher.Close()
}
ctx, cancel := context.WithCancel(ctx)
defer cancel()
signalChan := make(chan os.Signal, 1)
signal.Notify(signalChan, os.Interrupt)
go func() {
<-signalChan
log.Debug().Msg("Received interrupt, stopping script watch.")
cancel()
watcher.Close()
}()
if err := watcher.Add(config.Config.Scripting.Source); err != nil {
log.Error().Err(err).Msg("Failed to add scripting source to watcher")
return
}
go func() {
for {
select {
case <-ctx.Done():
log.Debug().Msg("Script watcher exiting gracefully.")
return
// watch for events
case event := <-watcher.Events:
if !strings.HasSuffix(event.Name, "js") {
@@ -213,9 +282,12 @@ func watchScripts(provider *kubernetes.Provider, block bool) {
// pass
}
// watch for errors
case err := <-watcher.Errors:
log.Error().Err(err).Send()
case err, ok := <-watcher.Errors:
if !ok {
log.Info().Msg("Watcher errors channel closed.")
return
}
log.Error().Err(err).Msg("Watcher error encountered")
}
}
}()
@@ -224,11 +296,79 @@ func watchScripts(provider *kubernetes.Provider, block bool) {
log.Error().Err(err).Send()
}
log.Info().Str("directory", config.Config.Scripting.Source).Msg("Watching scripts against changes:")
for _, source := range config.Config.Scripting.Sources {
if err := watcher.Add(source); err != nil {
log.Error().Err(err).Send()
}
}
log.Info().Str("folder", config.Config.Scripting.Source).Interface("folders", config.Config.Scripting.Sources).Msg("Watching scripts against changes:")
if block {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
utils.WaitForTermination(ctx, cancel)
<-ctx.Done()
}
}
func watchConfigMap(ctx context.Context, provider *kubernetes.Provider) {
clientset := provider.GetClientSet()
configMapName := kubernetes.SELF_RESOURCES_PREFIX + kubernetes.SUFFIX_CONFIG_MAP
for {
select {
case <-ctx.Done():
log.Info().Msg("ConfigMap watcher exiting gracefully.")
return
default:
watcher, err := clientset.CoreV1().ConfigMaps(config.Config.Tap.Release.Namespace).Watch(context.TODO(), metav1.ListOptions{
FieldSelector: "metadata.name=" + configMapName,
})
if err != nil {
log.Warn().Err(err).Msg("ConfigMap not found, retrying in 5 seconds...")
time.Sleep(5 * time.Second)
continue
}
for event := range watcher.ResultChan() {
select {
case <-ctx.Done():
log.Info().Msg("ConfigMap watcher loop exiting gracefully.")
watcher.Stop()
return
default:
if event.Type == watch.Added {
log.Info().Msg("ConfigMap created or modified")
runScriptsSync(provider)
} else if event.Type == watch.Deleted {
log.Warn().Msg("ConfigMap deleted, waiting for recreation...")
watcher.Stop()
break
}
}
}
time.Sleep(5 * time.Second)
}
}
}
func runScriptsSync(provider *kubernetes.Provider) {
files := make(map[string]int64)
scripts, err := config.Config.Scripting.GetScripts()
if err != nil {
log.Error().Err(err).Send()
return
}
for _, script := range scripts {
index, err := createScript(provider, script.ConfigMap())
if err != nil {
log.Error().Err(err).Send()
continue
}
files[script.Path] = index
}
log.Info().Msg("Synchronized scripts with ConfigMap.")
}
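The retry loop above protects the read-modify-write of the scripts ConfigMap against concurrent writers. For comparison, here is a minimal sketch of the same pattern built on client-go's stock `retry.RetryOnConflict` helper; it assumes a plain clientset rather than Kubeshark's `kubernetes.Provider` wrapper, and the namespace, ConfigMap name, and function name are hypothetical:

```go
package scripts

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/util/retry"
)

// upsertScriptEntry re-reads the ConfigMap, applies the change, and writes it
// back. RetryOnConflict re-runs the closure whenever the API server returns
// a 409 Conflict, the same race createScript handles by hand above.
func upsertScriptEntry(ctx context.Context, cs kubernetes.Interface, ns, name, key, value string) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		cm, err := cs.CoreV1().ConfigMaps(ns).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		if cm.Data == nil {
			cm.Data = map[string]string{}
		}
		cm.Data[key] = value
		_, err = cs.CoreV1().ConfigMaps(ns).Update(ctx, cm, metav1.UpdateOptions{})
		return err // a Conflict error triggers another attempt
	})
}
```

Either approach resolves the race between this watcher and anything else writing the same ConfigMap; the helper simply centralizes the backoff policy.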


@@ -424,8 +424,9 @@ func postFrontStarted(ctx context.Context, kubernetesProvider *kubernetes.Provid
time.Sleep(100 * time.Millisecond)
}
if config.Config.Scripting.Source != "" && config.Config.Scripting.WatchScripts {
watchScripts(kubernetesProvider, false)
if (config.Config.Scripting.Source != "" || len(config.Config.Scripting.Sources) > 0) && config.Config.Scripting.WatchScripts {
watchScripts(ctx, kubernetesProvider, false)
}
if config.Config.Scripting.Console {


@@ -42,10 +42,6 @@ func CreateDefaultConfig() ConfigStruct {
// DAC_OVERRIDE is required to read /proc/PID/environ
"DAC_OVERRIDE",
},
KernelModule: []string{
// SYS_MODULE is required to install kernel modules
"SYS_MODULE",
},
EBPFCapture: []string{
// SYS_ADMIN is required to read /proc/PID/net/ns + to install eBPF programs (kernel < 5.8)
"SYS_ADMIN",


@@ -1,6 +1,7 @@
package configStructs
import (
"fmt"
"io/fs"
"os"
"path/filepath"
@@ -13,41 +14,79 @@ import (
type ScriptingConfig struct {
Env map[string]interface{} `yaml:"env" json:"env" default:"{}"`
Source string `yaml:"source" json:"source" default:""`
Sources []string `yaml:"sources" json:"sources" default:"[]"`
WatchScripts bool `yaml:"watchScripts" json:"watchScripts" default:"true"`
Active []string `yaml:"active" json:"active" default:"[]"`
Console bool `yaml:"console" json:"console" default:"true"`
}
func (config *ScriptingConfig) GetScripts() (scripts []*misc.Script, err error) {
if config.Source == "" {
return
// Check if both Source and Sources are empty
if config.Source == "" && len(config.Sources) == 0 {
return nil, nil
}
var files []fs.DirEntry
files, err = os.ReadDir(config.Source)
if err != nil {
return
var allFiles []struct {
Source string
File fs.DirEntry
}
for _, f := range files {
if f.IsDir() {
// Handle single Source directory
if config.Source != "" {
files, err := os.ReadDir(config.Source)
if err != nil {
return nil, fmt.Errorf("failed to read directory %s: %v", config.Source, err)
}
for _, file := range files {
allFiles = append(allFiles, struct {
Source string
File fs.DirEntry
}{Source: config.Source, File: file})
}
}
// Handle multiple Sources directories
if len(config.Sources) > 0 {
for _, source := range config.Sources {
files, err := os.ReadDir(source)
if err != nil {
return nil, fmt.Errorf("failed to read directory %s: %v", source, err)
}
for _, file := range files {
allFiles = append(allFiles, struct {
Source string
File fs.DirEntry
}{Source: source, File: file})
}
}
}
// Iterate over all collected files
for _, f := range allFiles {
if f.File.IsDir() {
continue
}
var script *misc.Script
path := filepath.Join(config.Source, f.Name())
if !strings.HasSuffix(path, ".js") {
// Construct the full path based on the relevant source directory
path := filepath.Join(f.Source, f.File.Name())
if !strings.HasSuffix(f.File.Name(), ".js") { // Use file name suffix for skipping non-JS files
log.Info().Str("path", path).Msg("Skipping non-JS file")
continue
}
// Read the script file
var script *misc.Script
script, err = misc.ReadScriptFile(path)
if err != nil {
return
return nil, fmt.Errorf("failed to read script file %s: %v", path, err)
}
// Append the valid script to the scripts slice
scripts = append(scripts, script)
log.Debug().Str("path", path).Msg("Found script:")
}
return
// Return the collected scripts and nil error if successful
return scripts, nil
}
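For reference, a kubeshark.yaml sketch exercising the new `sources` field alongside the existing `source`; the folder paths are placeholders:

```yaml
scripting:
  source: /opt/kubeshark/scripts    # single folder, as before
  sources:                          # new: any number of additional folders
    - /opt/kubeshark/scripts-team-a
    - /opt/kubeshark/scripts-team-b
  watchScripts: true
```

GetScripts merges the file listings of every configured folder, skipping subdirectories and anything without a `.js` suffix.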


@@ -35,8 +35,8 @@ const (
PprofPortLabel = "pprof-port"
PprofViewLabel = "pprof-view"
DebugLabel = "debug"
ContainerPort = 80
ContainerPortStr = "80"
ContainerPort = 8080
ContainerPortStr = "8080"
PcapDest = "dest"
PcapMaxSize = "maxSize"
PcapMaxTime = "maxTime"
@@ -169,16 +169,9 @@ type SentryConfig struct {
type CapabilitiesConfig struct {
NetworkCapture []string `yaml:"networkCapture" json:"networkCapture" default:"[]"`
ServiceMeshCapture []string `yaml:"serviceMeshCapture" json:"serviceMeshCapture" default:"[]"`
KernelModule []string `yaml:"kernelModule" json:"kernelModule" default:"[]"`
EBPFCapture []string `yaml:"ebpfCapture" json:"ebpfCapture" default:"[]"`
}
type KernelModuleConfig struct {
Enabled bool `yaml:"enabled" json:"enabled" default:"false"`
Image string `yaml:"image" json:"image" default:"kubeshark/pf-ring-module:all"`
UnloadOnDestroy bool `yaml:"unloadOnDestroy" json:"unloadOnDestroy" default:"false"`
}
type MetricsConfig struct {
Port uint16 `yaml:"port" json:"port" default:"49100"`
}
@@ -238,7 +231,6 @@ type TapConfig struct {
Ingress IngressConfig `yaml:"ingress" json:"ingress"`
IPv6 bool `yaml:"ipv6" json:"ipv6" default:"true"`
Debug bool `yaml:"debug" json:"debug" default:"false"`
KernelModule KernelModuleConfig `yaml:"kernelModule" json:"kernelModule"`
Telemetry TelemetryConfig `yaml:"telemetry" json:"telemetry"`
ResourceGuard ResourceGuardConfig `yaml:"resourceGuard" json:"resourceGuard"`
Sentry SentryConfig `yaml:"sentry" json:"sentry"`


@@ -1,6 +1,6 @@
apiVersion: v2
name: kubeshark
version: "52.3.88"
version: "52.3.89"
description: The API Traffic Analyzer for Kubernetes
home: https://kubeshark.co
keywords:


@@ -1,152 +0,0 @@
# PF_RING
<!-- TOC -->
- [PF\_RING](#pf_ring)
- [Overview](#overview)
- [Loading PF\_RING module on Kubernetes nodes](#loading-pf_ring-module-on-kubernetes-nodes)
- [Pre-built kernel module exists and external egress allowed](#pre-built-kernel-module-exists-and-external-egress-allowed)
- [Pre-built kernel module doesn't exist or external egress isn't allowed](#pre-built-kernel-module-doesnt-exist-or-external-egress-isnt-allowed)
- [Appendix A: PF\_RING kernel module compilation](#appendix-a-pf_ring-kernel-module-compilation)
- [Automated compilation](#automated-compilation)
- [Manual compilation](#manual-compilation)
<!-- /TOC -->
## Overview
PF_RING™ is an advanced Linux kernel module and user-space framework designed for high-speed packet processing. It offers a uniform API for packet processing applications, enabling efficient handling of large volumes of network data.
For comprehensive information on PF_RING™, please visit the [User's Guide](https://www.ntop.org/guides/pf_ring) and the detailed [API Documentation](http://www.ntop.org/guides/pf_ring_api/files.html).
## Loading PF_RING module on Kubernetes nodes
PF_RING kernel module loading is performed via the `worker` component pod.
The target container `tap.kernelModule.image` must contain the `pf_ring.ko` file under the path `/opt/lib/modules/<kernel version>/pf_ring.ko`.
Kubeshark provides ready-to-use containers with kernel modules for the most popular kernel versions running in different managed clouds.
Prior to deploying `kubeshark` with PF_RING enabled, verify that a PF_RING kernel module is already built for your kernel version.
Kubeshark provides an additional CLI tool for this purpose: [pf-ring-compiler](https://github.com/kubeshark/pf-ring-compiler).
Compatibility verification can be done by running:
```bash
pfring-compiler compatibility
```
This command checks for the availability of kernel modules for the kernel versions running across all nodes in the Kubernetes cluster.
Example output for a compatible cluster:
```bash
Node Kernel Version Supported
ip-192-168-77-230.us-west-2.compute.internal 5.10.199-190.747.amzn2.x86_64 true
ip-192-168-34-216.us-west-2.compute.internal 5.10.199-190.747.amzn2.x86_64 true
Cluster is compatible
```
Alternatively, inspect the available kernel module versions directly:
```bash
curl https://api.kubeshark.co/kernel-modules/meta/versions.json
```
Based on Kubernetes cluster compatibility and egress capabilities, the user has two options:
1. Use the Kubeshark-provided container `kubeshark/pf-ring-module`
2. Build a custom container with the required kernel module version.
### Pre-built kernel module exists and external egress allowed
In this case, no additional configuration is required.
Kubeshark will load the PF_RING kernel module from the default `kubeshark/pf-ring-module:all` container.
### Pre-built kernel module doesn't exist or external egress isn't allowed
In this case, building a custom Docker image is required.
1. Compile the PF_RING kernel module for the target kernel version
Skip if you have `pf_ring.ko` for the target kernel version.
Otherwise, follow [Appendix A](#appendix-a-pf_ring-kernel-module-compilation) for details.
2. Build the container
Kubeshark's own build process can be reused (see [pf-ring-compiler](https://github.com/kubeshark/pf-ring-compiler/tree/main/modules) for details).
3. Configure Helm values
```yaml
tap:
kernelModule:
image: <container from stage 2>
```
## Appendix A: PF_RING kernel module compilation
PF_RING kernel module compilation can be completed automatically or manually.
### Automated compilation
If your Kubernetes workers run a supported Linux distribution, the `pfring-compiler` CLI can be used to build the PF_RING module:
```bash
pfring-compiler compile --target <distro>
```
This command requires:
- kubectl to be installed and configured with a proper context
- an egress connection to the Internet
This command:
1. Runs a Kubernetes job with the build container
2. Waits for the job to complete
3. Downloads the `pf-ring-<kernel version>.ko` file into the current folder.
4. Cleans up the created job.
Currently supported distros:
- Ubuntu
- RHEL 9
- Amazon Linux 2
### Manual compilation
The process description below is based on the Ubuntu 22.04 distribution.
1. Get terminal access to the node with the target kernel version
This can be done either via SSH directly to the node or with a debug container running on the target node:
```bash
kubectl debug node/<target node> -it --attach=true --image=ubuntu:22.04
```
2. Install build tools and kernel headers
```bash
apt update
apt install -y gcc build-essential make git wget tar gzip
apt install -y linux-headers-$(uname -r)
```
3. Download PF_RING source code
```bash
wget https://github.com/ntop/PF_RING/archive/refs/tags/8.4.0.tar.gz
tar -xf 8.4.0.tar.gz
cd PF_RING-8.4.0/kernel
```
4. Compile the kernel module
```bash
make KERNEL_SRC=/usr/src/linux-headers-$(uname -r)
```
5. Copy `pf_ring.ko` to the local file system.
Use `scp` or `kubectl cp`, depending on the type of access (SSH or debug pod).


@@ -183,9 +183,6 @@ Example for overriding image names:
| `tap.ingress.annotations` | `Ingress` annotations | `{}` |
| `tap.ipv6` | Enable IPv6 support for the front-end | `true` |
| `tap.debug` | Enable debug mode | `false` |
| `tap.kernelModule.enabled` | Use PF_RING kernel module ([details](PF_RING.md)) | `false` |
| `tap.kernelModule.image` | Container image containing PF_RING kernel module with supported kernel version ([details](PF_RING.md)) | "kubeshark/pf-ring-module:all" |
| `tap.kernelModule.unloadOnDestroy` | Create additional container which watches for pod termination and unloads PF_RING kernel module. | `false`|
| `tap.telemetry.enabled` | Enable anonymous usage statistics collection | `true` |
| `tap.resourceGuard.enabled` | Enable resource guard worker process, which watches RAM/disk usage and enables/disables traffic capture based on available resources | `false` |
| `tap.sentry.enabled` | Enable sending of error logs to Sentry | `false` |


@@ -81,17 +81,17 @@ spec:
port: 8080
resources:
limits:
{{ if ne .Values.tap.resources.hub.limits.cpu "0" }}
{{ if ne (toString .Values.tap.resources.hub.limits.cpu) "0" }}
cpu: {{ .Values.tap.resources.hub.limits.cpu }}
{{ end }}
{{ if ne .Values.tap.resources.hub.limits.memory "0" }}
{{ if ne (toString .Values.tap.resources.hub.limits.memory) "0" }}
memory: {{ .Values.tap.resources.hub.limits.memory }}
{{ end }}
requests:
{{ if ne .Values.tap.resources.hub.requests.cpu "0" }}
{{ if ne (toString .Values.tap.resources.hub.requests.cpu) "0" }}
cpu: {{ .Values.tap.resources.hub.requests.cpu }}
{{ end }}
{{ if ne .Values.tap.resources.hub.requests.memory "0" }}
{{ if ne (toString .Values.tap.resources.hub.requests.memory) "0" }}
memory: {{ .Values.tap.resources.hub.requests.memory }}
{{ end }}
volumeMounts:
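The `toString` wrapping matters because Helm preserves YAML scalar types: `cpu: 0` arrives as an int while `cpu: "0"` arrives as a string, and Go templates treat a comparison between an int and a string as an error, so the unquoted form broke rendering. Coercing to a string first makes both spellings behave the same; the values below are illustrative:

```yaml
tap:
  resources:
    hub:
      limits:
        cpu: 0        # unquoted int: tripped the old `ne ... "0"` comparison
        memory: "0"   # quoted string: matched "0" as intended
```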


@@ -25,29 +25,6 @@ spec:
name: kubeshark-worker-daemon-set
namespace: kubeshark
spec:
{{- if .Values.tap.kernelModule.enabled }}
initContainers:
- name: load-pf-ring
image: {{ .Values.tap.kernelModule.image }}
imagePullPolicy: {{ .Values.tap.docker.imagePullPolicy }}
{{- if .Values.tap.docker.imagePullSecrets }}
imagePullSecrets:
{{- range .Values.tap.docker.imagePullSecrets }}
- name: {{ . }}
{{- end }}
{{- end }}
securityContext:
capabilities:
add:
{{- range .Values.tap.capabilities.kernelModule }}
{{ print "- " . }}
{{- end }}
drop:
- ALL
volumeMounts:
- name: lib-modules
mountPath: /lib/modules
{{- end }}
containers:
- command:
- ./worker
@@ -67,9 +44,6 @@ spec:
{{- end }}
- -procfs
- /hostproc
{{- if .Values.tap.kernelModule.enabled }}
- -kernel-module
{{- end }}
{{- if ne .Values.tap.packetCapture "ebpf" }}
- -disable-ebpf
{{- end }}
@@ -125,17 +99,17 @@ spec:
value: '{{ .Values.tap.sentry.environment }}'
resources:
limits:
{{ if ne .Values.tap.resources.sniffer.limits.cpu "0" }}
{{ if ne (toString .Values.tap.resources.sniffer.limits.cpu) "0" }}
cpu: {{ .Values.tap.resources.sniffer.limits.cpu }}
{{ end }}
{{ if ne .Values.tap.resources.sniffer.limits.memory "0" }}
{{ if ne (toString .Values.tap.resources.sniffer.limits.memory) "0" }}
memory: {{ .Values.tap.resources.sniffer.limits.memory }}
{{ end }}
requests:
{{ if ne .Values.tap.resources.sniffer.requests.cpu "0" }}
{{ if ne (toString .Values.tap.resources.sniffer.requests.cpu) "0" }}
cpu: {{ .Values.tap.resources.sniffer.requests.cpu }}
{{ end }}
{{ if ne .Values.tap.resources.sniffer.requests.memory "0" }}
{{ if ne (toString .Values.tap.resources.sniffer.requests.memory) "0" }}
memory: {{ .Values.tap.resources.sniffer.requests.memory }}
{{ end }}
securityContext:
@@ -174,20 +148,6 @@ spec:
readOnly: true
- mountPath: /app/data
name: data
{{- if and (eq .Values.tap.kernelModule.enabled true) (eq .Values.tap.kernelModule.unloadOnDestroy true) }}
- name: unload-pf-ring
image: {{ .Values.tap.kernelModule.image }}
command: ["/bin/sh"]
args: ["-c", "trap 'rmmod pf_ring && sleep 3' SIGTERM; while true; do sleep 1; done"]
securityContext:
capabilities:
add:
{{- range .Values.tap.capabilities.kernelModule }}
{{ print "- " . }}
{{- end }}
drop:
- ALL
{{- end }}
{{- if .Values.tap.tls }}
- command:
- ./tracer
@@ -236,17 +196,17 @@ spec:
value: '{{ .Values.tap.sentry.environment }}'
resources:
limits:
{{ if ne .Values.tap.resources.tracer.limits.cpu "0" }}
{{ if ne (toString .Values.tap.resources.tracer.limits.cpu) "0" }}
cpu: {{ .Values.tap.resources.tracer.limits.cpu }}
{{ end }}
{{ if ne .Values.tap.resources.tracer.limits.memory "0" }}
{{ if ne (toString .Values.tap.resources.tracer.limits.memory) "0" }}
memory: {{ .Values.tap.resources.tracer.limits.memory }}
{{ end }}
requests:
{{ if ne .Values.tap.resources.tracer.requests.cpu "0" }}
{{ if ne (toString .Values.tap.resources.tracer.requests.cpu) "0" }}
cpu: {{ .Values.tap.resources.tracer.requests.cpu }}
{{ end }}
{{ if ne .Values.tap.resources.tracer.requests.memory "0" }}
{{ if ne (toString .Values.tap.resources.tracer.requests.memory) "0" }}
memory: {{ .Values.tap.resources.tracer.requests.memory }}
{{ end }}
securityContext:


@@ -96,10 +96,6 @@ tap:
annotations: {}
ipv6: true
debug: false
kernelModule:
enabled: false
image: kubeshark/pf-ring-module:all
unloadOnDestroy: false
telemetry:
enabled: true
resourceGuard:
@@ -121,8 +117,6 @@ tap:
- SYS_ADMIN
- SYS_PTRACE
- DAC_OVERRIDE
kernelModule:
- SYS_MODULE
ebpfCapture:
- SYS_ADMIN
- SYS_PTRACE
@@ -179,6 +173,7 @@ dissectorsUpdatingEnabled: true
scripting:
env: {}
source: ""
sources: []
watchScripts: true
active: []
console: true
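With these values in place, multiple script folders can also be supplied at install time. A sketch, assuming the chart repository is registered as `kubeshark` and using placeholder paths (Helm's curly-brace syntax populates list values):

```bash
helm install kubeshark kubeshark/kubeshark \
  --set scripting.watchScripts=true \
  --set 'scripting.sources={/opt/scripts/common,/opt/scripts/custom}'
```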


@@ -247,6 +247,10 @@ func (provider *Provider) GetNamespaces() (namespaces []string) {
return
}
func (provider *Provider) GetClientSet() *kubernetes.Clientset {
return provider.clientSet
}
func getClientSet(config *rest.Config) (*kubernetes.Clientset, error) {
clientSet, err := kubernetes.NewForConfig(config)
if err != nil {


@@ -4,10 +4,10 @@ apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
labels:
helm.sh/chart: kubeshark-52.3.88
helm.sh/chart: kubeshark-52.3.89
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.88"
app.kubernetes.io/version: "52.3.89"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-hub-network-policy
@@ -31,10 +31,10 @@ apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
labels:
helm.sh/chart: kubeshark-52.3.88
helm.sh/chart: kubeshark-52.3.89
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.88"
app.kubernetes.io/version: "52.3.89"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-front-network-policy
@@ -58,10 +58,10 @@ apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
labels:
helm.sh/chart: kubeshark-52.3.88
helm.sh/chart: kubeshark-52.3.89
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.88"
app.kubernetes.io/version: "52.3.89"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-worker-network-policy
@@ -87,10 +87,10 @@ apiVersion: v1
kind: ServiceAccount
metadata:
labels:
helm.sh/chart: kubeshark-52.3.88
helm.sh/chart: kubeshark-52.3.89
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.88"
app.kubernetes.io/version: "52.3.89"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-service-account
@@ -104,10 +104,10 @@ metadata:
namespace: default
labels:
app.kubeshark.co/app: hub
helm.sh/chart: kubeshark-52.3.88
helm.sh/chart: kubeshark-52.3.89
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.88"
app.kubernetes.io/version: "52.3.89"
app.kubernetes.io/managed-by: Helm
stringData:
LICENSE: ''
@@ -121,10 +121,10 @@ metadata:
namespace: default
labels:
app.kubeshark.co/app: hub
helm.sh/chart: kubeshark-52.3.88
helm.sh/chart: kubeshark-52.3.89
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.88"
app.kubernetes.io/version: "52.3.89"
app.kubernetes.io/managed-by: Helm
stringData:
AUTH_SAML_X509_CRT: |
@@ -137,10 +137,10 @@ metadata:
namespace: default
labels:
app.kubeshark.co/app: hub
helm.sh/chart: kubeshark-52.3.88
helm.sh/chart: kubeshark-52.3.89
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.88"
app.kubernetes.io/version: "52.3.89"
app.kubernetes.io/managed-by: Helm
stringData:
AUTH_SAML_X509_KEY: |
@@ -152,10 +152,10 @@ metadata:
name: kubeshark-nginx-config-map
namespace: default
labels:
helm.sh/chart: kubeshark-52.3.88
helm.sh/chart: kubeshark-52.3.89
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.88"
app.kubernetes.io/version: "52.3.89"
app.kubernetes.io/managed-by: Helm
data:
default.conf: |
@@ -216,10 +216,10 @@ metadata:
namespace: default
labels:
app.kubeshark.co/app: hub
helm.sh/chart: kubeshark-52.3.88
helm.sh/chart: kubeshark-52.3.89
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.88"
app.kubernetes.io/version: "52.3.89"
app.kubernetes.io/managed-by: Helm
data:
POD_REGEX: '.*'
@@ -266,10 +266,10 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
helm.sh/chart: kubeshark-52.3.88
helm.sh/chart: kubeshark-52.3.89
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.88"
app.kubernetes.io/version: "52.3.89"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-cluster-role-default
@@ -314,10 +314,10 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
helm.sh/chart: kubeshark-52.3.88
helm.sh/chart: kubeshark-52.3.89
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.88"
app.kubernetes.io/version: "52.3.89"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-cluster-role-binding-default
@@ -336,10 +336,10 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
helm.sh/chart: kubeshark-52.3.88
helm.sh/chart: kubeshark-52.3.89
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.88"
app.kubernetes.io/version: "52.3.89"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-self-config-role
@@ -366,10 +366,10 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
helm.sh/chart: kubeshark-52.3.88
helm.sh/chart: kubeshark-52.3.89
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.88"
app.kubernetes.io/version: "52.3.89"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-self-config-role-binding
@@ -389,10 +389,10 @@ kind: Service
metadata:
labels:
app.kubeshark.co/app: hub
helm.sh/chart: kubeshark-52.3.88
helm.sh/chart: kubeshark-52.3.89
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.88"
app.kubernetes.io/version: "52.3.89"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-hub
@@ -411,10 +411,10 @@ apiVersion: v1
kind: Service
metadata:
labels:
helm.sh/chart: kubeshark-52.3.88
helm.sh/chart: kubeshark-52.3.89
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.88"
app.kubernetes.io/version: "52.3.89"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-front
@@ -433,10 +433,10 @@ kind: Service
apiVersion: v1
metadata:
labels:
helm.sh/chart: kubeshark-52.3.88
helm.sh/chart: kubeshark-52.3.89
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.88"
app.kubernetes.io/version: "52.3.89"
app.kubernetes.io/managed-by: Helm
annotations:
prometheus.io/scrape: 'true'
@@ -446,10 +446,10 @@ metadata:
spec:
selector:
app.kubeshark.co/app: worker
helm.sh/chart: kubeshark-52.3.88
helm.sh/chart: kubeshark-52.3.89
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.88"
app.kubernetes.io/version: "52.3.89"
app.kubernetes.io/managed-by: Helm
ports:
- name: metrics
@@ -464,10 +464,10 @@ metadata:
labels:
app.kubeshark.co/app: worker
sidecar.istio.io/inject: "false"
helm.sh/chart: kubeshark-52.3.88
helm.sh/chart: kubeshark-52.3.89
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.88"
app.kubernetes.io/version: "52.3.89"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-worker-daemon-set
@@ -482,10 +482,10 @@ spec:
metadata:
labels:
app.kubeshark.co/app: worker
helm.sh/chart: kubeshark-52.3.88
helm.sh/chart: kubeshark-52.3.89
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.88"
app.kubernetes.io/version: "52.3.89"
app.kubernetes.io/managed-by: Helm
name: kubeshark-worker-daemon-set
namespace: kubeshark
@@ -510,7 +510,7 @@ spec:
- 'auto'
- -staletimeout
- '30'
image: 'docker.io/kubeshark/worker:v52.3.88'
image: 'docker.io/kubeshark/worker:v52.3.89'
imagePullPolicy: Always
name: sniffer
ports:
@@ -590,7 +590,7 @@ spec:
- /hostproc
- -disable-ebpf
- -disable-tls-log
image: 'docker.io/kubeshark/worker:v52.3.88'
image: 'docker.io/kubeshark/worker:v52.3.89'
imagePullPolicy: Always
name: tracer
env:
@@ -692,10 +692,10 @@ kind: Deployment
metadata:
labels:
app.kubeshark.co/app: hub
helm.sh/chart: kubeshark-52.3.88
helm.sh/chart: kubeshark-52.3.89
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.88"
app.kubernetes.io/version: "52.3.89"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-hub
@@ -711,10 +711,10 @@ spec:
metadata:
labels:
app.kubeshark.co/app: hub
helm.sh/chart: kubeshark-52.3.88
helm.sh/chart: kubeshark-52.3.89
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.88"
app.kubernetes.io/version: "52.3.89"
app.kubernetes.io/managed-by: Helm
spec:
dnsPolicy: ClusterFirstWithHostNet
@@ -742,7 +742,7 @@ spec:
value: 'https://api.kubeshark.co'
- name: PROFILING_ENABLED
value: 'false'
image: 'docker.io/kubeshark/hub:v52.3.88'
image: 'docker.io/kubeshark/hub:v52.3.89'
imagePullPolicy: Always
readinessProbe:
periodSeconds: 1
@@ -796,10 +796,10 @@ kind: Deployment
metadata:
labels:
app.kubeshark.co/app: front
helm.sh/chart: kubeshark-52.3.88
helm.sh/chart: kubeshark-52.3.89
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.88"
app.kubernetes.io/version: "52.3.89"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-front
@@ -815,10 +815,10 @@ spec:
metadata:
labels:
app.kubeshark.co/app: front
helm.sh/chart: kubeshark-52.3.88
helm.sh/chart: kubeshark-52.3.89
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.88"
app.kubernetes.io/version: "52.3.89"
app.kubernetes.io/managed-by: Helm
spec:
containers:
@@ -853,7 +853,7 @@ spec:
value: 'false'
- name: REACT_APP_SENTRY_ENVIRONMENT
value: 'production'
image: 'docker.io/kubeshark/front:v52.3.88'
image: 'docker.io/kubeshark/front:v52.3.89'
imagePullPolicy: Always
name: kubeshark-front
livenessProbe: