mirror of https://github.com/kubeshark/kubeshark.git

Compare commits

26 Commits
- cd1d7e4a58
- 9b7e2e7144
- 80fa18cbba
- dfbb321084
- d85dc58f20
- 993b8ae19e
- 77f81c8ab3
- a24b40a0c1
- 4817ed2a80
- d66ec06928
- 125e3abe6c
- 8221c4ef10
- 7f216b2958
- 67006e2fc7
- d0adbc357f
- 8e135d570b
- f21f68a7e0
- 5f13f7d28d
- 80d23d62bd
- bba1bbd1fb
- 4a6628a3e8
- bec0b25daa
- 9248f07af0
- a1e05db4b0
- b3f6fdc831
- e0c010eb29
.gitignore (vendored): 2 changes

@@ -63,4 +63,4 @@ bin
 scripts/
 
 # CWD config YAML
-kubeshark.yaml
+kubeshark.yaml
Makefile: 12 changes

@@ -189,6 +189,18 @@ release:
 	@cd ../../kubeshark.github.io/ && git add -A . && git commit -m ":sparkles: Update the Helm chart" && git push
 	@cd ../kubeshark
+
+soft-release:
+	@cd ../worker && git checkout master && git pull && git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags
+	@cd ../tracer && git checkout master && git pull && git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags
+	@cd ../hub && git checkout master && git pull && git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags
+	@cd ../front && git checkout master && git pull && git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags
+	@cd ../kubeshark && git checkout master && git pull && sed -i 's/^version:.*/version: "$(VERSION)"/' helm-chart/Chart.yaml && make && make generate-helm-values && make generate-manifests
+	@git add -A . && git commit -m ":bookmark: Bump the Helm chart version to $(VERSION)" && git push
+	# @git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags
+	# @cd helm-chart && cp -r . ../../kubeshark.github.io/charts/chart
+	# @cd ../../kubeshark.github.io/ && git add -A . && git commit -m ":sparkles: Update the Helm chart" && git push
+	# @cd ../kubeshark
 
 branch:
 	@cd ../worker && git checkout master && git pull && git checkout -b $(name); git push --set-upstream origin $(name)
 	@cd ../hub && git checkout master && git pull && git checkout -b $(name); git push --set-upstream origin $(name)
@@ -1,62 +0,0 @@ (entire file removed)
package cmd

import (
	"fmt"
	"net/http"
	"os"
	"path/filepath"
	"time"

	"github.com/creasty/defaults"
	"github.com/kubeshark/kubeshark/config/configStructs"
	"github.com/kubeshark/kubeshark/internal/connect"
	"github.com/kubeshark/kubeshark/kubernetes"
	"github.com/kubeshark/kubeshark/utils"
	"github.com/rs/zerolog/log"
	"github.com/spf13/cobra"
)

var exportCmd = &cobra.Command{
	Use:   "export",
	Short: "Exports the captured traffic into a TAR file that contains PCAP files",
	RunE: func(cmd *cobra.Command, args []string) error {
		runExport()
		return nil
	},
}

func init() {
	rootCmd.AddCommand(exportCmd)

	defaultTapConfig := configStructs.TapConfig{}
	if err := defaults.Set(&defaultTapConfig); err != nil {
		log.Debug().Err(err).Send()
	}

	exportCmd.Flags().Uint16(configStructs.ProxyFrontPortLabel, defaultTapConfig.Proxy.Front.Port, "Provide a custom port for the Kubeshark")
	exportCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the Kubeshark")
	exportCmd.Flags().StringP(configStructs.ReleaseNamespaceLabel, "s", defaultTapConfig.Release.Namespace, "Release namespace of Kubeshark")
}

func runExport() {
	hubUrl := kubernetes.GetHubUrl()
	response, err := http.Get(fmt.Sprintf("%s/echo", hubUrl))
	if err != nil || response.StatusCode != 200 {
		log.Info().Msg(fmt.Sprintf(utils.Yellow, "Couldn't connect to Hub. Establishing proxy..."))
		runProxy(false, true)
	}

	dstPath, err := filepath.Abs(fmt.Sprintf("./%d.tar.gz", time.Now().Unix()))
	if err != nil {
		panic(err)
	}

	out, err := os.Create(dstPath)
	if err != nil {
		panic(err)
	}
	defer out.Close()

	connector := connect.NewConnector(kubernetes.GetHubUrl(), connect.DefaultRetries, connect.DefaultTimeout)
	connector.PostPcapsMerge(out)
}
@@ -16,7 +16,7 @@ import (
 // pcapDumpCmd represents the consolidated pcapdump command
 var pcapDumpCmd = &cobra.Command{
 	Use:   "pcapdump",
-	Short: "Manage PCAP dump operations: start, stop, or copy PCAP files",
+	Short: "Store all captured traffic (including decrypted TLS) in a PCAP file.",
 	RunE: func(cmd *cobra.Command, args []string) error {
 		// Retrieve the kubeconfig path from the flag
 		kubeconfig, _ := cmd.Flags().GetString(configStructs.PcapKubeconfig)
cmd/scripts.go: 215 changes

@@ -3,7 +3,12 @@ package cmd
 import (
 	"context"
+	"encoding/json"
+	"errors"
 	"os"
+	"os/signal"
 	"strings"
+	"sync"
+	"time"
 
 	"github.com/creasty/defaults"
 	"github.com/fsnotify/fsnotify"
@@ -11,14 +16,16 @@
 	"github.com/kubeshark/kubeshark/config/configStructs"
 	"github.com/kubeshark/kubeshark/kubernetes"
 	"github.com/kubeshark/kubeshark/misc"
 	"github.com/kubeshark/kubeshark/utils"
 	"github.com/rs/zerolog/log"
 	"github.com/spf13/cobra"
+	k8serrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/watch"
 )
 
 var scriptsCmd = &cobra.Command{
 	Use:   "scripts",
-	Short: "Watch the `scripting.source` directory for changes and update the scripts",
+	Short: "Watch the `scripting.source` and/or `scripting.sources` folders for changes and update the scripts",
 	RunE: func(cmd *cobra.Command, args []string) error {
 		runScripts()
 		return nil
@@ -39,8 +46,8 @@ func init() {
 }
 
 func runScripts() {
-	if config.Config.Scripting.Source == "" {
-		log.Error().Msg("`scripting.source` field is empty.")
+	if config.Config.Scripting.Source == "" && len(config.Config.Scripting.Sources) == 0 {
+		log.Error().Msg("Both `scripting.source` and `scripting.sources` fields are empty.")
 		return
 	}
@@ -50,39 +57,82 @@ func runScripts() {
 		return
 	}
 
-	watchScripts(kubernetesProvider, true)
+	var wg sync.WaitGroup
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	signalChan := make(chan os.Signal, 1)
+	signal.Notify(signalChan, os.Interrupt)
+
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		watchConfigMap(ctx, kubernetesProvider)
+	}()
+
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		watchScripts(ctx, kubernetesProvider, true)
+	}()
+
+	go func() {
+		<-signalChan
+		log.Debug().Msg("Received interrupt, stopping watchers.")
+		cancel()
+	}()
+
+	wg.Wait()
+
 }
 
 func createScript(provider *kubernetes.Provider, script misc.ConfigMapScript) (index int64, err error) {
+	const maxRetries = 5
 	var scripts map[int64]misc.ConfigMapScript
-	scripts, err = kubernetes.ConfigGetScripts(provider)
-	if err != nil {
-		return
-	}
-	script.Active = kubernetes.IsActiveScript(provider, script.Title)
-	index = int64(len(scripts))
-	if script.Title != "New Script" {
-		for i, v := range scripts {
-			if v.Title == script.Title {
-				index = int64(i)
+
+	for i := 0; i < maxRetries; i++ {
+		scripts, err = kubernetes.ConfigGetScripts(provider)
+		if err != nil {
+			return
+		}
+		script.Active = kubernetes.IsActiveScript(provider, script.Title)
+		index = 0
+		if script.Title != "New Script" {
+			for i, v := range scripts {
+				if index <= i {
+					index = i + 1
+				}
+				if v.Title == script.Title {
+					index = int64(i)
+				}
 			}
 		}
-	}
-	scripts[index] = script
+		scripts[index] = script
 
-	log.Info().Str("title", script.Title).Bool("Active", script.Active).Int64("Index", index).Msg("Creating script")
-	var data []byte
-	data, err = json.Marshal(scripts)
-	if err != nil {
-		return
+		log.Info().Str("title", script.Title).Bool("Active", script.Active).Int64("Index", index).Msg("Creating script")
+		var data []byte
+		data, err = json.Marshal(scripts)
+		if err != nil {
+			return
+		}
+
+		_, err = kubernetes.SetConfig(provider, kubernetes.CONFIG_SCRIPTING_SCRIPTS, string(data))
+		if err == nil {
+			return index, nil
+		}
+
+		if k8serrors.IsConflict(err) {
+			log.Warn().Err(err).Msg("Conflict detected, retrying update...")
+			time.Sleep(500 * time.Millisecond)
+			continue
+		}
+
+		return 0, err
 	}
 
-	_, err = kubernetes.SetConfig(provider, kubernetes.CONFIG_SCRIPTING_SCRIPTS, string(data))
-	if err != nil {
-		return
-	}
-
-	return
+	log.Error().Msg("Max retries reached for creating script due to conflicts.")
+	return 0, errors.New("max retries reached due to conflicts while creating script")
 }
 
 func updateScript(provider *kubernetes.Provider, index int64, script misc.ConfigMapScript) (err error) {
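The new createScript body implements a get-modify-update cycle with hand-rolled conflict retries. For comparison, client-go ships a helper for exactly this optimistic-concurrency pattern; a minimal sketch assuming a plain clientset (the namespace, ConfigMap name, and `scripts` key below are hypothetical placeholders, not taken from the commit):

```go
package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/util/retry"
)

// setScripts persists serialized script data into a ConfigMap key, retrying
// automatically whenever the API server answers 409 Conflict.
func setScripts(clientset *kubernetes.Clientset, namespace, name, data string) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		// Re-read the object on every attempt so the update carries a
		// fresh resourceVersion.
		cm, err := clientset.CoreV1().ConfigMaps(namespace).Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		if cm.Data == nil {
			cm.Data = map[string]string{}
		}
		cm.Data["scripts"] = data
		_, err = clientset.CoreV1().ConfigMaps(namespace).Update(context.TODO(), cm, metav1.UpdateOptions{})
		return err // a Conflict here makes RetryOnConflict run the closure again
	})
}
```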
@@ -134,7 +184,7 @@ func deleteScript(provider *kubernetes.Provider, index int64) (err error) {
 	return
 }
 
-func watchScripts(provider *kubernetes.Provider, block bool) {
+func watchScripts(ctx context.Context, provider *kubernetes.Provider, block bool) {
 	files := make(map[string]int64)
 
 	scripts, err := config.Config.Scripting.GetScripts()
@@ -162,9 +212,31 @@ func watchScripts(provider *kubernetes.Provider, block bool) {
 		defer watcher.Close()
 	}
 
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	signalChan := make(chan os.Signal, 1)
+	signal.Notify(signalChan, os.Interrupt)
+
+	go func() {
+		<-signalChan
+		log.Debug().Msg("Received interrupt, stopping script watch.")
+		cancel()
+		watcher.Close()
+	}()
+
+	if err := watcher.Add(config.Config.Scripting.Source); err != nil {
+		log.Error().Err(err).Msg("Failed to add scripting source to watcher")
+		return
+	}
+
 	go func() {
 		for {
 			select {
+			case <-ctx.Done():
+				log.Debug().Msg("Script watcher exiting gracefully.")
+				return
+
 			// watch for events
 			case event := <-watcher.Events:
 				if !strings.HasSuffix(event.Name, "js") {
@@ -213,9 +285,12 @@ func watchScripts(provider *kubernetes.Provider, block bool) {
 				// pass
 			}
 
-			// watch for errors
-			case err := <-watcher.Errors:
-				log.Error().Err(err).Send()
+			case err, ok := <-watcher.Errors:
+				if !ok {
+					log.Info().Msg("Watcher errors channel closed.")
+					return
+				}
+				log.Error().Err(err).Msg("Watcher error encountered")
 			}
 		}
 	}()
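These two hunks thread a cancellable context and an interrupt handler through the fsnotify event loop and guard against closed channels. A condensed, self-contained sketch of that shutdown pattern (the watched directory is a placeholder):

```go
package main

import (
	"context"
	"log"
	"os"
	"os/signal"
	"strings"

	"github.com/fsnotify/fsnotify"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Cancel the context on Ctrl-C so the loop below can exit.
	sig := make(chan os.Signal, 1)
	signal.Notify(sig, os.Interrupt)
	go func() { <-sig; cancel() }()

	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer watcher.Close()

	if err := watcher.Add("./scripts"); err != nil { // placeholder directory
		log.Fatal(err)
	}

	for {
		select {
		case <-ctx.Done():
			return
		case event, ok := <-watcher.Events:
			if !ok {
				return // channel closed, watcher was shut down
			}
			if strings.HasSuffix(event.Name, ".js") {
				log.Printf("%s: %s", event.Op, event.Name)
			}
		case err, ok := <-watcher.Errors:
			if !ok {
				return
			}
			log.Println("watch error:", err)
		}
	}
}
```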
@@ -224,11 +299,79 @@ func watchScripts(provider *kubernetes.Provider, block bool) {
 		log.Error().Err(err).Send()
 	}
 
-	log.Info().Str("directory", config.Config.Scripting.Source).Msg("Watching scripts against changes:")
+	for _, source := range config.Config.Scripting.Sources {
+		if err := watcher.Add(source); err != nil {
+			log.Error().Err(err).Send()
+		}
+	}
+
+	log.Info().Str("folder", config.Config.Scripting.Source).Interface("folders", config.Config.Scripting.Sources).Msg("Watching scripts against changes:")
 
 	if block {
-		ctx, cancel := context.WithCancel(context.Background())
-		defer cancel()
-		utils.WaitForTermination(ctx, cancel)
+		<-ctx.Done()
 	}
 }
+
+func watchConfigMap(ctx context.Context, provider *kubernetes.Provider) {
+	clientset := provider.GetClientSet()
+	configMapName := kubernetes.SELF_RESOURCES_PREFIX + kubernetes.SUFFIX_CONFIG_MAP
+
+	for {
+		select {
+		case <-ctx.Done():
+			log.Info().Msg("ConfigMap watcher exiting gracefully.")
+			return
+
+		default:
+			watcher, err := clientset.CoreV1().ConfigMaps(config.Config.Tap.Release.Namespace).Watch(context.TODO(), metav1.ListOptions{
+				FieldSelector: "metadata.name=" + configMapName,
+			})
+			if err != nil {
+				log.Warn().Err(err).Msg("ConfigMap not found, retrying in 5 seconds...")
+				time.Sleep(5 * time.Second)
+				continue
+			}
+
+			for event := range watcher.ResultChan() {
+				select {
+				case <-ctx.Done():
+					log.Info().Msg("ConfigMap watcher loop exiting gracefully.")
+					watcher.Stop()
+					return
+
+				default:
+					if event.Type == watch.Added {
+						log.Info().Msg("ConfigMap created or modified")
+						runScriptsSync(provider)
+					} else if event.Type == watch.Deleted {
+						log.Warn().Msg("ConfigMap deleted, waiting for recreation...")
+						watcher.Stop()
+						break
+					}
+				}
+			}
+
+			time.Sleep(5 * time.Second)
+		}
+	}
+}
+
+func runScriptsSync(provider *kubernetes.Provider) {
+	files := make(map[string]int64)
+
+	scripts, err := config.Config.Scripting.GetScripts()
+	if err != nil {
+		log.Error().Err(err).Send()
+		return
+	}
+
+	for _, script := range scripts {
+		index, err := createScript(provider, script.ConfigMap())
+		if err != nil {
+			log.Error().Err(err).Send()
+			continue
+		}
+		files[script.Path] = index
+	}
+	log.Info().Msg("Synchronized scripts with ConfigMap.")
+}
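The new watchConfigMap re-establishes the watch by hand with a sleep-and-retry loop. client-go also offers a RetryWatcher that re-creates dropped watch connections itself; a sketch under the assumption of an already-configured clientset (namespace and name are placeholders):

```go
package main

import (
	"context"
	"log"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	toolswatch "k8s.io/client-go/tools/watch"
)

func watchConfigMap(ctx context.Context, clientset *kubernetes.Clientset, namespace, name string) error {
	lw := &cache.ListWatch{
		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
			options.FieldSelector = "metadata.name=" + name
			return clientset.CoreV1().ConfigMaps(namespace).Watch(ctx, options)
		},
	}
	// "1" asks the watcher to start from an old resource version; a real
	// caller would usually List first and pass the returned version.
	rw, err := toolswatch.NewRetryWatcher("1", lw)
	if err != nil {
		return err
	}
	defer rw.Stop()

	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case event, ok := <-rw.ResultChan():
			if !ok {
				return nil
			}
			if event.Type == watch.Added || event.Type == watch.Modified {
				log.Println("ConfigMap created or modified")
			}
		}
	}
}
```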
@@ -424,8 +424,9 @@ func postFrontStarted(ctx context.Context, kubernetesProvider *kubernetes.Provid
 		time.Sleep(100 * time.Millisecond)
 	}
 
-	if config.Config.Scripting.Source != "" && config.Config.Scripting.WatchScripts {
-		watchScripts(kubernetesProvider, false)
+
+	if (config.Config.Scripting.Source != "" || len(config.Config.Scripting.Sources) > 0) && config.Config.Scripting.WatchScripts {
+		watchScripts(ctx, kubernetesProvider, false)
 	}
 
 	if config.Config.Scripting.Console {
@@ -42,10 +42,6 @@ func CreateDefaultConfig() ConfigStruct {
 					// DAC_OVERRIDE is required to read /proc/PID/environ
 					"DAC_OVERRIDE",
 				},
-				KernelModule: []string{
-					// SYS_MODULE is required to install kernel modules
-					"SYS_MODULE",
-				},
 				EBPFCapture: []string{
 					// SYS_ADMIN is required to read /proc/PID/net/ns + to install eBPF programs (kernel < 5.8)
 					"SYS_ADMIN",
@@ -85,7 +81,8 @@ func CreateDefaultConfig() ConfigStruct {
 				// "tcp",
 				// "udp",
 				"ws",
-				"tls",
+				// "tlsx",
+				"ldap",
 			},
 		},
 	}
@@ -1,6 +1,7 @@
 package configStructs
 
 import (
+	"fmt"
 	"io/fs"
 	"os"
 	"path/filepath"
@@ -13,41 +14,79 @@ import (
 type ScriptingConfig struct {
 	Env          map[string]interface{} `yaml:"env" json:"env" default:"{}"`
 	Source       string                 `yaml:"source" json:"source" default:""`
+	Sources      []string               `yaml:"sources" json:"sources" default:"[]"`
 	WatchScripts bool                   `yaml:"watchScripts" json:"watchScripts" default:"true"`
 	Active       []string               `yaml:"active" json:"active" default:"[]"`
 	Console      bool                   `yaml:"console" json:"console" default:"true"`
 }
 
 func (config *ScriptingConfig) GetScripts() (scripts []*misc.Script, err error) {
-	if config.Source == "" {
-		return
+	// Check if both Source and Sources are empty
+	if config.Source == "" && len(config.Sources) == 0 {
+		return nil, nil
 	}
 
-	var files []fs.DirEntry
-	files, err = os.ReadDir(config.Source)
-	if err != nil {
-		return
+	var allFiles []struct {
+		Source string
+		File   fs.DirEntry
 	}
 
-	for _, f := range files {
-		if f.IsDir() {
+	// Handle single Source directory
+	if config.Source != "" {
+		files, err := os.ReadDir(config.Source)
+		if err != nil {
+			return nil, fmt.Errorf("failed to read directory %s: %v", config.Source, err)
+		}
+		for _, file := range files {
+			allFiles = append(allFiles, struct {
+				Source string
+				File   fs.DirEntry
+			}{Source: config.Source, File: file})
+		}
+	}
+
+	// Handle multiple Sources directories
+	if len(config.Sources) > 0 {
+		for _, source := range config.Sources {
+			files, err := os.ReadDir(source)
+			if err != nil {
+				return nil, fmt.Errorf("failed to read directory %s: %v", source, err)
+			}
+			for _, file := range files {
+				allFiles = append(allFiles, struct {
+					Source string
+					File   fs.DirEntry
+				}{Source: source, File: file})
+			}
+		}
+	}
+
+	// Iterate over all collected files
+	for _, f := range allFiles {
+		if f.File.IsDir() {
 			continue
 		}
 
-		var script *misc.Script
-		path := filepath.Join(config.Source, f.Name())
-		if !strings.HasSuffix(path, ".js") {
+		// Construct the full path based on the relevant source directory
+		path := filepath.Join(f.Source, f.File.Name())
+		if !strings.HasSuffix(f.File.Name(), ".js") { // Use file name suffix for skipping non-JS files
 			log.Info().Str("path", path).Msg("Skipping non-JS file")
 			continue
 		}
 
+		// Read the script file
+		var script *misc.Script
 		script, err = misc.ReadScriptFile(path)
 		if err != nil {
-			return
+			return nil, fmt.Errorf("failed to read script file %s: %v", path, err)
 		}
 
+		// Append the valid script to the scripts slice
 		scripts = append(scripts, script)
 
 		log.Debug().Str("path", path).Msg("Found script:")
 	}
 
-	return
+	// Return the collected scripts and nil error if successful
+	return scripts, nil
 }
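Taken together with the struct change, the scripting config now accepts a single folder, a list of folders, or both, and GetScripts merges their contents into one slice. A hypothetical sketch of exercising both fields (directory names are placeholders):

```go
package main

import (
	"log"

	"github.com/kubeshark/kubeshark/config/configStructs"
)

func main() {
	cfg := configStructs.ScriptingConfig{
		Source:  "./scripts",                      // legacy single folder
		Sources: []string{"./team-a", "./team-b"}, // new multi-folder field
	}

	// Scripts from all three folders come back as one flat slice; a read
	// failure in any folder aborts the whole call with a wrapped error.
	scripts, err := cfg.GetScripts()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("loaded %d scripts", len(scripts))
}
```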
@@ -35,8 +35,8 @@ const (
 	PprofPortLabel = "pprof-port"
 	PprofViewLabel = "pprof-view"
 	DebugLabel     = "debug"
-	ContainerPort    = 80
-	ContainerPortStr = "80"
+	ContainerPort    = 8080
+	ContainerPortStr = "8080"
 	PcapDest    = "dest"
 	PcapMaxSize = "maxSize"
 	PcapMaxTime = "maxTime"
@@ -46,17 +46,17 @@ )
 
 type ResourceLimitsHub struct {
-	CPU    string `yaml:"cpu" json:"cpu" default:""`
+	CPU    string `yaml:"cpu" json:"cpu" default:"0"`
 	Memory string `yaml:"memory" json:"memory" default:"5Gi"`
 }
 
 type ResourceLimitsWorker struct {
-	CPU    string `yaml:"cpu" json:"cpu" default:""`
+	CPU    string `yaml:"cpu" json:"cpu" default:"0"`
 	Memory string `yaml:"memory" json:"memory" default:"3Gi"`
 }
 
 type ResourceRequests struct {
-	CPU    string `yaml:"cpu" json:"cpu" default:""`
+	CPU    string `yaml:"cpu" json:"cpu" default:"50m"`
 	Memory string `yaml:"memory" json:"memory" default:"50Mi"`
 }
@@ -89,6 +89,11 @@ type ProxyConfig struct {
 	Host string `yaml:"host" json:"host" default:"127.0.0.1"`
 }
 
+type OverrideImageConfig struct {
+	Worker string `yaml:"worker" json:"worker"`
+	Hub    string `yaml:"hub" json:"hub"`
+	Front  string `yaml:"front" json:"front"`
+}
 type OverrideTagConfig struct {
 	Worker string `yaml:"worker" json:"worker"`
 	Hub    string `yaml:"hub" json:"hub"`
@@ -96,12 +101,13 @@ type OverrideTagConfig struct {
 }
 
 type DockerConfig struct {
-	Registry         string            `yaml:"registry" json:"registry" default:"docker.io/kubeshark"`
-	Tag              string            `yaml:"tag" json:"tag" default:""`
-	TagLocked        bool              `yaml:"tagLocked" json:"tagLocked" default:"true"`
-	ImagePullPolicy  string            `yaml:"imagePullPolicy" json:"imagePullPolicy" default:"Always"`
-	ImagePullSecrets []string          `yaml:"imagePullSecrets" json:"imagePullSecrets"`
-	OverrideTag      OverrideTagConfig `yaml:"overrideTag" json:"overrideTag"`
+	Registry         string              `yaml:"registry" json:"registry" default:"docker.io/kubeshark"`
+	Tag              string              `yaml:"tag" json:"tag" default:""`
+	TagLocked        bool                `yaml:"tagLocked" json:"tagLocked" default:"true"`
+	ImagePullPolicy  string              `yaml:"imagePullPolicy" json:"imagePullPolicy" default:"Always"`
+	ImagePullSecrets []string            `yaml:"imagePullSecrets" json:"imagePullSecrets"`
+	OverrideImage    OverrideImageConfig `yaml:"overrideImage" json:"overrideImage"`
+	OverrideTag      OverrideTagConfig   `yaml:"overrideTag" json:"overrideTag"`
 }
 
 type ResourcesConfig struct {
@@ -163,16 +169,9 @@ type SentryConfig struct {
 type CapabilitiesConfig struct {
 	NetworkCapture     []string `yaml:"networkCapture" json:"networkCapture" default:"[]"`
 	ServiceMeshCapture []string `yaml:"serviceMeshCapture" json:"serviceMeshCapture" default:"[]"`
-	KernelModule       []string `yaml:"kernelModule" json:"kernelModule" default:"[]"`
 	EBPFCapture        []string `yaml:"ebpfCapture" json:"ebpfCapture" default:"[]"`
 }
 
-type KernelModuleConfig struct {
-	Enabled         bool   `yaml:"enabled" json:"enabled" default:"false"`
-	Image           string `yaml:"image" json:"image" default:"kubeshark/pf-ring-module:all"`
-	UnloadOnDestroy bool   `yaml:"unloadOnDestroy" json:"unloadOnDestroy" default:"false"`
-}
-
 type MetricsConfig struct {
 	Port uint16 `yaml:"port" json:"port" default:"49100"`
 }
@@ -232,11 +231,10 @@ type TapConfig struct {
 	Ingress                      IngressConfig       `yaml:"ingress" json:"ingress"`
 	IPv6                         bool                `yaml:"ipv6" json:"ipv6" default:"true"`
 	Debug                        bool                `yaml:"debug" json:"debug" default:"false"`
-	KernelModule                 KernelModuleConfig  `yaml:"kernelModule" json:"kernelModule"`
 	Telemetry                    TelemetryConfig     `yaml:"telemetry" json:"telemetry"`
 	ResourceGuard                ResourceGuardConfig `yaml:"resourceGuard" json:"resourceGuard"`
 	Sentry                       SentryConfig        `yaml:"sentry" json:"sentry"`
-	DefaultFilter                string              `yaml:"defaultFilter" json:"defaultFilter" default:"!dns and !tcp and !udp and !icmp"`
+	DefaultFilter                string              `yaml:"defaultFilter" json:"defaultFilter" default:"!dns and !error"`
 	ScriptingDisabled            bool                `yaml:"scriptingDisabled" json:"scriptingDisabled" default:"false"`
 	TargetedPodsUpdateDisabled   bool                `yaml:"targetedPodsUpdateDisabled" json:"targetedPodsUpdateDisabled" default:"false"`
 	PresetFiltersChangingEnabled bool                `yaml:"presetFiltersChangingEnabled" json:"presetFiltersChangingEnabled" default:"true"`
@@ -1,6 +1,6 @@
 apiVersion: v2
 name: kubeshark
-version: "52.3.86"
+version: "52.3.92"
 description: The API Traffic Analyzer for Kubernetes
 home: https://kubeshark.co
 keywords:
@@ -1,152 +0,0 @@ (entire file removed)
# PF_RING

<!-- TOC -->

- [PF\_RING](#pf_ring)
  - [Overview](#overview)
  - [Loading PF\_RING module on Kubernetes nodes](#loading-pf_ring-module-on-kubernetes-nodes)
    - [Pre-built kernel module exists and external egress allowed](#pre-built-kernel-module-exists-and-external-egress-allowed)
    - [Pre-built kernel module doesn't exist or external egress isn't allowed](#pre-built-kernel-module-doesnt-exist-or-external-egress-isnt-allowed)
  - [Appendix A: PF\_RING kernel module compilation](#appendix-a-pf_ring-kernel-module-compilation)
    - [Automated compilation](#automated-compilation)
    - [Manual compilation](#manual-compilation)

<!-- /TOC -->

## Overview

PF_RING™ is an advanced Linux kernel module and user-space framework designed for high-speed packet processing. It offers a uniform API for packet processing applications, enabling efficient handling of large volumes of network data.

For comprehensive information on PF_RING™, please visit the [User's Guide](https://www.ntop.org/guides/pf_ring) and the detailed [API Documentation](http://www.ntop.org/guides/pf_ring_api/files.html).

## Loading PF_RING module on Kubernetes nodes

PF_RING kernel module loading is performed by the `worker` component pod.
The target container `tap.kernelModule.image` must contain the `pf_ring.ko` file under the path `/opt/lib/modules/<kernel version>/pf_ring.ko`.
Kubeshark provides ready-to-use containers with kernel modules for the most popular kernel versions running in different managed clouds.

Prior to deploying `kubeshark` with PF_RING enabled, it is essential to verify that a PF_RING kernel module is already built for your kernel version.
Kubeshark provides an additional CLI tool for this purpose: [pf-ring-compiler](https://github.com/kubeshark/pf-ring-compiler).

Compatibility verification can be done by running:

```bash
pfring-compiler compatibility
```

This command checks for the availability of kernel modules for the kernel versions running across all nodes in the Kubernetes cluster.

Example output for a compatible cluster:

```bash
Node                                           Kernel Version                  Supported
ip-192-168-77-230.us-west-2.compute.internal   5.10.199-190.747.amzn2.x86_64   true
ip-192-168-34-216.us-west-2.compute.internal   5.10.199-190.747.amzn2.x86_64   true

Cluster is compatible
```

Another way to verify the availability of kernel modules is to inspect the published module versions directly:

```bash
curl https://api.kubeshark.co/kernel-modules/meta/versions.json
```

Based on Kubernetes cluster compatibility and external connection capabilities, the user has two options:

1. Use the Kubeshark-provided container `kubeshark/pf-ring-module`
2. Build a custom container with the required kernel module version.

### Pre-built kernel module exists and external egress allowed

In this case, no additional configuration is required.
Kubeshark will load the PF_RING kernel module from the default `kubeshark/pf-ring-module:all` container.

### Pre-built kernel module doesn't exist or external egress isn't allowed

In this case, building a custom Docker image is required.

1. Compile the PF_RING kernel module for the target kernel version

   Skip this step if you already have `pf_ring.ko` for the target kernel version.
   Otherwise, follow [Appendix A](#appendix-a-pf_ring-kernel-module-compilation) for details.

2. Build the container

   The same build process Kubeshark uses can be reused (see [pf-ring-compiler](https://github.com/kubeshark/pf-ring-compiler/tree/main/modules) for details).

3. Configure Helm values

   ```yaml
   tap:
     kernelModule:
       image: <container from stage 2>
   ```

## Appendix A: PF_RING kernel module compilation

PF_RING kernel module compilation can be completed automatically or manually.

### Automated compilation

In case your Kubernetes workers run a supported Linux distribution, the `kubeshark` CLI can be used to build the PF_RING module:

```bash
pfring-compiler compile --target <distro>
```

This command requires:

- kubectl to be installed and configured with a proper context
- an egress connection to the Internet

This command:

1. Runs a Kubernetes job with the build container
2. Waits for the job to complete
3. Downloads the `pf-ring-<kernel version>.ko` file into the current folder
4. Cleans up the created job

Currently supported distros:

- Ubuntu
- RHEL 9
- Amazon Linux 2

### Manual compilation

The process description is based on the Ubuntu 22.04 distribution.

1. Get terminal access to the node with the target kernel version

   This can be done either via SSH directly to the node or with a debug container running on the target node:

   ```bash
   kubectl debug node/<target node> -it --attach=true --image=ubuntu:22.04
   ```

2. Install build tools and kernel headers

   ```bash
   apt update
   apt install -y gcc build-essential make git wget tar gzip
   apt install -y linux-headers-$(uname -r)
   ```

3. Download the PF_RING source code

   ```bash
   wget https://github.com/ntop/PF_RING/archive/refs/tags/8.4.0.tar.gz
   tar -xf 8.4.0.tar.gz
   cd PF_RING-8.4.0/kernel
   ```

4. Compile the kernel module

   ```bash
   make KERNEL_SRC=/usr/src/linux-headers-$(uname -r)
   ```

5. Copy `pf_ring.ko` to the local file system

   Use `scp` or `kubectl cp`, depending on the type of access (SSH or debug pod).
@@ -104,6 +104,20 @@ helm install kubeshark kubeshark/kubeshark \
 
 Please refer to [metrics](./metrics.md) documentation for details.
 
+## Override Tag, Tags, Images
+
+In addition to using a private registry, you can further override the global image tag, specific image tags, and specific image names.
+
+Example for overriding image names:
+
+```yaml
+docker:
+  overrideImage:
+    worker: docker.io/kubeshark/worker:v52.3.87
+    front: docker.io/kubeshark/front:v52.3.87
+    hub: docker.io/kubeshark/hub:v52.3.87
+```
+
 ## Configuration
 
 | Parameter | Description | Default |
@@ -114,7 +128,8 @@ Please refer to [metrics](./metrics.md) documentation for details.
 | `tap.docker.tagLocked` | If `false` - use latest minor tag | `true` |
 | `tap.docker.imagePullPolicy` | Kubernetes image pull policy | `Always` |
 | `tap.docker.imagePullSecrets` | Kubernetes secrets to pull the images | `[]` |
-| `tap.docker.overrideTag` | DANGER: Used to override specific images, when testing custom features from the Kubeshark team | `""` |
+| `tap.docker.overrideImage` | Can be used to directly override image names | `""` |
+| `tap.docker.overrideTag` | Can be used to override image tags | `""` |
 | `tap.proxy.hub.srvPort` | Hub server port. Change if already occupied. | `8898` |
 | `tap.proxy.worker.srvPort` | Worker server port. Change if already occupied. | `30001` |
 | `tap.proxy.front.port` | Front service port. Change if already occupied. | `8899` |
@@ -168,17 +183,14 @@ Please refer to [metrics](./metrics.md) documentation for details.
 | `tap.ingress.annotations` | `Ingress` annotations | `{}` |
 | `tap.ipv6` | Enable IPv6 support for the front-end | `true` |
 | `tap.debug` | Enable debug mode | `false` |
-| `tap.kernelModule.enabled` | Use PF_RING kernel module ([details](PF_RING.md)) | `false` |
-| `tap.kernelModule.image` | Container image containing PF_RING kernel module with supported kernel version ([details](PF_RING.md)) | `kubeshark/pf-ring-module:all` |
-| `tap.kernelModule.unloadOnDestroy` | Create additional container which watches for pod termination and unloads PF_RING kernel module. | `false` |
 | `tap.telemetry.enabled` | Enable anonymous usage statistics collection | `true` |
 | `tap.resourceGuard.enabled` | Enable resource guard worker process, which watches RAM/disk usage and enables/disables traffic capture based on available resources | `false` |
 | `tap.sentry.enabled` | Enable sending of error logs to Sentry | `false` |
 | `tap.sentry.environment` | Sentry environment to label error logs with | `production` |
-| `tap.defaultFilter` | Sets the default dashboard KFL filter (e.g. `http`). By default, this value is set to filter out noisy protocols such as DNS, UDP, ICMP and TCP. The user can easily change this in the Dashboard. You can also change this value to change this behavior. | `"!dns and !tcp and !udp and !icmp"` |
+| `tap.defaultFilter` | Sets the default dashboard KFL filter (e.g. `http`). By default, this value is set to filter out noisy protocols such as DNS, UDP, ICMP and TCP. The user can easily change this, **temporarily**, in the Dashboard. For a permanent change, you should change this value in the `values.yaml` or `config.yaml` file. | `"!dns and !error"` |
 | `tap.globalFilter` | Prepends to any KFL filter and can be used to limit what is visible in the dashboard. For example, `redact("request.headers.Authorization")` will redact the appropriate field. Another example: `!dns` will not show any DNS traffic. | `""` |
 | `tap.metrics.port` | Pod port used to expose Prometheus metrics | `49100` |
-| `tap.enabledDissectors` | This is an array of strings representing the list of supported protocols. Remove or comment out redundant protocols (e.g., dns). | The default list excludes: `dns` and `tcp` |
+| `tap.enabledDissectors` | This is an array of strings representing the list of supported protocols. Remove or comment out redundant protocols (e.g., dns). | The default list excludes: `udp` and `tcp` |
 | `logs.file` | Logs dump path | `""` |
 | `pcapdump.enabled` | Enable recording of all traffic captured according to other parameters. Whatever Kubeshark captures, considering pod targeting rules, will be stored in pcap files ready to be viewed by tools | `true` |
 | `pcapdump.maxTime` | The time window into the past that will be stored. Older traffic will be discarded. | `2h` |
@@ -31,8 +31,8 @@ rules:
       - namespaces
     verbs:
       - get
-    resourceNames:
-      - kube-system
+      - list
+      - watch
   - apiGroups:
       - networking.k8s.io
     resources:
@@ -51,7 +51,9 @@ spec:
           value: 'https://api.kubeshark.co'
         - name: PROFILING_ENABLED
           value: '{{ .Values.tap.pprof.enabled }}'
-        {{- if .Values.tap.docker.overrideTag.hub }}
+        {{- if .Values.tap.docker.overrideImage.hub }}
+        image: '{{ .Values.tap.docker.overrideImage.hub }}'
+        {{- else if .Values.tap.docker.overrideTag.hub }}
         image: '{{ .Values.tap.docker.registry }}/hub:{{ .Values.tap.docker.overrideTag.hub }}'
         {{ else }}
         image: '{{ .Values.tap.docker.registry }}/hub:{{ not (eq .Values.tap.docker.tag "") | ternary .Values.tap.docker.tag (include "kubeshark.defaultVersion" .) }}'
@@ -79,11 +81,19 @@ spec:
             port: 8080
         resources:
           limits:
+            {{ if ne (toString .Values.tap.resources.hub.limits.cpu) "0" }}
            cpu: {{ .Values.tap.resources.hub.limits.cpu }}
+            {{ end }}
+            {{ if ne (toString .Values.tap.resources.hub.limits.memory) "0" }}
            memory: {{ .Values.tap.resources.hub.limits.memory }}
+            {{ end }}
          requests:
+            {{ if ne (toString .Values.tap.resources.hub.requests.cpu) "0" }}
            cpu: {{ .Values.tap.resources.hub.requests.cpu }}
+            {{ end }}
+            {{ if ne (toString .Values.tap.resources.hub.requests.memory) "0" }}
            memory: {{ .Values.tap.resources.hub.requests.memory }}
+            {{ end }}
        volumeMounts:
         - name: saml-x509-volume
           mountPath: "/etc/saml/x509"
@@ -66,7 +66,9 @@ spec:
           value: '{{ (include "sentry.enabled" .) }}'
         - name: REACT_APP_SENTRY_ENVIRONMENT
           value: '{{ .Values.tap.sentry.environment }}'
-        {{- if .Values.tap.docker.overrideTag.front }}
+        {{- if .Values.tap.docker.overrideImage.front }}
+        image: '{{ .Values.tap.docker.overrideImage.front }}'
+        {{- else if .Values.tap.docker.overrideTag.front }}
         image: '{{ .Values.tap.docker.registry }}/front:{{ .Values.tap.docker.overrideTag.front }}'
         {{ else }}
         image: '{{ .Values.tap.docker.registry }}/front:{{ not (eq .Values.tap.docker.tag "") | ternary .Values.tap.docker.tag (include "kubeshark.defaultVersion" .) }}'
@@ -25,29 +25,6 @@ spec:
       name: kubeshark-worker-daemon-set
       namespace: kubeshark
     spec:
-      {{- if .Values.tap.kernelModule.enabled }}
-      initContainers:
-        - name: load-pf-ring
-          image: {{ .Values.tap.kernelModule.image }}
-          imagePullPolicy: {{ .Values.tap.docker.imagePullPolicy }}
-          {{- if .Values.tap.docker.imagePullSecrets }}
-          imagePullSecrets:
-            {{- range .Values.tap.docker.imagePullSecrets }}
-            - name: {{ . }}
-            {{- end }}
-          {{- end }}
-          securityContext:
-            capabilities:
-              add:
-                {{- range .Values.tap.capabilities.kernelModule }}
-                {{ print "- " . }}
-                {{- end }}
-              drop:
-                - ALL
-          volumeMounts:
-            - name: lib-modules
-              mountPath: /lib/modules
-      {{- end }}
       containers:
         - command:
             - ./worker
@@ -67,9 +44,6 @@ spec:
             {{- end }}
             - -procfs
            - /hostproc
-            {{- if .Values.tap.kernelModule.enabled }}
-            - -kernel-module
-            {{- end }}
             {{- if ne .Values.tap.packetCapture "ebpf" }}
             - -disable-ebpf
             {{- end }}
@@ -83,7 +57,9 @@ spec:
             {{- if .Values.tap.debug }}
             - -debug
             {{- end }}
-          {{- if .Values.tap.docker.overrideTag.worker }}
+          {{- if .Values.tap.docker.overrideImage.worker }}
+          image: '{{ .Values.tap.docker.overrideImage.worker }}'
+          {{- else if .Values.tap.docker.overrideTag.worker }}
           image: '{{ .Values.tap.docker.registry }}/worker:{{ .Values.tap.docker.overrideTag.worker }}{{ include "kubeshark.dockerTagDebugVersion" . }}'
           {{ else }}
           image: '{{ .Values.tap.docker.registry }}/worker:{{ not (eq .Values.tap.docker.tag "") | ternary .Values.tap.docker.tag (include "kubeshark.defaultVersion" .) }}{{ include "kubeshark.dockerTagDebugVersion" . }}'
@@ -123,11 +99,19 @@ spec:
               value: '{{ .Values.tap.sentry.environment }}'
           resources:
             limits:
+              {{ if ne (toString .Values.tap.resources.sniffer.limits.cpu) "0" }}
              cpu: {{ .Values.tap.resources.sniffer.limits.cpu }}
+              {{ end }}
+              {{ if ne (toString .Values.tap.resources.sniffer.limits.memory) "0" }}
              memory: {{ .Values.tap.resources.sniffer.limits.memory }}
+              {{ end }}
            requests:
+              {{ if ne (toString .Values.tap.resources.sniffer.requests.cpu) "0" }}
              cpu: {{ .Values.tap.resources.sniffer.requests.cpu }}
+              {{ end }}
+              {{ if ne (toString .Values.tap.resources.sniffer.requests.memory) "0" }}
              memory: {{ .Values.tap.resources.sniffer.requests.memory }}
+              {{ end }}
          securityContext:
            capabilities:
              add:
@@ -164,20 +148,6 @@ spec:
               readOnly: true
             - mountPath: /app/data
               name: data
-        {{- if and (eq .Values.tap.kernelModule.enabled true) (eq .Values.tap.kernelModule.unloadOnDestroy true) }}
-        - name: unload-pf-ring
-          image: {{ .Values.tap.kernelModule.image }}
-          command: ["/bin/sh"]
-          args: ["-c", "trap 'rmmod pf_ring && sleep 3' SIGTERM; while true; do sleep 1; done"]
-          securityContext:
-            capabilities:
-              add:
-                {{- range .Values.tap.capabilities.kernelModule }}
-                {{ print "- " . }}
-                {{- end }}
-              drop:
-                - ALL
-        {{- end }}
         {{- if .Values.tap.tls }}
         - command:
             - ./tracer
@@ -226,11 +196,19 @@ spec:
               value: '{{ .Values.tap.sentry.environment }}'
           resources:
             limits:
+              {{ if ne (toString .Values.tap.resources.tracer.limits.cpu) "0" }}
              cpu: {{ .Values.tap.resources.tracer.limits.cpu }}
+              {{ end }}
+              {{ if ne (toString .Values.tap.resources.tracer.limits.memory) "0" }}
              memory: {{ .Values.tap.resources.tracer.limits.memory }}
+              {{ end }}
            requests:
+              {{ if ne (toString .Values.tap.resources.tracer.requests.cpu) "0" }}
              cpu: {{ .Values.tap.resources.tracer.requests.cpu }}
+              {{ end }}
+              {{ if ne (toString .Values.tap.resources.tracer.requests.memory) "0" }}
              memory: {{ .Values.tap.resources.tracer.requests.memory }}
+              {{ end }}
          securityContext:
            capabilities:
              add:
@@ -6,6 +6,10 @@ tap:
     tagLocked: true
     imagePullPolicy: Always
     imagePullSecrets: []
+    overrideImage:
+      worker: ""
+      hub: ""
+      front: ""
     overrideTag:
       worker: ""
       hub: ""
@@ -36,24 +40,24 @@ tap:
   resources:
     hub:
       limits:
-        cpu: ""
+        cpu: "0"
         memory: 5Gi
       requests:
-        cpu: ""
+        cpu: 50m
         memory: 50Mi
     sniffer:
       limits:
-        cpu: ""
+        cpu: "0"
         memory: 5Gi
       requests:
-        cpu: ""
+        cpu: 50m
         memory: 50Mi
     tracer:
      limits:
-        cpu: ""
+        cpu: "0"
         memory: 5Gi
       requests:
-        cpu: ""
+        cpu: 50m
         memory: 50Mi
   serviceMesh: true
   tls: true
@@ -92,10 +96,6 @@ tap:
     annotations: {}
   ipv6: true
   debug: false
-  kernelModule:
-    enabled: false
-    image: kubeshark/pf-ring-module:all
-    unloadOnDestroy: false
   telemetry:
     enabled: true
   resourceGuard:
@@ -103,7 +103,7 @@ tap:
   sentry:
     enabled: false
     environment: production
-  defaultFilter: "!dns and !tcp and !udp and !icmp"
+  defaultFilter: "!dns and !error"
   scriptingDisabled: false
   targetedPodsUpdateDisabled: false
   presetFiltersChangingEnabled: true
@@ -117,8 +117,6 @@ tap:
       - SYS_ADMIN
       - SYS_PTRACE
       - DAC_OVERRIDE
-    kernelModule:
-      - SYS_MODULE
     ebpfCapture:
       - SYS_ADMIN
       - SYS_PTRACE
@@ -135,7 +133,7 @@ tap:
     - sctp
     - syscall
     - ws
-    - tls
+    - ldap
   metrics:
     port: 49100
   pprof:
@@ -175,6 +173,7 @@ dissectorsUpdatingEnabled: true
 scripting:
   env: {}
   source: ""
+  sources: []
   watchScripts: true
   active: []
   console: true
@@ -247,6 +247,10 @@ func (provider *Provider) GetNamespaces() (namespaces []string) {
 	return
 }
 
+func (provider *Provider) GetClientSet() *kubernetes.Clientset {
+	return provider.clientSet
+}
+
 func getClientSet(config *rest.Config) (*kubernetes.Clientset, error) {
 	clientSet, err := kubernetes.NewForConfig(config)
 	if err != nil {
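The new GetClientSet accessor exposes the raw client-go clientset, which is what lets cmd/scripts.go watch ConfigMaps directly. A hypothetical sketch of the kind of call it enables (the namespace and ConfigMap name are placeholders, and provider construction is elided):

```go
package main

import (
	"context"

	"github.com/kubeshark/kubeshark/kubernetes"
	"github.com/rs/zerolog/log"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// dumpSelfConfigMap shows the direct API access GetClientSet enables.
func dumpSelfConfigMap(provider *kubernetes.Provider) {
	clientset := provider.GetClientSet()
	cm, err := clientset.CoreV1().ConfigMaps("kubeshark").Get(context.TODO(), "kubeshark-config-map", metav1.GetOptions{})
	if err != nil {
		log.Error().Err(err).Send()
		return
	}
	log.Info().Int("keys", len(cm.Data)).Msg("Fetched self ConfigMap")
}
```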
@@ -4,10 +4,10 @@ apiVersion: networking.k8s.io/v1
 kind: NetworkPolicy
 metadata:
   labels:
-    helm.sh/chart: kubeshark-52.3.86
+    helm.sh/chart: kubeshark-52.3.92
     app.kubernetes.io/name: kubeshark
     app.kubernetes.io/instance: kubeshark
-    app.kubernetes.io/version: "52.3.86"
+    app.kubernetes.io/version: "52.3.92"
     app.kubernetes.io/managed-by: Helm
   annotations:
   name: kubeshark-hub-network-policy
@@ -31,10 +31,10 @@ apiVersion: networking.k8s.io/v1
 kind: NetworkPolicy
 metadata:
   labels:
-    helm.sh/chart: kubeshark-52.3.86
+    helm.sh/chart: kubeshark-52.3.92
     app.kubernetes.io/name: kubeshark
     app.kubernetes.io/instance: kubeshark
-    app.kubernetes.io/version: "52.3.86"
+    app.kubernetes.io/version: "52.3.92"
     app.kubernetes.io/managed-by: Helm
   annotations:
   name: kubeshark-front-network-policy
@@ -58,10 +58,10 @@ apiVersion: networking.k8s.io/v1
 kind: NetworkPolicy
 metadata:
   labels:
-    helm.sh/chart: kubeshark-52.3.86
+    helm.sh/chart: kubeshark-52.3.92
     app.kubernetes.io/name: kubeshark
     app.kubernetes.io/instance: kubeshark
-    app.kubernetes.io/version: "52.3.86"
+    app.kubernetes.io/version: "52.3.92"
     app.kubernetes.io/managed-by: Helm
   annotations:
   name: kubeshark-worker-network-policy
@@ -87,10 +87,10 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
   labels:
-    helm.sh/chart: kubeshark-52.3.86
+    helm.sh/chart: kubeshark-52.3.92
     app.kubernetes.io/name: kubeshark
     app.kubernetes.io/instance: kubeshark
-    app.kubernetes.io/version: "52.3.86"
+    app.kubernetes.io/version: "52.3.92"
     app.kubernetes.io/managed-by: Helm
   annotations:
   name: kubeshark-service-account
@@ -104,10 +104,10 @@ metadata:
   namespace: default
   labels:
     app.kubeshark.co/app: hub
-    helm.sh/chart: kubeshark-52.3.86
+    helm.sh/chart: kubeshark-52.3.92
     app.kubernetes.io/name: kubeshark
     app.kubernetes.io/instance: kubeshark
-    app.kubernetes.io/version: "52.3.86"
+    app.kubernetes.io/version: "52.3.92"
     app.kubernetes.io/managed-by: Helm
 stringData:
   LICENSE: ''
@@ -121,10 +121,10 @@ metadata:
   namespace: default
   labels:
     app.kubeshark.co/app: hub
-    helm.sh/chart: kubeshark-52.3.86
+    helm.sh/chart: kubeshark-52.3.92
     app.kubernetes.io/name: kubeshark
     app.kubernetes.io/instance: kubeshark
-    app.kubernetes.io/version: "52.3.86"
+    app.kubernetes.io/version: "52.3.92"
     app.kubernetes.io/managed-by: Helm
 stringData:
   AUTH_SAML_X509_CRT: |
@@ -137,10 +137,10 @@ metadata:
   namespace: default
   labels:
     app.kubeshark.co/app: hub
-    helm.sh/chart: kubeshark-52.3.86
+    helm.sh/chart: kubeshark-52.3.92
     app.kubernetes.io/name: kubeshark
     app.kubernetes.io/instance: kubeshark
-    app.kubernetes.io/version: "52.3.86"
+    app.kubernetes.io/version: "52.3.92"
     app.kubernetes.io/managed-by: Helm
 stringData:
   AUTH_SAML_X509_KEY: |
@@ -152,10 +152,10 @@ metadata:
   name: kubeshark-nginx-config-map
   namespace: default
   labels:
-    helm.sh/chart: kubeshark-52.3.86
+    helm.sh/chart: kubeshark-52.3.92
     app.kubernetes.io/name: kubeshark
     app.kubernetes.io/instance: kubeshark
-    app.kubernetes.io/version: "52.3.86"
+    app.kubernetes.io/version: "52.3.92"
     app.kubernetes.io/managed-by: Helm
 data:
   default.conf: |
@@ -216,10 +216,10 @@ metadata:
   namespace: default
   labels:
     app.kubeshark.co/app: hub
-    helm.sh/chart: kubeshark-52.3.86
+    helm.sh/chart: kubeshark-52.3.92
     app.kubernetes.io/name: kubeshark
     app.kubernetes.io/instance: kubeshark
-    app.kubernetes.io/version: "52.3.86"
+    app.kubernetes.io/version: "52.3.92"
     app.kubernetes.io/managed-by: Helm
 data:
   POD_REGEX: '.*'
@@ -244,7 +244,7 @@ data:
   RECORDING_DISABLED: ''
   STOP_TRAFFIC_CAPTURING_DISABLED: 'false'
   GLOBAL_FILTER: ""
-  DEFAULT_FILTER: "!dns and !tcp and !udp and !icmp"
+  DEFAULT_FILTER: "!dns and !error"
   TRAFFIC_SAMPLE_RATE: '100'
   JSON_TTL: '5m'
   PCAP_TTL: '10s'
@@ -252,7 +252,7 @@ data:
   TIMEZONE: ' '
   CLOUD_LICENSE_ENABLED: 'true'
   DUPLICATE_TIMEFRAME: '200ms'
-  ENABLED_DISSECTORS: 'amqp,dns,http,icmp,kafka,redis,sctp,syscall,ws,tls'
+  ENABLED_DISSECTORS: 'amqp,dns,http,icmp,kafka,redis,sctp,syscall,ws,ldap'
   DISSECTORS_UPDATING_ENABLED: 'true'
   DETECT_DUPLICATES: 'false'
   PCAP_DUMP_ENABLE: 'true'
@@ -266,10 +266,10 @@ apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRole
 metadata:
   labels:
-    helm.sh/chart: kubeshark-52.3.86
+    helm.sh/chart: kubeshark-52.3.92
     app.kubernetes.io/name: kubeshark
     app.kubernetes.io/instance: kubeshark
-    app.kubernetes.io/version: "52.3.86"
+    app.kubernetes.io/version: "52.3.92"
     app.kubernetes.io/managed-by: Helm
   annotations:
   name: kubeshark-cluster-role-default
@@ -295,8 +295,8 @@ rules:
       - namespaces
     verbs:
       - get
-    resourceNames:
-      - kube-system
+      - list
+      - watch
   - apiGroups:
       - networking.k8s.io
     resources:
@@ -314,10 +314,10 @@ apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRoleBinding
 metadata:
   labels:
-    helm.sh/chart: kubeshark-52.3.86
+    helm.sh/chart: kubeshark-52.3.92
     app.kubernetes.io/name: kubeshark
     app.kubernetes.io/instance: kubeshark
-    app.kubernetes.io/version: "52.3.86"
+    app.kubernetes.io/version: "52.3.92"
     app.kubernetes.io/managed-by: Helm
   annotations:
   name: kubeshark-cluster-role-binding-default
@@ -336,10 +336,10 @@ apiVersion: rbac.authorization.k8s.io/v1
 kind: Role
 metadata:
   labels:
-    helm.sh/chart: kubeshark-52.3.86
+    helm.sh/chart: kubeshark-52.3.92
     app.kubernetes.io/name: kubeshark
     app.kubernetes.io/instance: kubeshark
-    app.kubernetes.io/version: "52.3.86"
+    app.kubernetes.io/version: "52.3.92"
     app.kubernetes.io/managed-by: Helm
   annotations:
   name: kubeshark-self-config-role
@@ -366,10 +366,10 @@ apiVersion: rbac.authorization.k8s.io/v1
 kind: RoleBinding
 metadata:
   labels:
-    helm.sh/chart: kubeshark-52.3.86
+    helm.sh/chart: kubeshark-52.3.92
     app.kubernetes.io/name: kubeshark
     app.kubernetes.io/instance: kubeshark
-    app.kubernetes.io/version: "52.3.86"
+    app.kubernetes.io/version: "52.3.92"
     app.kubernetes.io/managed-by: Helm
   annotations:
   name: kubeshark-self-config-role-binding
@@ -389,10 +389,10 @@ kind: Service
 metadata:
   labels:
     app.kubeshark.co/app: hub
-    helm.sh/chart: kubeshark-52.3.86
+    helm.sh/chart: kubeshark-52.3.92
     app.kubernetes.io/name: kubeshark
     app.kubernetes.io/instance: kubeshark
-    app.kubernetes.io/version: "52.3.86"
+    app.kubernetes.io/version: "52.3.92"
     app.kubernetes.io/managed-by: Helm
   annotations:
   name: kubeshark-hub
@@ -411,10 +411,10 @@ apiVersion: v1
 kind: Service
 metadata:
   labels:
-    helm.sh/chart: kubeshark-52.3.86
+    helm.sh/chart: kubeshark-52.3.92
     app.kubernetes.io/name: kubeshark
     app.kubernetes.io/instance: kubeshark
-    app.kubernetes.io/version: "52.3.86"
+    app.kubernetes.io/version: "52.3.92"
     app.kubernetes.io/managed-by: Helm
   annotations:
   name: kubeshark-front
@@ -433,10 +433,10 @@ kind: Service
 apiVersion: v1
 metadata:
   labels:
-    helm.sh/chart: kubeshark-52.3.86
+    helm.sh/chart: kubeshark-52.3.92
     app.kubernetes.io/name: kubeshark
     app.kubernetes.io/instance: kubeshark
-    app.kubernetes.io/version: "52.3.86"
+    app.kubernetes.io/version: "52.3.92"
     app.kubernetes.io/managed-by: Helm
   annotations:
     prometheus.io/scrape: 'true'
@@ -446,10 +446,10 @@ metadata:
 spec:
   selector:
     app.kubeshark.co/app: worker
-    helm.sh/chart: kubeshark-52.3.86
+    helm.sh/chart: kubeshark-52.3.92
     app.kubernetes.io/name: kubeshark
     app.kubernetes.io/instance: kubeshark
-    app.kubernetes.io/version: "52.3.86"
+    app.kubernetes.io/version: "52.3.92"
     app.kubernetes.io/managed-by: Helm
   ports:
     - name: metrics
@@ -464,10 +464,10 @@ metadata:
   labels:
     app.kubeshark.co/app: worker
     sidecar.istio.io/inject: "false"
-    helm.sh/chart: kubeshark-52.3.86
+    helm.sh/chart: kubeshark-52.3.92
     app.kubernetes.io/name: kubeshark
     app.kubernetes.io/instance: kubeshark
-    app.kubernetes.io/version: "52.3.86"
+    app.kubernetes.io/version: "52.3.92"
     app.kubernetes.io/managed-by: Helm
   annotations:
   name: kubeshark-worker-daemon-set
@@ -482,10 +482,10 @@ spec:
     metadata:
       labels:
         app.kubeshark.co/app: worker
-        helm.sh/chart: kubeshark-52.3.86
+        helm.sh/chart: kubeshark-52.3.92
         app.kubernetes.io/name: kubeshark
         app.kubernetes.io/instance: kubeshark
-        app.kubernetes.io/version: "52.3.86"
+        app.kubernetes.io/version: "52.3.92"
         app.kubernetes.io/managed-by: Helm
   name: kubeshark-worker-daemon-set
   namespace: kubeshark
@@ -510,7 +510,7 @@ spec:
             - 'auto'
             - -staletimeout
            - '30'
-          image: 'docker.io/kubeshark/worker:v52.3.86'
+          image: 'docker.io/kubeshark/worker:v52.3.92'
           imagePullPolicy: Always
           name: sniffer
           ports:
@@ -540,11 +540,17 @@ spec:
               value: 'production'
           resources:
             limits:
-              cpu:
+
               memory: 5Gi
+
             requests:
-              cpu:
+
+              cpu: 50m
+
               memory: 50Mi
+
           securityContext:
             capabilities:
               add:
@@ -584,7 +590,7 @@ spec:
             - /hostproc
            - -disable-ebpf
            - -disable-tls-log
-          image: 'docker.io/kubeshark/worker:v52.3.86'
+          image: 'docker.io/kubeshark/worker:v52.3.92'
           imagePullPolicy: Always
           name: tracer
           env:
@@ -604,11 +610,17 @@ spec:
               value: 'production'
           resources:
             limits:
-              cpu:
+
               memory: 5Gi
+
             requests:
-              cpu:
+
+              cpu: 50m
+
               memory: 50Mi
+
           securityContext:
             capabilities:
               add:
@@ -680,10 +692,10 @@ kind: Deployment
 metadata:
   labels:
     app.kubeshark.co/app: hub
-    helm.sh/chart: kubeshark-52.3.86
+    helm.sh/chart: kubeshark-52.3.92
     app.kubernetes.io/name: kubeshark
     app.kubernetes.io/instance: kubeshark
-    app.kubernetes.io/version: "52.3.86"
+    app.kubernetes.io/version: "52.3.92"
     app.kubernetes.io/managed-by: Helm
   annotations:
   name: kubeshark-hub
@@ -699,10 +711,10 @@ spec:
     metadata:
       labels:
         app.kubeshark.co/app: hub
-        helm.sh/chart: kubeshark-52.3.86
+        helm.sh/chart: kubeshark-52.3.92
         app.kubernetes.io/name: kubeshark
         app.kubernetes.io/instance: kubeshark
-        app.kubernetes.io/version: "52.3.86"
+        app.kubernetes.io/version: "52.3.92"
         app.kubernetes.io/managed-by: Helm
     spec:
       dnsPolicy: ClusterFirstWithHostNet
@@ -730,7 +742,7 @@ spec:
           value: 'https://api.kubeshark.co'
         - name: PROFILING_ENABLED
           value: 'false'
-        image: 'docker.io/kubeshark/hub:v52.3.86'
+        image: 'docker.io/kubeshark/hub:v52.3.92'
         imagePullPolicy: Always
         readinessProbe:
           periodSeconds: 1
@@ -748,11 +760,17 @@ spec:
             port: 8080
         resources:
           limits:
-            cpu:
+
             memory: 5Gi
+
           requests:
-            cpu:
+
+            cpu: 50m
+
            memory: 50Mi
+
         volumeMounts:
           - name: saml-x509-volume
             mountPath: "/etc/saml/x509"
@@ -778,10 +796,10 @@ kind: Deployment
 metadata:
   labels:
     app.kubeshark.co/app: front
-    helm.sh/chart: kubeshark-52.3.86
+    helm.sh/chart: kubeshark-52.3.92
     app.kubernetes.io/name: kubeshark
     app.kubernetes.io/instance: kubeshark
-    app.kubernetes.io/version: "52.3.86"
+    app.kubernetes.io/version: "52.3.92"
     app.kubernetes.io/managed-by: Helm
   annotations:
   name: kubeshark-front
@@ -797,10 +815,10 @@ spec:
     metadata:
      labels:
        app.kubeshark.co/app: front
-        helm.sh/chart: kubeshark-52.3.86
+        helm.sh/chart: kubeshark-52.3.92
         app.kubernetes.io/name: kubeshark
         app.kubernetes.io/instance: kubeshark
-        app.kubernetes.io/version: "52.3.86"
+        app.kubernetes.io/version: "52.3.92"
         app.kubernetes.io/managed-by: Helm
     spec:
       containers:
@@ -835,7 +853,7 @@ spec:
           value: 'false'
         - name: REACT_APP_SENTRY_ENVIRONMENT
           value: 'production'
-        image: 'docker.io/kubeshark/front:v52.3.86'
+        image: 'docker.io/kubeshark/front:v52.3.92'
         imagePullPolicy: Always
         name: kubeshark-front
         livenessProbe: