Mirror of https://github.com/kubeshark/kubeshark.git (synced 2026-02-19 20:40:17 +00:00)

Compare commits — 42 commits
| SHA1 |
|---|
| 62912d248d |
| be8136687b |
| 3d4606d439 |
| 46ca7e3ad7 |
| e9796bfb24 |
| ce7913ce2e |
| 8f6ef686de |
| f2e60cdee1 |
| 67aa1dac39 |
| 818a9e2bec |
| 7eb35a4e11 |
| 858864e7bc |
| ad10212ba5 |
| 0e3f137a69 |
| ef17eb9fbe |
| aa7c8f36f5 |
| c92f509863 |
| 0d5bbd53aa |
| 0b00b1846b |
| 507099c1ec |
| aca3f4ad44 |
| f9c66df528 |
| 1d572e6bff |
| 46ad335446 |
| 317357e83b |
| d89ef8789f |
| 773ad78ac7 |
| bbcaf74fa7 |
| 639f1deb51 |
| b377bfe35f |
| 5242d9af07 |
| 12f8883052 |
| 7eef5efcd9 |
| af47154a8d |
| 17759d296d |
| 29de008f22 |
| 261a0ca1a9 |
| e819e9b697 |
| a03aa56d07 |
| 83f437f3f8 |
| f5637972f2 |
| 4cabf13788 |
Makefile (23 changed lines)
@@ -182,24 +182,21 @@ release:
@cd ../tracer && git checkout master && git pull && git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags
@cd ../hub && git checkout master && git pull && git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags
@cd ../front && git checkout master && git pull && git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags
@cd ../kubeshark && git checkout master && git pull && sed -i 's/^version:.*/version: "$(VERSION)"/' helm-chart/Chart.yaml && make && make generate-helm-values && make generate-manifests
@cd ../kubeshark && git checkout master && git pull && sed -i "s/^version:.*/version: \"$(shell echo $(VERSION) | sed -E 's/^([0-9]+\.[0-9]+)\..*/\1/')\"/" helm-chart/Chart.yaml && make
@if [ "$(shell uname)" = "Darwin" ]; then \
codesign --sign - --force --preserve-metadata=entitlements,requirements,flags,runtime ./bin/kubeshark__; \
fi
@make generate-helm-values && make generate-manifests
@git add -A . && git commit -m ":bookmark: Bump the Helm chart version to $(VERSION)" && git push
@git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags
@cd helm-chart && cp -r . ../../kubeshark.github.io/charts/chart
@cd helm-chart && rm -r ../../kubeshark.github.io/charts/chart/* && cp -r . ../../kubeshark.github.io/charts/chart
@cd ../../kubeshark.github.io/ && git add -A . && git commit -m ":sparkles: Update the Helm chart" && git push
@cd ../kubeshark

soft-release:
@cd ../worker && git checkout master && git pull && git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags
@cd ../tracer && git checkout master && git pull && git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags
@cd ../hub && git checkout master && git pull && git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags
@cd ../front && git checkout master && git pull && git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags
@cd ../kubeshark && git checkout master && git pull && sed -i 's/^version:.*/version: "$(VERSION)"/' helm-chart/Chart.yaml && make && make generate-helm-values && make generate-manifests
@git add -A . && git commit -m ":bookmark: Bump the Helm chart version to $(VERSION)" && git push
# @git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags
# @cd helm-chart && cp -r . ../../kubeshark.github.io/charts/chart
# @cd ../../kubeshark.github.io/ && git add -A . && git commit -m ":sparkles: Update the Helm chart" && git push
# @cd ../kubeshark
release-dry-run:
@cd ../kubeshark && git checkout master && git pull && sed -i "s/^version:.*/version: \"$(shell echo $(VERSION) | sed -E 's/^([0-9]+\.[0-9]+)\..*/\1/')\"/" helm-chart/Chart.yaml && make && make generate-helm-values && make generate-manifests
@cd helm-chart && rm -r ../../kubeshark.github.io/charts/chart/* && cp -r . ../../kubeshark.github.io/charts/chart
@cd ../kubeshark

branch:
@cd ../worker && git checkout master && git pull && git checkout -b $(name); git push --set-upstream origin $(name)
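For orientation, all of these targets read the release version from the `VERSION` make variable; a minimal sketch of how they are typically invoked (the version string is illustrative):

```shell
# Hypothetical invocation; VERSION is consumed by the release / soft-release / release-dry-run targets above.
make release-dry-run VERSION=52.4.0
make release VERSION=52.4.0
```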
README.md (65 changed lines)
@@ -12,7 +12,7 @@
<a href="https://hub.docker.com/r/kubeshark/worker">
<img alt="Image size" src="https://img.shields.io/docker/image-size/kubeshark/kubeshark/latest?logo=Docker&style=flat-square">
</a>
<a href="https://discord.gg/WkvRGMUcx7">
<a href="https://discord.gg/WkvRGMUcx7">
<img alt="Discord" src="https://img.shields.io/discord/1042559155224973352?logo=Discord&style=flat-square&label=discord">
</a>
<a href="https://join.slack.com/t/kubeshark/shared_invite/zt-1m90td3n7-VHxN_~V5kVp80SfQW3SfpA">
@@ -22,60 +22,75 @@

<p align="center">
<b>
Want to see Kubeshark in action, right now? Visit this
<a href="https://demo.kubeshark.co/">live demo deployment</a> of Kubeshark.
Want to see Kubeshark in action right now? Visit this
<a href="https://demo.kubeshark.co/">live demo deployment</a> of Kubeshark.
</b>
</p>

**Kubeshark** is an API Traffic Analyzer for [**Kubernetes**](https://kubernetes.io/) providing real-time, protocol-level visibility into Kubernetes’ internal network, capturing and monitoring all traffic and payloads going in, out and across containers, pods, nodes and clusters.
**Kubeshark** is a network observability platform for [**Kubernetes**](https://kubernetes.io/), providing real-time, protocol-level visibility into Kubernetes’ network. It enables users to inspect all internal and external cluster connections, API calls, and data in transit. Additionally, Kubeshark detects suspicious network behaviors, triggers automated actions, and provides deep insights into the network.



Think [TCPDump](https://en.wikipedia.org/wiki/Tcpdump) and [Wireshark](https://www.wireshark.org/) re-invented for Kubernetes
Think [TCPDump](https://en.wikipedia.org/wiki/Tcpdump) and [Wireshark](https://www.wireshark.org/) reimagined for Kubernetes.

## Getting Started

Download **Kubeshark**'s binary distribution [latest release](https://github.com/kubeshark/kubeshark/releases/latest) and run following one of these examples:

```shell
kubeshark tap
```

```shell
kubeshark tap -n sock-shop "(catalo*|front-end*)"
```

Running any of the :point_up: above commands will open the [Web UI](https://docs.kubeshark.co/en/ui) in your browser which streams the traffic in your Kubernetes cluster in real-time.
Download **Kubeshark**'s binary distribution [latest release](https://github.com/kubeshark/kubeshark/releases/latest) or use one of the following methods to deploy **Kubeshark**. The [web-based dashboard](https://docs.kubeshark.co/en/ui) should open in your browser, showing a real-time view of your cluster's traffic.

### Homebrew

[Homebrew](https://brew.sh/) :beer: users install Kubeshark CLI with:
[Homebrew](https://brew.sh/) :beer: users can install the Kubeshark CLI with:

```shell
brew install kubeshark
kubeshark tap
```

To clean up:
```shell
kubeshark clean
```

### Helm

Add the helm repository and install the chart:
Add the Helm repository and install the chart:

```shell
helm repo add kubeshark https://helm.kubeshark.co
helm install kubeshark kubeshark/kubeshark
helm install kubeshark kubeshark/kubeshark
```
Follow the on-screen instructions on how to connect to the dashboard.

To clean up:
```shell
helm uninstall kubeshark
```

## Building From Source

Clone this repository and run `make` command to build it. After the build is complete, the executable can be found at `./bin/kubeshark__`.
Clone this repository and run the `make` command to build it. After the build is complete, the executable can be found at `./bin/kubeshark`.

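A typical build-from-source session, following the layout described above, might look like this (the final step is only an illustrative smoke test):

```shell
# Illustrative session; the binary lands in ./bin/kubeshark after a successful build.
git clone https://github.com/kubeshark/kubeshark.git
cd kubeshark
make
./bin/kubeshark --help
```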
## Documentation

To learn more, read the [documentation](https://docs.kubeshark.co).

## Additional Use Cases

### Dump All Cluster-wide Traffic into a Single PCAP File

Record **all** cluster traffic and consolidate it into a single PCAP file (tcpdump-style).

Run Kubeshark to start capturing traffic:
```shell
kubeshark tap --set headless=true
```
> You can press `^C` to stop the command. Kubeshark will continue running in the background.

Take a snapshot of traffic (e.g., from the past 5 minutes):
```shell
kubeshark pcapdump --time 5m
```
> Read more [here](https://docs.kubeshark.co/en/pcapdump).

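The change set below also introduces a `--dest` flag for writing the merged capture into a local directory; assuming that flag, a snapshot could be copied like so (the directory must already exist and be writable):

```shell
# Assumes the --dest flag added in this change set; the path is illustrative.
mkdir -p ./pcaps
kubeshark pcapdump --time 10m --dest ./pcaps
```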
## Contributing

We :heart: pull requests! See [CONTRIBUTING.md](CONTRIBUTING.md) for the contribution guide.

## Code of Conduct

This project is for everyone. We ask that our users and contributors take a few minutes to review our [Code of Conduct](CODE_OF_CONDUCT.md).
@@ -2,10 +2,14 @@ package cmd

import (
"errors"
"fmt"
"os"
"path/filepath"
"time"

"github.com/creasty/defaults"
"github.com/kubeshark/kubeshark/config/configStructs"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
"k8s.io/client-go/kubernetes"
@@ -30,54 +34,61 @@ var pcapDumpCmd = &cobra.Command{
}
}

debugEnabled, _ := cmd.Flags().GetBool("debug")
if debugEnabled {
zerolog.SetGlobalLevel(zerolog.DebugLevel)
log.Debug().Msg("Debug logging enabled")
} else {
zerolog.SetGlobalLevel(zerolog.InfoLevel)
}

// Use the current context in kubeconfig
config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
if err != nil {
log.Error().Err(err).Msg("Error building kubeconfig")
return err
return fmt.Errorf("Error building kubeconfig: %w", err)
}

clientset, err := kubernetes.NewForConfig(config)
if err != nil {
log.Error().Err(err).Msg("Error creating Kubernetes client")
return err
return fmt.Errorf("Error creating Kubernetes client: %w", err)
}

// Handle copy operation if the copy string is provided

if !cmd.Flags().Changed(configStructs.PcapDumpEnabled) {
destDir, _ := cmd.Flags().GetString(configStructs.PcapDest)
log.Info().Msg("Copying PCAP files")
err = copyPcapFiles(clientset, config, destDir)
// Parse the `--time` flag
timeIntervalStr, _ := cmd.Flags().GetString("time")
var cutoffTime *time.Time // Use a pointer to distinguish between provided and not provided
if timeIntervalStr != "" {
duration, err := time.ParseDuration(timeIntervalStr)
if err != nil {
log.Error().Err(err).Msg("Error copying PCAP files")
return err
return fmt.Errorf("Invalid format %w", err)
}
} else {
// Handle start operation if the start string is provided
tempCutoffTime := time.Now().Add(-duration)
cutoffTime = &tempCutoffTime
}

enabled, err := cmd.Flags().GetBool(configStructs.PcapDumpEnabled)
// Test the dest dir if provided
destDir, _ := cmd.Flags().GetString(configStructs.PcapDest)
if destDir != "" {
info, err := os.Stat(destDir)
if os.IsNotExist(err) {
return fmt.Errorf("Directory does not exist: %s", destDir)
}
if err != nil {
log.Error().Err(err).Msg("Error getting pcapdump enable flag")
return err
return fmt.Errorf("Error checking dest directory: %w", err)
}
timeInterval, _ := cmd.Flags().GetString(configStructs.PcapTimeInterval)
maxTime, _ := cmd.Flags().GetString(configStructs.PcapMaxTime)
maxSize, _ := cmd.Flags().GetString(configStructs.PcapMaxSize)
err = startStopPcap(clientset, enabled, timeInterval, maxTime, maxSize)
if !info.IsDir() {
return fmt.Errorf("Dest path is not a directory: %s", destDir)
}
tempFile, err := os.CreateTemp(destDir, "write-test-*")
if err != nil {
log.Error().Err(err).Msg("Error starting/stopping PCAP dump")
return err
}

if enabled {
log.Info().Msg("Pcapdump started successfully")
return nil
} else {
log.Info().Msg("Pcapdump stopped successfully")
return nil
return fmt.Errorf("Directory %s is not writable", destDir)
}
_ = os.Remove(tempFile.Name())
}

log.Info().Msg("Copying PCAP files")
err = copyPcapFiles(clientset, config, destDir, cutoffTime)
if err != nil {
return err
}

return nil
@@ -92,10 +103,8 @@ func init() {
log.Debug().Err(err).Send()
}

pcapDumpCmd.Flags().String(configStructs.PcapTimeInterval, defaultPcapDumpConfig.PcapTimeInterval, "Time interval for PCAP file rotation (used with --start)")
pcapDumpCmd.Flags().String(configStructs.PcapMaxTime, defaultPcapDumpConfig.PcapMaxTime, "Maximum time for retaining old PCAP files (used with --start)")
pcapDumpCmd.Flags().String(configStructs.PcapMaxSize, defaultPcapDumpConfig.PcapMaxSize, "Maximum size of PCAP files before deletion (used with --start)")
pcapDumpCmd.Flags().String(configStructs.PcapTime, "", "Time interval (e.g., 10m, 1h) in the past for which the pcaps are copied")
pcapDumpCmd.Flags().String(configStructs.PcapDest, "", "Local destination path for copied PCAP files (can not be used together with --enabled)")
pcapDumpCmd.Flags().String(configStructs.PcapKubeconfig, "", "Enabled/Disable to pcap dumps (can not be used together with --dest)")

pcapDumpCmd.Flags().String(configStructs.PcapKubeconfig, "", "Path for kubeconfig (if not provided the default location will be checked)")
pcapDumpCmd.Flags().Bool("debug", false, "Enable debug logging")
}
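Because the `--time` value is parsed with Go's `time.ParseDuration`, any Go duration string works; together with the new `--debug` flag this gives invocations such as (values are illustrative):

```shell
kubeshark pcapdump --time 90m            # copy PCAPs from the last hour and a half
kubeshark pcapdump --time 1h30m --debug  # same window, with debug logging enabled
```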
@@ -1,16 +1,18 @@
package cmd

import (
"bufio"
"bytes"
"context"
"errors"
"fmt"
"io"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
"time"

"github.com/kubeshark/gopacket"
"github.com/kubeshark/gopacket/layers"
"github.com/kubeshark/gopacket/pcapgo"
"github.com/rs/zerolog/log"
corev1 "k8s.io/api/core/v1"
@@ -21,20 +23,25 @@ import (
"k8s.io/client-go/tools/remotecommand"
)

const label = "app.kubeshark.co/app=worker"
const SELF_RESOURCES_PREFIX = "kubeshark-"
const SUFFIX_CONFIG_MAP = "config-map"
const (
label = "app.kubeshark.co/app=worker"
srcDir = "pcapdump"
maxSnaplen uint32 = 262144
maxTimePerFile = time.Minute * 5
)

// NamespaceFiles represents the namespace and the files found in that namespace.
type NamespaceFiles struct {
Namespace string // The namespace in which the files were found
SrcDir string // The source directory from which the files were listed
Files []string // List of files found in the namespace
// PodFileInfo represents information about a pod, its namespace, and associated files
type PodFileInfo struct {
Pod corev1.Pod
SrcDir string
Files []string
CopiedFiles []string
}

// listWorkerPods fetches all worker pods from multiple namespaces
func listWorkerPods(ctx context.Context, clientset *clientk8s.Clientset, namespaces []string) ([]corev1.Pod, error) {
var allPods []corev1.Pod
func listWorkerPods(ctx context.Context, clientset *clientk8s.Clientset, namespaces []string) ([]*PodFileInfo, error) {
var podFileInfos []*PodFileInfo
var errs []error
labelSelector := label

for _, namespace := range namespaces {
@@ -43,112 +50,30 @@ func listWorkerPods(ctx context.Context, clientset *clientk8s.Clientset, namespa
LabelSelector: labelSelector,
})
if err != nil {
return nil, fmt.Errorf("failed to list worker pods in namespace %s: %w", namespace, err)
}

// Accumulate the pods
allPods = append(allPods, pods.Items...)
}

return allPods, nil
}

// listFilesInPodDir lists all files in the specified directory inside the pod across multiple namespaces
func listFilesInPodDir(ctx context.Context, clientset *clientk8s.Clientset, config *rest.Config, podName string, namespaces []string, configMapName, configMapKey string) ([]NamespaceFiles, error) {
var namespaceFilesList []NamespaceFiles

for _, namespace := range namespaces {
// Attempt to get the ConfigMap in the current namespace
configMap, err := clientset.CoreV1().ConfigMaps(namespace).Get(ctx, configMapName, metav1.GetOptions{})
if err != nil {
errs = append(errs, fmt.Errorf("failed to list worker pods in namespace %s: %w", namespace, err))
continue
}

// Check if the source directory exists in the ConfigMap
srcDir, ok := configMap.Data[configMapKey]
if !ok || srcDir == "" {
log.Error().Msgf("source directory not found in ConfigMap %s in namespace %s", configMapName, namespace)
continue
}

// Attempt to get the pod in the current namespace
pod, err := clientset.CoreV1().Pods(namespace).Get(ctx, podName, metav1.GetOptions{})
if err != nil {
log.Error().Err(err).Msgf("failed to get pod %s in namespace %s", podName, namespace)
continue
}

nodeName := pod.Spec.NodeName
srcFilePath := filepath.Join("data", nodeName, srcDir)

cmd := []string{"ls", srcFilePath}
req := clientset.CoreV1().RESTClient().Post().
Resource("pods").
Name(podName).
Namespace(namespace).
SubResource("exec").
Param("container", "sniffer").
Param("stdout", "true").
Param("stderr", "true").
Param("command", cmd[0]).
Param("command", cmd[1])

exec, err := remotecommand.NewSPDYExecutor(config, "POST", req.URL())
if err != nil {
log.Error().Err(err).Msgf("failed to initialize executor for pod %s in namespace %s", podName, namespace)
continue
}

var stdoutBuf bytes.Buffer
var stderrBuf bytes.Buffer

// Execute the command to list files
err = exec.StreamWithContext(ctx, remotecommand.StreamOptions{
Stdout: &stdoutBuf,
Stderr: &stderrBuf,
})
if err != nil {
log.Error().Err(err).Msgf("error listing files in pod %s in namespace %s: %s", podName, namespace, stderrBuf.String())
continue
}

// Split the output (file names) into a list
files := strings.Split(strings.TrimSpace(stdoutBuf.String()), "\n")
if len(files) > 0 {
// Append the NamespaceFiles struct to the list
namespaceFilesList = append(namespaceFilesList, NamespaceFiles{
Namespace: namespace,
SrcDir: srcDir,
Files: files,
for _, pod := range pods.Items {
podFileInfos = append(podFileInfos, &PodFileInfo{
Pod: pod,
})
}
}

if len(namespaceFilesList) == 0 {
return nil, fmt.Errorf("no files found in pod %s across the provided namespaces", podName)
}

return namespaceFilesList, nil
return podFileInfos, errors.Join(errs...)
}

// copyFileFromPod copies a single file from a pod to a local destination
func copyFileFromPod(ctx context.Context, clientset *kubernetes.Clientset, config *rest.Config, podName, namespace, srcDir, srcFile, destFile string) error {
// Get the pod to retrieve its node name
pod, err := clientset.CoreV1().Pods(namespace).Get(ctx, podName, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("failed to get pod %s in namespace %s: %w", podName, namespace, err)
}
// listFilesInPodDir lists all files in the specified directory inside the pod across multiple namespaces
func listFilesInPodDir(ctx context.Context, clientset *clientk8s.Clientset, config *rest.Config, pod *PodFileInfo, cutoffTime *time.Time) error {
nodeName := pod.Pod.Spec.NodeName
srcFilePath := filepath.Join("data", nodeName, srcDir)

// Construct the complete path using /data, the node name, srcDir, and srcFile
nodeName := pod.Spec.NodeName
srcFilePath := filepath.Join("data", nodeName, srcDir, srcFile)

// Execute the `cat` command to read the file at the srcFilePath
cmd := []string{"cat", srcFilePath}
cmd := []string{"ls", srcFilePath}
req := clientset.CoreV1().RESTClient().Post().
Resource("pods").
Name(podName).
Namespace(namespace).
Name(pod.Pod.Name).
Namespace(pod.Pod.Namespace).
SubResource("exec").
Param("container", "sniffer").
Param("stdout", "true").
@@ -158,7 +83,81 @@ func copyFileFromPod(ctx context.Context, clientset *kubernetes.Clientset, confi

exec, err := remotecommand.NewSPDYExecutor(config, "POST", req.URL())
if err != nil {
return fmt.Errorf("failed to initialize executor for pod %s in namespace %s: %w", podName, namespace, err)
return err
}

var stdoutBuf bytes.Buffer
var stderrBuf bytes.Buffer

// Execute the command to list files
err = exec.StreamWithContext(ctx, remotecommand.StreamOptions{
Stdout: &stdoutBuf,
Stderr: &stderrBuf,
})
if err != nil {
return err
}

// Split the output (file names) into a list
files := strings.Split(strings.TrimSpace(stdoutBuf.String()), "\n")
if len(files) == 0 {
// No files were found in the target dir for this pod
return nil
}

var filteredFiles []string
var fileProcessingErrs []error
// Filter files based on cutoff time if provided
for _, file := range files {
if cutoffTime != nil {
parts := strings.Split(file, "-")
if len(parts) < 2 {
continue
}

timestampStr := parts[len(parts)-2] + parts[len(parts)-1][:6] // Extract YYYYMMDDHHMMSS
fileTime, err := time.Parse("20060102150405", timestampStr)
if err != nil {
fileProcessingErrs = append(fileProcessingErrs, fmt.Errorf("failed to parse file timestamp %s: %w", file, err))
continue
}

if fileTime.Before(*cutoffTime) {
continue
}
}
// Add file to filtered list
filteredFiles = append(filteredFiles, file)
}

pod.SrcDir = srcDir
pod.Files = filteredFiles

return errors.Join(fileProcessingErrs...)
}

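For intuition, the listing step above is roughly equivalent to exec-ing `ls` in the worker's `sniffer` container; expressed with kubectl it would look something like this (pod, namespace, and node names are placeholders):

```shell
# Rough kubectl equivalent of listFilesInPodDir; names are illustrative.
kubectl exec -n default kubeshark-worker-abcde -c sniffer -- ls data/<node-name>/pcapdump
```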
// copyFileFromPod copies a single file from a pod to a local destination
func copyFileFromPod(ctx context.Context, clientset *kubernetes.Clientset, config *rest.Config, pod *PodFileInfo, srcFile, destFile string) error {
// Construct the complete path using /data, the node name, srcDir, and srcFile
nodeName := pod.Pod.Spec.NodeName
srcFilePath := filepath.Join("data", nodeName, srcDir, srcFile)

// Execute the `cat` command to read the file at the srcFilePath
cmd := []string{"cat", srcFilePath}
req := clientset.CoreV1().RESTClient().Post().
Resource("pods").
Name(pod.Pod.Name).
Namespace(pod.Pod.Namespace).
SubResource("exec").
Param("container", "sniffer").
Param("stdout", "true").
Param("stderr", "true").
Param("command", cmd[0]).
Param("command", cmd[1])

exec, err := remotecommand.NewSPDYExecutor(config, "POST", req.URL())
if err != nil {
return fmt.Errorf("failed to initialize executor for pod %s in namespace %s: %w", pod.Pod.Name, pod.Pod.Namespace, err)
}

// Create the local file to write the content to
@@ -177,7 +176,7 @@ func copyFileFromPod(ctx context.Context, clientset *kubernetes.Clientset, confi
Stderr: &stderrBuf,
})
if err != nil {
return fmt.Errorf("error copying file from pod %s in namespace %s: %s", podName, namespace, stderrBuf.String())
return err
}

return nil
@@ -187,184 +186,173 @@ func mergePCAPs(outputFile string, inputFiles []string) error {
// Create the output file
f, err := os.Create(outputFile)
if err != nil {
return err
return fmt.Errorf("failed to create output file: %w", err)
}
defer f.Close()

// Create a pcap writer for the output file
writer := pcapgo.NewWriter(f)
err = writer.WriteFileHeader(65536, layers.LinkTypeEthernet) // Snapshot length and LinkType
bufWriter := bufio.NewWriterSize(f, 4*1024*1024)
defer bufWriter.Flush()

// Create the PCAP writer
writer := pcapgo.NewWriter(bufWriter)
err = writer.WriteFileHeader(maxSnaplen, 1)
if err != nil {
return err
return fmt.Errorf("failed to write PCAP file header: %w", err)
}

var mergingErrs []error

for _, inputFile := range inputFiles {
log.Info().Msgf("Merging %s into %s", inputFile, outputFile)
// Open each input file
// Open the input file
file, err := os.Open(inputFile)
if err != nil {
log.Error().Err(err).Msgf("Failed to open %v", inputFile)
mergingErrs = append(mergingErrs, fmt.Errorf("failed to open %s: %w", inputFile, err))
continue
}
defer file.Close()

fileInfo, err := file.Stat()
if err != nil {
mergingErrs = append(mergingErrs, fmt.Errorf("failed to stat file %s: %w", inputFile, err))
file.Close()
continue
}

if fileInfo.Size() == 0 {
// Skip empty files
log.Debug().Msgf("Skipped empty file: %s", inputFile)
file.Close()
continue
}

// Create the PCAP reader for the input file
reader, err := pcapgo.NewReader(file)
if err != nil {
log.Error().Err(err).Msgf("Failed to create pcapng reader for %v", file.Name())
mergingErrs = append(mergingErrs, fmt.Errorf("failed to create pcapng reader for %v: %w", file.Name(), err))
file.Close()
continue
}

// Create the packet source
packetSource := gopacket.NewPacketSource(reader, layers.LinkTypeEthernet)

for packet := range packetSource.Packets() {
err := writer.WritePacket(packet.Metadata().CaptureInfo, packet.Data())
for {
// Read packet data
data, ci, err := reader.ReadPacketData()
if err != nil {
log.Error().Err(err).Msgf("Failed to write packet to %v", outputFile)
continue
if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) {
break
}
mergingErrs = append(mergingErrs, fmt.Errorf("error reading packet from file %s: %w", file.Name(), err))
break
}

// Write the packet to the output file
err = writer.WritePacket(ci, data)
if err != nil {
log.Error().Err(err).Msgf("Error writing packet to output file")
mergingErrs = append(mergingErrs, fmt.Errorf("error writing packet to output file: %w", err))
break
}
}

file.Close()
}

log.Debug().Err(errors.Join(mergingErrs...))

return nil
}

// setPcapConfigInKubernetes sets the PCAP config for all pods across multiple namespaces
func setPcapConfigInKubernetes(ctx context.Context, clientset *clientk8s.Clientset, podName string, namespaces []string, enabledPcap bool, timeInterval, maxTime, maxSize string) error {
for _, namespace := range namespaces {
// Load the existing ConfigMap in the current namespace
configMap, err := clientset.CoreV1().ConfigMaps(namespace).Get(ctx, "kubeshark-config-map", metav1.GetOptions{})
if err != nil {
log.Error().Err(err).Msgf("failed to get ConfigMap in namespace %s", namespace)
continue
}

// Update the values with user-provided input
configMap.Data["PCAP_TIME_INTERVAL"] = timeInterval
configMap.Data["PCAP_MAX_SIZE"] = maxSize
configMap.Data["PCAP_MAX_TIME"] = maxTime
configMap.Data["PCAP_DUMP_ENABLE"] = strconv.FormatBool(enabledPcap)

// Apply the updated ConfigMap back to the cluster in the current namespace
_, err = clientset.CoreV1().ConfigMaps(namespace).Update(ctx, configMap, metav1.UpdateOptions{})
if err != nil {
log.Error().Err(err).Msgf("failed to update ConfigMap in namespace %s", namespace)
continue
}
}

return nil
}

// startPcap function for starting the PCAP capture
func startStopPcap(clientset *kubernetes.Clientset, pcapEnable bool, timeInterval, maxTime, maxSize string) error {
kubernetesProvider, err := getKubernetesProviderForCli(false, false)
func copyPcapFiles(clientset *kubernetes.Clientset, config *rest.Config, destDir string, cutoffTime *time.Time) error {
// List all namespaces
namespaceList, err := clientset.CoreV1().Namespaces().List(context.TODO(), metav1.ListOptions{})
if err != nil {
log.Error().Err(err).Send()
return err
}

targetNamespaces := kubernetesProvider.GetNamespaces()
var targetNamespaces []string
for _, ns := range namespaceList.Items {
targetNamespaces = append(targetNamespaces, ns.Name)
}

// List worker pods
// List all worker pods
workerPods, err := listWorkerPods(context.Background(), clientset, targetNamespaces)
if err != nil {
log.Error().Err(err).Msg("Error listing worker pods")
return err
}

// Iterate over each pod to start the PCAP capture by updating the configuration in Kubernetes
for _, pod := range workerPods {
err := setPcapConfigInKubernetes(context.Background(), clientset, pod.Name, targetNamespaces, pcapEnable, timeInterval, maxTime, maxSize)
if err != nil {
log.Error().Err(err).Msgf("Error setting PCAP config for pod %s", pod.Name)
continue
if len(workerPods) == 0 {
return err
}
}
return nil
}

// copyPcapFiles function for copying the PCAP files from the worker pods
func copyPcapFiles(clientset *kubernetes.Clientset, config *rest.Config, destDir string) error {
kubernetesProvider, err := getKubernetesProviderForCli(false, false)
if err != nil {
log.Error().Err(err).Send()
return err
log.Debug().Err(err).Msg("error while listing worker pods")
}

targetNamespaces := kubernetesProvider.GetNamespaces()
var wg sync.WaitGroup

// List worker pods
workerPods, err := listWorkerPods(context.Background(), clientset, targetNamespaces)
if err != nil {
log.Error().Err(err).Msg("Error listing worker pods")
return err
}
var currentFiles []string

// Iterate over each pod to get the PCAP directory from config and copy files
// Launch a goroutine for each pod
for _, pod := range workerPods {
// Get the list of NamespaceFiles (files per namespace) and their source directories
namespaceFiles, err := listFilesInPodDir(context.Background(), clientset, config, pod.Name, targetNamespaces, SELF_RESOURCES_PREFIX+SUFFIX_CONFIG_MAP, "PCAP_SRC_DIR")
if err != nil {
log.Error().Err(err).Msgf("Error listing files in pod %s", pod.Name)
continue
}
wg.Add(1)

// Copy each file from the pod to the local destination for each namespace
for _, nsFiles := range namespaceFiles {
for _, file := range nsFiles.Files {
go func(pod *PodFileInfo) {
defer wg.Done()

// List files for the current pod
err := listFilesInPodDir(context.Background(), clientset, config, pod, cutoffTime)
if err != nil {
log.Debug().Err(err).Msgf("error listing files in pod %s", pod.Pod.Name)
return
}

// Copy files from the pod
for _, file := range pod.Files {
destFile := filepath.Join(destDir, file)

// Pass the correct namespace and related details to the function
err = copyFileFromPod(context.Background(), clientset, config, pod.Name, nsFiles.Namespace, nsFiles.SrcDir, file, destFile)
// Add a timeout context for file copy
ctx, cancel := context.WithTimeout(context.Background(), maxTimePerFile)
err := copyFileFromPod(ctx, clientset, config, pod, file, destFile)
cancel()
if err != nil {
log.Error().Err(err).Msgf("Error copying file from pod %s in namespace %s", pod.Name, nsFiles.Namespace)
} else {
log.Info().Msgf("Copied %s from %s to %s", file, pod.Name, destFile)
log.Debug().Err(err).Msgf("error copying file %s from pod %s in namespace %s", file, pod.Pod.Name, pod.Pod.Namespace)
continue
}

currentFiles = append(currentFiles, destFile)
log.Info().Msgf("Copied file %s from pod %s to %s", file, pod.Pod.Name, destFile)
pod.CopiedFiles = append(pod.CopiedFiles, destFile)
}
}

}(pod)
}

if len(currentFiles) == 0 {
log.Error().Msgf("No files to merge")
// Wait for all goroutines to complete
wg.Wait()

var copiedFiles []string
for _, pod := range workerPods {
copiedFiles = append(copiedFiles, pod.CopiedFiles...)
}

if len(copiedFiles) == 0 {
log.Info().Msg("No pcaps available to copy on the workers")
return nil
// continue
}

// Generate a temporary filename based on the first file
tempMergedFile := currentFiles[0] + "_temp"
// Generate a temporary filename for the merged file
tempMergedFile := copiedFiles[0] + "_temp"

// Merge the PCAPs into the temporary file
err = mergePCAPs(tempMergedFile, currentFiles)
// Merge PCAP files
err = mergePCAPs(tempMergedFile, copiedFiles)
if err != nil {
log.Error().Err(err).Msgf("Error merging files")
return err
// continue
os.Remove(tempMergedFile)
return fmt.Errorf("error merging files: %w", err)
}

// Remove the original files after merging
for _, file := range currentFiles {
err := os.Remove(file)
if err != nil {
log.Error().Err(err).Msgf("Error removing file %s", file)
for _, file := range copiedFiles {
if err := os.Remove(file); err != nil {
log.Debug().Err(err).Msgf("error removing file %s", file)
}
}

// Rename the temp file to the final name (removing "_temp")
// Rename the temp file to the final name
finalMergedFile := strings.TrimSuffix(tempMergedFile, "_temp")
err = os.Rename(tempMergedFile, finalMergedFile)
if err != nil {
log.Error().Err(err).Msgf("Error renaming merged file %s", tempMergedFile)
// continue
return err
}

log.Info().Msgf("Merged file created: %s", finalMergedFile)

return nil
}

@@ -63,6 +63,9 @@ func InitConfig(cmd *cobra.Command) error {

Config = CreateDefaultConfig()
Config.Tap.Debug = DebugMode
if DebugMode {
Config.LogLevel = "debug"
}
cmdName = cmd.Name()
if utils.Contains([]string{
"clean",
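Since `logLevel` is now a top-level config key (default `warning`, per the struct changes below), it can be overridden like any other config value; for example:

```shell
# logLevel is a regular config key, so --set can raise it without editing the config file.
kubeshark tap --set logLevel=debug
```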
@@ -16,42 +16,70 @@ const (
func CreateDefaultConfig() ConfigStruct {
return ConfigStruct{
Tap: configStructs.TapConfig{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: "kubernetes.io/os",
Operator: v1.NodeSelectorOpIn,
Values: []string{"linux"},
NodeSelectorTerms: configStructs.NodeSelectorTermsConfig{
Workers: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: "kubernetes.io/os",
Operator: v1.NodeSelectorOpIn,
Values: []string{"linux"},
},
},
},
},
Hub: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: "kubernetes.io/os",
Operator: v1.NodeSelectorOpIn,
Values: []string{"linux"},
},
},
},
},
Front: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: "kubernetes.io/os",
Operator: v1.NodeSelectorOpIn,
Values: []string{"linux"},
},
},
},
},
},
Capabilities: configStructs.CapabilitiesConfig{
NetworkCapture: []string{
// NET_RAW is required to listen to the network traffic
"NET_RAW",
// NET_ADMIN is required to listen to the network traffic
"NET_ADMIN",
},
ServiceMeshCapture: []string{
// SYS_ADMIN is required to read /proc/PID/net/ns + to install eBPF programs (kernel < 5.8)
"SYS_ADMIN",
// SYS_PTRACE is required to set netns to other process + to open libssl.so of other process
"SYS_PTRACE",
// DAC_OVERRIDE is required to read /proc/PID/environ
"DAC_OVERRIDE",
},
EBPFCapture: []string{
// SYS_ADMIN is required to read /proc/PID/net/ns + to install eBPF programs (kernel < 5.8)
"SYS_ADMIN",
// SYS_PTRACE is required to set netns to other process + to open libssl.so of other process
"SYS_PTRACE",
// SYS_RESOURCE is required to change rlimits for eBPF
"SYS_RESOURCE",
// IPC_LOCK is required for eBPF perf buffer allocations once the buffer size grows beyond a certain amount:
// https://github.com/kubeshark/tracer/blob/13e24725ba8b98216dd0e553262e6d9c56dce5fa/main.go#L82)
"IPC_LOCK",
SecurityContext: configStructs.SecurityContextConfig{
Privileged: true,
// Capabilities used only when running in unprivileged mode
Capabilities: configStructs.CapabilitiesConfig{
NetworkCapture: []string{
// NET_RAW is required to listen to the network traffic
"NET_RAW",
// NET_ADMIN is required to listen to the network traffic
"NET_ADMIN",
},
ServiceMeshCapture: []string{
// SYS_ADMIN is required to read /proc/PID/net/ns + to install eBPF programs (kernel < 5.8)
"SYS_ADMIN",
// SYS_PTRACE is required to set netns to other process + to open libssl.so of other process
"SYS_PTRACE",
// DAC_OVERRIDE is required to read /proc/PID/environ
"DAC_OVERRIDE",
},
EBPFCapture: []string{
// SYS_ADMIN is required to read /proc/PID/net/ns + to install eBPF programs (kernel < 5.8)
"SYS_ADMIN",
// SYS_PTRACE is required to set netns to other process + to open libssl.so of other process
"SYS_PTRACE",
// SYS_RESOURCE is required to change rlimits for eBPF
"SYS_RESOURCE",
// IPC_LOCK is required for eBPF perf buffer allocations once the buffer size grows beyond a certain amount:
// https://github.com/kubeshark/tracer/blob/13e24725ba8b98216dd0e553262e6d9c56dce5fa/main.go#L82)
"IPC_LOCK",
},
},
},
Auth: configStructs.AuthConfig{
@@ -59,9 +87,14 @@ func CreateDefaultConfig() ConfigStruct {
RoleAttribute: "role",
Roles: map[string]configStructs.Role{
"admin": {
Filter: "",
CanDownloadPCAP: true,
CanUseScripting: true,
Filter: "",
CanDownloadPCAP: true,
CanUseScripting: true,
ScriptingPermissions: configStructs.ScriptingPermissions{
CanSave: true,
CanActivate: true,
CanDelete: true,
},
CanUpdateTargetedPods: true,
CanStopTrafficCapturing: true,
ShowAdminConsoleLink: true,
@@ -83,6 +116,16 @@ func CreateDefaultConfig() ConfigStruct {
"ws",
// "tlsx",
"ldap",
"radius",
"diameter",
},
PortMapping: configStructs.PortMapping{
HTTP: []uint16{80, 443, 8080},
AMQP: []uint16{5671, 5672},
KAFKA: []uint16{9092},
REDIS: []uint16{6379},
LDAP: []uint16{389},
DIAMETER: []uint16{3868},
},
},
}
@@ -98,21 +141,21 @@ type ManifestsConfig struct {
}

type ConfigStruct struct {
Tap configStructs.TapConfig `yaml:"tap" json:"tap"`
Logs configStructs.LogsConfig `yaml:"logs" json:"logs"`
Config configStructs.ConfigConfig `yaml:"config,omitempty" json:"config,omitempty"`
PcapDump configStructs.PcapDumpConfig `yaml:"pcapdump" json:"pcapdump"`
Kube KubeConfig `yaml:"kube" json:"kube"`
DumpLogs bool `yaml:"dumpLogs" json:"dumpLogs" default:"false"`
HeadlessMode bool `yaml:"headless" json:"headless" default:"false"`
License string `yaml:"license" json:"license" default:""`
CloudLicenseEnabled bool `yaml:"cloudLicenseEnabled" json:"cloudLicenseEnabled" default:"true"`
SupportChatEnabled bool `yaml:"supportChatEnabled" json:"supportChatEnabled" default:"true"`
InternetConnectivity bool `yaml:"internetConnectivity" json:"internetConnectivity" default:"true"`
DissectorsUpdatingEnabled bool `yaml:"dissectorsUpdatingEnabled" json:"dissectorsUpdatingEnabled" default:"true"`
Scripting configStructs.ScriptingConfig `yaml:"scripting" json:"scripting"`
Manifests ManifestsConfig `yaml:"manifests,omitempty" json:"manifests,omitempty"`
Timezone string `yaml:"timezone" json:"timezone"`
Tap configStructs.TapConfig `yaml:"tap" json:"tap"`
Logs configStructs.LogsConfig `yaml:"logs" json:"logs"`
Config configStructs.ConfigConfig `yaml:"config,omitempty" json:"config,omitempty"`
PcapDump configStructs.PcapDumpConfig `yaml:"pcapdump" json:"pcapdump"`
Kube KubeConfig `yaml:"kube" json:"kube"`
DumpLogs bool `yaml:"dumpLogs" json:"dumpLogs" default:"false"`
HeadlessMode bool `yaml:"headless" json:"headless" default:"false"`
License string `yaml:"license" json:"license" default:""`
CloudLicenseEnabled bool `yaml:"cloudLicenseEnabled" json:"cloudLicenseEnabled" default:"true"`
SupportChatEnabled bool `yaml:"supportChatEnabled" json:"supportChatEnabled" default:"true"`
InternetConnectivity bool `yaml:"internetConnectivity" json:"internetConnectivity" default:"true"`
Scripting configStructs.ScriptingConfig `yaml:"scripting" json:"scripting"`
Manifests ManifestsConfig `yaml:"manifests,omitempty" json:"manifests,omitempty"`
Timezone string `yaml:"timezone" json:"timezone"`
LogLevel string `yaml:"logLevel" json:"logLevel" default:"warning"`
}

func (config *ConfigStruct) ImagePullPolicy() v1.PullPolicy {
@@ -43,6 +43,7 @@ const (
PcapTimeInterval = "timeInterval"
PcapKubeconfig = "kubeconfig"
PcapDumpEnabled = "enabled"
PcapTime = "time"
)

type ResourceLimitsHub struct {
@@ -71,7 +72,7 @@ type ResourceRequirementsWorker struct {
}

type WorkerConfig struct {
SrvPort uint16 `yaml:"srvPort" json:"srvPort" default:"30001"`
SrvPort uint16 `yaml:"srvPort" json:"srvPort" default:"48999"`
}

type HubConfig struct {
@@ -110,19 +111,55 @@ type DockerConfig struct {
OverrideTag OverrideTagConfig `yaml:"overrideTag" json:"overrideTag"`
}

type DnsConfig struct {
Nameservers []string `yaml:"nameservers" json:"nameservers" default:"[]"`
Searches []string `yaml:"searches" json:"searches" default:"[]"`
Options []DnsConfigOption `yaml:"options" json:"options" default:"[]"`
}

type DnsConfigOption struct {
Name string `yaml:"name" json:"name"`
Value string `yaml:"value" json:"value"`
}

type ResourcesConfig struct {
Hub ResourceRequirementsHub `yaml:"hub" json:"hub"`
Sniffer ResourceRequirementsWorker `yaml:"sniffer" json:"sniffer"`
Tracer ResourceRequirementsWorker `yaml:"tracer" json:"tracer"`
}

type ProbesConfig struct {
Hub ProbeConfig `yaml:"hub" json:"hub"`
Sniffer ProbeConfig `yaml:"sniffer" json:"sniffer"`
}

type NodeSelectorTermsConfig struct {
Hub []v1.NodeSelectorTerm `yaml:"hub" json:"hub" default:"[]"`
Workers []v1.NodeSelectorTerm `yaml:"workers" json:"workers" default:"[]"`
Front []v1.NodeSelectorTerm `yaml:"front" json:"front" default:"[]"`
}

type ProbeConfig struct {
InitialDelaySeconds int `yaml:"initialDelaySeconds" json:"initialDelaySeconds" default:"15"`
PeriodSeconds int `yaml:"periodSeconds" json:"periodSeconds" default:"10"`
SuccessThreshold int `yaml:"successThreshold" json:"successThreshold" default:"1"`
FailureThreshold int `yaml:"failureThreshold" json:"failureThreshold" default:"3"`
}

type ScriptingPermissions struct {
CanSave bool `yaml:"canSave" json:"canSave" default:"true"`
CanActivate bool `yaml:"canActivate" json:"canActivate" default:"true"`
CanDelete bool `yaml:"canDelete" json:"canDelete" default:"true"`
}

type Role struct {
Filter string `yaml:"filter" json:"filter" default:""`
CanDownloadPCAP bool `yaml:"canDownloadPCAP" json:"canDownloadPCAP" default:"false"`
CanUseScripting bool `yaml:"canUseScripting" json:"canUseScripting" default:"false"`
CanUpdateTargetedPods bool `yaml:"canUpdateTargetedPods" json:"canUpdateTargetedPods" default:"false"`
CanStopTrafficCapturing bool `yaml:"canStopTrafficCapturing" json:"canStopTrafficCapturing" default:"false"`
ShowAdminConsoleLink bool `yaml:"showAdminConsoleLink" json:"showAdminConsoleLink" default:"false"`
Filter string `yaml:"filter" json:"filter" default:""`
CanDownloadPCAP bool `yaml:"canDownloadPCAP" json:"canDownloadPCAP" default:"false"`
CanUseScripting bool `yaml:"canUseScripting" json:"canUseScripting" default:"false"`
ScriptingPermissions ScriptingPermissions `yaml:"scriptingPermissions" json:"scriptingPermissions"`
CanUpdateTargetedPods bool `yaml:"canUpdateTargetedPods" json:"canUpdateTargetedPods" default:"false"`
CanStopTrafficCapturing bool `yaml:"canStopTrafficCapturing" json:"canStopTrafficCapturing" default:"false"`
ShowAdminConsoleLink bool `yaml:"showAdminConsoleLink" json:"showAdminConsoleLink" default:"false"`
}

type SamlConfig struct {
@@ -200,52 +237,82 @@ type PcapDumpConfig struct {
PcapTimeInterval string `yaml:"timeInterval" json:"timeInterval" default:"1m"`
PcapMaxTime string `yaml:"maxTime" json:"maxTime" default:"1h"`
PcapMaxSize string `yaml:"maxSize" json:"maxSize" default:"500MB"`
PcapSrcDir string `yaml:"pcapSrcDir" json:"pcapSrcDir" default:"pcapdump"`
PcapTime string `yaml:"time" json:"time" default:"time"`
PcapDebug bool `yaml:"debug" json:"debug" default:"false"`
PcapDest string `yaml:"dest" json:"dest" default:""`
}

type PortMapping struct {
HTTP []uint16 `yaml:"http" json:"http"`
AMQP []uint16 `yaml:"amqp" json:"amqp"`
KAFKA []uint16 `yaml:"kafka" json:"kafka"`
REDIS []uint16 `yaml:"redis" json:"redis"`
LDAP []uint16 `yaml:"ldap" json:"ldap"`
DIAMETER []uint16 `yaml:"diameter" json:"diameter"`
}

type SecurityContextConfig struct {
Privileged bool `yaml:"privileged" json:"privileged" default:"true"`
AppArmorProfile AppArmorProfileConfig `yaml:"appArmorProfile" json:"appArmorProfile"`
SeLinuxOptions SeLinuxOptionsConfig `yaml:"seLinuxOptions" json:"seLinuxOptions"`
Capabilities CapabilitiesConfig `yaml:"capabilities" json:"capabilities"`
}

type AppArmorProfileConfig struct {
Type string `yaml:"type" json:"type"`
LocalhostProfile string `yaml:"localhostProfile" json:"localhostProfile"`
}

type SeLinuxOptionsConfig struct {
Level string `yaml:"level" json:"level"`
Role string `yaml:"role" json:"role"`
Type string `yaml:"type" json:"type"`
User string `yaml:"user" json:"user"`
}

type TapConfig struct {
Docker DockerConfig `yaml:"docker" json:"docker"`
Proxy ProxyConfig `yaml:"proxy" json:"proxy"`
PodRegexStr string `yaml:"regex" json:"regex" default:".*"`
Namespaces []string `yaml:"namespaces" json:"namespaces" default:"[]"`
ExcludedNamespaces []string `yaml:"excludedNamespaces" json:"excludedNamespaces" default:"[]"`
BpfOverride string `yaml:"bpfOverride" json:"bpfOverride" default:""`
Stopped bool `yaml:"stopped" json:"stopped" default:"false"`
Release ReleaseConfig `yaml:"release" json:"release"`
PersistentStorage bool `yaml:"persistentStorage" json:"persistentStorage" default:"false"`
PersistentStorageStatic bool `yaml:"persistentStorageStatic" json:"persistentStorageStatic" default:"false"`
EfsFileSytemIdAndPath string `yaml:"efsFileSytemIdAndPath" json:"efsFileSytemIdAndPath" default:""`
StorageLimit string `yaml:"storageLimit" json:"storageLimit" default:"5000Mi"`
StorageClass string `yaml:"storageClass" json:"storageClass" default:"standard"`
DryRun bool `yaml:"dryRun" json:"dryRun" default:"false"`
Resources ResourcesConfig `yaml:"resources" json:"resources"`
ServiceMesh bool `yaml:"serviceMesh" json:"serviceMesh" default:"true"`
Tls bool `yaml:"tls" json:"tls" default:"true"`
DisableTlsLog bool `yaml:"disableTlsLog" json:"disableTlsLog" default:"true"`
PacketCapture string `yaml:"packetCapture" json:"packetCapture" default:"best"`
IgnoreTainted bool `yaml:"ignoreTainted" json:"ignoreTainted" default:"false"`
Labels map[string]string `yaml:"labels" json:"labels" default:"{}"`
Annotations map[string]string `yaml:"annotations" json:"annotations" default:"{}"`
NodeSelectorTerms []v1.NodeSelectorTerm `yaml:"nodeSelectorTerms" json:"nodeSelectorTerms" default:"[]"`
Auth AuthConfig `yaml:"auth" json:"auth"`
Ingress IngressConfig `yaml:"ingress" json:"ingress"`
IPv6 bool `yaml:"ipv6" json:"ipv6" default:"true"`
Debug bool `yaml:"debug" json:"debug" default:"false"`
Telemetry TelemetryConfig `yaml:"telemetry" json:"telemetry"`
ResourceGuard ResourceGuardConfig `yaml:"resourceGuard" json:"resourceGuard"`
Sentry SentryConfig `yaml:"sentry" json:"sentry"`
DefaultFilter string `yaml:"defaultFilter" json:"defaultFilter" default:"!dns and !error"`
ScriptingDisabled bool `yaml:"scriptingDisabled" json:"scriptingDisabled" default:"false"`
TargetedPodsUpdateDisabled bool `yaml:"targetedPodsUpdateDisabled" json:"targetedPodsUpdateDisabled" default:"false"`
PresetFiltersChangingEnabled bool `yaml:"presetFiltersChangingEnabled" json:"presetFiltersChangingEnabled" default:"true"`
RecordingDisabled bool `yaml:"recordingDisabled" json:"recordingDisabled" default:"false"`
StopTrafficCapturingDisabled bool `yaml:"stopTrafficCapturingDisabled" json:"stopTrafficCapturingDisabled" default:"false"`
Capabilities CapabilitiesConfig `yaml:"capabilities" json:"capabilities"`
GlobalFilter string `yaml:"globalFilter" json:"globalFilter" default:""`
EnabledDissectors []string `yaml:"enabledDissectors" json:"enabledDissectors"`
Metrics MetricsConfig `yaml:"metrics" json:"metrics"`
Pprof PprofConfig `yaml:"pprof" json:"pprof"`
Misc MiscConfig `yaml:"misc" json:"misc"`
Docker DockerConfig `yaml:"docker" json:"docker"`
Proxy ProxyConfig `yaml:"proxy" json:"proxy"`
PodRegexStr string `yaml:"regex" json:"regex" default:".*"`
Namespaces []string `yaml:"namespaces" json:"namespaces" default:"[]"`
ExcludedNamespaces []string `yaml:"excludedNamespaces" json:"excludedNamespaces" default:"[]"`
BpfOverride string `yaml:"bpfOverride" json:"bpfOverride" default:""`
Stopped bool `yaml:"stopped" json:"stopped" default:"false"`
Release ReleaseConfig `yaml:"release" json:"release"`
PersistentStorage bool `yaml:"persistentStorage" json:"persistentStorage" default:"false"`
PersistentStorageStatic bool `yaml:"persistentStorageStatic" json:"persistentStorageStatic" default:"false"`
EfsFileSytemIdAndPath string `yaml:"efsFileSytemIdAndPath" json:"efsFileSytemIdAndPath" default:""`
StorageLimit string `yaml:"storageLimit" json:"storageLimit" default:"5000Mi"`
StorageClass string `yaml:"storageClass" json:"storageClass" default:"standard"`
DryRun bool `yaml:"dryRun" json:"dryRun" default:"false"`
DnsConfig DnsConfig `yaml:"dns" json:"dns"`
Resources ResourcesConfig `yaml:"resources" json:"resources"`
Probes ProbesConfig `yaml:"probes" json:"probes"`
ServiceMesh bool `yaml:"serviceMesh" json:"serviceMesh" default:"true"`
Tls bool `yaml:"tls" json:"tls" default:"true"`
DisableTlsLog bool `yaml:"disableTlsLog" json:"disableTlsLog" default:"true"`
PacketCapture string `yaml:"packetCapture" json:"packetCapture" default:"best"`
IgnoreTainted bool `yaml:"ignoreTainted" json:"ignoreTainted" default:"false"`
Labels map[string]string `yaml:"labels" json:"labels" default:"{}"`
Annotations map[string]string `yaml:"annotations" json:"annotations" default:"{}"`
NodeSelectorTerms NodeSelectorTermsConfig `yaml:"nodeSelectorTerms" json:"nodeSelectorTerms" default:"{}"`
Auth AuthConfig `yaml:"auth" json:"auth"`
Ingress IngressConfig `yaml:"ingress" json:"ingress"`
IPv6 bool `yaml:"ipv6" json:"ipv6" default:"true"`
Debug bool `yaml:"debug" json:"debug" default:"false"`
Telemetry TelemetryConfig `yaml:"telemetry" json:"telemetry"`
ResourceGuard ResourceGuardConfig `yaml:"resourceGuard" json:"resourceGuard"`
Sentry SentryConfig `yaml:"sentry" json:"sentry"`
DefaultFilter string `yaml:"defaultFilter" json:"defaultFilter" default:"!dns and !error"`
LiveConfigMapChangesDisabled bool `yaml:"liveConfigMapChangesDisabled" json:"liveConfigMapChangesDisabled" default:"false"`
GlobalFilter string `yaml:"globalFilter" json:"globalFilter" default:""`
EnabledDissectors []string `yaml:"enabledDissectors" json:"enabledDissectors"`
PortMapping PortMapping `yaml:"portMapping" json:"portMapping"`
CustomMacros map[string]string `yaml:"customMacros" json:"customMacros" default:"{\"https\":\"tls and (http or http2)\"}"`
Metrics MetricsConfig `yaml:"metrics" json:"metrics"`
Pprof PprofConfig `yaml:"pprof" json:"pprof"`
Misc MiscConfig `yaml:"misc" json:"misc"`
SecurityContext SecurityContextConfig `yaml:"securityContext" json:"securityContext"`
}

func (config *TapConfig) PodRegex() *regexp.Regexp {
@@ -1,6 +1,6 @@
apiVersion: v2
name: kubeshark
version: "52.3.92"
version: "52.4"
description: The API Traffic Analyzer for Kubernetes
home: https://kubeshark.co
keywords:
@@ -131,7 +131,7 @@ Example for overriding image names:
| `tap.docker.overrideImage` | Can be used to directly override image names | `""` |
| `tap.docker.overrideTag` | Can be used to override image tags | `""` |
| `tap.proxy.hub.srvPort` | Hub server port. Change if already occupied. | `8898` |
| `tap.proxy.worker.srvPort` | Worker server port. Change if already occupied. | `30001` |
| `tap.proxy.worker.srvPort` | Worker server port. Change if already occupied. | `48999` |
| `tap.proxy.front.port` | Front service port. Change if already occupied. | `8899` |
| `tap.proxy.host` | Change to 0.0.0.0 to open up to the world. | `127.0.0.1` |
| `tap.regex` | Target (process traffic from) pods that match regex | `.*` |
@@ -148,6 +148,9 @@ Example for overriding image names:
| `tap.storageLimit` | Limit of either the `emptyDir` or `persistentVolumeClaim` | `5000Mi` |
| `tap.storageClass` | Storage class of the `PersistentVolumeClaim` | `standard` |
| `tap.dryRun` | Preview of all pods matching the regex, without tapping them | `false` |
| `tap.dnsConfig.nameservers` | Nameservers to use for DNS resolution | `[]` |
| `tap.dnsConfig.searches` | Search domains to use for DNS resolution | `[]` |
| `tap.dnsConfig.options` | DNS options to use for DNS resolution | `[]` |
| `tap.resources.hub.limits.cpu` | CPU limit for hub | `""` (no limit) |
| `tap.resources.hub.limits.memory` | Memory limit for hub | `5Gi` |
| `tap.resources.hub.requests.cpu` | CPU request for hub | `50m` |
@@ -160,13 +163,23 @@ Example for overriding image names:
| `tap.resources.tracer.limits.memory` | Memory limit for tracer | `3Gi` |
| `tap.resources.tracer.requests.cpu` | CPU request for tracer | `50m` |
| `tap.resources.tracer.requests.memory` | Memory request for tracer | `50Mi` |
| `tap.probes.hub.initialDelaySeconds` | Initial delay before probing the hub | `15` |
| `tap.probes.hub.periodSeconds` | Period between probes for the hub | `10` |
| `tap.probes.hub.successThreshold` | Number of successful probes before considering the hub healthy | `1` |
| `tap.probes.hub.failureThreshold` | Number of failed probes before considering the hub unhealthy | `3` |
| `tap.probes.sniffer.initialDelaySeconds` | Initial delay before probing the sniffer | `15` |
| `tap.probes.sniffer.periodSeconds` | Period between probes for the sniffer | `10` |
| `tap.probes.sniffer.successThreshold` | Number of successful probes before considering the sniffer healthy | `1` |
| `tap.probes.sniffer.failureThreshold` | Number of failed probes before considering the sniffer unhealthy | `3` |
| `tap.serviceMesh` | Capture traffic from service meshes like Istio, Linkerd, Consul, etc. | `true` |
| `tap.tls` | Capture the encrypted/TLS traffic from cryptography libraries like OpenSSL | `true` |
| `tap.disableTlsLog` | Suppress logging for TLS/eBPF | `true` |
| `tap.ignoreTainted` | Whether to ignore tainted nodes | `false` |
| `tap.labels` | Kubernetes labels to apply to all Kubeshark resources | `{}` |
| `tap.annotations` | Kubernetes annotations to apply to all Kubeshark resources | `{}` |
| `tap.nodeSelectorTerms` | Node selector terms | `[{"matchExpressions":[{"key":"kubernetes.io/os","operator":"In","values":["linux"]}]}]` |
| `tap.nodeSelectorTerms.workers` | Node selector terms for the worker components | `[{"matchExpressions":[{"key":"kubernetes.io/os","operator":"In","values":["linux"]}]}]` |
| `tap.nodeSelectorTerms.hub` | Node selector terms for the hub component | `[{"matchExpressions":[{"key":"kubernetes.io/os","operator":"In","values":["linux"]}]}]` |
| `tap.nodeSelectorTerms.front` | Node selector terms for the front-end component | `[{"matchExpressions":[{"key":"kubernetes.io/os","operator":"In","values":["linux"]}]}]` |
| `tap.auth.enabled` | Enable authentication | `false` |
| `tap.auth.type` | Authentication type (1 option available: `saml`) | `saml` |
| `tap.auth.approvedEmails` | List of approved email addresses for authentication | `[]` |
@@ -175,7 +188,7 @@ Example for overriding image names:
| `tap.auth.saml.x509crt` | A self-signed X.509 `.cert` contents <br/>(effective, if `tap.auth.type = saml`) | `` |
| `tap.auth.saml.x509key` | A self-signed X.509 `.key` contents <br/>(effective, if `tap.auth.type = saml`) | `` |
| `tap.auth.saml.roleAttribute` | A SAML attribute name corresponding to user's authorization role <br/>(effective, if `tap.auth.type = saml`) | `role` |
| `tap.auth.saml.roles` | A list of SAML authorization roles and their permissions <br/>(effective, if `tap.auth.type = saml`) | `{"admin":{"canDownloadPCAP":true,"canUpdateTargetedPods":true,"canUseScripting":true, "canStopTrafficCapturing":true, "filter":"","showAdminConsoleLink":true}}` |
| `tap.auth.saml.roles` | A list of SAML authorization roles and their permissions <br/>(effective, if `tap.auth.type = saml`) | `{"admin":{"canDownloadPCAP":true,"canUpdateTargetedPods":true,"canUseScripting":true, "scriptingPermissions":{"canSave":true, "canActivate":true, "canDelete":true}, "canStopTrafficCapturing":true, "filter":"","showAdminConsoleLink":true}}` |
| `tap.ingress.enabled` | Enable `Ingress` | `false` |
| `tap.ingress.className` | Ingress class name | `""` |
| `tap.ingress.host` | Host of the `Ingress` | `ks.svc.cluster.local` |
@@ -188,6 +201,7 @@ Example for overriding image names:
| `tap.sentry.enabled` | Enable sending of error logs to Sentry | `false` |
| `tap.sentry.environment` | Sentry environment to label error logs with | `production` |
| `tap.defaultFilter` | Sets the default dashboard KFL filter (e.g. `http`). By default, this value filters out DNS traffic and error entries. The user can easily change this, **temporarily**, in the Dashboard. For a permanent change, you should change this value in the `values.yaml` or `config.yaml` file. | `"!dns and !error"` |
| `tap.liveConfigMapChangesDisabled` | If set to `true`, all user functionality (scripting, targeting settings, global & default KFL modification, traffic recording, traffic capturing on/off, protocol dissectors) involving dynamic ConfigMap changes from the UI will be disabled | `false` |
| `tap.globalFilter` | Prepends to any KFL filter and can be used to limit what is visible in the dashboard. For example, `redact("request.headers.Authorization")` will redact the appropriate field. Another example: `!dns` will hide all DNS traffic. | `""` |
| `tap.metrics.port` | Pod port used to expose Prometheus metrics | `49100` |
| `tap.enabledDissectors` | This is an array of strings representing the list of supported protocols. Remove or comment out redundant protocols (e.g., dns). | The default list excludes: `udp` and `tcp` |
@@ -206,7 +220,6 @@ Example for overriding image names:
| `timezone` | IANA time zone applied to time shown in the front-end | `""` (local time zone applies) |
| `supportChatEnabled` | Enable real-time support chat channel based on Intercom | `true` |
| `internetConnectivity` | Turns off API requests that are dependent on Internet connectivity such as `telemetry` and `online-support`. | `true` |
| `dissectorsUpdatingEnabled` | Turns off UI for enabling/disabling dissectors | `true` |
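As a worked example of the table above, a hedged values override exercising a few of the documented keys might look like the following; the host and filter values are purely illustrative, only the key names come from the table. It would be passed to `helm install` or `helm upgrade` with `-f values.yaml`.

tap:
  proxy:
    worker:
      srvPort: 48999      # new default worker port from this change
    host: 0.0.0.0         # illustrative: expose beyond localhost
  defaultFilter: "http"   # illustrative dashboard default
  globalFilter: "!dns"    # illustrative: hide DNS traffic everywhere
  storageLimit: 5000Mi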
@@ -31,9 +31,8 @@ spec:
|
||||
- ./hub
|
||||
- -port
|
||||
- "8080"
|
||||
{{- if .Values.tap.debug }}
|
||||
- -debug
|
||||
{{- end }}
|
||||
- -loglevel
|
||||
- '{{ .Values.logLevel | default "warning" }}'
|
||||
env:
|
||||
- name: POD_NAME
|
||||
valueFrom:
|
||||
@@ -59,24 +58,18 @@ spec:
|
||||
image: '{{ .Values.tap.docker.registry }}/hub:{{ not (eq .Values.tap.docker.tag "") | ternary .Values.tap.docker.tag (include "kubeshark.defaultVersion" .) }}'
|
||||
{{- end }}
|
||||
imagePullPolicy: {{ .Values.tap.docker.imagePullPolicy }}
|
||||
{{- if .Values.tap.docker.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
{{- range .Values.tap.docker.imagePullSecrets }}
|
||||
- name: {{ . }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
readinessProbe:
|
||||
periodSeconds: 1
|
||||
failureThreshold: 3
|
||||
successThreshold: 1
|
||||
initialDelaySeconds: 3
|
||||
periodSeconds: {{ .Values.tap.probes.hub.periodSeconds }}
|
||||
failureThreshold: {{ .Values.tap.probes.hub.failureThreshold }}
|
||||
successThreshold: {{ .Values.tap.probes.hub.successThreshold }}
|
||||
initialDelaySeconds: {{ .Values.tap.probes.hub.initialDelaySeconds }}
|
||||
tcpSocket:
|
||||
port: 8080
|
||||
livenessProbe:
|
||||
periodSeconds: 1
|
||||
failureThreshold: 3
|
||||
successThreshold: 1
|
||||
initialDelaySeconds: 3
|
||||
periodSeconds: {{ .Values.tap.probes.hub.periodSeconds }}
|
||||
failureThreshold: {{ .Values.tap.probes.hub.failureThreshold }}
|
||||
successThreshold: {{ .Values.tap.probes.hub.successThreshold }}
|
||||
initialDelaySeconds: {{ .Values.tap.probes.hub.initialDelaySeconds }}
|
||||
tcpSocket:
|
||||
port: 8080
|
||||
resources:
|
||||
@@ -98,6 +91,43 @@ spec:
|
||||
- name: saml-x509-volume
|
||||
mountPath: "/etc/saml/x509"
|
||||
readOnly: true
|
||||
{{- if .Values.tap.docker.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
{{- range .Values.tap.docker.imagePullSecrets }}
|
||||
- name: {{ . }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if gt (len .Values.tap.nodeSelectorTerms.hub) 0}}
|
||||
affinity:
|
||||
nodeAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
nodeSelectorTerms:
|
||||
{{- toYaml .Values.tap.nodeSelectorTerms.hub | nindent 12 }}
|
||||
{{- end }}
|
||||
{{- if or .Values.tap.dns.nameservers .Values.tap.dns.searches .Values.tap.dns.options }}
|
||||
dnsConfig:
|
||||
{{- if .Values.tap.dns.nameservers }}
|
||||
nameservers:
|
||||
{{- range .Values.tap.dns.nameservers }}
|
||||
- {{ . | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if .Values.tap.dns.searches }}
|
||||
searches:
|
||||
{{- range .Values.tap.dns.searches }}
|
||||
- {{ . | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if .Values.tap.dns.options }}
|
||||
options:
|
||||
{{- range .Values.tap.dns.options }}
|
||||
- name: {{ .name | quote }}
|
||||
{{- if .value }}
|
||||
value: {{ .value | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
volumes:
|
||||
- name: saml-x509-volume
|
||||
projected:
|
||||
|
||||
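Since the hub readiness and liveness probes above are now rendered from `tap.probes.hub` instead of hardcoded values, a site with a slower-starting hub could relax them from values alone. A minimal sketch, with arbitrary numbers:

tap:
  probes:
    hub:
      initialDelaySeconds: 30   # illustrative: give the hub longer to start
      periodSeconds: 15
      successThreshold: 1
      failureThreshold: 5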
@@ -37,20 +37,20 @@ spec:
|
||||
- name: REACT_APP_TIMEZONE
|
||||
value: '{{ not (eq .Values.timezone "") | ternary .Values.timezone " " }}'
|
||||
- name: REACT_APP_SCRIPTING_DISABLED
|
||||
value: '{{ .Values.tap.scriptingDisabled }}'
|
||||
value: '{{ .Values.tap.liveConfigMapChangesDisabled }}'
|
||||
- name: REACT_APP_TARGETED_PODS_UPDATE_DISABLED
|
||||
value: '{{ .Values.tap.targetedPodsUpdateDisabled }}'
|
||||
value: '{{ .Values.tap.liveConfigMapChangesDisabled }}'
|
||||
- name: REACT_APP_PRESET_FILTERS_CHANGING_ENABLED
|
||||
value: '{{ .Values.tap.presetFiltersChangingEnabled }}'
|
||||
value: '{{ .Values.tap.liveConfigMapChangesDisabled | ternary "false" "true" }}'
|
||||
- name: REACT_APP_BPF_OVERRIDE_DISABLED
|
||||
value: '{{ eq .Values.tap.packetCapture "ebpf" | ternary "true" "false" }}'
|
||||
- name: REACT_APP_RECORDING_DISABLED
|
||||
value: '{{ .Values.tap.recordingDisabled }}'
|
||||
value: '{{ .Values.tap.liveConfigMapChangesDisabled }}'
|
||||
- name: REACT_APP_STOP_TRAFFIC_CAPTURING_DISABLED
|
||||
value: '{{- if and .Values.tap.stopTrafficCapturingDisabled .Values.tap.stopped -}}
|
||||
value: '{{- if and .Values.tap.liveConfigMapChangesDisabled .Values.tap.stopped -}}
|
||||
false
|
||||
{{- else -}}
|
||||
{{ .Values.tap.stopTrafficCapturingDisabled | ternary "true" "false" }}
|
||||
{{ .Values.tap.liveConfigMapChangesDisabled | ternary "true" "false" }}
|
||||
{{- end -}}'
|
||||
- name: 'REACT_APP_CLOUD_LICENSE_ENABLED'
|
||||
value: '{{- if or (and .Values.cloudLicenseEnabled (not (empty .Values.license))) (not .Values.internetConnectivity) -}}
|
||||
@@ -61,7 +61,7 @@ spec:
|
||||
- name: REACT_APP_SUPPORT_CHAT_ENABLED
|
||||
value: '{{ and .Values.supportChatEnabled .Values.internetConnectivity | ternary "true" "false" }}'
|
||||
- name: REACT_APP_DISSECTORS_UPDATING_ENABLED
|
||||
value: '{{ .Values.dissectorsUpdatingEnabled | ternary "true" "false" }}'
|
||||
value: '{{ .Values.tap.liveConfigMapChangesDisabled | ternary "false" "true" }}'
|
||||
- name: REACT_APP_SENTRY_ENABLED
|
||||
value: '{{ (include "sentry.enabled" .) }}'
|
||||
- name: REACT_APP_SENTRY_ENVIRONMENT
|
||||
@@ -74,12 +74,6 @@ spec:
|
||||
image: '{{ .Values.tap.docker.registry }}/front:{{ not (eq .Values.tap.docker.tag "") | ternary .Values.tap.docker.tag (include "kubeshark.defaultVersion" .) }}'
|
||||
{{- end }}
|
||||
imagePullPolicy: {{ .Values.tap.docker.imagePullPolicy }}
|
||||
{{- if .Values.tap.docker.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
{{- range .Values.tap.docker.imagePullSecrets }}
|
||||
- name: {{ . }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
name: kubeshark-front
|
||||
livenessProbe:
|
||||
periodSeconds: 1
|
||||
@@ -108,6 +102,43 @@ spec:
|
||||
mountPath: /etc/nginx/conf.d/default.conf
|
||||
subPath: default.conf
|
||||
readOnly: true
|
||||
{{- if .Values.tap.docker.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
{{- range .Values.tap.docker.imagePullSecrets }}
|
||||
- name: {{ . }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if gt (len .Values.tap.nodeSelectorTerms.front) 0}}
|
||||
affinity:
|
||||
nodeAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
nodeSelectorTerms:
|
||||
{{- toYaml .Values.tap.nodeSelectorTerms.front | nindent 12 }}
|
||||
{{- end }}
|
||||
{{- if or .Values.tap.dns.nameservers .Values.tap.dns.searches .Values.tap.dns.options }}
|
||||
dnsConfig:
|
||||
{{- if .Values.tap.dns.nameservers }}
|
||||
nameservers:
|
||||
{{- range .Values.tap.dns.nameservers }}
|
||||
- {{ . | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if .Values.tap.dns.searches }}
|
||||
searches:
|
||||
{{- range .Values.tap.dns.searches }}
|
||||
- {{ . | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if .Values.tap.dns.options }}
|
||||
options:
|
||||
{{- range .Values.tap.dns.options }}
|
||||
- name: {{ .name | quote }}
|
||||
{{- if .value }}
|
||||
value: {{ .value | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
volumes:
|
||||
- name: nginx-config
|
||||
configMap:
|
||||
|
||||
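The dnsConfig blocks rendered in the hub and front templates above are all driven by `tap.dns`. A minimal sketch of the corresponding values; the nameserver address and search domain are placeholders:

tap:
  dns:
    nameservers:
      - 10.96.0.10            # placeholder cluster DNS address
    searches:
      - svc.cluster.local     # placeholder search domain
    options:
      - name: ndots
        value: "2"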
@@ -25,6 +25,26 @@ spec:
|
||||
name: kubeshark-worker-daemon-set
|
||||
namespace: kubeshark
|
||||
spec:
|
||||
{{- if .Values.tap.mountBpf }}
|
||||
initContainers:
|
||||
- command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- mkdir -p /sys/fs/bpf && mount | grep -q '/sys/fs/bpf' || mount -t bpf bpf /sys/fs/bpf
|
||||
{{- if .Values.tap.docker.overrideTag.worker }}
|
||||
image: '{{ .Values.tap.docker.registry }}/worker:{{ .Values.tap.docker.overrideTag.worker }}{{ include "kubeshark.dockerTagDebugVersion" . }}'
|
||||
{{ else }}
|
||||
image: '{{ .Values.tap.docker.registry }}/worker:{{ not (eq .Values.tap.docker.tag "") | ternary .Values.tap.docker.tag (include "kubeshark.defaultVersion" .) }}{{ include "kubeshark.dockerTagDebugVersion" . }}'
|
||||
{{- end }}
|
||||
imagePullPolicy: {{ .Values.tap.docker.imagePullPolicy }}
|
||||
name: mount-bpf
|
||||
securityContext:
|
||||
privileged: true
|
||||
volumeMounts:
|
||||
- mountPath: /sys
|
||||
name: sys
|
||||
mountPropagation: Bidirectional
|
||||
{{- end }}
|
||||
containers:
|
||||
- command:
|
||||
- ./worker
|
||||
@@ -36,17 +56,16 @@ spec:
|
||||
- '{{ .Values.tap.metrics.port }}'
|
||||
- -packet-capture
|
||||
- '{{ .Values.tap.packetCapture }}'
|
||||
{{- if .Values.tap.tls }}
|
||||
- -unixsocket
|
||||
- -loglevel
|
||||
- '{{ .Values.logLevel | default "warning" }}'
|
||||
{{- if not .Values.tap.tls }}
|
||||
- -disable-tracer
|
||||
{{- end }}
|
||||
{{- if .Values.tap.serviceMesh }}
|
||||
- -servicemesh
|
||||
{{- end }}
|
||||
- -procfs
|
||||
- /hostproc
|
||||
{{- if ne .Values.tap.packetCapture "ebpf" }}
|
||||
- -disable-ebpf
|
||||
{{- end }}
|
||||
{{- if .Values.tap.resourceGuard.enabled }}
|
||||
- -enable-resource-guard
|
||||
{{- end }}
|
||||
@@ -54,9 +73,6 @@ spec:
|
||||
- '{{ .Values.tap.misc.resolutionStrategy }}'
|
||||
- -staletimeout
|
||||
- '{{ .Values.tap.misc.staleTimeoutSeconds }}'
|
||||
{{- if .Values.tap.debug }}
|
||||
- -debug
|
||||
{{- end }}
|
||||
{{- if .Values.tap.docker.overrideImage.worker }}
|
||||
image: '{{ .Values.tap.docker.overrideImage.worker }}'
|
||||
{{- else if .Values.tap.docker.overrideTag.worker }}
|
||||
@@ -113,30 +129,64 @@ spec:
|
||||
memory: {{ .Values.tap.resources.sniffer.requests.memory }}
|
||||
{{ end }}
|
||||
securityContext:
|
||||
privileged: {{ .Values.tap.securityContext.privileged }}
|
||||
{{- if not .Values.tap.securityContext.privileged }}
|
||||
{{- $aaProfile := .Values.tap.securityContext.appArmorProfile }}
|
||||
{{- $selinuxOpts := .Values.tap.securityContext.seLinuxOptions }}
|
||||
{{- if or (ne $aaProfile.type "") (ne $aaProfile.localhostProfile "") }}
|
||||
appArmorProfile:
|
||||
{{- if ne $aaProfile.type "" }}
|
||||
type: {{ $aaProfile.type }}
|
||||
{{- end }}
|
||||
{{- if ne $aaProfile.localhostProfile "" }}
|
||||
localhostProfile: {{ $aaProfile.localhostProfile }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if or (ne $selinuxOpts.level "") (ne $selinuxOpts.role "") (ne $selinuxOpts.type "") (ne $selinuxOpts.user "") }}
|
||||
seLinuxOptions:
|
||||
{{- if ne $selinuxOpts.level "" }}
|
||||
level: {{ $selinuxOpts.level }}
|
||||
{{- end }}
|
||||
{{- if ne $selinuxOpts.role "" }}
|
||||
role: {{ $selinuxOpts.role }}
|
||||
{{- end }}
|
||||
{{- if ne $selinuxOpts.type "" }}
|
||||
type: {{ $selinuxOpts.type }}
|
||||
{{- end }}
|
||||
{{- if ne $selinuxOpts.user "" }}
|
||||
user: {{ $selinuxOpts.user }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
capabilities:
|
||||
add:
|
||||
{{- range .Values.tap.capabilities.networkCapture }}
|
||||
{{- range .Values.tap.securityContext.capabilities.networkCapture }}
|
||||
{{ print "- " . }}
|
||||
{{- end }}
|
||||
{{- if .Values.tap.serviceMesh }}
|
||||
{{- range .Values.tap.capabilities.serviceMeshCapture }}
|
||||
{{- range .Values.tap.securityContext.capabilities.serviceMeshCapture }}
|
||||
{{ print "- " . }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if .Values.tap.securityContext.capabilities.ebpfCapture }}
|
||||
{{- range .Values.tap.securityContext.capabilities.ebpfCapture }}
|
||||
{{ print "- " . }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
drop:
|
||||
- ALL
|
||||
{{- end }}
|
||||
readinessProbe:
|
||||
periodSeconds: 1
|
||||
failureThreshold: 3
|
||||
successThreshold: 1
|
||||
initialDelaySeconds: 5
|
||||
periodSeconds: {{ .Values.tap.probes.sniffer.periodSeconds }}
|
||||
failureThreshold: {{ .Values.tap.probes.sniffer.failureThreshold }}
|
||||
successThreshold: {{ .Values.tap.probes.sniffer.successThreshold }}
|
||||
initialDelaySeconds: {{ .Values.tap.probes.sniffer.initialDelaySeconds }}
|
||||
tcpSocket:
|
||||
port: {{ .Values.tap.proxy.worker.srvPort }}
|
||||
livenessProbe:
|
||||
periodSeconds: 1
|
||||
failureThreshold: 3
|
||||
successThreshold: 1
|
||||
initialDelaySeconds: 5
|
||||
periodSeconds: {{ .Values.tap.probes.sniffer.periodSeconds }}
|
||||
failureThreshold: {{ .Values.tap.probes.sniffer.failureThreshold }}
|
||||
successThreshold: {{ .Values.tap.probes.sniffer.successThreshold }}
|
||||
initialDelaySeconds: {{ .Values.tap.probes.sniffer.initialDelaySeconds }}
|
||||
tcpSocket:
|
||||
port: {{ .Values.tap.proxy.worker.srvPort }}
|
||||
volumeMounts:
|
||||
@@ -146,6 +196,7 @@ spec:
|
||||
- mountPath: /sys
|
||||
name: sys
|
||||
readOnly: true
|
||||
mountPropagation: HostToContainer
|
||||
- mountPath: /app/data
|
||||
name: data
|
||||
{{- if .Values.tap.tls }}
|
||||
@@ -153,12 +204,6 @@ spec:
|
||||
- ./tracer
|
||||
- -procfs
|
||||
- /hostproc
|
||||
{{- if ne .Values.tap.packetCapture "ebpf" }}
|
||||
- -disable-ebpf
|
||||
{{- end }}
|
||||
{{- if .Values.tap.debug }}
|
||||
- -debug
|
||||
{{- end }}
|
||||
{{- if .Values.tap.disableTlsLog }}
|
||||
- -disable-tls-log
|
||||
{{- end }}
|
||||
@@ -166,18 +211,14 @@ spec:
|
||||
- -port
|
||||
- '{{ add .Values.tap.proxy.worker.srvPort 1 }}'
|
||||
{{- end }}
|
||||
- -loglevel
|
||||
- '{{ .Values.logLevel | default "warning" }}'
|
||||
{{- if .Values.tap.docker.overrideTag.worker }}
|
||||
image: '{{ .Values.tap.docker.registry }}/worker:{{ .Values.tap.docker.overrideTag.worker }}{{ include "kubeshark.dockerTagDebugVersion" . }}'
|
||||
{{ else }}
|
||||
image: '{{ .Values.tap.docker.registry }}/worker:{{ not (eq .Values.tap.docker.tag "") | ternary .Values.tap.docker.tag (include "kubeshark.defaultVersion" .) }}{{ include "kubeshark.dockerTagDebugVersion" . }}'
|
||||
{{- end }}
|
||||
imagePullPolicy: {{ .Values.tap.docker.imagePullPolicy }}
|
||||
{{- if .Values.tap.docker.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
{{- range .Values.tap.docker.imagePullSecrets }}
|
||||
- name: {{ . }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
name: tracer
|
||||
env:
|
||||
- name: POD_NAME
|
||||
@@ -210,16 +251,45 @@ spec:
|
||||
memory: {{ .Values.tap.resources.tracer.requests.memory }}
|
||||
{{ end }}
|
||||
securityContext:
|
||||
privileged: {{ .Values.tap.securityContext.privileged }}
|
||||
{{- if not .Values.tap.securityContext.privileged }}
|
||||
{{- $aaProfile := .Values.tap.securityContext.appArmorProfile }}
|
||||
{{- $selinuxOpts := .Values.tap.securityContext.seLinuxOptions }}
|
||||
{{- if or (ne $aaProfile.type "") (ne $aaProfile.localhostProfile "") }}
|
||||
appArmorProfile:
|
||||
{{- if ne $aaProfile.type "" }}
|
||||
type: {{ $aaProfile.type }}
|
||||
{{- end }}
|
||||
{{- if ne $aaProfile.localhostProfile "" }}
|
||||
localhostProfile: {{ $aaProfile.localhostProfile }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if or (ne $selinuxOpts.level "") (ne $selinuxOpts.role "") (ne $selinuxOpts.type "") (ne $selinuxOpts.user "") }}
|
||||
seLinuxOptions:
|
||||
{{- if ne $selinuxOpts.level "" }}
|
||||
level: {{ $selinuxOpts.level }}
|
||||
{{- end }}
|
||||
{{- if ne $selinuxOpts.role "" }}
|
||||
role: {{ $selinuxOpts.role }}
|
||||
{{- end }}
|
||||
{{- if ne $selinuxOpts.type "" }}
|
||||
type: {{ $selinuxOpts.type }}
|
||||
{{- end }}
|
||||
{{- if ne $selinuxOpts.user "" }}
|
||||
user: {{ $selinuxOpts.user }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
capabilities:
|
||||
add:
|
||||
{{- range .Values.tap.capabilities.ebpfCapture }}
|
||||
{{- range .Values.tap.securityContext.capabilities.ebpfCapture }}
|
||||
{{ print "- " . }}
|
||||
{{- end }}
|
||||
{{- range .Values.tap.capabilities.networkCapture }}
|
||||
{{- range .Values.tap.securityContext.capabilities.networkCapture }}
|
||||
{{ print "- " . }}
|
||||
{{- end }}
|
||||
drop:
|
||||
- ALL
|
||||
{{- end }}
|
||||
volumeMounts:
|
||||
- mountPath: /hostproc
|
||||
name: proc
|
||||
@@ -227,6 +297,7 @@ spec:
|
||||
- mountPath: /sys
|
||||
name: sys
|
||||
readOnly: true
|
||||
mountPropagation: HostToContainer
|
||||
- mountPath: /app/data
|
||||
name: data
|
||||
- mountPath: /etc/os-release
|
||||
@@ -248,13 +319,43 @@ spec:
|
||||
- effect: NoSchedule
|
||||
operator: Exists
|
||||
{{- end }}
|
||||
{{- if gt (len .Values.tap.nodeSelectorTerms) 0}}
|
||||
{{- if .Values.tap.docker.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
{{- range .Values.tap.docker.imagePullSecrets }}
|
||||
- name: {{ . }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if gt (len .Values.tap.nodeSelectorTerms.workers) 0}}
|
||||
affinity:
|
||||
nodeAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
nodeSelectorTerms:
|
||||
{{- toYaml .Values.tap.nodeSelectorTerms | nindent 12 }}
|
||||
{{- toYaml .Values.tap.nodeSelectorTerms.workers | nindent 12 }}
|
||||
{{- end }}
|
||||
{{- if or .Values.tap.dns.nameservers .Values.tap.dns.searches .Values.tap.dns.options }}
|
||||
dnsConfig:
|
||||
{{- if .Values.tap.dns.nameservers }}
|
||||
nameservers:
|
||||
{{- range .Values.tap.dns.nameservers }}
|
||||
- {{ . | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if .Values.tap.dns.searches }}
|
||||
searches:
|
||||
{{- range .Values.tap.dns.searches }}
|
||||
- {{ . | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if .Values.tap.dns.options }}
|
||||
options:
|
||||
{{- range .Values.tap.dns.options }}
|
||||
- name: {{ .name | quote }}
|
||||
{{- if .value }}
|
||||
value: {{ .value | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
volumes:
|
||||
- hostPath:
|
||||
path: /proc
|
||||
|
||||
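The worker and tracer securityContext templating above only emits appArmorProfile, seLinuxOptions and the capability lists when `tap.securityContext.privileged` is false. A hedged sketch of a least-privilege override, where the AppArmor profile name is a placeholder and the capability lists restate the chart defaults shown later in values.yaml:

tap:
  securityContext:
    privileged: false
    appArmorProfile:
      type: Localhost
      localhostProfile: kubeshark-worker   # placeholder profile name
    capabilities:
      networkCapture:
        - NET_RAW
        - NET_ADMIN
      ebpfCapture:
        - SYS_ADMIN
        - SYS_PTRACE
        - SYS_RESOURCE
        - IPC_LOCK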
@@ -27,14 +27,14 @@ data:
|
||||
AUTH_SAML_ROLE_ATTRIBUTE: '{{ .Values.tap.auth.saml.roleAttribute }}'
|
||||
AUTH_SAML_ROLES: '{{ .Values.tap.auth.saml.roles | toJson }}'
|
||||
TELEMETRY_DISABLED: '{{ not .Values.internetConnectivity | ternary "true" (not .Values.tap.telemetry.enabled | ternary "true" "false") }}'
|
||||
SCRIPTING_DISABLED: '{{ .Values.tap.scriptingDisabled | ternary "true" "" }}'
|
||||
TARGETED_PODS_UPDATE_DISABLED: '{{ .Values.tap.targetedPodsUpdateDisabled | ternary "true" "" }}'
|
||||
PRESET_FILTERS_CHANGING_ENABLED: '{{ .Values.tap.presetFiltersChangingEnabled | ternary "true" "" }}'
|
||||
RECORDING_DISABLED: '{{ .Values.tap.recordingDisabled | ternary "true" "" }}'
|
||||
STOP_TRAFFIC_CAPTURING_DISABLED: '{{- if and .Values.tap.stopTrafficCapturingDisabled .Values.tap.stopped -}}
|
||||
SCRIPTING_DISABLED: '{{ .Values.tap.liveConfigMapChangesDisabled | ternary "true" "" }}'
|
||||
TARGETED_PODS_UPDATE_DISABLED: '{{ .Values.tap.liveConfigMapChangesDisabled | ternary "true" "" }}'
|
||||
PRESET_FILTERS_CHANGING_ENABLED: '{{ .Values.tap.liveConfigMapChangesDisabled | ternary "false" "true" }}'
|
||||
RECORDING_DISABLED: '{{ .Values.tap.liveConfigMapChangesDisabled | ternary "true" "" }}'
|
||||
STOP_TRAFFIC_CAPTURING_DISABLED: '{{- if and .Values.tap.liveConfigMapChangesDisabled .Values.tap.stopped -}}
|
||||
false
|
||||
{{- else -}}
|
||||
{{ .Values.tap.stopTrafficCapturingDisabled | ternary "true" "false" }}
|
||||
{{ .Values.tap.liveConfigMapChangesDisabled | ternary "true" "false" }}
|
||||
{{- end }}'
|
||||
GLOBAL_FILTER: {{ include "kubeshark.escapeDoubleQuotes" .Values.tap.globalFilter | quote }}
|
||||
DEFAULT_FILTER: {{ include "kubeshark.escapeDoubleQuotes" .Values.tap.defaultFilter | quote }}
|
||||
@@ -50,10 +50,11 @@ data:
|
||||
{{- end }}'
|
||||
DUPLICATE_TIMEFRAME: '{{ .Values.tap.misc.duplicateTimeframe }}'
|
||||
ENABLED_DISSECTORS: '{{ gt (len .Values.tap.enabledDissectors) 0 | ternary (join "," .Values.tap.enabledDissectors) "" }}'
|
||||
DISSECTORS_UPDATING_ENABLED: '{{ .Values.dissectorsUpdatingEnabled | ternary "true" "false" }}'
|
||||
CUSTOM_MACROS: '{{ toJson .Values.tap.customMacros }}'
|
||||
DISSECTORS_UPDATING_ENABLED: '{{ .Values.tap.liveConfigMapChangesDisabled | ternary "false" "true" }}'
|
||||
DETECT_DUPLICATES: '{{ .Values.tap.misc.detectDuplicates | ternary "true" "false" }}'
|
||||
PCAP_DUMP_ENABLE: '{{ .Values.pcapdump.enabled }}'
|
||||
PCAP_TIME_INTERVAL: '{{ .Values.pcapdump.timeInterval }}'
|
||||
PCAP_MAX_TIME: '{{ .Values.pcapdump.maxTime }}'
|
||||
PCAP_MAX_SIZE: '{{ .Values.pcapdump.maxSize }}'
|
||||
PCAP_SRC_DIR: '{{ .Values.pcapdump.pcapSrcDir }}'
|
||||
PORT_MAPPING: '{{ toJson .Values.tap.portMapping }}'
|
||||
|
||||
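All of the ConfigMap keys above that previously had individual toggles (scripting, targeted-pods updates, preset filters, recording, stop-traffic-capturing, dissector updates) now derive from the single `tap.liveConfigMapChangesDisabled` flag, so locking down live UI changes reduces to one value:

tap:
  liveConfigMapChangesDisabled: true   # disables scripting, targeting, recording, capture on/off and dissector changes from the UI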
helm-chart/templates/16-hub-service-metrics.yaml (new file, 23 lines)
@@ -0,0 +1,23 @@
|
||||
---
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
annotations:
|
||||
prometheus.io/scrape: 'true'
|
||||
prometheus.io/port: '9100'
|
||||
{{- if .Values.tap.annotations }}
|
||||
{{- toYaml .Values.tap.annotations | nindent 4 }}
|
||||
{{- end }}
|
||||
name: kubeshark-hub-metrics
|
||||
namespace: {{ .Release.Namespace }}
|
||||
spec:
|
||||
selector:
|
||||
app.kubeshark.co/app: hub
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
ports:
|
||||
- name: metrics
|
||||
protocol: TCP
|
||||
port: 9100
|
||||
targetPort: 9100
|
||||
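The new kubeshark-hub-metrics Service exposes port 9100 with prometheus.io/scrape annotations. For a Prometheus that does not honor those annotations, a minimal static scrape job could look like the following sketch; it assumes the chart is installed in a namespace named kubeshark and that Prometheus can resolve in-cluster service DNS:

scrape_configs:
  - job_name: kubeshark-hub        # illustrative job name
    static_configs:
      - targets: ['kubeshark-hub-metrics.kubeshark.svc:9100']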
@@ -20,6 +20,9 @@ spec:
|
||||
- ports:
|
||||
- protocol: TCP
|
||||
port: 8080
|
||||
- ports:
|
||||
- protocol: TCP
|
||||
port: 9100
|
||||
egress:
|
||||
- {}
|
||||
---
|
||||
@@ -2,26 +2,36 @@ Thank you for installing {{ title .Chart.Name }}.
|
||||
|
||||
Registry: {{ .Values.tap.docker.registry }}
|
||||
Tag: {{ not (eq .Values.tap.docker.tag "") | ternary .Values.tap.docker.tag (printf "v%s" .Chart.Version) }}
|
||||
|
||||
{{- if .Values.tap.docker.overrideTag.worker }}
|
||||
Overridden worker tag: {{ .Values.tap.docker.overrideTag.worker }}
|
||||
{{ end }}
|
||||
|
||||
{{- end }}
|
||||
{{- if .Values.tap.docker.overrideTag.hub }}
|
||||
Overridden hub tag: {{ .Values.tap.docker.overrideTag.hub }}
|
||||
{{ end }}
|
||||
|
||||
{{- end }}
|
||||
{{- if .Values.tap.docker.overrideTag.front }}
|
||||
Overridden front tag: {{ .Values.tap.docker.overrideTag.front }}
|
||||
{{ end }}
|
||||
{{- end }}
|
||||
{{- if .Values.tap.docker.overrideImage.worker }}
|
||||
Overridden worker image: {{ .Values.tap.docker.overrideImage.worker }}
|
||||
{{- end }}
|
||||
{{- if .Values.tap.docker.overrideImage.hub }}
|
||||
Overridden hub image: {{ .Values.tap.docker.overrideImage.hub }}
|
||||
{{- end }}
|
||||
{{- if .Values.tap.docker.overrideImage.front }}
|
||||
Overridden front image: {{ .Values.tap.docker.overrideImage.front }}
|
||||
{{- end }}
|
||||
|
||||
Your deployment has been successful. The release is named `{{ .Release.Name }}` and it has been deployed in the `{{ .Release.Namespace }}` namespace.
|
||||
|
||||
{{- if .Values.tap.telemetry.enabled }}
|
||||
Notice: Telemetry is enabled. Kubeshark will collect anonymous usage statistics.
|
||||
{{ end }}
|
||||
Notices:
|
||||
{{- if .Values.supportChatEnabled}}
|
||||
- Support chat using Intercom is enabled. It can be disabled using `--set supportChatEnabled=false`
|
||||
{{- end }}
|
||||
{{- if eq .Values.license ""}}
|
||||
- No license key was detected. You can get your license key from https://console.kubeshark.co/.
|
||||
{{- end }}
|
||||
|
||||
{{- if .Values.tap.ingress.enabled }}
|
||||
{{ if .Values.tap.ingress.enabled }}
|
||||
|
||||
You can now access the application through the following URL:
|
||||
http{{ if .Values.tap.ingress.tls }}s{{ end }}://{{ .Values.tap.ingress.host }}
|
||||
@@ -36,4 +46,4 @@ To access the application, follow these steps:
|
||||
2. Once port forwarding is done, you can access the application by visiting the following URL in your web browser:
|
||||
http://0.0.0.0:8899
|
||||
|
||||
{{ end }}
|
||||
{{- end }}
|
||||
|
||||
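The NOTES template above only prints the Ingress URL when `tap.ingress.enabled` is set. A hedged example of the relevant values, where the ingress class and host are placeholders:

tap:
  ingress:
    enabled: true
    className: nginx              # placeholder ingress class
    host: kubeshark.example.com   # placeholder host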
@@ -16,7 +16,7 @@ tap:
|
||||
front: ""
|
||||
proxy:
|
||||
worker:
|
||||
srvPort: 30001
|
||||
srvPort: 48999
|
||||
hub:
|
||||
srvPort: 8898
|
||||
front:
|
||||
@@ -37,6 +37,10 @@ tap:
|
||||
storageLimit: 5000Mi
|
||||
storageClass: standard
|
||||
dryRun: false
|
||||
dns:
|
||||
nameservers: []
|
||||
searches: []
|
||||
options: []
|
||||
resources:
|
||||
hub:
|
||||
limits:
|
||||
@@ -59,6 +63,17 @@ tap:
|
||||
requests:
|
||||
cpu: 50m
|
||||
memory: 50Mi
|
||||
probes:
|
||||
hub:
|
||||
initialDelaySeconds: 15
|
||||
periodSeconds: 10
|
||||
successThreshold: 1
|
||||
failureThreshold: 3
|
||||
sniffer:
|
||||
initialDelaySeconds: 15
|
||||
periodSeconds: 10
|
||||
successThreshold: 1
|
||||
failureThreshold: 3
|
||||
serviceMesh: true
|
||||
tls: true
|
||||
disableTlsLog: true
|
||||
@@ -67,11 +82,24 @@ tap:
|
||||
labels: {}
|
||||
annotations: {}
|
||||
nodeSelectorTerms:
|
||||
- matchExpressions:
|
||||
- key: kubernetes.io/os
|
||||
operator: In
|
||||
values:
|
||||
- linux
|
||||
hub:
|
||||
- matchExpressions:
|
||||
- key: kubernetes.io/os
|
||||
operator: In
|
||||
values:
|
||||
- linux
|
||||
workers:
|
||||
- matchExpressions:
|
||||
- key: kubernetes.io/os
|
||||
operator: In
|
||||
values:
|
||||
- linux
|
||||
front:
|
||||
- matchExpressions:
|
||||
- key: kubernetes.io/os
|
||||
operator: In
|
||||
values:
|
||||
- linux
|
||||
auth:
|
||||
enabled: false
|
||||
type: saml
|
||||
@@ -85,6 +113,10 @@ tap:
|
||||
filter: ""
|
||||
canDownloadPCAP: true
|
||||
canUseScripting: true
|
||||
scriptingPermissions:
|
||||
canSave: true
|
||||
canActivate: true
|
||||
canDelete: true
|
||||
canUpdateTargetedPods: true
|
||||
canStopTrafficCapturing: true
|
||||
showAdminConsoleLink: true
|
||||
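The default admin role above now carries the new scriptingPermissions block. A sketch of adding a second, restricted role alongside it; the role name and its specific restrictions are purely illustrative:

tap:
  auth:
    saml:
      roles:
        readonly:                      # illustrative role name
          filter: ""
          canDownloadPCAP: true
          canUseScripting: false
          scriptingPermissions:
            canSave: false
            canActivate: false
            canDelete: false
          canUpdateTargetedPods: false
          canStopTrafficCapturing: false
          showAdminConsoleLink: false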
@@ -104,24 +136,7 @@ tap:
|
||||
enabled: false
|
||||
environment: production
|
||||
defaultFilter: "!dns and !error"
|
||||
scriptingDisabled: false
|
||||
targetedPodsUpdateDisabled: false
|
||||
presetFiltersChangingEnabled: true
|
||||
recordingDisabled: false
|
||||
stopTrafficCapturingDisabled: false
|
||||
capabilities:
|
||||
networkCapture:
|
||||
- NET_RAW
|
||||
- NET_ADMIN
|
||||
serviceMeshCapture:
|
||||
- SYS_ADMIN
|
||||
- SYS_PTRACE
|
||||
- DAC_OVERRIDE
|
||||
ebpfCapture:
|
||||
- SYS_ADMIN
|
||||
- SYS_PTRACE
|
||||
- SYS_RESOURCE
|
||||
- IPC_LOCK
|
||||
liveConfigMapChangesDisabled: false
|
||||
globalFilter: ""
|
||||
enabledDissectors:
|
||||
- amqp
|
||||
@@ -134,6 +149,26 @@ tap:
|
||||
- syscall
|
||||
- ws
|
||||
- ldap
|
||||
- radius
|
||||
- diameter
|
||||
portMapping:
|
||||
http:
|
||||
- 80
|
||||
- 443
|
||||
- 8080
|
||||
amqp:
|
||||
- 5671
|
||||
- 5672
|
||||
kafka:
|
||||
- 9092
|
||||
redis:
|
||||
- 6379
|
||||
ldap:
|
||||
- 389
|
||||
diameter:
|
||||
- 3868
|
||||
customMacros:
|
||||
https: tls and (http or http2)
|
||||
metrics:
|
||||
port: 49100
|
||||
pprof:
|
||||
@@ -151,6 +186,29 @@ tap:
|
||||
duplicateTimeframe: 200ms
|
||||
detectDuplicates: false
|
||||
staleTimeoutSeconds: 30
|
||||
securityContext:
|
||||
privileged: true
|
||||
appArmorProfile:
|
||||
type: ""
|
||||
localhostProfile: ""
|
||||
seLinuxOptions:
|
||||
level: ""
|
||||
role: ""
|
||||
type: ""
|
||||
user: ""
|
||||
capabilities:
|
||||
networkCapture:
|
||||
- NET_RAW
|
||||
- NET_ADMIN
|
||||
serviceMeshCapture:
|
||||
- SYS_ADMIN
|
||||
- SYS_PTRACE
|
||||
- DAC_OVERRIDE
|
||||
ebpfCapture:
|
||||
- SYS_ADMIN
|
||||
- SYS_PTRACE
|
||||
- SYS_RESOURCE
|
||||
- IPC_LOCK
|
||||
logs:
|
||||
file: ""
|
||||
grep: ""
|
||||
@@ -159,7 +217,9 @@ pcapdump:
|
||||
timeInterval: 1m
|
||||
maxTime: 1h
|
||||
maxSize: 500MB
|
||||
pcapSrcDir: pcapdump
|
||||
time: time
|
||||
debug: false
|
||||
dest: ""
|
||||
kube:
|
||||
configPath: ""
|
||||
context: ""
|
||||
@@ -169,7 +229,6 @@ license: ""
|
||||
cloudLicenseEnabled: true
|
||||
supportChatEnabled: true
|
||||
internetConnectivity: true
|
||||
dissectorsUpdatingEnabled: true
|
||||
scripting:
|
||||
env: {}
|
||||
source: ""
|
||||
@@ -178,3 +237,4 @@ scripting:
|
||||
active: []
|
||||
console: true
|
||||
timezone: ""
|
||||
logLevel: warning
|
||||
|
||||
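Because nodeSelectorTerms is now split per component (workers, hub, front) in the values above, each piece can be scheduled separately. A sketch that keeps the workers on Linux nodes but pins hub and front to a labelled node pool; the label key and value are placeholders:

tap:
  nodeSelectorTerms:
    workers:
      - matchExpressions:
          - key: kubernetes.io/os
            operator: In
            values:
              - linux
    hub:
      - matchExpressions:
          - key: node-role.example.com/observability   # placeholder label
            operator: In
            values:
              - "true"
    front:
      - matchExpressions:
          - key: node-role.example.com/observability   # placeholder label
            operator: In
            values:
              - "true"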
@@ -1,13 +1,13 @@
|
||||
---
|
||||
# Source: kubeshark/templates/16-network-policies.yaml
|
||||
# Source: kubeshark/templates/17-network-policies.yaml
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: NetworkPolicy
|
||||
metadata:
|
||||
labels:
|
||||
helm.sh/chart: kubeshark-52.3.92
|
||||
helm.sh/chart: kubeshark-52.4
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.3.92"
|
||||
app.kubernetes.io/version: "52.4"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
annotations:
|
||||
name: kubeshark-hub-network-policy
|
||||
@@ -23,18 +23,21 @@ spec:
|
||||
- ports:
|
||||
- protocol: TCP
|
||||
port: 8080
|
||||
- ports:
|
||||
- protocol: TCP
|
||||
port: 9100
|
||||
egress:
|
||||
- {}
|
||||
---
|
||||
# Source: kubeshark/templates/16-network-policies.yaml
|
||||
# Source: kubeshark/templates/17-network-policies.yaml
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: NetworkPolicy
|
||||
metadata:
|
||||
labels:
|
||||
helm.sh/chart: kubeshark-52.3.92
|
||||
helm.sh/chart: kubeshark-52.4
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.3.92"
|
||||
app.kubernetes.io/version: "52.4"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
annotations:
|
||||
name: kubeshark-front-network-policy
|
||||
@@ -53,15 +56,15 @@ spec:
|
||||
egress:
|
||||
- {}
|
||||
---
|
||||
# Source: kubeshark/templates/16-network-policies.yaml
|
||||
# Source: kubeshark/templates/17-network-policies.yaml
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: NetworkPolicy
|
||||
metadata:
|
||||
labels:
|
||||
helm.sh/chart: kubeshark-52.3.92
|
||||
helm.sh/chart: kubeshark-52.4
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.3.92"
|
||||
app.kubernetes.io/version: "52.4"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
annotations:
|
||||
name: kubeshark-worker-network-policy
|
||||
@@ -76,7 +79,7 @@ spec:
|
||||
ingress:
|
||||
- ports:
|
||||
- protocol: TCP
|
||||
port: 30001
|
||||
port: 48999
|
||||
- protocol: TCP
|
||||
port: 49100
|
||||
egress:
|
||||
@@ -87,10 +90,10 @@ apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
labels:
|
||||
helm.sh/chart: kubeshark-52.3.92
|
||||
helm.sh/chart: kubeshark-52.4
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.3.92"
|
||||
app.kubernetes.io/version: "52.4"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
annotations:
|
||||
name: kubeshark-service-account
|
||||
@@ -104,10 +107,10 @@ metadata:
|
||||
namespace: default
|
||||
labels:
|
||||
app.kubeshark.co/app: hub
|
||||
helm.sh/chart: kubeshark-52.3.92
|
||||
helm.sh/chart: kubeshark-52.4
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.3.92"
|
||||
app.kubernetes.io/version: "52.4"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
stringData:
|
||||
LICENSE: ''
|
||||
@@ -121,10 +124,10 @@ metadata:
|
||||
namespace: default
|
||||
labels:
|
||||
app.kubeshark.co/app: hub
|
||||
helm.sh/chart: kubeshark-52.3.92
|
||||
helm.sh/chart: kubeshark-52.4
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.3.92"
|
||||
app.kubernetes.io/version: "52.4"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
stringData:
|
||||
AUTH_SAML_X509_CRT: |
|
||||
@@ -137,10 +140,10 @@ metadata:
|
||||
namespace: default
|
||||
labels:
|
||||
app.kubeshark.co/app: hub
|
||||
helm.sh/chart: kubeshark-52.3.92
|
||||
helm.sh/chart: kubeshark-52.4
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.3.92"
|
||||
app.kubernetes.io/version: "52.4"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
stringData:
|
||||
AUTH_SAML_X509_KEY: |
|
||||
@@ -152,10 +155,10 @@ metadata:
|
||||
name: kubeshark-nginx-config-map
|
||||
namespace: default
|
||||
labels:
|
||||
helm.sh/chart: kubeshark-52.3.92
|
||||
helm.sh/chart: kubeshark-52.4
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.3.92"
|
||||
app.kubernetes.io/version: "52.4"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
data:
|
||||
default.conf: |
|
||||
@@ -216,10 +219,10 @@ metadata:
|
||||
namespace: default
|
||||
labels:
|
||||
app.kubeshark.co/app: hub
|
||||
helm.sh/chart: kubeshark-52.3.92
|
||||
helm.sh/chart: kubeshark-52.4
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.3.92"
|
||||
app.kubernetes.io/version: "52.4"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
data:
|
||||
POD_REGEX: '.*'
|
||||
@@ -236,7 +239,7 @@ data:
|
||||
AUTH_TYPE: 'oidc'
|
||||
AUTH_SAML_IDP_METADATA_URL: ''
|
||||
AUTH_SAML_ROLE_ATTRIBUTE: 'role'
|
||||
AUTH_SAML_ROLES: '{"admin":{"canDownloadPCAP":true,"canStopTrafficCapturing":true,"canUpdateTargetedPods":true,"canUseScripting":true,"filter":"","showAdminConsoleLink":true}}'
|
||||
AUTH_SAML_ROLES: '{"admin":{"canDownloadPCAP":true,"canStopTrafficCapturing":true,"canUpdateTargetedPods":true,"canUseScripting":true,"filter":"","scriptingPermissions":{"canActivate":true,"canDelete":true,"canSave":true},"showAdminConsoleLink":true}}'
|
||||
TELEMETRY_DISABLED: 'false'
|
||||
SCRIPTING_DISABLED: ''
|
||||
TARGETED_PODS_UPDATE_DISABLED: ''
|
||||
@@ -252,24 +255,25 @@ data:
|
||||
TIMEZONE: ' '
|
||||
CLOUD_LICENSE_ENABLED: 'true'
|
||||
DUPLICATE_TIMEFRAME: '200ms'
|
||||
ENABLED_DISSECTORS: 'amqp,dns,http,icmp,kafka,redis,sctp,syscall,ws,ldap'
|
||||
ENABLED_DISSECTORS: 'amqp,dns,http,icmp,kafka,redis,sctp,syscall,ws,ldap,radius,diameter'
|
||||
CUSTOM_MACROS: '{"https":"tls and (http or http2)"}'
|
||||
DISSECTORS_UPDATING_ENABLED: 'true'
|
||||
DETECT_DUPLICATES: 'false'
|
||||
PCAP_DUMP_ENABLE: 'true'
|
||||
PCAP_TIME_INTERVAL: '1m'
|
||||
PCAP_MAX_TIME: '1h'
|
||||
PCAP_MAX_SIZE: '500MB'
|
||||
PCAP_SRC_DIR: 'pcapdump'
|
||||
PORT_MAPPING: '{"amqp":[5671,5672],"diameter":[3868],"http":[80,443,8080],"kafka":[9092],"ldap":[389],"redis":[6379]}'
|
||||
---
|
||||
# Source: kubeshark/templates/02-cluster-role.yaml
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
labels:
|
||||
helm.sh/chart: kubeshark-52.3.92
|
||||
helm.sh/chart: kubeshark-52.4
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.3.92"
|
||||
app.kubernetes.io/version: "52.4"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
annotations:
|
||||
name: kubeshark-cluster-role-default
|
||||
@@ -314,10 +318,10 @@ apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
helm.sh/chart: kubeshark-52.3.92
|
||||
helm.sh/chart: kubeshark-52.4
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.3.92"
|
||||
app.kubernetes.io/version: "52.4"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
annotations:
|
||||
name: kubeshark-cluster-role-binding-default
|
||||
@@ -336,10 +340,10 @@ apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
labels:
|
||||
helm.sh/chart: kubeshark-52.3.92
|
||||
helm.sh/chart: kubeshark-52.4
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.3.92"
|
||||
app.kubernetes.io/version: "52.4"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
annotations:
|
||||
name: kubeshark-self-config-role
|
||||
@@ -366,10 +370,10 @@ apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
helm.sh/chart: kubeshark-52.3.92
|
||||
helm.sh/chart: kubeshark-52.4
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.3.92"
|
||||
app.kubernetes.io/version: "52.4"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
annotations:
|
||||
name: kubeshark-self-config-role-binding
|
||||
@@ -389,10 +393,10 @@ kind: Service
|
||||
metadata:
|
||||
labels:
|
||||
app.kubeshark.co/app: hub
|
||||
helm.sh/chart: kubeshark-52.3.92
|
||||
helm.sh/chart: kubeshark-52.4
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.3.92"
|
||||
app.kubernetes.io/version: "52.4"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
annotations:
|
||||
name: kubeshark-hub
|
||||
@@ -411,10 +415,10 @@ apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels:
|
||||
helm.sh/chart: kubeshark-52.3.92
|
||||
helm.sh/chart: kubeshark-52.4
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.3.92"
|
||||
app.kubernetes.io/version: "52.4"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
annotations:
|
||||
name: kubeshark-front
|
||||
@@ -433,10 +437,10 @@ kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
labels:
|
||||
helm.sh/chart: kubeshark-52.3.92
|
||||
helm.sh/chart: kubeshark-52.4
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.3.92"
|
||||
app.kubernetes.io/version: "52.4"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
annotations:
|
||||
prometheus.io/scrape: 'true'
|
||||
@@ -446,10 +450,10 @@ metadata:
|
||||
spec:
|
||||
selector:
|
||||
app.kubeshark.co/app: worker
|
||||
helm.sh/chart: kubeshark-52.3.92
|
||||
helm.sh/chart: kubeshark-52.4
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.3.92"
|
||||
app.kubernetes.io/version: "52.4"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
ports:
|
||||
- name: metrics
|
||||
@@ -457,6 +461,35 @@ spec:
|
||||
port: 49100
|
||||
targetPort: 49100
|
||||
---
|
||||
# Source: kubeshark/templates/16-hub-service-metrics.yaml
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
labels:
|
||||
helm.sh/chart: kubeshark-52.4
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.4"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
annotations:
|
||||
prometheus.io/scrape: 'true'
|
||||
prometheus.io/port: '9100'
|
||||
name: kubeshark-hub-metrics
|
||||
namespace: default
|
||||
spec:
|
||||
selector:
|
||||
app.kubeshark.co/app: hub
|
||||
helm.sh/chart: kubeshark-52.4
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.4"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
ports:
|
||||
- name: metrics
|
||||
protocol: TCP
|
||||
port: 9100
|
||||
targetPort: 9100
|
||||
---
|
||||
# Source: kubeshark/templates/09-worker-daemon-set.yaml
|
||||
apiVersion: apps/v1
|
||||
kind: DaemonSet
|
||||
@@ -464,10 +497,10 @@ metadata:
|
||||
labels:
|
||||
app.kubeshark.co/app: worker
|
||||
sidecar.istio.io/inject: "false"
|
||||
helm.sh/chart: kubeshark-52.3.92
|
||||
helm.sh/chart: kubeshark-52.4
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.3.92"
|
||||
app.kubernetes.io/version: "52.4"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
annotations:
|
||||
name: kubeshark-worker-daemon-set
|
||||
@@ -482,10 +515,10 @@ spec:
|
||||
metadata:
|
||||
labels:
|
||||
app.kubeshark.co/app: worker
|
||||
helm.sh/chart: kubeshark-52.3.92
|
||||
helm.sh/chart: kubeshark-52.4
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.3.92"
|
||||
app.kubernetes.io/version: "52.4"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
name: kubeshark-worker-daemon-set
|
||||
namespace: kubeshark
|
||||
@@ -496,21 +529,21 @@ spec:
|
||||
- -i
|
||||
- any
|
||||
- -port
|
||||
- '30001'
|
||||
- '48999'
|
||||
- -metrics-port
|
||||
- '49100'
|
||||
- -packet-capture
|
||||
- 'best'
|
||||
- -unixsocket
|
||||
- -loglevel
|
||||
- 'warning'
|
||||
- -servicemesh
|
||||
- -procfs
|
||||
- /hostproc
|
||||
- -disable-ebpf
|
||||
- -resolution-strategy
|
||||
- 'auto'
|
||||
- -staletimeout
|
||||
- '30'
|
||||
image: 'docker.io/kubeshark/worker:v52.3.92'
|
||||
image: 'docker.io/kubeshark/worker:v52.4'
|
||||
imagePullPolicy: Always
|
||||
name: sniffer
|
||||
ports:
|
||||
@@ -552,29 +585,21 @@ spec:
|
||||
memory: 50Mi
|
||||
|
||||
securityContext:
|
||||
capabilities:
|
||||
add:
|
||||
- NET_RAW
|
||||
- NET_ADMIN
|
||||
- SYS_ADMIN
|
||||
- SYS_PTRACE
|
||||
- DAC_OVERRIDE
|
||||
drop:
|
||||
- ALL
|
||||
privileged: true
|
||||
readinessProbe:
|
||||
periodSeconds: 1
|
||||
periodSeconds: 10
|
||||
failureThreshold: 3
|
||||
successThreshold: 1
|
||||
initialDelaySeconds: 5
|
||||
initialDelaySeconds: 15
|
||||
tcpSocket:
|
||||
port: 30001
|
||||
port: 48999
|
||||
livenessProbe:
|
||||
periodSeconds: 1
|
||||
periodSeconds: 10
|
||||
failureThreshold: 3
|
||||
successThreshold: 1
|
||||
initialDelaySeconds: 5
|
||||
initialDelaySeconds: 15
|
||||
tcpSocket:
|
||||
port: 30001
|
||||
port: 48999
|
||||
volumeMounts:
|
||||
- mountPath: /hostproc
|
||||
name: proc
|
||||
@@ -582,15 +607,17 @@ spec:
|
||||
- mountPath: /sys
|
||||
name: sys
|
||||
readOnly: true
|
||||
mountPropagation: HostToContainer
|
||||
- mountPath: /app/data
|
||||
name: data
|
||||
- command:
|
||||
- ./tracer
|
||||
- -procfs
|
||||
- /hostproc
|
||||
- -disable-ebpf
|
||||
- -disable-tls-log
|
||||
image: 'docker.io/kubeshark/worker:v52.3.92'
|
||||
- -loglevel
|
||||
- 'warning'
|
||||
image: 'docker.io/kubeshark/worker:v52.4'
|
||||
imagePullPolicy: Always
|
||||
name: tracer
|
||||
env:
|
||||
@@ -622,16 +649,7 @@ spec:
|
||||
memory: 50Mi
|
||||
|
||||
securityContext:
|
||||
capabilities:
|
||||
add:
|
||||
- SYS_ADMIN
|
||||
- SYS_PTRACE
|
||||
- SYS_RESOURCE
|
||||
- IPC_LOCK
|
||||
- NET_RAW
|
||||
- NET_ADMIN
|
||||
drop:
|
||||
- ALL
|
||||
privileged: true
|
||||
volumeMounts:
|
||||
- mountPath: /hostproc
|
||||
name: proc
|
||||
@@ -639,6 +657,7 @@ spec:
|
||||
- mountPath: /sys
|
||||
name: sys
|
||||
readOnly: true
|
||||
mountPropagation: HostToContainer
|
||||
- mountPath: /app/data
|
||||
name: data
|
||||
- mountPath: /etc/os-release
|
||||
@@ -692,10 +711,10 @@ kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
app.kubeshark.co/app: hub
|
||||
helm.sh/chart: kubeshark-52.3.92
|
||||
helm.sh/chart: kubeshark-52.4
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.3.92"
|
||||
app.kubernetes.io/version: "52.4"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
annotations:
|
||||
name: kubeshark-hub
|
||||
@@ -711,10 +730,10 @@ spec:
|
||||
metadata:
|
||||
labels:
|
||||
app.kubeshark.co/app: hub
|
||||
helm.sh/chart: kubeshark-52.3.92
|
||||
helm.sh/chart: kubeshark-52.4
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.3.92"
|
||||
app.kubernetes.io/version: "52.4"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
spec:
|
||||
dnsPolicy: ClusterFirstWithHostNet
|
||||
@@ -725,6 +744,8 @@ spec:
|
||||
- ./hub
|
||||
- -port
|
||||
- "8080"
|
||||
- -loglevel
|
||||
- 'warning'
|
||||
env:
|
||||
- name: POD_NAME
|
||||
valueFrom:
|
||||
@@ -742,20 +763,20 @@ spec:
|
||||
value: 'https://api.kubeshark.co'
|
||||
- name: PROFILING_ENABLED
|
||||
value: 'false'
|
||||
image: 'docker.io/kubeshark/hub:v52.3.92'
|
||||
image: 'docker.io/kubeshark/hub:v52.4'
|
||||
imagePullPolicy: Always
|
||||
readinessProbe:
|
||||
periodSeconds: 1
|
||||
periodSeconds: 10
|
||||
failureThreshold: 3
|
||||
successThreshold: 1
|
||||
initialDelaySeconds: 3
|
||||
initialDelaySeconds: 15
|
||||
tcpSocket:
|
||||
port: 8080
|
||||
livenessProbe:
|
||||
periodSeconds: 1
|
||||
periodSeconds: 10
|
||||
failureThreshold: 3
|
||||
successThreshold: 1
|
||||
initialDelaySeconds: 3
|
||||
initialDelaySeconds: 15
|
||||
tcpSocket:
|
||||
port: 8080
|
||||
resources:
|
||||
@@ -775,6 +796,15 @@ spec:
|
||||
- name: saml-x509-volume
|
||||
mountPath: "/etc/saml/x509"
|
||||
readOnly: true
|
||||
affinity:
|
||||
nodeAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
nodeSelectorTerms:
|
||||
- matchExpressions:
|
||||
- key: kubernetes.io/os
|
||||
operator: In
|
||||
values:
|
||||
- linux
|
||||
volumes:
|
||||
- name: saml-x509-volume
|
||||
projected:
|
||||
@@ -796,10 +826,10 @@ kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
app.kubeshark.co/app: front
|
||||
helm.sh/chart: kubeshark-52.3.92
|
||||
helm.sh/chart: kubeshark-52.4
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.3.92"
|
||||
app.kubernetes.io/version: "52.4"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
annotations:
|
||||
name: kubeshark-front
|
||||
@@ -815,10 +845,10 @@ spec:
|
||||
metadata:
|
||||
labels:
|
||||
app.kubeshark.co/app: front
|
||||
helm.sh/chart: kubeshark-52.3.92
|
||||
helm.sh/chart: kubeshark-52.4
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.3.92"
|
||||
app.kubernetes.io/version: "52.4"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
spec:
|
||||
containers:
|
||||
@@ -853,7 +883,7 @@ spec:
|
||||
value: 'false'
|
||||
- name: REACT_APP_SENTRY_ENVIRONMENT
|
||||
value: 'production'
|
||||
image: 'docker.io/kubeshark/front:v52.3.92'
|
||||
image: 'docker.io/kubeshark/front:v52.4'
|
||||
imagePullPolicy: Always
|
||||
name: kubeshark-front
|
||||
livenessProbe:
|
||||
@@ -883,6 +913,15 @@ spec:
|
||||
mountPath: /etc/nginx/conf.d/default.conf
|
||||
subPath: default.conf
|
||||
readOnly: true
|
||||
affinity:
|
||||
nodeAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
nodeSelectorTerms:
|
||||
- matchExpressions:
|
||||
- key: kubernetes.io/os
|
||||
operator: In
|
||||
values:
|
||||
- linux
|
||||
volumes:
|
||||
- name: nginx-config
|
||||
configMap:
|
||||
|
||||