Mirror of https://github.com/kubeshark/kubeshark.git (synced 2026-02-16 02:50:03 +00:00)
Compare commits
12 Commits
- 139336d4ee
- f68fed0de8
- 1d7d242e6c
- aa904e23c7
- baf0e65337
- a33a3467fc
- a9b598bc41
- 0aee367ad5
- 8c7d9ea8fd
- fab0f713ed
- 2563cc1922
- 26c9f42eba
@@ -45,10 +45,6 @@ Download **Kubeshark**'s binary distribution [latest release](https://github.com

```shell
kubeshark tap
```

```shell
kubeshark tap -A
```

```shell
kubeshark tap -n sock-shop "(catalo*|front-end*)"
```
```diff
@@ -12,14 +12,14 @@ func ServerConnection(kubernetesProvider *kubernetes.Provider) bool {
 
     var connectedToHub, connectedToFront bool
 
-    if err := checkProxy(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort), "/echo", kubernetesProvider); err != nil {
+    if err := checkProxy(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.Port), "/echo", kubernetesProvider); err != nil {
         log.Error().Err(err).Msg("Couldn't connect to Hub using proxy!")
     } else {
         connectedToHub = true
         log.Info().Msg("Connected successfully to Hub using proxy.")
     }
 
-    if err := checkProxy(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Front.SrcPort), "", kubernetesProvider); err != nil {
+    if err := checkProxy(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Front.Port), "", kubernetesProvider); err != nil {
         log.Error().Err(err).Msg("Couldn't connect to Front using proxy!")
     } else {
         connectedToFront = true
```
```diff
@@ -36,12 +36,12 @@ func init() {
         log.Debug().Err(err).Send()
     }
 
-    consoleCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.SrcPort, "Provide a custom port for the Hub")
+    consoleCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.Port, "Provide a custom port for the Hub")
     consoleCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the Hub")
 }
 
 func runConsole() {
-    hubUrl := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort)
+    hubUrl := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.Port)
     response, err := http.Get(fmt.Sprintf("%s/echo", hubUrl))
     if err != nil || response.StatusCode != 200 {
         log.Info().Msg(fmt.Sprintf(utils.Yellow, "Couldn't connect to Hub. Establishing proxy..."))
@@ -51,10 +51,10 @@ func runConsole() {
     interrupt := make(chan os.Signal, 1)
     signal.Notify(interrupt, os.Interrupt)
 
-    log.Info().Str("host", config.Config.Tap.Proxy.Host).Uint16("port", config.Config.Tap.Proxy.Hub.SrcPort).Msg("Connecting to:")
+    log.Info().Str("host", config.Config.Tap.Proxy.Host).Uint16("port", config.Config.Tap.Proxy.Hub.Port).Msg("Connecting to:")
     u := url.URL{
         Scheme: "ws",
-        Host:   fmt.Sprintf("%s:%d", config.Config.Tap.Proxy.Host, config.Config.Tap.Proxy.Hub.SrcPort),
+        Host:   fmt.Sprintf("%s:%d", config.Config.Tap.Proxy.Host, config.Config.Tap.Proxy.Hub.Port),
         Path:   "/scripts/logs",
     }
 
```
```diff
@@ -6,6 +6,7 @@ import (
     "os"
     "path/filepath"
     "sort"
     "strings"
 
     "github.com/kubeshark/kubeshark/config"
     "github.com/kubeshark/kubeshark/kubernetes"
@@ -145,7 +146,7 @@ var hubPodMappings = map[string]interface{}{
         },
     },
     "spec.containers[0].image": "{{ .Values.tap.docker.registry }}/hub:{{ .Values.tap.docker.tag }}",
-    "spec.containers[0].imagePullPolicy": "{{ .Values.tap.docker.imagePullPolicy }}",
+    "spec.containers[0].imagePullPolicy": "{{ .Values.tap.docker.imagepullpolicy }}",
     "spec.containers[0].resources.limits.cpu": "{{ .Values.tap.resources.hub.limits.cpu }}",
     "spec.containers[0].resources.limits.memory": "{{ .Values.tap.resources.hub.limits.memory }}",
     "spec.containers[0].resources.requests.cpu": "{{ .Values.tap.resources.hub.requests.cpu }}",
@@ -156,7 +157,7 @@ var hubServiceMappings = serviceAccountMappings
 var frontPodMappings = map[string]interface{}{
     "metadata.namespace": "{{ .Values.tap.selfnamespace }}",
     "spec.containers[0].image": "{{ .Values.tap.docker.registry }}/front:{{ .Values.tap.docker.tag }}",
-    "spec.containers[0].imagePullPolicy": "{{ .Values.tap.docker.imagePullPolicy }}",
+    "spec.containers[0].imagePullPolicy": "{{ .Values.tap.docker.imagepullpolicy }}",
 }
 var frontServiceMappings = serviceAccountMappings
 var persistentVolumeMappings = map[string]interface{}{
@@ -167,12 +168,13 @@ var persistentVolumeMappings = map[string]interface{}{
 var workerDaemonSetMappings = map[string]interface{}{
     "metadata.namespace": "{{ .Values.tap.selfnamespace }}",
     "spec.template.spec.containers[0].image": "{{ .Values.tap.docker.registry }}/worker:{{ .Values.tap.docker.tag }}",
-    "spec.template.spec.containers[0].imagePullPolicy": "{{ .Values.tap.docker.imagePullPolicy }}",
+    "spec.template.spec.containers[0].imagePullPolicy": "{{ .Values.tap.docker.imagepullpolicy }}",
     "spec.template.spec.containers[0].resources.limits.cpu": "{{ .Values.tap.resources.worker.limits.cpu }}",
     "spec.template.spec.containers[0].resources.limits.memory": "{{ .Values.tap.resources.worker.limits.memory }}",
     "spec.template.spec.containers[0].resources.requests.cpu": "{{ .Values.tap.resources.worker.requests.cpu }}",
     "spec.template.spec.containers[0].resources.requests.memory": "{{ .Values.tap.resources.worker.requests.memory }}",
     "spec.template.spec.containers[0].command[0]": "{{ .Values.tap.debug | ternary \"./worker -debug\" \"./worker\" }}",
+    "spec.template.spec.containers[0].command[4]": "{{ .Values.tap.proxy.worker.srvport }}",
     "spec.template.spec.containers[0].command[6]": "{{ .Values.tap.packetcapture }}",
 }
 
```
```diff
@@ -257,6 +259,66 @@ func template(object interface{}, mappings map[string]interface{}) (template int
     return
 }
 
+func handleHubPod(manifest string) string {
+    lines := strings.Split(manifest, "\n")
+
+    for i, line := range lines {
+        if strings.HasPrefix(strings.TrimSpace(line), "hostPort:") {
+            lines[i] = "      hostPort: {{ .Values.tap.proxy.hub.srvport }}"
+        }
+    }
+
+    return strings.Join(lines, "\n")
+}
+
+func handleFrontPod(manifest string) string {
+    lines := strings.Split(manifest, "\n")
+
+    for i, line := range lines {
+        if strings.HasPrefix(strings.TrimSpace(line), "hostPort:") {
+            lines[i] = "      hostPort: {{ .Values.tap.proxy.front.srvport }}"
+        }
+    }
+
+    return strings.Join(lines, "\n")
+}
+
+func handlePVCManifest(manifest string) string {
+    return fmt.Sprintf("{{- if .Values.tap.persistentstorage }}\n%s{{- end }}\n", manifest)
+}
+
+func handleDaemonSetManifest(manifest string) string {
+    lines := strings.Split(manifest, "\n")
+
+    for i, line := range lines {
+        if strings.TrimSpace(line) == "- mountPath: /app/data" {
+            lines[i] = fmt.Sprintf("{{- if .Values.tap.persistentstorage }}\n%s", line)
+        }
+
+        if strings.TrimSpace(line) == "name: kubeshark-persistent-volume" {
+            lines[i] = fmt.Sprintf("%s\n{{- end }}", line)
+        }
+
+        if strings.TrimSpace(line) == "- name: kubeshark-persistent-volume" {
+            lines[i] = fmt.Sprintf("{{- if .Values.tap.persistentstorage }}\n%s", line)
+        }
+
+        if strings.TrimSpace(line) == "claimName: kubeshark-persistent-volume-claim" {
+            lines[i] = fmt.Sprintf("%s\n{{- end }}", line)
+        }
+
+        if strings.HasPrefix(strings.TrimSpace(line), "- containerPort:") {
+            lines[i] = "        - containerPort: {{ .Values.tap.proxy.worker.srvport }}"
+        }
+
+        if strings.HasPrefix(strings.TrimSpace(line), "hostPort:") {
+            lines[i] = "          hostPort: {{ .Values.tap.proxy.worker.srvport }}"
+        }
+    }
+
+    return strings.Join(lines, "\n")
+}
+
 func dumpHelmChart(objects map[string]interface{}) error {
     folder := filepath.Join(".", "helm-chart")
     templatesFolder := filepath.Join(folder, "templates")
```
```diff
@@ -285,6 +347,22 @@ func dumpHelmChart(objects map[string]interface{}) error {
             return err
         }
 
+        if filename == "04-hub-pod.yaml" {
+            manifest = handleHubPod(manifest)
+        }
+
+        if filename == "06-front-pod.yaml" {
+            manifest = handleFrontPod(manifest)
+        }
+
+        if filename == "08-persistent-volume-claim.yaml" {
+            manifest = handlePVCManifest(manifest)
+        }
+
+        if filename == "09-worker-daemon-set.yaml" {
+            manifest = handleDaemonSetManifest(manifest)
+        }
+
         path := filepath.Join(templatesFolder, filename)
         err = os.WriteFile(path, []byte(manifestHeader+manifest), 0644)
         if err != nil {
```
```diff
@@ -103,6 +103,10 @@ func generateManifests() (
     workerDaemonSet *kubernetes.DaemonSet,
     err error,
 ) {
+    config.Config.License = ""
+    persistentStorage := config.Config.Tap.PersistentStorage
+    config.Config.Tap.PersistentStorage = true
+
     var kubernetesProvider *kubernetes.Provider
     kubernetesProvider, err = getKubernetesProviderForCli(true, true)
     if err != nil {
@@ -142,7 +146,7 @@ func generateManifests() (
         ImagePullPolicy:  config.Config.ImagePullPolicy(),
         ImagePullSecrets: config.Config.ImagePullSecrets(),
         Debug:            config.Config.Tap.Debug,
-    }, config.Config.Tap.Proxy.Host, fmt.Sprintf("%d", config.Config.Tap.Proxy.Hub.SrcPort))
+    }, config.Config.Tap.Proxy.Host, fmt.Sprintf("%d", config.Config.Tap.Proxy.Hub.Port))
     if err != nil {
         return
     }
@@ -169,6 +173,8 @@ func generateManifests() (
         return
     }
 
+    config.Config.Tap.PersistentStorage = persistentStorage
+
     return
 }
```
cmd/pro.go (18 changed lines)
```diff
@@ -40,19 +40,19 @@ func init() {
         log.Debug().Err(err).Send()
     }
 
-    proCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.SrcPort, "Provide a custom port for the Hub")
+    proCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.Port, "Provide a custom port for the Hub")
     proCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the Hub")
 }
 
 func acquireLicense() {
-    hubUrl := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort)
+    hubUrl := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.Port)
     response, err := http.Get(fmt.Sprintf("%s/echo", hubUrl))
     if err != nil || response.StatusCode != 200 {
         log.Info().Msg(fmt.Sprintf(utils.Yellow, "Couldn't connect to Hub. Establishing proxy..."))
         runProxy(false, true)
     }
 
-    connector = connect.NewConnector(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort), connect.DefaultRetries, connect.DefaultTimeout)
+    connector = connect.NewConnector(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.Port), connect.DefaultRetries, connect.DefaultTimeout)
 
     log.Info().Str("url", PRO_URL).Msg("Opening in the browser:")
     utils.OpenBrowser(PRO_URL)
@@ -61,17 +61,19 @@ func acquireLicense() {
 }
 
 func updateLicense(licenseKey string) {
+    log.Info().Str("key", licenseKey).Msg("Received license:")
+
     config.Config.License = licenseKey
     err := config.WriteConfig(&config.Config)
     if err != nil {
         log.Error().Err(err).Send()
     }
 
+    connector.PostLicenseSingle(config.Config.License)
+
+    log.Info().Msg("Updated the license. Exiting.")
+
     go func() {
-        connector.PostLicense(config.Config.License)
-
-        log.Info().Msg("Updated the license. Exiting.")
-
         time.Sleep(2 * time.Second)
         os.Exit(0)
     }()
@@ -105,8 +107,6 @@ func runLicenseRecieverServer() {
 
         licenseKey := string(data)
 
-        log.Info().Str("key", licenseKey).Msg("Received license:")
-
         updateLicense(licenseKey)
     })
 
```
```diff
@@ -24,7 +24,7 @@ func init() {
         log.Debug().Err(err).Send()
     }
 
-    proxyCmd.Flags().Uint16(configStructs.ProxyFrontPortLabel, defaultTapConfig.Proxy.Front.SrcPort, "Provide a custom port for the front-end proxy/port-forward")
-    proxyCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.SrcPort, "Provide a custom port for the Hub proxy/port-forward")
+    proxyCmd.Flags().Uint16(configStructs.ProxyFrontPortLabel, defaultTapConfig.Proxy.Front.Port, "Provide a custom port for the front-end proxy/port-forward")
+    proxyCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.Port, "Provide a custom port for the Hub proxy/port-forward")
     proxyCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the proxy/port-forward")
 }
 
@@ -63,12 +63,12 @@ func runProxy(block bool, noBrowser bool) {
 
     var establishedProxy bool
 
-    hubUrl := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort)
+    hubUrl := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.Port)
     response, err := http.Get(fmt.Sprintf("%s/echo", hubUrl))
     if err == nil && response.StatusCode == 200 {
         log.Info().
             Str("service", kubernetes.HubServiceName).
-            Int("port", int(config.Config.Tap.Proxy.Hub.SrcPort)).
+            Int("port", int(config.Config.Tap.Proxy.Hub.Port)).
             Msg("Found a running service.")
 
         okToOpen("Hub", hubUrl, true)
@@ -79,8 +79,8 @@ func runProxy(block bool, noBrowser bool) {
             kubernetes.HubServiceName,
             kubernetes.HubPodName,
             configStructs.ProxyHubPortLabel,
-            config.Config.Tap.Proxy.Hub.SrcPort,
-            config.Config.Tap.Proxy.Hub.DstPort,
+            config.Config.Tap.Proxy.Hub.Port,
+            configStructs.ContainerPort,
             "/echo",
         )
         connector := connect.NewConnector(hubUrl, connect.DefaultRetries, connect.DefaultTimeout)
@@ -93,12 +93,12 @@
         okToOpen("Hub", hubUrl, true)
     }
 
-    frontUrl := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Front.SrcPort)
+    frontUrl := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Front.Port)
     response, err = http.Get(fmt.Sprintf("%s/", frontUrl))
     if err == nil && response.StatusCode == 200 {
         log.Info().
             Str("service", kubernetes.FrontServiceName).
-            Int("port", int(config.Config.Tap.Proxy.Front.SrcPort)).
+            Int("port", int(config.Config.Tap.Proxy.Front.Port)).
             Msg("Found a running service.")
 
         okToOpen("Kubeshark", frontUrl, noBrowser)
@@ -109,8 +109,8 @@
             kubernetes.FrontServiceName,
             kubernetes.FrontPodName,
             configStructs.ProxyFrontPortLabel,
-            config.Config.Tap.Proxy.Front.SrcPort,
-            config.Config.Tap.Proxy.Front.DstPort,
+            config.Config.Tap.Proxy.Front.Port,
+            configStructs.ContainerPort,
             "",
         )
         connector := connect.NewConnector(frontUrl, connect.DefaultRetries, connect.DefaultTimeout)
```
```diff
@@ -34,7 +34,7 @@ func init() {
         log.Debug().Err(err).Send()
     }
 
-    scriptsCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.SrcPort, "Provide a custom port for the Hub")
+    scriptsCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.Port, "Provide a custom port for the Hub")
     scriptsCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the Hub")
 }
 
@@ -44,14 +44,14 @@ func runScripts() {
         return
     }
 
-    hubUrl := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort)
+    hubUrl := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.Port)
     response, err := http.Get(fmt.Sprintf("%s/echo", hubUrl))
     if err != nil || response.StatusCode != 200 {
         log.Info().Msg(fmt.Sprintf(utils.Yellow, "Couldn't connect to Hub. Establishing proxy..."))
         runProxy(false, true)
     }
 
-    connector = connect.NewConnector(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort), connect.DefaultRetries, connect.DefaultTimeout)
+    connector = connect.NewConnector(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.Port), connect.DefaultRetries, connect.DefaultTimeout)
 
     watchScripts(true)
 }
```
```diff
@@ -47,11 +47,12 @@ func init() {
     tapCmd.Flags().StringP(configStructs.DockerTagLabel, "t", defaultTapConfig.Docker.Tag, "The tag of the Docker images that are going to be pulled")
     tapCmd.Flags().String(configStructs.DockerImagePullPolicy, defaultTapConfig.Docker.ImagePullPolicy, "ImagePullPolicy for the Docker images")
     tapCmd.Flags().StringSlice(configStructs.DockerImagePullSecrets, defaultTapConfig.Docker.ImagePullSecrets, "ImagePullSecrets for the Docker images")
-    tapCmd.Flags().Uint16(configStructs.ProxyFrontPortLabel, defaultTapConfig.Proxy.Front.SrcPort, "Provide a custom port for the front-end proxy/port-forward")
-    tapCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.SrcPort, "Provide a custom port for the Hub proxy/port-forward")
+    tapCmd.Flags().Uint16(configStructs.ProxyFrontPortLabel, defaultTapConfig.Proxy.Front.Port, "Provide a custom port for the front-end proxy/port-forward")
+    tapCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.Port, "Provide a custom port for the Hub proxy/port-forward")
     tapCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the proxy/port-forward")
     tapCmd.Flags().StringSliceP(configStructs.NamespacesLabel, "n", defaultTapConfig.Namespaces, "Namespaces selector")
     tapCmd.Flags().StringP(configStructs.SelfNamespaceLabel, "s", defaultTapConfig.SelfNamespace, "Self-namespace of Kubeshark")
+    tapCmd.Flags().Bool(configStructs.PersistentStorageLabel, defaultTapConfig.PersistentStorage, "Enable persistent storage (PersistentVolumeClaim)")
     tapCmd.Flags().String(configStructs.StorageLimitLabel, defaultTapConfig.StorageLimit, "Override the default storage limit (per node)")
     tapCmd.Flags().String(configStructs.StorageClassLabel, defaultTapConfig.StorageClass, "Override the default storage class of the PersistentVolumeClaim (per node)")
     tapCmd.Flags().Bool(configStructs.DryRunLabel, defaultTapConfig.DryRun, "Preview of all pods matching the regex, without tapping them")
```
```diff
@@ -13,6 +13,7 @@ import (
     "github.com/docker/docker/client"
     "github.com/docker/go-connections/nat"
     "github.com/kubeshark/kubeshark/config"
+    "github.com/kubeshark/kubeshark/config/configStructs"
     "github.com/kubeshark/kubeshark/docker"
     "github.com/kubeshark/kubeshark/internal/connect"
     "github.com/kubeshark/kubeshark/kubernetes"
@@ -141,10 +142,10 @@ func createAndStartContainers(
 
     hostConfigFront := &container.HostConfig{
         PortBindings: nat.PortMap{
-            nat.Port(fmt.Sprintf("%d/tcp", config.Config.Tap.Proxy.Front.DstPort)): []nat.PortBinding{
+            nat.Port(fmt.Sprintf("%d/tcp", configStructs.ContainerPort)): []nat.PortBinding{
                 {
                     HostIP:   hostIP,
-                    HostPort: fmt.Sprintf("%d", config.Config.Tap.Proxy.Front.SrcPort),
+                    HostPort: fmt.Sprintf("%d", config.Config.Tap.Proxy.Front.Port),
                 },
             },
         },
@@ -156,7 +157,7 @@ func createAndStartContainers(
         Env: []string{
             "REACT_APP_DEFAULT_FILTER= ",
             "REACT_APP_HUB_HOST= ",
-            fmt.Sprintf("REACT_APP_HUB_PORT=%d", config.Config.Tap.Proxy.Hub.SrcPort),
+            fmt.Sprintf("REACT_APP_HUB_PORT=%d", config.Config.Tap.Proxy.Hub.Port),
         },
     }, hostConfigFront, nil, nil, nameFront)
     if err != nil {
```
```diff
@@ -165,16 +166,16 @@ func createAndStartContainers(
 
     hostConfigHub := &container.HostConfig{
         PortBindings: nat.PortMap{
-            nat.Port(fmt.Sprintf("%d/tcp", config.Config.Tap.Proxy.Hub.DstPort)): []nat.PortBinding{
+            nat.Port(fmt.Sprintf("%d/tcp", config.Config.Tap.Proxy.Hub.SrvPort)): []nat.PortBinding{
                 {
                     HostIP:   hostIP,
-                    HostPort: fmt.Sprintf("%d", config.Config.Tap.Proxy.Hub.SrcPort),
+                    HostPort: fmt.Sprintf("%d", config.Config.Tap.Proxy.Hub.Port),
                 },
             },
         },
     }
 
-    cmdHub := []string{"-port", fmt.Sprintf("%d", config.Config.Tap.Proxy.Hub.DstPort)}
+    cmdHub := []string{"-port", fmt.Sprintf("%d", config.Config.Tap.Proxy.Hub.SrvPort)}
     if config.DebugMode {
         cmdHub = append(cmdHub, fmt.Sprintf("-%s", config.DebugFlag))
     }
@@ -183,13 +184,13 @@ func createAndStartContainers(
         Image: imageHub,
         Cmd:   cmdHub,
         Tty:   false,
-        ExposedPorts: nat.PortSet{nat.Port(fmt.Sprintf("%d/tcp", config.Config.Tap.Proxy.Hub.DstPort)): {}},
+        ExposedPorts: nat.PortSet{nat.Port(fmt.Sprintf("%d/tcp", config.Config.Tap.Proxy.Hub.SrvPort)): {}},
     }, hostConfigHub, nil, nil, nameHub)
     if err != nil {
         return
     }
 
-    cmdWorker := []string{"-f", "./import", "-port", fmt.Sprintf("%d", config.Config.Tap.Proxy.Worker.DstPort)}
+    cmdWorker := []string{"-f", "./import", "-port", fmt.Sprintf("%d", config.Config.Tap.Proxy.Worker.SrvPort)}
     if config.DebugMode {
         cmdWorker = append(cmdWorker, fmt.Sprintf("-%s", config.DebugFlag))
     }
```
```diff
@@ -328,7 +329,7 @@ func pcap(tarPath string) {
         },
     }
 
-    connector = connect.NewConnector(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort), connect.DefaultRetries, connect.DefaultTimeout)
+    connector = connect.NewConnector(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.Port), connect.DefaultRetries, connect.DefaultTimeout)
     connector.PostWorkerPodToHub(workerPod)
 
     // License
@@ -337,10 +338,10 @@ func pcap(tarPath string) {
     }
 
     log.Info().
-        Str("url", kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort)).
+        Str("url", kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.Port)).
         Msg(fmt.Sprintf(utils.Green, "Hub is available at:"))
 
-    url := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Front.SrcPort)
+    url := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Front.Port)
     log.Info().Str("url", url).Msg(fmt.Sprintf(utils.Green, fmt.Sprintf("%s is available at:", misc.Software)))
 
     if !config.Config.HeadlessMode {
@@ -60,7 +60,7 @@ func tap() {
         Str("limit", config.Config.Tap.StorageLimit).
         Msg(fmt.Sprintf("%s will store the traffic up to a limit (per node). Oldest TCP/UDP streams will be removed once the limit is reached.", misc.Software))
 
-    connector = connect.NewConnector(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort), connect.DefaultRetries, connect.DefaultTimeout)
+    connector = connect.NewConnector(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.Port), connect.DefaultRetries, connect.DefaultTimeout)
 
     kubernetesProvider, err := getKubernetesProviderForCli(false, false)
     if err != nil {
```
```diff
@@ -409,8 +409,8 @@ func postHubStarted(ctx context.Context, kubernetesProvider *kubernetes.Provider
         kubernetes.HubServiceName,
         kubernetes.HubPodName,
         configStructs.ProxyHubPortLabel,
-        config.Config.Tap.Proxy.Hub.SrcPort,
-        config.Config.Tap.Proxy.Hub.DstPort,
+        config.Config.Tap.Proxy.Hub.Port,
+        configStructs.ContainerPort,
         "/echo",
     )
 
@@ -460,7 +460,7 @@ func postHubStarted(ctx context.Context, kubernetesProvider *kubernetes.Provider
 
     if !update {
         // Hub proxy URL
-        url := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort)
+        url := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.Port)
         log.Info().Str("url", url).Msg(fmt.Sprintf(utils.Green, "Hub is available at:"))
     }
 
@@ -476,12 +476,12 @@ func postFrontStarted(ctx context.Context, kubernetesProvider *kubernetes.Provid
         kubernetes.FrontServiceName,
         kubernetes.FrontPodName,
         configStructs.ProxyFrontPortLabel,
-        config.Config.Tap.Proxy.Front.SrcPort,
-        config.Config.Tap.Proxy.Front.DstPort,
+        config.Config.Tap.Proxy.Front.Port,
+        configStructs.ContainerPort,
         "",
     )
 
-    url := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Front.SrcPort)
+    url := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Front.Port)
     log.Info().Str("url", url).Msg(fmt.Sprintf(utils.Green, fmt.Sprintf("%s is available at:", misc.Software)))
 
     if !config.Config.HeadlessMode {
```
```diff
@@ -17,6 +17,7 @@ const (
     ProxyHostLabel         = "proxy-host"
     NamespacesLabel        = "namespaces"
     SelfNamespaceLabel     = "selfnamespace"
+    PersistentStorageLabel = "persistentstorage"
     StorageLimitLabel      = "storagelimit"
     StorageClassLabel      = "storageclass"
     DryRunLabel            = "dryrun"
@@ -25,6 +26,8 @@ const (
     TlsLabel           = "tls"
     IgnoreTaintedLabel = "ignoreTainted"
     DebugLabel         = "debug"
+    ContainerPort      = 80
+    ContainerPortStr   = "80"
 )
 
 type ResourceLimits struct {
@@ -43,18 +46,17 @@ type ResourceRequirements struct {
 }
 
 type WorkerConfig struct {
-    SrcPort uint16 `yaml:"port" default:"8897"`
-    DstPort uint16 `yaml:"srvport" default:"8897"`
+    SrvPort uint16 `yaml:"srvport" default:"8897"`
 }
 
 type HubConfig struct {
-    SrcPort uint16 `yaml:"port" default:"8898"`
-    DstPort uint16 `yaml:"srvport" default:"80"`
+    Port    uint16 `yaml:"port" default:"8898"`
+    SrvPort uint16 `yaml:"srvport" default:"8898"`
 }
 
 type FrontConfig struct {
-    SrcPort uint16 `yaml:"port" default:"8899"`
-    DstPort uint16 `yaml:"srvport" default:"80"`
+    Port    uint16 `yaml:"port" default:"8899"`
+    SrvPort uint16 `yaml:"srvport" default:"8899"`
 }
 
 type ProxyConfig struct {
```
```diff
@@ -82,6 +84,7 @@ type TapConfig struct {
     PodRegexStr       string   `yaml:"regex" default:".*"`
     Namespaces        []string `yaml:"namespaces"`
     SelfNamespace     string   `yaml:"selfnamespace" default:"kubeshark"`
+    PersistentStorage bool     `yaml:"persistentstorage" default:"false"`
     StorageLimit      string   `yaml:"storagelimit" default:"200Mi"`
     StorageClass      string   `yaml:"storageclass" default:"standard"`
     DryRun            bool     `yaml:"dryrun" default:"false"`
```
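Read together, the struct changes above rename the CLI-side proxy port to `port` and the in-cluster (hostPort/service) port to `srvport`, and introduce `persistentstorage` as an opt-in setting. A minimal sketch of how the corresponding section of a Kubeshark config could look, assuming only the yaml tags and defaults visible in this diff:

```yaml
# Hypothetical config excerpt; keys and defaults are taken from the yaml tags
# in WorkerConfig, HubConfig, FrontConfig and TapConfig above.
tap:
  proxy:
    worker:
      srvport: 8897   # port the worker listens on (node hostPort)
    hub:
      port: 8898      # local port used by the CLI proxy/port-forward
      srvport: 8898   # in-cluster port exposed for the Hub
    front:
      port: 8899
      srvport: 8899
  persistentstorage: false   # set true to create the PersistentVolumeClaim
```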
```diff
@@ -1,5 +1,5 @@
 apiVersion: v2
-appVersion: "40.0"
+appVersion: "40.1"
 description: The API Traffic Analyzer for Kubernetes
 home: https://kubeshark.co
 keywords:
@@ -22,4 +22,4 @@ name: kubeshark
 sources:
   - https://github.com/kubeshark/kubeshark/tree/master/helm-chart
 type: application
-version: "40.0"
+version: "40.1"
```
```diff
@@ -26,8 +26,11 @@ spec:
     - name: SCRIPTING_SCRIPTS
       value: '[]'
     image: '{{ .Values.tap.docker.registry }}/hub:{{ .Values.tap.docker.tag }}'
-    imagePullPolicy: '{{ .Values.tap.docker.imagePullPolicy }}'
+    imagePullPolicy: '{{ .Values.tap.docker.imagepullpolicy }}'
     name: kubeshark-hub
+    ports:
+    - containerPort: 80
+      hostPort: {{ .Values.tap.proxy.hub.srvport }}
     resources:
       limits:
         cpu: '{{ .Values.tap.resources.hub.limits.cpu }}'
```
```diff
@@ -20,8 +20,11 @@ spec:
     - name: REACT_APP_HUB_PORT
       value: "8898"
     image: '{{ .Values.tap.docker.registry }}/front:{{ .Values.tap.docker.tag }}'
-    imagePullPolicy: '{{ .Values.tap.docker.imagePullPolicy }}'
+    imagePullPolicy: '{{ .Values.tap.docker.imagepullpolicy }}'
     name: kubeshark-front
+    ports:
+    - containerPort: 80
+      hostPort: {{ .Values.tap.proxy.front.srvport }}
     readinessProbe:
       failureThreshold: 3
       periodSeconds: 1
```
```diff
@@ -1,5 +1,6 @@
 # THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
 ---
+{{- if .Values.tap.persistentstorage }}
 apiVersion: v1
 kind: PersistentVolumeClaim
 metadata:
@@ -18,3 +19,4 @@ spec:
       storage: '{{ .Values.tap.storagelimit }}'
   storageClassName: '{{ .Values.tap.storageclass }}'
 status: {}
+{{- end }}
```
```diff
@@ -32,7 +32,7 @@ spec:
         - -i
         - any
         - -port
-        - "8897"
+        - '{{ .Values.tap.proxy.worker.srvport }}'
         - -packet-capture
         - '{{ .Values.tap.packetcapture }}'
         - -servicemesh
@@ -40,8 +40,11 @@ spec:
         - -procfs
         - /hostproc
         image: '{{ .Values.tap.docker.registry }}/worker:{{ .Values.tap.docker.tag }}'
-        imagePullPolicy: '{{ .Values.tap.docker.imagePullPolicy }}'
+        imagePullPolicy: '{{ .Values.tap.docker.imagepullpolicy }}'
         name: kubeshark-worker-daemon-set
+        ports:
+        - containerPort: {{ .Values.tap.proxy.worker.srvport }}
+          hostPort: {{ .Values.tap.proxy.worker.srvport }}
         resources:
           limits:
             cpu: '{{ .Values.tap.resources.worker.limits.cpu }}'
@@ -67,8 +70,10 @@ spec:
         - mountPath: /sys
           name: sys
           readOnly: true
+{{- if .Values.tap.persistentstorage }}
         - mountPath: /app/data
           name: kubeshark-persistent-volume
+{{- end }}
       dnsPolicy: ClusterFirstWithHostNet
       hostNetwork: true
       serviceAccountName: kubeshark-service-account
@@ -85,6 +90,8 @@ spec:
       - hostPath:
           path: /sys
         name: sys
+{{- if .Values.tap.persistentstorage }}
      - name: kubeshark-persistent-volume
        persistentVolumeClaim:
          claimName: kubeshark-persistent-volume-claim
+{{- end }}
```
```diff
@@ -6,18 +6,18 @@ tap:
   imagepullsecrets: []
   proxy:
     worker:
-      port: 8897
       srvport: 8897
     hub:
       port: 8898
-      srvport: 80
+      srvport: 8898
     front:
       port: 8899
-      srvport: 80
+      srvport: 8899
     host: 127.0.0.1
   regex: .*
   namespaces: []
   selfnamespace: kubeshark
+  persistentstorage: false
   storagelimit: 200Mi
   storageclass: standard
   dryrun: false
```
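Because `srvport` now feeds the `hostPort`/`containerPort` templating in the chart above, these values can be overridden per install. A minimal sketch, assuming a standard Helm values-override workflow against the generated chart (the file name `my-values.yaml` is only illustrative):

```yaml
# my-values.yaml -- hypothetical override file for the generated Kubeshark chart
tap:
  proxy:
    worker:
      srvport: 48897        # bind the worker on a non-default node port
  persistentstorage: true   # render the PersistentVolumeClaim and its volume mounts
  storagelimit: 500Mi
```

Applied with, for example, `helm install kubeshark ./helm-chart -f my-values.yaml`.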
```diff
@@ -151,6 +151,26 @@ func (connector *Connector) PostLicense(license string) {
     }
 }
 
+func (connector *Connector) PostLicenseSingle(license string) {
+    postLicenseUrl := fmt.Sprintf("%s/license", connector.url)
+
+    payload := postLicenseRequest{
+        License: license,
+    }
+
+    if payloadMarshalled, err := json.Marshal(payload); err != nil {
+        log.Error().Err(err).Msg("Failed to marshal the payload:")
+    } else {
+        var resp *http.Response
+        if resp, err = utils.Post(postLicenseUrl, "application/json", bytes.NewBuffer(payloadMarshalled), connector.client); err != nil || resp.StatusCode != http.StatusOK {
+            log.Warn().Err(err).Msg("Failed sending the license to Hub.")
+        } else {
+            log.Debug().Str("license", license).Msg("Reported license to Hub:")
+            return
+        }
+    }
+}
+
 func (connector *Connector) PostEnv(env map[string]interface{}) {
     if len(env) == 0 {
         return
```
```diff
@@ -225,8 +225,12 @@ func (provider *Provider) BuildHubPod(opts *PodOptions) (*core.Pod, error) {
 
     containers := []core.Container{
         {
-            Name:            opts.PodName,
-            Image:           opts.PodImage,
+            Name:  opts.PodName,
+            Image: opts.PodImage,
+            Ports: []core.ContainerPort{{
+                HostPort:      int32(config.Config.Tap.Proxy.Hub.SrvPort),
+                ContainerPort: configStructs.ContainerPort,
+            }},
             ImagePullPolicy: opts.ImagePullPolicy,
             Command:         command,
             Resources: core.ResourceRequirements{
@@ -250,7 +254,7 @@ func (provider *Provider) BuildHubPod(opts *PodOptions) (*core.Pod, error) {
         },
         {
             Name:  "LICENSE",
-            Value: "",
+            Value: config.Config.License,
         },
         {
             Name: "SCRIPTING_ENV",
```
```diff
@@ -322,15 +326,19 @@ func (provider *Provider) BuildFrontPod(opts *PodOptions, hubHost string, hubPor
 
     containers := []core.Container{
         {
-            Name:            opts.PodName,
-            Image:           docker.GetFrontImage(),
+            Name:  opts.PodName,
+            Image: docker.GetFrontImage(),
+            Ports: []core.ContainerPort{{
+                HostPort:      int32(config.Config.Tap.Proxy.Front.SrvPort),
+                ContainerPort: configStructs.ContainerPort,
+            }},
             ImagePullPolicy: opts.ImagePullPolicy,
             VolumeMounts:    volumeMounts,
             ReadinessProbe: &core.Probe{
                 FailureThreshold: 3,
                 ProbeHandler: core.ProbeHandler{
                     TCPSocket: &core.TCPSocketAction{
-                        Port: intstr.Parse("80"),
+                        Port: intstr.Parse(configStructs.ContainerPortStr),
                     },
                 },
                 PeriodSeconds: 1,
```
```diff
@@ -419,8 +427,8 @@ func (provider *Provider) BuildHubService(namespace string) *core.Service {
             Ports: []core.ServicePort{
                 {
                     Name:       HubServiceName,
-                    TargetPort: intstr.FromInt(80),
-                    Port:       80,
+                    TargetPort: intstr.FromInt(configStructs.ContainerPort),
+                    Port:       configStructs.ContainerPort,
                 },
             },
             Type: core.ServiceTypeClusterIP,
@@ -444,8 +452,8 @@ func (provider *Provider) BuildFrontService(namespace string) *core.Service {
             Ports: []core.ServicePort{
                 {
                     Name:       FrontServiceName,
-                    TargetPort: intstr.FromInt(80),
-                    Port:       80,
+                    TargetPort: intstr.FromInt(configStructs.ContainerPort),
+                    Port:       configStructs.ContainerPort,
                 },
             },
             Type: core.ServiceTypeClusterIP,
```
```diff
@@ -758,7 +766,7 @@ func (provider *Provider) BuildWorkerDaemonSet(
         "-i",
         "any",
         "-port",
-        "8897",
+        fmt.Sprintf("%d", config.Config.Tap.Proxy.Worker.SrvPort),
         "-packet-capture",
         config.Config.Tap.PacketCapture,
     }
```
```diff
@@ -855,18 +863,27 @@ func (provider *Provider) BuildWorkerDaemonSet(
         MountPath: PersistentVolumeHostPath,
     }
 
+    // VolumeMount(s)
+    volumeMounts := []core.VolumeMount{
+        procfsVolumeMount,
+        sysfsVolumeMount,
+    }
+    if config.Config.Tap.PersistentStorage {
+        volumeMounts = append(volumeMounts, persistentVolumeMount)
+    }
+
+    // Containers
     containers := []core.Container{
         {
-            Name:            podName,
-            Image:           podImage,
+            Name:  podName,
+            Image: podImage,
+            Ports: []core.ContainerPort{{
+                HostPort:      int32(config.Config.Tap.Proxy.Worker.SrvPort),
+                ContainerPort: int32(config.Config.Tap.Proxy.Worker.SrvPort),
+            }},
             ImagePullPolicy: imagePullPolicy,
-            VolumeMounts: []core.VolumeMount{
-                procfsVolumeMount,
-                sysfsVolumeMount,
-                persistentVolumeMount,
-            },
-            Command: command,
+            VolumeMounts:    volumeMounts,
+            Command:         command,
             Resources: core.ResourceRequirements{
                 Limits: core.ResourceList{
                     "cpu":    cpuLimit,
```
```diff
@@ -887,6 +904,15 @@ func (provider *Provider) BuildWorkerDaemonSet(
         },
     }
 
+    // Volume(s)
+    volumes := []core.Volume{
+        procfsVolume,
+        sysfsVolume,
+    }
+    if config.Config.Tap.PersistentStorage {
+        volumes = append(volumes, persistentVolume)
+    }
+
     // Pod
     pod := DaemonSetPod{
         ObjectMeta: metav1.ObjectMeta{
```
```diff
@@ -897,14 +923,10 @@ func (provider *Provider) BuildWorkerDaemonSet(
             }, provider),
         },
         Spec: core.PodSpec{
-            ServiceAccountName: ServiceAccountName,
-            HostNetwork:        true,
-            Containers:         containers,
-            Volumes: []core.Volume{
-                procfsVolume,
-                sysfsVolume,
-                persistentVolume,
-            },
+            ServiceAccountName:            ServiceAccountName,
+            HostNetwork:                   true,
+            Containers:                    containers,
+            Volumes:                       volumes,
             DNSPolicy:                     core.DNSClusterFirstWithHostNet,
             TerminationGracePeriodSeconds: new(int64),
             Tolerations:                   provider.BuildTolerations(),
```
```diff
@@ -3,6 +3,7 @@ package kubernetes
     "context"
 
     "github.com/kubeshark/kubeshark/config"
     "github.com/kubeshark/kubeshark/config/configStructs"
     "github.com/kubeshark/kubeshark/docker"
     "github.com/rs/zerolog/log"
@@ -21,17 +22,19 @@ func CreateWorkers(
     tls bool,
     debug bool,
 ) error {
-    persistentVolumeClaim, err := kubernetesProvider.BuildPersistentVolumeClaim()
-    if err != nil {
-        return err
-    }
+    if config.Config.Tap.PersistentStorage {
+        persistentVolumeClaim, err := kubernetesProvider.BuildPersistentVolumeClaim()
+        if err != nil {
+            return err
+        }
 
-    if _, err = kubernetesProvider.CreatePersistentVolumeClaim(
-        ctx,
-        namespace,
-        persistentVolumeClaim,
-    ); err != nil {
-        return err
+        if _, err = kubernetesProvider.CreatePersistentVolumeClaim(
+            ctx,
+            namespace,
+            persistentVolumeClaim,
+        ); err != nil {
+            return err
+        }
     }
 
     image := docker.GetWorkerImage()
```
```diff
@@ -26,6 +26,9 @@ spec:
     image: docker.io/kubeshark/hub:latest
     imagePullPolicy: Always
     name: kubeshark-hub
+    ports:
+    - containerPort: 80
+      hostPort: 8898
     resources:
       limits:
         cpu: 750m
@@ -22,6 +22,9 @@ spec:
     image: docker.io/kubeshark/front:latest
     imagePullPolicy: Always
     name: kubeshark-front
+    ports:
+    - containerPort: 80
+      hostPort: 8899
     readinessProbe:
       failureThreshold: 3
       periodSeconds: 1
@@ -42,6 +42,9 @@ spec:
         image: docker.io/kubeshark/worker:latest
         imagePullPolicy: Always
         name: kubeshark-worker-daemon-set
+        ports:
+        - containerPort: 8897
+          hostPort: 8897
         resources:
           limits:
             cpu: 750m
```
```diff
@@ -94,7 +94,7 @@ func createSelfHubPod(ctx context.Context, kubernetesProvider *kubernetes.Provid
 }
 
 func createFrontPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, opts *kubernetes.PodOptions) error {
-    pod, err := kubernetesProvider.BuildFrontPod(opts, config.Config.Tap.Proxy.Host, fmt.Sprintf("%d", config.Config.Tap.Proxy.Hub.SrcPort))
+    pod, err := kubernetesProvider.BuildFrontPod(opts, config.Config.Tap.Proxy.Host, fmt.Sprintf("%d", config.Config.Tap.Proxy.Hub.Port))
     if err != nil {
         return err
     }
```