Compare commits


22 Commits
50.0 ... 50.4

Author SHA1 Message Date
M. Mert Yildiran
d94ce4dce3 🔖 Bump the Helm chart version to 50.4 2023-09-21 21:56:09 +03:00
M. Mert Yildiran
65ab0ca668 🐛 Don't use encoding/json in config command 2023-09-21 21:52:43 +03:00
M. Mert Yildiran
9bc3ea5ffc 🐛 Generate truly the default config with config -r 2023-09-21 21:43:00 +03:00
M. Mert Yildiran
2d17d1a83d Replace gopkg.in/yaml.v3 with github.com/goccy/go-yaml 2023-09-21 21:36:08 +03:00
M. Mert Yildiran
78c89cc5b4 🔖 Bump the Helm chart version to 50.3 2023-09-17 00:09:37 +03:00
M. Mert Yildiran
b5c9a31380 🔧 Run make generate-manifests 2023-09-16 23:52:53 +03:00
Luiz Oliveira
3dfff2b7a5 ♻️ Turn the Ingress path rewrite for Hub into an Nginx location directive (#1426)
* fixes websocket for nginx-ingress

Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>

* update message when helm completes

Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>

* force react port to be a path

Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>

* include Authorization header to the proxy

Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>

* remove hub from proxy

Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>

* remove REACT_APP_HUB_PORT info

Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>

* include path back again to REACT_APP_HUB_PORT

Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>

---------

Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>
2023-09-15 21:43:34 +03:00
M. Mert Yildiran
583a5b97ee 🔧 Re-order the template filenames and re-generate values.yaml and complete.yaml 2023-09-04 02:25:33 +03:00
Luiz Oliveira
64aae06fe5 🛂 Add a new Role and RoleBinding resources to have write access for our own Secret resource (#1416)
* include role and rolebinding to write secrets

With this, the kubeshark service-account has rights to
update the value of the secrets in the same namespace
where kubeshark was deployed. This is necessary to keep
the value of the license updated.

Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>

* Update helm-chart/templates/02-cluster-role.yaml

Co-authored-by: M. Mert Yildiran <me@mertyildiran.com>

* Update helm-chart/templates/03-cluster-role-binding.yaml

Co-authored-by: M. Mert Yildiran <me@mertyildiran.com>

* Update helm-chart/templates/03-cluster-role-binding.yaml

Co-authored-by: M. Mert Yildiran <me@mertyildiran.com>

* Update helm-chart/templates/03-cluster-role-binding.yaml

Co-authored-by: M. Mert Yildiran <me@mertyildiran.com>

* Update helm-chart/templates/02-cluster-role.yaml

Co-authored-by: M. Mert Yildiran <me@mertyildiran.com>

---------

Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>
Co-authored-by: M. Mert Yildiran <me@mertyildiran.com>
2023-09-04 02:20:26 +03:00
Luiz Oliveira
1ccaa03fb2 🏗️ Give the user ability to set ingress as needed (#1417)
* Give the user ability to set ingress as needed

- Removed unnecessary IngressClass.
- If no IngressClassName is passed, use the cluster's default class.
- Renamed `ingressclass` to `IngressClassName`, which is the standard name
    used for it.
- Included custom annotations for Ingress. This way the user can set any
    custom annotation for the Ingress only.

Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>

* Update helm-chart/templates/11-ingress.yaml

Co-authored-by: M. Mert Yildiran <me@mertyildiran.com>

* Update config/configStructs/tapConfig.go

Co-authored-by: M. Mert Yildiran <me@mertyildiran.com>

* Update helm-chart/templates/11-ingress.yaml

Co-authored-by: M. Mert Yildiran <me@mertyildiran.com>

* update default ingressClassName value

Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>

---------

Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>
Co-authored-by: M. Mert Yildiran <me@mertyildiran.com>
2023-09-04 02:18:43 +03:00
M. Mert Yildiran
3222212367 🔧 Update complete.yaml 2023-09-01 04:09:57 +03:00
M. Mert Yildiran
c5681871e4 🔖 Bump the Helm chart version to 50.2 2023-09-01 03:22:56 +03:00
M. Mert Yildiran
1ac3ba0a6d 🔧 Add a notice about telemetry into NOTES.txt of the Helm chart 2023-08-31 18:55:58 +03:00
M. Mert Yildiran
d3520765eb 🔥 Delete .dockerignore file 2023-08-31 06:16:52 +03:00
M. Mert Yildiran
fa1e7bcf01 🔧 Add TelemetryConfig struct and --telemetry-enabled flag to tap command 2023-08-31 03:50:14 +03:00
M. Mert Yildiran
bf182b6330 🐛 Template the -tls flag in worker DaemonSet 2023-08-29 03:51:08 +03:00
M. Mert Yildiran
f59f84af02 Add export command to download PCAP export 2023-08-28 22:00:36 +03:00
M. Mert Yildiran
cae5a92a13 🔖 Bump the Helm chart version to 50.1 2023-08-25 22:22:36 +03:00
M. Mert Yildiran
7afb1d8b9b Set the probing port of Hub back to 80 2023-08-24 23:51:47 +03:00
M. Mert Yildiran
f628192216 🚑 Add initialDelaySeconds to readiness and liveness probes of worker DaemonSet 2023-08-24 22:05:26 +03:00
M. Mert Yildiran
b1feb4e33f 🔧 Add port-forward-worker Makefile rule 2023-08-23 23:55:33 +03:00
M. Mert Yildiran
94dff24aed 🔥 Delete Chart.lock file 2023-08-23 02:02:29 +03:00
37 changed files with 449 additions and 282 deletions


@@ -1,16 +0,0 @@
# Files
.dockerignore
.editorconfig
.gitignore
Dockerfile
Makefile
LICENSE
**/*.md
**/*_test.go
*.out
# Folders
.git/
.github/
build/
**/node_modules/


@@ -149,3 +149,6 @@ helm-uninstall:
proxy:
kubeshark proxy
port-forward-worker:
kubectl port-forward $$(kubectl get pods | awk '$$1 ~ /^$(LOGS_POD_PREFIX)/' | awk 'END {print $$1}') $(LOGS_FOLLOW) 8897:8897


@@ -18,7 +18,12 @@ var configCmd = &cobra.Command{
Short: fmt.Sprintf("Generate %s config with default values", misc.Software),
RunE: func(cmd *cobra.Command, args []string) error {
if config.Config.Config.Regenerate {
if err := config.WriteConfig(&config.Config); err != nil {
defaultConfig := config.CreateDefaultConfig()
if err := defaults.Set(&defaultConfig); err != nil {
log.Error().Err(err).Send()
return nil
}
if err := config.WriteConfig(&defaultConfig); err != nil {
log.Error().Err(err).Msg("Failed generating config with defaults.")
return nil
}
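
The hunk above is the fix for "Generate truly the default config with config -r": instead of writing back the currently loaded config, the command now builds a fresh struct and fills it from the `default` struct tags via github.com/creasty/defaults. A minimal sketch of that mechanism, using an illustrative struct that mirrors the tag pattern in config/configStructs (the type below is not taken from this diff):

package main

import (
    "fmt"

    "github.com/creasty/defaults"
)

// telemetryConfig mirrors the `default` tag pattern used by the real
// TelemetryConfig/IngressConfig structs; the type itself is illustrative.
type telemetryConfig struct {
    Enabled bool `yaml:"enabled" default:"true"`
}

func main() {
    cfg := telemetryConfig{}
    // defaults.Set walks the struct and applies every `default` tag, so the
    // regenerated config reflects the shipped defaults rather than whatever
    // is currently loaded from disk.
    if err := defaults.Set(&cfg); err != nil {
        panic(err)
    }
    fmt.Printf("%+v\n", cfg) // prints {Enabled:true}
}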


@@ -36,13 +36,13 @@ func init() {
log.Debug().Err(err).Send()
}
consoleCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.Port, "Provide a custom port for the Hub")
consoleCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the Hub")
consoleCmd.Flags().Uint16(configStructs.ProxyFrontPortLabel, defaultTapConfig.Proxy.Front.Port, "Provide a custom port for the Kubeshark")
consoleCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the Kubeshark")
consoleCmd.Flags().StringP(configStructs.ReleaseNamespaceLabel, "s", defaultTapConfig.Release.Namespace, "Release namespace of Kubeshark")
}
func runConsole() {
hubUrl := kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Hub.Port)
hubUrl := kubernetes.GetHubUrl()
response, err := http.Get(fmt.Sprintf("%s/echo", hubUrl))
if err != nil || response.StatusCode != 200 {
log.Info().Msg(fmt.Sprintf(utils.Yellow, "Couldn't connect to Hub. Establishing proxy..."))
@@ -52,10 +52,10 @@ func runConsole() {
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
log.Info().Str("host", config.Config.Tap.Proxy.Host).Uint16("port", config.Config.Tap.Proxy.Hub.Port).Msg("Connecting to:")
log.Info().Str("host", config.Config.Tap.Proxy.Host).Str("url", hubUrl).Msg("Connecting to:")
u := url.URL{
Scheme: "ws",
Host: fmt.Sprintf("%s:%d", config.Config.Tap.Proxy.Host, config.Config.Tap.Proxy.Hub.Port),
Host: fmt.Sprintf("%s:%d/api", config.Config.Tap.Proxy.Host, config.Config.Tap.Proxy.Front.Port),
Path: "/scripts/logs",
}
headers := http.Header{}

cmd/export.go (new file, 62 lines)

@@ -0,0 +1,62 @@
package cmd
import (
"fmt"
"net/http"
"os"
"path/filepath"
"time"
"github.com/creasty/defaults"
"github.com/kubeshark/kubeshark/config/configStructs"
"github.com/kubeshark/kubeshark/internal/connect"
"github.com/kubeshark/kubeshark/kubernetes"
"github.com/kubeshark/kubeshark/utils"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
)
var exportCmd = &cobra.Command{
Use: "export",
Short: "Exports the captured traffic into a TAR file that contains PCAP files",
RunE: func(cmd *cobra.Command, args []string) error {
runExport()
return nil
},
}
func init() {
rootCmd.AddCommand(exportCmd)
defaultTapConfig := configStructs.TapConfig{}
if err := defaults.Set(&defaultTapConfig); err != nil {
log.Debug().Err(err).Send()
}
exportCmd.Flags().Uint16(configStructs.ProxyFrontPortLabel, defaultTapConfig.Proxy.Front.Port, "Provide a custom port for the Kubeshark")
exportCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the Kubeshark")
exportCmd.Flags().StringP(configStructs.ReleaseNamespaceLabel, "s", defaultTapConfig.Release.Namespace, "Release namespace of Kubeshark")
}
func runExport() {
hubUrl := kubernetes.GetHubUrl()
response, err := http.Get(fmt.Sprintf("%s/echo", hubUrl))
if err != nil || response.StatusCode != 200 {
log.Info().Msg(fmt.Sprintf(utils.Yellow, "Couldn't connect to Hub. Establishing proxy..."))
runProxy(false, true)
}
dstPath, err := filepath.Abs(fmt.Sprintf("./%d.tar.gz", time.Now().Unix()))
if err != nil {
panic(err)
}
out, err := os.Create(dstPath)
if err != nil {
panic(err)
}
defer out.Close()
connector := connect.NewConnector(kubernetes.GetHubUrl(), connect.DefaultRetries, connect.DefaultTimeout)
connector.PostPcapsMerge(out)
}
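
The new export command above writes the downloaded archive to ./<unix-timestamp>.tar.gz in the current directory. A small consumer-side sketch for listing the archive's entries, assuming only what the .tar.gz name implies (a gzip-compressed TAR of PCAP files); this is not part of the diff:

package main

import (
    "archive/tar"
    "compress/gzip"
    "fmt"
    "io"
    "os"
)

func main() {
    // Usage: go run main.go <timestamp>.tar.gz
    f, err := os.Open(os.Args[1])
    if err != nil {
        panic(err)
    }
    defer f.Close()

    gz, err := gzip.NewReader(f)
    if err != nil {
        panic(err)
    }
    defer gz.Close()

    tr := tar.NewReader(gz)
    for {
        hdr, err := tr.Next()
        if err == io.EOF {
            break
        }
        if err != nil {
            panic(err)
        }
        fmt.Printf("%s\t%d bytes\n", hdr.Name, hdr.Size)
    }
}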


@@ -40,19 +40,19 @@ func init() {
log.Debug().Err(err).Send()
}
proCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.Port, "Provide a custom port for the Hub")
proCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the Hub")
proCmd.Flags().Uint16(configStructs.ProxyFrontPortLabel, defaultTapConfig.Proxy.Front.Port, "Provide a custom port for the Kubeshark")
proCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the Kubeshark")
}
func acquireLicense() {
hubUrl := kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Hub.Port)
hubUrl := kubernetes.GetHubUrl()
response, err := http.Get(fmt.Sprintf("%s/echo", hubUrl))
if err != nil || response.StatusCode != 200 {
log.Info().Msg(fmt.Sprintf(utils.Yellow, "Couldn't connect to Hub. Establishing proxy..."))
runProxy(false, true)
}
connector = connect.NewConnector(kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Hub.Port), connect.DefaultRetries, connect.DefaultTimeout)
connector = connect.NewConnector(kubernetes.GetHubUrl(), connect.DefaultRetries, connect.DefaultTimeout)
log.Info().Str("url", PRO_URL).Msg("Opening in the browser:")
utils.OpenBrowser(PRO_URL)


@@ -24,8 +24,7 @@ func init() {
log.Debug().Err(err).Send()
}
proxyCmd.Flags().Uint16(configStructs.ProxyFrontPortLabel, defaultTapConfig.Proxy.Front.Port, "Provide a custom port for the front-end proxy/port-forward")
proxyCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.Port, "Provide a custom port for the Hub proxy/port-forward")
proxyCmd.Flags().Uint16(configStructs.ProxyFrontPortLabel, defaultTapConfig.Proxy.Front.Port, "Provide a custom port for the proxy/port-forward")
proxyCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the proxy/port-forward")
proxyCmd.Flags().StringP(configStructs.ReleaseNamespaceLabel, "s", defaultTapConfig.Release.Namespace, "Release namespace of Kubeshark")
}


@@ -63,38 +63,8 @@ func runProxy(block bool, noBrowser bool) {
var establishedProxy bool
hubUrl := kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Hub.Port)
response, err := http.Get(fmt.Sprintf("%s/echo", hubUrl))
if err == nil && response.StatusCode == 200 {
log.Info().
Str("service", kubernetes.HubServiceName).
Int("port", int(config.Config.Tap.Proxy.Hub.Port)).
Msg("Found a running service.")
okToOpen("Hub", hubUrl, true)
} else {
startProxyReportErrorIfAny(
kubernetesProvider,
ctx,
kubernetes.HubServiceName,
kubernetes.HubPodName,
configStructs.ProxyHubPortLabel,
config.Config.Tap.Proxy.Hub.Port,
configStructs.ContainerPort,
"/echo",
)
connector := connect.NewConnector(hubUrl, connect.DefaultRetries, connect.DefaultTimeout)
if err := connector.TestConnection("/echo"); err != nil {
log.Error().Msg(fmt.Sprintf(utils.Red, "Couldn't connect to Hub."))
return
}
establishedProxy = true
okToOpen("Hub", hubUrl, true)
}
frontUrl := kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Front.Port)
response, err = http.Get(fmt.Sprintf("%s/", frontUrl))
response, err := http.Get(fmt.Sprintf("%s/", frontUrl))
if err == nil && response.StatusCode == 200 {
log.Info().
Str("service", kubernetes.FrontServiceName).


@@ -34,8 +34,8 @@ func init() {
log.Debug().Err(err).Send()
}
scriptsCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.Port, "Provide a custom port for the Hub")
scriptsCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the Hub")
scriptsCmd.Flags().Uint16(configStructs.ProxyFrontPortLabel, defaultTapConfig.Proxy.Front.Port, "Provide a custom port for the Kubeshark")
scriptsCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the Kubeshark")
scriptsCmd.Flags().StringP(configStructs.ReleaseNamespaceLabel, "s", defaultTapConfig.Release.Namespace, "Release namespace of Kubeshark")
}
@@ -45,14 +45,14 @@ func runScripts() {
return
}
hubUrl := kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Hub.Port)
hubUrl := kubernetes.GetHubUrl()
response, err := http.Get(fmt.Sprintf("%s/echo", hubUrl))
if err != nil || response.StatusCode != 200 {
log.Info().Msg(fmt.Sprintf(utils.Yellow, "Couldn't connect to Hub. Establishing proxy..."))
runProxy(false, true)
}
connector = connect.NewConnector(kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Hub.Port), connect.DefaultRetries, connect.DefaultTimeout)
connector = connect.NewConnector(kubernetes.GetHubUrl(), connect.DefaultRetries, connect.DefaultTimeout)
watchScripts(true)
}


@@ -47,8 +47,7 @@ func init() {
tapCmd.Flags().StringP(configStructs.DockerTagLabel, "t", defaultTapConfig.Docker.Tag, "The tag of the Docker images that are going to be pulled")
tapCmd.Flags().String(configStructs.DockerImagePullPolicy, defaultTapConfig.Docker.ImagePullPolicy, "ImagePullPolicy for the Docker images")
tapCmd.Flags().StringSlice(configStructs.DockerImagePullSecrets, defaultTapConfig.Docker.ImagePullSecrets, "ImagePullSecrets for the Docker images")
tapCmd.Flags().Uint16(configStructs.ProxyFrontPortLabel, defaultTapConfig.Proxy.Front.Port, "Provide a custom port for the front-end proxy/port-forward")
tapCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.Port, "Provide a custom port for the Hub proxy/port-forward")
tapCmd.Flags().Uint16(configStructs.ProxyFrontPortLabel, defaultTapConfig.Proxy.Front.Port, "Provide a custom port for the proxy/port-forward")
tapCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the proxy/port-forward")
tapCmd.Flags().StringSliceP(configStructs.NamespacesLabel, "n", defaultTapConfig.Namespaces, "Namespaces selector")
tapCmd.Flags().StringP(configStructs.ReleaseNamespaceLabel, "s", defaultTapConfig.Release.Namespace, "Release namespace of Kubeshark")
@@ -61,4 +60,5 @@ func init() {
tapCmd.Flags().Bool(configStructs.TlsLabel, defaultTapConfig.Tls, "Capture the traffic that's encrypted with OpenSSL or Go crypto/tls libraries")
tapCmd.Flags().Bool(configStructs.IgnoreTaintedLabel, defaultTapConfig.IgnoreTainted, "Ignore tainted pods while running Worker DaemonSet")
tapCmd.Flags().Bool(configStructs.IngressEnabledLabel, defaultTapConfig.Ingress.Enabled, "Enable Ingress")
tapCmd.Flags().Bool(configStructs.TelemetryEnabledLabel, defaultTapConfig.Telemetry.Enabled, "Enable/disable Telemetry")
}


@@ -506,7 +506,7 @@ func pcap(tarPath string) error {
},
}
connector = connect.NewConnector(kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Hub.Port), connect.DefaultRetries, connect.DefaultTimeout)
connector = connect.NewConnector(kubernetes.GetHubUrl(), connect.DefaultRetries, connect.DefaultTimeout)
connector.PostWorkerPodToHub(workerPod)
// License
@@ -515,7 +515,7 @@ func pcap(tarPath string) error {
}
log.Info().
Str("url", kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Hub.Port)).
Str("url", kubernetes.GetHubUrl()).
Msg(fmt.Sprintf(utils.Green, "Hub is available at:"))
url := kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Front.Port)


@@ -65,7 +65,7 @@ func tap() {
Str("limit", config.Config.Tap.StorageLimit).
Msg(fmt.Sprintf("%s will store the traffic up to a limit (per node). Oldest TCP/UDP streams will be removed once the limit is reached.", misc.Software))
connector = connect.NewConnector(kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Hub.Port), connect.DefaultRetries, connect.DefaultTimeout)
connector = connect.NewConnector(kubernetes.GetHubUrl(), connect.DefaultRetries, connect.DefaultTimeout)
kubernetesProvider, err := getKubernetesProviderForCli(false, false)
if err != nil {
@@ -77,6 +77,11 @@ func tap() {
state.targetNamespaces = kubernetesProvider.GetNamespaces()
log.Info().
Bool("enabled", config.Config.Tap.Telemetry.Enabled).
Str("notice", "Telemetry can be disabled by setting the flag: --telemetry-enabled=false").
Msg("Telemetry")
log.Info().Strs("namespaces", state.targetNamespaces).Msg("Targeting pods in:")
if err := printTargetedPodsPreview(ctx, kubernetesProvider, state.targetNamespaces); err != nil {
@@ -401,16 +406,6 @@ func watchHubEvents(ctx context.Context, kubernetesProvider *kubernetes.Provider
}
func postHubStarted(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc, update bool) {
startProxyReportErrorIfAny(
kubernetesProvider,
ctx,
kubernetes.HubServiceName,
kubernetes.HubPodName,
configStructs.ProxyHubPortLabel,
config.Config.Tap.Proxy.Hub.Port,
configStructs.ContainerPort,
"/echo",
)
if update {
// Pod regex
@@ -439,12 +434,6 @@ func postHubStarted(ctx context.Context, kubernetesProvider *kubernetes.Provider
connector.PostScriptDone()
}
if !update && !config.Config.Tap.Ingress.Enabled {
// Hub proxy URL
url := kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Hub.Port)
log.Info().Str("url", url).Msg(fmt.Sprintf(utils.Green, "Hub is available at:"))
}
if config.Config.Scripting.Source != "" && config.Config.Scripting.WatchScripts {
watchScripts(false)
}


@@ -12,6 +12,7 @@ import (
"strings"
"github.com/creasty/defaults"
"github.com/goccy/go-yaml"
"github.com/kubeshark/kubeshark/misc"
"github.com/kubeshark/kubeshark/misc/version"
"github.com/kubeshark/kubeshark/utils"
@@ -19,7 +20,6 @@ import (
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"gopkg.in/yaml.v3"
)
const (


@@ -27,6 +27,7 @@ const (
TlsLabel = "tls"
IgnoreTaintedLabel = "ignoretainted"
IngressEnabledLabel = "ingress-enabled"
TelemetryEnabledLabel = "telemetry-enabled"
DebugLabel = "debug"
ContainerPort = 80
ContainerPortStr = "80"
@@ -87,11 +88,10 @@ type AuthConfig struct {
type IngressConfig struct {
Enabled bool `yaml:"enabled" json:"enabled" default:"false"`
ClassName string `yaml:"classname" json:"classname" default:"kubeshark-ingress-class"`
Controller string `yaml:"controller" json:"controller" default:"k8s.io/ingress-nginx"`
ClassName string `yaml:"classname" json:"classname" default:""`
Host string `yaml:"host" json:"host" default:"ks.svc.cluster.local"`
TLS []networking.IngressTLS `yaml:"tls" json:"tls"`
CertManager string `yaml:"certmanager" json:"certmanager" default:"letsencrypt-prod"`
TLS []networking.IngressTLS `yaml:"tls" json:"tls" default:"[]"`
Annotations map[string]string `yaml:"annotations" json:"annotations" default:"{}"`
}
type ReleaseConfig struct {
@@ -100,6 +100,10 @@ type ReleaseConfig struct {
Namespace string `yaml:"namespace" json:"namespace" default:"default"`
}
type TelemetryConfig struct {
Enabled bool `yaml:"enabled" json:"enabled" default:"true"`
}
type TapConfig struct {
Docker DockerConfig `yaml:"docker" json:"docker"`
Proxy ProxyConfig `yaml:"proxy" json:"proxy"`
@@ -123,6 +127,7 @@ type TapConfig struct {
Ingress IngressConfig `yaml:"ingress" json:"ingress"`
IPv6 bool `yaml:"ipv6" json:"ipv6" default:"true"`
Debug bool `yaml:"debug" json:"debug" default:"false"`
Telemetry TelemetryConfig `yaml:"telemetry" json:"telemetry"`
}
func (config *TapConfig) PodRegex() *regexp.Regexp {

go.mod (4 changed lines)

@@ -12,6 +12,7 @@ require (
github.com/docker/go-connections v0.4.0
github.com/fsnotify/fsnotify v1.6.0
github.com/gin-gonic/gin v1.9.1
github.com/goccy/go-yaml v1.11.2
github.com/google/go-github/v37 v37.0.0
github.com/gorilla/websocket v1.4.2
github.com/pkg/errors v0.9.1
@@ -19,7 +20,6 @@ require (
github.com/rs/zerolog v1.28.0
github.com/spf13/cobra v1.6.1
github.com/spf13/pflag v1.0.5
gopkg.in/yaml.v3 v3.0.1
helm.sh/helm/v3 v3.12.0
k8s.io/api v0.27.1
k8s.io/apimachinery v0.27.1
@@ -162,6 +162,7 @@ require (
golang.org/x/text v0.9.0 // indirect
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
golang.org/x/tools v0.7.0 // indirect
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect
google.golang.org/grpc v1.53.0 // indirect
@@ -169,6 +170,7 @@ require (
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/sourcemap.v1 v1.0.5 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/apiextensions-apiserver v0.27.1 // indirect
k8s.io/apiserver v0.27.1 // indirect
k8s.io/cli-runtime v0.27.1 // indirect

go.sum (4 changed lines)

@@ -271,6 +271,8 @@ github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/goccy/go-yaml v1.11.2 h1:joq77SxuyIs9zzxEjgyLBugMQ9NEgTWxXfz2wVqwAaQ=
github.com/goccy/go-yaml v1.11.2/go.mod h1:wKnAMd44+9JAAnGQpWVEgBzGt3YuTaQ4uXoHvE4m7WU=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godror/godror v0.24.2/go.mod h1:wZv/9vPiUib6tkoDl+AZ/QLf5YZgMravZ7jxH2eQWAE=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
@@ -1035,6 +1037,8 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk=
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=


@@ -1,6 +0,0 @@
dependencies:
- name: cert-manager
repository: https://charts.jetstack.io
version: v1.11.0
digest: sha256:a1643d3bb03dc0d5043d123d72f2d3bde692493784666b00d08fbea3ec5cb2c3
generated: "2023-08-12T02:52:53.562234195+03:00"


@@ -1,5 +1,5 @@
apiVersion: v2
appVersion: "50.0"
appVersion: "50.4"
description: The API Traffic Analyzer for Kubernetes
home: https://kubeshark.co
keywords:
@@ -22,5 +22,5 @@ name: kubeshark
sources:
- https://github.com/kubeshark/kubeshark/tree/master/helm-chart
type: application
version: "50.0"
version: "50.4"
icon: https://raw.githubusercontent.com/kubeshark/assets/master/logo/vector/logo.svg


@@ -24,3 +24,28 @@ rules:
- list
- get
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
{{- include "kubeshark.labels" . | nindent 4 }}
annotations:
{{- if .Values.tap.annotations }}
{{- toYaml .Values.tap.annotations | nindent 4 }}
{{- end }}
name: kubeshark-self-secrets-role
namespace: {{ .Release.Namespace }}
rules:
- apiGroups:
- "v1"
- ""
resourceNames:
- kubeshark-secret
resources:
- secrets
verbs:
- get
- watch
- update
- patch


@@ -18,3 +18,23 @@ subjects:
- kind: ServiceAccount
name: {{ include "kubeshark.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: kubeshark-self-secrets-role-binding
labels:
{{- include "kubeshark.labels" . | nindent 4 }}
annotations:
{{- if .Values.tap.annotations }}
{{- toYaml .Values.tap.annotations | nindent 4 }}
{{- end }}
namespace: {{ .Release.Namespace }}
subjects:
- kind: ServiceAccount
name: {{ include "kubeshark.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
roleRef:
kind: Role
name: kubeshark-self-secrets-role
apiGroup: rbac.authorization.k8s.io
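
Together, the Role and RoleBinding above give the release's ServiceAccount get/watch/update/patch on the kubeshark-secret Secret in its own namespace (per #1416, so the stored license value can be refreshed in place). A minimal client-go sketch of the kind of in-cluster update this enables; illustrative only, the actual Hub-side code is not part of this diff:

package main

import (
    "context"
    "encoding/json"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/rest"
)

// patchLicense merge-patches the LICENSE key of kubeshark-secret in the given
// namespace, which is exactly what the new Role's update/patch verbs allow.
func patchLicense(namespace, license string) error {
    cfg, err := rest.InClusterConfig()
    if err != nil {
        return err
    }
    client, err := kubernetes.NewForConfig(cfg)
    if err != nil {
        return err
    }
    patch, err := json.Marshal(map[string]interface{}{
        "stringData": map[string]string{"LICENSE": license},
    })
    if err != nil {
        return err
    }
    _, err = client.CoreV1().Secrets(namespace).Patch(
        context.Background(), "kubeshark-secret",
        types.MergePatchType, patch, metav1.PatchOptions{},
    )
    return err
}

func main() {
    if err := patchLicense("default", "example-license"); err != nil {
        panic(err)
    }
}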


@@ -37,11 +37,15 @@ spec:
imagePullPolicy: {{ .Values.tap.docker.imagepullpolicy }}
readinessProbe:
periodSeconds: 1
failureThreshold: 3
successThreshold: 1
initialDelaySeconds: 3
tcpSocket:
port: 80
livenessProbe:
periodSeconds: 1
failureThreshold: 3
successThreshold: 1
initialDelaySeconds: 3
tcpSocket:
port: 80


@@ -19,5 +19,3 @@ spec:
selector:
app.kubeshark.co/app: hub
type: ClusterIP
status:
loadBalancer: {}


@@ -27,20 +27,22 @@ spec:
- name: REACT_APP_HUB_HOST
value: ' '
- name: REACT_APP_HUB_PORT
value: '{{ .Values.tap.ingress.enabled | ternary "/api" (print ":" .Values.tap.proxy.hub.port) }}'
value: '{{ .Values.tap.ingress.enabled | ternary "/api" (print ":" .Values.tap.proxy.front.port "/api") }}'
image: '{{ .Values.tap.docker.registry }}/front:{{ .Values.tap.docker.tag }}'
imagePullPolicy: {{ .Values.tap.docker.imagepullpolicy }}
name: kubeshark-front
livenessProbe:
failureThreshold: 3
periodSeconds: 1
failureThreshold: 3
successThreshold: 1
initialDelaySeconds: 3
tcpSocket:
port: 80
readinessProbe:
failureThreshold: 3
periodSeconds: 1
failureThreshold: 3
successThreshold: 1
initialDelaySeconds: 3
tcpSocket:
port: 80
timeoutSeconds: 1


@@ -18,5 +18,3 @@ spec:
selector:
app.kubeshark.co/app: front
type: ClusterIP
status:
loadBalancer: {}


@@ -33,7 +33,7 @@ spec:
- -port
- '{{ .Values.tap.proxy.worker.srvport }}'
- -servicemesh
- -tls
{{ .Values.tap.tls | ternary "- -tls" "" }}
- -procfs
- /hostproc
{{ .Values.tap.debug | ternary "- -debug" "" }}
@@ -73,12 +73,16 @@ spec:
- ALL
readinessProbe:
periodSeconds: 1
initialDelaySeconds: 1
failureThreshold: 3
successThreshold: 1
initialDelaySeconds: 5
tcpSocket:
port: {{ .Values.tap.proxy.worker.srvport }}
livenessProbe:
periodSeconds: 1
initialDelaySeconds: 1
failureThreshold: 3
successThreshold: 1
initialDelaySeconds: 5
tcpSocket:
port: {{ .Values.tap.proxy.worker.srvport }}
volumeMounts:


@@ -1,16 +0,0 @@
---
{{- if .Values.tap.ingress.enabled }}
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
labels:
{{- include "kubeshark.labels" . | nindent 4 }}
annotations:
{{- if .Values.tap.annotations }}
{{- toYaml .Values.tap.annotations | nindent 4 }}
{{- end }}
name: kubeshark-ingress-class
namespace: {{ .Release.Namespace }}
spec:
controller: {{ .Values.tap.ingress.controller }}
{{- end }}


@@ -4,37 +4,34 @@ apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
annotations:
certmanager.k8s.io/cluster-issuer: {{ .Values.tap.ingress.certmanager }}
nginx.ingress.kubernetes.io/rewrite-target: /$2
{{- if .Values.tap.annotations }}
nginx.org/websocket-services: "kubeshark-front"
{{- if .Values.tap.annotations }}
{{- toYaml .Values.tap.annotations | nindent 4 }}
{{- end }}
{{- if .Values.tap.ingress.annotations }}
{{- toYaml .Values.tap.ingress.annotations | nindent 4 }}
{{- end }}
labels:
{{- include "kubeshark.labels" . | nindent 4 }}
name: kubeshark-ingress
namespace: {{ .Release.Namespace }}
spec:
{{- if .Values.tap.ingress.classname }}
ingressClassName: {{ .Values.tap.ingress.classname }}
{{- end }}
rules:
- host: {{ .Values.tap.ingress.host }}
http:
paths:
- backend:
service:
name: kubeshark-hub
port:
number: 80
path: /api(/|$)(.*)
pathType: Prefix
- backend:
service:
name: kubeshark-front
port:
number: 80
path: /()(.*)
path: /
pathType: Prefix
{{- if .Values.tap.ingress.tls }}
tls:
{{- if gt (len .Values.tap.ingress.tls) 0}}
{{- toYaml .Values.tap.ingress.tls | nindent 2 }}
{{- end }}
status:


@@ -0,0 +1,46 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: kubeshark-nginx-config-map
namespace: {{ .Release.Namespace }}
labels:
{{- include "kubeshark.labels" . | nindent 4 }}
data:
default.conf: |
server {
listen 80;
{{- if .Values.tap.ipv6 }}
listen [::]:80;
{{- end }}
access_log /dev/stdout;
error_log /dev/stdout;
location /api {
rewrite ^/api(.*)$ $1 break;
proxy_pass http://kubeshark-hub;
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header Host $http_host;
proxy_set_header Upgrade websocket;
proxy_set_header Connection Upgrade;
proxy_set_header Authorization $http_authorization;
proxy_pass_header Authorization;
proxy_connect_timeout 4s;
proxy_read_timeout 120s;
proxy_send_timeout 12s;
proxy_pass_request_headers on;
}
location / {
root /usr/share/nginx/html;
index index.html index.htm;
try_files $uri $uri/ /index.html;
expires -1;
add_header Cache-Control no-cache;
}
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root /usr/share/nginx/html;
}
}


@@ -14,3 +14,4 @@ data:
AUTH_ENABLED: '{{ .Values.tap.auth.enabled | ternary "true" "" }}'
AUTH_APPROVED_EMAILS: '{{ gt (len .Values.tap.auth.approvedemails) 0 | ternary (join "," .Values.tap.auth.approvedemails) "" }}'
AUTH_APPROVED_DOMAINS: '{{ gt (len .Values.tap.auth.approveddomains) 0 | ternary (join "," .Values.tap.auth.approveddomains) "" }}'
TELEMETRY_DISABLED: '{{ not .Values.tap.telemetry.enabled | ternary "true" "" }}'


@@ -1,28 +0,0 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: kubeshark-nginx-config-map
namespace: {{ .Release.Namespace }}
labels:
{{- include "kubeshark.labels" . | nindent 4 }}
data:
default.conf: |
server {
listen 80;
{{- if .Values.tap.ipv6 }}
listen [::]:80;
{{- end }}
add_header Cache-Control no-cache;
location / {
root /usr/share/nginx/html;
index index.html index.htm;
try_files $uri $uri/ /index.html;
expires -1;
}
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root /usr/share/nginx/html;
}
}


@@ -2,16 +2,14 @@ Thank you for installing {{ title .Chart.Name }}.
Your deployment has been successful. The release is named {{ .Release.Name }} and it has been deployed in the {{ .Release.Namespace }} namespace.
{{- if .Values.tap.telemetry.enabled }}
Notice: Telemetry is enabled. Kubeshark will collect anonymous usage statistics.
{{ end }}
{{- if .Values.tap.ingress.enabled }}
{{ if not .Values.license -}}
warning:
> Ingress option enabled but license not set. The application should not work as expected.
> Get a license at https://console.kubeshark.co/
{{- else }}
You can now access the application through the following URL:
http{{ if .Values.tap.ingress.tls }}s{{ end }}://{{ .Values.tap.ingress.host }}
{{- end -}}
{{- else }}
To access the application, follow these steps:
@@ -24,4 +22,4 @@ To access the application, follow these steps:
2. Once port forwarding is done, you can access the application by visiting the following URL in your web browser:
http://0.0.0.0:8899
{{ end }}
{{ end }}


@@ -1,74 +1,38 @@
config: {}
dumplogs: false
headless: false
kube:
configpath: ""
context: ""
license: ""
logs:
file: ""
manifests:
dump: false
scripting:
env: null
source: ""
watchscripts: true
tap:
annotations: {}
auth:
approveddomains: []
approvedemails: []
enabled: false
debug: false
docker:
imagepullpolicy: Always
imagepullsecrets: null
registry: docker.io/kubeshark
tag: latest
dryrun: false
ignoretainted: false
ingress:
certmanager: letsencrypt-prod
classname: kubeshark-ingress-class
controller: k8s.io/ingress-nginx
enabled: false
host: ks.svc.cluster.local
tls: null
ipv6: true
labels: {}
namespaces: []
nodeselectorterms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- linux
packetcapture: libpcap
pcap: ""
persistentstorage: false
imagepullpolicy: Always
imagepullsecrets: []
proxy:
front:
port: 8899
host: 127.0.0.1
worker:
srvport: 8897
hub:
port: 8898
srvport: 8898
worker:
srvport: 8897
front:
port: 8899
host: 127.0.0.1
regex: .*
namespaces: []
release:
repo: https://helm.kubeshark.co
name: kubeshark
namespace: default
repo: https://helm.kubeshark.co
persistentstorage: false
storagelimit: 200Mi
storageclass: standard
dryrun: false
pcap: ""
resources:
hub:
worker:
limits:
cpu: 750m
memory: 1Gi
requests:
cpu: 50m
memory: 50Mi
worker:
hub:
limits:
cpu: 750m
memory: 1Gi
@@ -76,6 +40,40 @@ tap:
cpu: 50m
memory: 50Mi
servicemesh: true
storageclass: standard
storagelimit: 200Mi
tls: true
packetcapture: libpcap
ignoretainted: false
labels: {}
annotations: {}
nodeselectorterms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- linux
auth:
enabled: false
approvedemails: []
approveddomains: []
ingress:
enabled: false
classname: ""
host: ks.svc.cluster.local
tls: []
annotations: {}
ipv6: true
debug: false
telemetry:
enabled: true
logs:
file: ""
kube:
configpath: ""
context: ""
dumplogs: false
headless: false
license: ""
scripting:
env: {}
source: ""
watchscripts: true


@@ -5,8 +5,10 @@ import (
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"os"
"time"
"github.com/kubeshark/kubeshark/config"
@@ -340,3 +342,40 @@ func (connector *Connector) PostScriptDone() {
time.Sleep(DefaultSleep)
}
}
func (connector *Connector) PostPcapsMerge(out *os.File) {
postEnvUrl := fmt.Sprintf("%s/pcaps/merge", connector.url)
if envMarshalled, err := json.Marshal(map[string]string{"query": ""}); err != nil {
log.Error().Err(err).Msg("Failed to marshal the env:")
} else {
ok := false
for !ok {
var resp *http.Response
if resp, err = utils.Post(postEnvUrl, "application/json", bytes.NewBuffer(envMarshalled), connector.client, config.Config.License); err != nil || resp.StatusCode != http.StatusOK {
if _, ok := err.(*url.Error); ok {
break
}
log.Warn().Err(err).Msg("Failed exported PCAP download. Retrying...")
} else {
defer resp.Body.Close()
// Check server response
if resp.StatusCode != http.StatusOK {
log.Error().Str("status", resp.Status).Err(err).Msg("Failed exported PCAP download.")
return
}
// Writer the body to file
_, err = io.Copy(out, resp.Body)
if err != nil {
log.Error().Err(err).Msg("Failed writing PCAP export:")
return
}
log.Info().Str("path", out.Name()).Msg("Downloaded exported PCAP:")
return
}
time.Sleep(DefaultSleep)
}
}
}


@@ -72,6 +72,10 @@ func GetProxyOnPort(port uint16) string {
return fmt.Sprintf("http://%s:%d", config.Config.Tap.Proxy.Host, port)
}
func GetHubUrl() string {
return fmt.Sprintf("%s/api", GetProxyOnPort(config.Config.Tap.Proxy.Hub.Port))
}
func getRerouteHttpHandlerSelfAPI(proxyHandler http.Handler, selfNamespace string, selfServiceName string) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Access-Control-Allow-Origin", "*")


@@ -4,16 +4,16 @@ apiVersion: v1
kind: ServiceAccount
metadata:
labels:
helm.sh/chart: kubeshark-50.0
helm.sh/chart: kubeshark-50.4
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "50.0"
app.kubernetes.io/version: "50.4"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-service-account
namespace: default
---
# Source: kubeshark/templates/14-secret.yaml
# Source: kubeshark/templates/13-secret.yaml
kind: Secret
apiVersion: v1
metadata:
@@ -21,37 +21,55 @@ metadata:
namespace: default
labels:
app.kubeshark.co/app: hub
helm.sh/chart: kubeshark-50.0
helm.sh/chart: kubeshark-50.4
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "50.0"
app.kubernetes.io/version: "50.4"
app.kubernetes.io/managed-by: Helm
stringData:
LICENSE: ''
---
# Source: kubeshark/templates/12-nginx-config-map.yaml
# Source: kubeshark/templates/11-nginx-config-map.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: kubeshark-nginx-config-map
namespace: default
labels:
helm.sh/chart: kubeshark-50.0
helm.sh/chart: kubeshark-50.4
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "50.0"
app.kubernetes.io/version: "50.4"
app.kubernetes.io/managed-by: Helm
data:
default.conf: |
server {
listen 80;
listen [::]:80;
add_header Cache-Control no-cache;
access_log /dev/stdout;
error_log /dev/stdout;
location /api {
rewrite ^/api(.*)$ $1 break;
proxy_pass http://kubeshark-hub;
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header Host $http_host;
proxy_set_header Upgrade websocket;
proxy_set_header Connection Upgrade;
proxy_set_header Authorization $http_authorization;
proxy_pass_header Authorization;
proxy_connect_timeout 4s;
proxy_read_timeout 120s;
proxy_send_timeout 12s;
proxy_pass_request_headers on;
}
location / {
root /usr/share/nginx/html;
index index.html index.htm;
try_files $uri $uri/ /index.html;
expires -1;
add_header Cache-Control no-cache;
}
error_page 500 502 503 504 /50x.html;
location = /50x.html {
@@ -59,7 +77,7 @@ data:
}
}
---
# Source: kubeshark/templates/13-config-map.yaml
# Source: kubeshark/templates/12-config-map.yaml
kind: ConfigMap
apiVersion: v1
metadata:
@@ -67,29 +85,30 @@ metadata:
namespace: default
labels:
app.kubeshark.co/app: hub
helm.sh/chart: kubeshark-50.0
helm.sh/chart: kubeshark-50.4
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "50.0"
app.kubernetes.io/version: "50.4"
app.kubernetes.io/managed-by: Helm
data:
POD_REGEX: '.*'
NAMESPACES: ''
SCRIPTING_ENV: 'null'
SCRIPTING_ENV: '{}'
SCRIPTING_SCRIPTS: '[]'
AUTH_ENABLED: ''
AUTH_APPROVED_EMAILS: ''
AUTH_APPROVED_DOMAINS: ''
TELEMETRY_DISABLED: ''
---
# Source: kubeshark/templates/02-cluster-role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
helm.sh/chart: kubeshark-50.0
helm.sh/chart: kubeshark-50.4
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "50.0"
app.kubernetes.io/version: "50.4"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-cluster-role
@@ -114,10 +133,10 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
helm.sh/chart: kubeshark-50.0
helm.sh/chart: kubeshark-50.4
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "50.0"
app.kubernetes.io/version: "50.4"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-cluster-role-binding
@@ -131,16 +150,65 @@ subjects:
name: kubeshark-service-account
namespace: default
---
# Source: kubeshark/templates/02-cluster-role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
helm.sh/chart: kubeshark-50.4
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "50.4"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-self-secrets-role
namespace: default
rules:
- apiGroups:
- "v1"
- ""
resourceNames:
- kubeshark-secret
resources:
- secrets
verbs:
- get
- watch
- update
- patch
---
# Source: kubeshark/templates/03-cluster-role-binding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: kubeshark-self-secrets-role-binding
labels:
helm.sh/chart: kubeshark-50.4
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "50.4"
app.kubernetes.io/managed-by: Helm
annotations:
namespace: default
subjects:
- kind: ServiceAccount
name: kubeshark-service-account
namespace: default
roleRef:
kind: Role
name: kubeshark-self-secrets-role
apiGroup: rbac.authorization.k8s.io
---
# Source: kubeshark/templates/05-hub-service.yaml
apiVersion: v1
kind: Service
metadata:
labels:
app.kubeshark.co/app: hub
helm.sh/chart: kubeshark-50.0
helm.sh/chart: kubeshark-50.4
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "50.0"
app.kubernetes.io/version: "50.4"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-hub
@@ -153,18 +221,16 @@ spec:
selector:
app.kubeshark.co/app: hub
type: ClusterIP
status:
loadBalancer: {}
---
# Source: kubeshark/templates/07-front-service.yaml
apiVersion: v1
kind: Service
metadata:
labels:
helm.sh/chart: kubeshark-50.0
helm.sh/chart: kubeshark-50.4
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "50.0"
app.kubernetes.io/version: "50.4"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-front
@@ -177,8 +243,6 @@ spec:
selector:
app.kubeshark.co/app: front
type: ClusterIP
status:
loadBalancer: {}
---
# Source: kubeshark/templates/09-worker-daemon-set.yaml
apiVersion: apps/v1
@@ -187,10 +251,10 @@ metadata:
labels:
app.kubeshark.co/app: worker
sidecar.istio.io/inject: "false"
helm.sh/chart: kubeshark-50.0
helm.sh/chart: kubeshark-50.4
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "50.0"
app.kubernetes.io/version: "50.4"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-worker-daemon-set
@@ -199,19 +263,19 @@ spec:
selector:
matchLabels:
app.kubeshark.co/app: worker
helm.sh/chart: kubeshark-50.0
helm.sh/chart: kubeshark-50.4
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "50.0"
app.kubernetes.io/version: "50.4"
app.kubernetes.io/managed-by: Helm
template:
metadata:
labels:
app.kubeshark.co/app: worker
helm.sh/chart: kubeshark-50.0
helm.sh/chart: kubeshark-50.4
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "50.0"
app.kubernetes.io/version: "50.4"
app.kubernetes.io/managed-by: Helm
name: kubeshark-worker-daemon-set
namespace: kubeshark
@@ -255,12 +319,16 @@ spec:
- ALL
readinessProbe:
periodSeconds: 1
initialDelaySeconds: 1
failureThreshold: 3
successThreshold: 1
initialDelaySeconds: 5
tcpSocket:
port: 8897
livenessProbe:
periodSeconds: 1
initialDelaySeconds: 1
failureThreshold: 3
successThreshold: 1
initialDelaySeconds: 5
tcpSocket:
port: 8897
volumeMounts:
@@ -304,10 +372,10 @@ metadata:
namespace: default
labels:
app.kubeshark.co/app: hub
helm.sh/chart: kubeshark-50.0
helm.sh/chart: kubeshark-50.4
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "50.0"
app.kubernetes.io/version: "50.4"
app.kubernetes.io/managed-by: Helm
annotations:
spec:
@@ -337,11 +405,15 @@ spec:
imagePullPolicy: Always
readinessProbe:
periodSeconds: 1
failureThreshold: 3
successThreshold: 1
initialDelaySeconds: 3
tcpSocket:
port: 80
livenessProbe:
periodSeconds: 1
failureThreshold: 3
successThreshold: 1
initialDelaySeconds: 3
tcpSocket:
port: 80
@@ -361,10 +433,10 @@ metadata:
namespace: default
labels:
app.kubeshark.co/app: front
helm.sh/chart: kubeshark-50.0
helm.sh/chart: kubeshark-50.4
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "50.0"
app.kubernetes.io/version: "50.4"
app.kubernetes.io/managed-by: Helm
annotations:
spec:
@@ -384,20 +456,22 @@ spec:
- name: REACT_APP_HUB_HOST
value: ' '
- name: REACT_APP_HUB_PORT
value: ':8898'
value: ':8899/api'
image: 'docker.io/kubeshark/front:latest'
imagePullPolicy: Always
name: kubeshark-front
livenessProbe:
failureThreshold: 3
periodSeconds: 1
failureThreshold: 3
successThreshold: 1
initialDelaySeconds: 3
tcpSocket:
port: 80
readinessProbe:
failureThreshold: 3
periodSeconds: 1
failureThreshold: 3
successThreshold: 1
initialDelaySeconds: 3
tcpSocket:
port: 80
timeoutSeconds: 1


@@ -2,29 +2,15 @@ package utils
import (
"bytes"
"encoding/json"
"gopkg.in/yaml.v3"
"github.com/goccy/go-yaml"
)
func PrettyYaml(data interface{}) (result string, err error) {
var marshalled []byte
marshalled, err = json.Marshal(data)
if err != nil {
return
}
var unmarshalled interface{}
err = json.Unmarshal(marshalled, &unmarshalled)
if err != nil {
return
}
buffer := new(bytes.Buffer)
encoder := yaml.NewEncoder(buffer)
encoder.SetIndent(2)
encoder := yaml.NewEncoder(buffer, yaml.Indent(2))
err = encoder.Encode(unmarshalled)
err = encoder.Encode(data)
if err != nil {
return
}