Compare commits

...

25 Commits
40.0 ... 40.4

Author SHA1 Message Date
M. Mert Yildiran
be5bd6a372 Template the AUTH_APPROVED_DOMAINS and certmanager.k8s.io/cluster-issuer
Also add `networking.k8s.io` to `apiGroups` in `ClusterRole`
2023-05-25 05:07:42 +03:00
M. Mert Yildiran
42df7aa42f Update the Certificate resource name 2023-05-24 06:32:48 +03:00
M. Mert Yildiran
9a9052198f ⬆️ Bump the Helm chart version 2023-05-24 05:44:18 +03:00
M. Mert Yildiran
2fb83c3642 Fix the Bash script 2023-05-24 04:18:27 +03:00
M. Mert Yildiran
d44674fe86 Update the secret name of certificate 2023-05-24 04:10:46 +03:00
M. Mert Yildiran
c57ed1efd3 Run kubeshark manifests --dump && kubeshark helm-chart 2023-05-24 04:04:34 +03:00
M. Mert Yildiran
c19cd00c77 Add CertManager field to IngressConfig and add an Ingress TLS example 2023-05-24 04:01:45 +03:00
M. Mert Yildiran
39f8d40b76 Revert " Add Refresh-Token to the list of Access-Control-Allow-Headers"
This reverts commit bf731073c8.
2023-05-24 02:10:48 +03:00
M. Mert Yildiran
bf731073c8 Add Refresh-Token to the list of Access-Control-Allow-Headers 2023-05-24 02:04:56 +03:00
M. Mert Yildiran
4bb68afaaf Add AuthConfig struct and pass domains in AUTH_APPROVED_DOMAINS environment variable 2023-05-24 01:50:59 +03:00
M. Mert Yildiran
2126fc83a7 🐛 Remove the cancel() call 2023-05-22 19:20:39 +03:00
M. Mert Yildiran
d0c1dbcd5e Print and open a different URL in case Ingress is enabled 2023-05-17 03:57:27 +03:00
M. Mert Yildiran
ad9dfbce40 Add Ingress (#1357)
*  Add `Ingress`

*  Rewrite the target in `Ingress`

*  Fix the path of front pod in `Ingress`

*  Add `IngressConfig` struct

*  Generate the correct Helm chart based on `tap.ingress` field of `values.yaml`
2023-05-16 19:46:47 +03:00
M. Mert Yildiran
139336d4ee Template hostPort(s) in the Helm chart 2023-05-10 14:38:38 +03:00
M. Mert Yildiran
f68fed0de8 🐛 Fix the effect of proxy config port changes 2023-05-10 01:28:43 +03:00
M. Mert Yildiran
1d7d242e6c Generate the missing new line in 08-persistent-volume-claim.yaml 2023-05-09 00:00:07 +03:00
M. Mert Yildiran
aa904e23c7 Add --persistentstorage option to tap command 2023-05-08 23:57:22 +03:00
M. Mert Yildiran
baf0e65337 Template the Helm chart based on persistentstorage value 2023-05-08 23:52:14 +03:00
M. Mert Yildiran
a33a3467fc Add persistentstorage option 2023-05-08 00:50:56 +03:00
M. Mert Yildiran
a9b598bc41 ⬆️ Bump the Helm chart version 2023-05-04 21:45:55 +03:00
M. Mert Yildiran
0aee367ad5 Omit the license string in helm-chart and manifests commands 2023-05-04 21:37:55 +03:00
M. Mert Yildiran
8c7d9ea8fd Fix the updateLicense method 2023-05-04 21:33:38 +03:00
M. Mert Yildiran
fab0f713ed 🐛 Pass the license string 2023-05-04 21:18:34 +03:00
M. Mert Yildiran
2563cc1922 🐛 Fix the imagePullPolicy to imagepullpolicy in helm-chart command 2023-04-24 02:03:58 +03:00
Jay R. Wren
26c9f42eba 📚 Remove kubeshark tap -A example from README.md (#1339) 2023-04-21 18:35:24 -07:00
42 changed files with 641 additions and 121 deletions
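The two feature clusters in the log above (Ingress and persistent storage) are driven by new `tap` flags. A minimal sketch of exercising both, assuming the flag labels added in this range (`ingress-enabled`, `persistentstorage`) and the default config values:

```shell
# Enable the Ingress resources while tapping; the CLI then prints the
# ingress host URL instead of the localhost proxy URL.
kubeshark tap --ingress-enabled

# Opt into the PersistentVolumeClaim for worker data (off by default);
# storagelimit/storageclass keep their defaults (200Mi, standard).
kubeshark tap --persistentstorage
```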

View File

@@ -45,10 +45,6 @@ Download **Kubeshark**'s binary distribution [latest release](https://github.com
kubeshark tap
```
```shell
kubeshark tap -A
```
```shell
kubeshark tap -n sock-shop "(catalo*|front-end*)"
```

View File

@@ -12,14 +12,14 @@ func ServerConnection(kubernetesProvider *kubernetes.Provider) bool {
var connectedToHub, connectedToFront bool
if err := checkProxy(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort), "/echo", kubernetesProvider); err != nil {
if err := checkProxy(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.Port), "/echo", kubernetesProvider); err != nil {
log.Error().Err(err).Msg("Couldn't connect to Hub using proxy!")
} else {
connectedToHub = true
log.Info().Msg("Connected successfully to Hub using proxy.")
}
if err := checkProxy(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Front.SrcPort), "", kubernetesProvider); err != nil {
if err := checkProxy(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Front.Port), "", kubernetesProvider); err != nil {
log.Error().Err(err).Msg("Couldn't connect to Front using proxy!")
} else {
connectedToFront = true

View File

@@ -36,12 +36,12 @@ func init() {
log.Debug().Err(err).Send()
}
consoleCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.SrcPort, "Provide a custom port for the Hub")
consoleCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.Port, "Provide a custom port for the Hub")
consoleCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the Hub")
}
func runConsole() {
hubUrl := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort)
hubUrl := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.Port)
response, err := http.Get(fmt.Sprintf("%s/echo", hubUrl))
if err != nil || response.StatusCode != 200 {
log.Info().Msg(fmt.Sprintf(utils.Yellow, "Couldn't connect to Hub. Establishing proxy..."))
@@ -51,10 +51,10 @@ func runConsole() {
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
log.Info().Str("host", config.Config.Tap.Proxy.Host).Uint16("port", config.Config.Tap.Proxy.Hub.SrcPort).Msg("Connecting to:")
log.Info().Str("host", config.Config.Tap.Proxy.Host).Uint16("port", config.Config.Tap.Proxy.Hub.Port).Msg("Connecting to:")
u := url.URL{
Scheme: "ws",
Host: fmt.Sprintf("%s:%d", config.Config.Tap.Proxy.Host, config.Config.Tap.Proxy.Hub.SrcPort),
Host: fmt.Sprintf("%s:%d", config.Config.Tap.Proxy.Host, config.Config.Tap.Proxy.Hub.Port),
Path: "/scripts/logs",
}

View File

@@ -6,6 +6,7 @@ import (
"os"
"path/filepath"
"sort"
"strings"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/kubernetes"
@@ -143,9 +144,13 @@ var hubPodMappings = map[string]interface{}{
"name": "SCRIPTING_SCRIPTS",
"value": "[]",
},
{
"name": "AUTH_APPROVED_DOMAINS",
"value": "{{ gt (len .Values.tap.ingress.auth.approvedDomains) 0 | ternary (join \",\" .Values.tap.ingress.auth.approvedDomains) \"\" }}",
},
},
"spec.containers[0].image": "{{ .Values.tap.docker.registry }}/hub:{{ .Values.tap.docker.tag }}",
"spec.containers[0].imagePullPolicy": "{{ .Values.tap.docker.imagePullPolicy }}",
"spec.containers[0].imagePullPolicy": "{{ .Values.tap.docker.imagepullpolicy }}",
"spec.containers[0].resources.limits.cpu": "{{ .Values.tap.resources.hub.limits.cpu }}",
"spec.containers[0].resources.limits.memory": "{{ .Values.tap.resources.hub.limits.memory }}",
"spec.containers[0].resources.requests.cpu": "{{ .Values.tap.resources.hub.requests.cpu }}",
@@ -156,7 +161,7 @@ var hubServiceMappings = serviceAccountMappings
var frontPodMappings = map[string]interface{}{
"metadata.namespace": "{{ .Values.tap.selfnamespace }}",
"spec.containers[0].image": "{{ .Values.tap.docker.registry }}/front:{{ .Values.tap.docker.tag }}",
"spec.containers[0].imagePullPolicy": "{{ .Values.tap.docker.imagePullPolicy }}",
"spec.containers[0].imagePullPolicy": "{{ .Values.tap.docker.imagepullpolicy }}",
}
var frontServiceMappings = serviceAccountMappings
var persistentVolumeMappings = map[string]interface{}{
@@ -167,14 +172,22 @@ var persistentVolumeMappings = map[string]interface{}{
var workerDaemonSetMappings = map[string]interface{}{
"metadata.namespace": "{{ .Values.tap.selfnamespace }}",
"spec.template.spec.containers[0].image": "{{ .Values.tap.docker.registry }}/worker:{{ .Values.tap.docker.tag }}",
"spec.template.spec.containers[0].imagePullPolicy": "{{ .Values.tap.docker.imagePullPolicy }}",
"spec.template.spec.containers[0].imagePullPolicy": "{{ .Values.tap.docker.imagepullpolicy }}",
"spec.template.spec.containers[0].resources.limits.cpu": "{{ .Values.tap.resources.worker.limits.cpu }}",
"spec.template.spec.containers[0].resources.limits.memory": "{{ .Values.tap.resources.worker.limits.memory }}",
"spec.template.spec.containers[0].resources.requests.cpu": "{{ .Values.tap.resources.worker.requests.cpu }}",
"spec.template.spec.containers[0].resources.requests.memory": "{{ .Values.tap.resources.worker.requests.memory }}",
"spec.template.spec.containers[0].command[0]": "{{ .Values.tap.debug | ternary \"./worker -debug\" \"./worker\" }}",
"spec.template.spec.containers[0].command[4]": "{{ .Values.tap.proxy.worker.srvport }}",
"spec.template.spec.containers[0].command[6]": "{{ .Values.tap.packetcapture }}",
}
var ingressClassMappings = serviceAccountMappings
var ingressMappings = map[string]interface{}{
"metadata.namespace": "{{ .Values.tap.selfnamespace }}",
"metadata.annotations[\"certmanager.k8s.io/cluster-issuer\"]": "{{ .Values.tap.ingress.certManager }}",
"spec.rules[0].host": "{{ .Values.tap.ingress.host }}",
"spec.tls": "{{ .Values.tap.ingress.tls | toYaml }}",
}
func init() {
rootCmd.AddCommand(helmChartCmd)
@@ -191,6 +204,8 @@ func runHelmChart() {
frontService,
persistentVolume,
workerDaemonSet,
ingressClass,
ingress,
err := generateManifests()
if err != nil {
log.Error().Err(err).Send()
@@ -208,6 +223,8 @@ func runHelmChart() {
"07-front-service.yaml": template(frontService, frontServiceMappings),
"08-persistent-volume-claim.yaml": template(persistentVolume, persistentVolumeMappings),
"09-worker-daemon-set.yaml": template(workerDaemonSet, workerDaemonSetMappings),
"10-ingress-class.yaml": template(ingressClass, ingressClassMappings),
"11-ingress.yaml": template(ingress, ingressMappings),
})
if err != nil {
log.Error().Err(err).Send()
@@ -257,6 +274,76 @@ func template(object interface{}, mappings map[string]interface{}) (template int
return
}
func handleHubPod(manifest string) string {
lines := strings.Split(manifest, "\n")
for i, line := range lines {
if strings.HasPrefix(strings.TrimSpace(line), "hostPort:") {
lines[i] = " hostPort: {{ .Values.tap.proxy.hub.srvport }}"
}
}
return strings.Join(lines, "\n")
}
func handleFrontPod(manifest string) string {
lines := strings.Split(manifest, "\n")
for i, line := range lines {
if strings.HasPrefix(strings.TrimSpace(line), "hostPort:") {
lines[i] = " hostPort: {{ .Values.tap.proxy.front.srvport }}"
}
}
return strings.Join(lines, "\n")
}
func handlePVCManifest(manifest string) string {
return fmt.Sprintf("{{- if .Values.tap.persistentstorage }}\n%s{{- end }}\n", manifest)
}
func handleDaemonSetManifest(manifest string) string {
lines := strings.Split(manifest, "\n")
for i, line := range lines {
if strings.TrimSpace(line) == "- mountPath: /app/data" {
lines[i] = fmt.Sprintf("{{- if .Values.tap.persistentstorage }}\n%s", line)
}
if strings.TrimSpace(line) == "name: kubeshark-persistent-volume" {
lines[i] = fmt.Sprintf("%s\n{{- end }}", line)
}
if strings.TrimSpace(line) == "- name: kubeshark-persistent-volume" {
lines[i] = fmt.Sprintf("{{- if .Values.tap.persistentstorage }}\n%s", line)
}
if strings.TrimSpace(line) == "claimName: kubeshark-persistent-volume-claim" {
lines[i] = fmt.Sprintf("%s\n{{- end }}", line)
}
if strings.HasPrefix(strings.TrimSpace(line), "- containerPort:") {
lines[i] = " - containerPort: {{ .Values.tap.proxy.worker.srvport }}"
}
if strings.HasPrefix(strings.TrimSpace(line), "hostPort:") {
lines[i] = " hostPort: {{ .Values.tap.proxy.worker.srvport }}"
}
}
return strings.Join(lines, "\n")
}
func handleIngressClass(manifest string) string {
return fmt.Sprintf("{{- if .Values.tap.ingress.enabled }}\n%s{{- end }}\n", manifest)
}
func handleIngress(manifest string) string {
manifest = strings.Replace(manifest, "'{{ .Values.tap.ingress.tls | toYaml }}'", "{{ .Values.tap.ingress.tls | toYaml }}", 1)
return handleIngressClass(manifest)
}
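These handlers run inside the `helm-chart` command; per commit c57ed1efd3 above, regenerating the chart locally is just:

```shell
# Regenerate the raw manifests, then template them into the Helm chart.
kubeshark manifests --dump
kubeshark helm-chart
# dumpHelmChart writes the result under ./helm-chart/templates/.
```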
func dumpHelmChart(objects map[string]interface{}) error {
folder := filepath.Join(".", "helm-chart")
templatesFolder := filepath.Join(folder, "templates")
@@ -285,6 +372,30 @@ func dumpHelmChart(objects map[string]interface{}) error {
return err
}
if filename == "04-hub-pod.yaml" {
manifest = handleHubPod(manifest)
}
if filename == "06-front-pod.yaml" {
manifest = handleFrontPod(manifest)
}
if filename == "08-persistent-volume-claim.yaml" {
manifest = handlePVCManifest(manifest)
}
if filename == "09-worker-daemon-set.yaml" {
manifest = handleDaemonSetManifest(manifest)
}
if filename == "10-ingress-class.yaml" {
manifest = handleIngressClass(manifest)
}
if filename == "11-ingress.yaml" {
manifest = handleIngress(manifest)
}
path := filepath.Join(templatesFolder, filename)
err = os.WriteFile(path, []byte(manifestHeader+manifest), 0644)
if err != nil {

View File

@@ -15,6 +15,7 @@ import (
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
v1 "k8s.io/api/core/v1"
networking "k8s.io/api/networking/v1"
rbac "k8s.io/api/rbac/v1"
)
@@ -52,6 +53,8 @@ func runManifests() {
frontService,
persistentVolume,
workerDaemonSet,
ingressClass,
ingress,
err := generateManifests()
if err != nil {
log.Error().Err(err).Send()
@@ -70,6 +73,8 @@ func runManifests() {
"07-front-service.yaml": frontService,
"08-persistent-volume-claim.yaml": persistentVolume,
"09-worker-daemon-set.yaml": workerDaemonSet,
"10-ingress-class.yaml": ingressClass,
"11-ingress.yaml": ingress,
})
} else {
err = printManifests([]interface{}{
@@ -101,8 +106,14 @@ func generateManifests() (
frontService *v1.Service,
persistentVolumeClaim *v1.PersistentVolumeClaim,
workerDaemonSet *kubernetes.DaemonSet,
ingressClass *networking.IngressClass,
ingress *networking.Ingress,
err error,
) {
config.Config.License = ""
persistentStorage := config.Config.Tap.PersistentStorage
config.Config.Tap.PersistentStorage = true
var kubernetesProvider *kubernetes.Provider
kubernetesProvider, err = getKubernetesProviderForCli(true, true)
if err != nil {
@@ -142,7 +153,7 @@ func generateManifests() (
ImagePullPolicy: config.Config.ImagePullPolicy(),
ImagePullSecrets: config.Config.ImagePullSecrets(),
Debug: config.Config.Tap.Debug,
}, config.Config.Tap.Proxy.Host, fmt.Sprintf("%d", config.Config.Tap.Proxy.Hub.SrcPort))
}, config.Config.Tap.Proxy.Host, fmt.Sprintf("%d", config.Config.Tap.Proxy.Hub.Port))
if err != nil {
return
}
@@ -169,6 +180,11 @@ func generateManifests() (
return
}
ingressClass = kubernetesProvider.BuildIngressClass()
ingress = kubernetesProvider.BuildIngress()
config.Config.Tap.PersistentStorage = persistentStorage
return
}

View File

@@ -40,19 +40,19 @@ func init() {
log.Debug().Err(err).Send()
}
proCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.SrcPort, "Provide a custom port for the Hub")
proCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.Port, "Provide a custom port for the Hub")
proCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the Hub")
}
func acquireLicense() {
hubUrl := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort)
hubUrl := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.Port)
response, err := http.Get(fmt.Sprintf("%s/echo", hubUrl))
if err != nil || response.StatusCode != 200 {
log.Info().Msg(fmt.Sprintf(utils.Yellow, "Couldn't connect to Hub. Establishing proxy..."))
runProxy(false, true)
}
connector = connect.NewConnector(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort), connect.DefaultRetries, connect.DefaultTimeout)
connector = connect.NewConnector(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.Port), connect.DefaultRetries, connect.DefaultTimeout)
log.Info().Str("url", PRO_URL).Msg("Opening in the browser:")
utils.OpenBrowser(PRO_URL)
@@ -61,17 +61,19 @@ func acquireLicense() {
}
func updateLicense(licenseKey string) {
log.Info().Str("key", licenseKey).Msg("Received license:")
config.Config.License = licenseKey
err := config.WriteConfig(&config.Config)
if err != nil {
log.Error().Err(err).Send()
}
connector.PostLicenseSingle(config.Config.License)
log.Info().Msg("Updated the license. Exiting.")
go func() {
connector.PostLicense(config.Config.License)
log.Info().Msg("Updated the license. Exiting.")
time.Sleep(2 * time.Second)
os.Exit(0)
}()
@@ -105,8 +107,6 @@ func runLicenseRecieverServer() {
licenseKey := string(data)
log.Info().Str("key", licenseKey).Msg("Received license:")
updateLicense(licenseKey)
})

View File

@@ -24,7 +24,7 @@ func init() {
log.Debug().Err(err).Send()
}
proxyCmd.Flags().Uint16(configStructs.ProxyFrontPortLabel, defaultTapConfig.Proxy.Front.SrcPort, "Provide a custom port for the front-end proxy/port-forward")
proxyCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.SrcPort, "Provide a custom port for the Hub proxy/port-forward")
proxyCmd.Flags().Uint16(configStructs.ProxyFrontPortLabel, defaultTapConfig.Proxy.Front.Port, "Provide a custom port for the front-end proxy/port-forward")
proxyCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.Port, "Provide a custom port for the Hub proxy/port-forward")
proxyCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the proxy/port-forward")
}

View File

@@ -63,12 +63,12 @@ func runProxy(block bool, noBrowser bool) {
var establishedProxy bool
hubUrl := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort)
hubUrl := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.Port)
response, err := http.Get(fmt.Sprintf("%s/echo", hubUrl))
if err == nil && response.StatusCode == 200 {
log.Info().
Str("service", kubernetes.HubServiceName).
Int("port", int(config.Config.Tap.Proxy.Hub.SrcPort)).
Int("port", int(config.Config.Tap.Proxy.Hub.Port)).
Msg("Found a running service.")
okToOpen("Hub", hubUrl, true)
@@ -79,8 +79,8 @@ func runProxy(block bool, noBrowser bool) {
kubernetes.HubServiceName,
kubernetes.HubPodName,
configStructs.ProxyHubPortLabel,
config.Config.Tap.Proxy.Hub.SrcPort,
config.Config.Tap.Proxy.Hub.DstPort,
config.Config.Tap.Proxy.Hub.Port,
configStructs.ContainerPort,
"/echo",
)
connector := connect.NewConnector(hubUrl, connect.DefaultRetries, connect.DefaultTimeout)
@@ -93,12 +93,12 @@ func runProxy(block bool, noBrowser bool) {
okToOpen("Hub", hubUrl, true)
}
frontUrl := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Front.SrcPort)
frontUrl := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Front.Port)
response, err = http.Get(fmt.Sprintf("%s/", frontUrl))
if err == nil && response.StatusCode == 200 {
log.Info().
Str("service", kubernetes.FrontServiceName).
Int("port", int(config.Config.Tap.Proxy.Front.SrcPort)).
Int("port", int(config.Config.Tap.Proxy.Front.Port)).
Msg("Found a running service.")
okToOpen("Kubeshark", frontUrl, noBrowser)
@@ -109,8 +109,8 @@ func runProxy(block bool, noBrowser bool) {
kubernetes.FrontServiceName,
kubernetes.FrontPodName,
configStructs.ProxyFrontPortLabel,
config.Config.Tap.Proxy.Front.SrcPort,
config.Config.Tap.Proxy.Front.DstPort,
config.Config.Tap.Proxy.Front.Port,
configStructs.ContainerPort,
"",
)
connector := connect.NewConnector(frontUrl, connect.DefaultRetries, connect.DefaultTimeout)

View File

@@ -34,7 +34,7 @@ func init() {
log.Debug().Err(err).Send()
}
scriptsCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.SrcPort, "Provide a custom port for the Hub")
scriptsCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.Port, "Provide a custom port for the Hub")
scriptsCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the Hub")
}
@@ -44,14 +44,14 @@ func runScripts() {
return
}
hubUrl := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort)
hubUrl := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.Port)
response, err := http.Get(fmt.Sprintf("%s/echo", hubUrl))
if err != nil || response.StatusCode != 200 {
log.Info().Msg(fmt.Sprintf(utils.Yellow, "Couldn't connect to Hub. Establishing proxy..."))
runProxy(false, true)
}
connector = connect.NewConnector(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort), connect.DefaultRetries, connect.DefaultTimeout)
connector = connect.NewConnector(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.Port), connect.DefaultRetries, connect.DefaultTimeout)
watchScripts(true)
}

View File

@@ -47,11 +47,12 @@ func init() {
tapCmd.Flags().StringP(configStructs.DockerTagLabel, "t", defaultTapConfig.Docker.Tag, "The tag of the Docker images that are going to be pulled")
tapCmd.Flags().String(configStructs.DockerImagePullPolicy, defaultTapConfig.Docker.ImagePullPolicy, "ImagePullPolicy for the Docker images")
tapCmd.Flags().StringSlice(configStructs.DockerImagePullSecrets, defaultTapConfig.Docker.ImagePullSecrets, "ImagePullSecrets for the Docker images")
tapCmd.Flags().Uint16(configStructs.ProxyFrontPortLabel, defaultTapConfig.Proxy.Front.SrcPort, "Provide a custom port for the front-end proxy/port-forward")
tapCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.SrcPort, "Provide a custom port for the Hub proxy/port-forward")
tapCmd.Flags().Uint16(configStructs.ProxyFrontPortLabel, defaultTapConfig.Proxy.Front.Port, "Provide a custom port for the front-end proxy/port-forward")
tapCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.Port, "Provide a custom port for the Hub proxy/port-forward")
tapCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the proxy/port-forward")
tapCmd.Flags().StringSliceP(configStructs.NamespacesLabel, "n", defaultTapConfig.Namespaces, "Namespaces selector")
tapCmd.Flags().StringP(configStructs.SelfNamespaceLabel, "s", defaultTapConfig.SelfNamespace, "Self-namespace of Kubeshark")
tapCmd.Flags().Bool(configStructs.PersistentStorageLabel, defaultTapConfig.PersistentStorage, "Enable persistent storage (PersistentVolumeClaim)")
tapCmd.Flags().String(configStructs.StorageLimitLabel, defaultTapConfig.StorageLimit, "Override the default storage limit (per node)")
tapCmd.Flags().String(configStructs.StorageClassLabel, defaultTapConfig.StorageClass, "Override the default storage class of the PersistentVolumeClaim (per node)")
tapCmd.Flags().Bool(configStructs.DryRunLabel, defaultTapConfig.DryRun, "Preview of all pods matching the regex, without tapping them")
@@ -59,5 +60,6 @@ func init() {
tapCmd.Flags().Bool(configStructs.ServiceMeshLabel, defaultTapConfig.ServiceMesh, "Capture the encrypted traffic if the cluster is configured with a service mesh and with mTLS")
tapCmd.Flags().Bool(configStructs.TlsLabel, defaultTapConfig.Tls, "Capture the traffic that's encrypted with OpenSSL or Go crypto/tls libraries")
tapCmd.Flags().Bool(configStructs.IgnoreTaintedLabel, defaultTapConfig.IgnoreTainted, "Ignore tainted pods while running Worker DaemonSet")
tapCmd.Flags().Bool(configStructs.IngressEnabledLabel, defaultTapConfig.Ingress.Enabled, "Enable Ingress")
tapCmd.Flags().Bool(configStructs.DebugLabel, defaultTapConfig.Debug, "Enable the debug mode")
}

View File

@@ -13,6 +13,7 @@ import (
"github.com/docker/docker/client"
"github.com/docker/go-connections/nat"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/config/configStructs"
"github.com/kubeshark/kubeshark/docker"
"github.com/kubeshark/kubeshark/internal/connect"
"github.com/kubeshark/kubeshark/kubernetes"
@@ -141,10 +142,10 @@ func createAndStartContainers(
hostConfigFront := &container.HostConfig{
PortBindings: nat.PortMap{
nat.Port(fmt.Sprintf("%d/tcp", config.Config.Tap.Proxy.Front.DstPort)): []nat.PortBinding{
nat.Port(fmt.Sprintf("%d/tcp", configStructs.ContainerPort)): []nat.PortBinding{
{
HostIP: hostIP,
HostPort: fmt.Sprintf("%d", config.Config.Tap.Proxy.Front.SrcPort),
HostPort: fmt.Sprintf("%d", config.Config.Tap.Proxy.Front.Port),
},
},
},
@@ -156,7 +157,7 @@ func createAndStartContainers(
Env: []string{
"REACT_APP_DEFAULT_FILTER= ",
"REACT_APP_HUB_HOST= ",
fmt.Sprintf("REACT_APP_HUB_PORT=%d", config.Config.Tap.Proxy.Hub.SrcPort),
fmt.Sprintf("REACT_APP_HUB_PORT=%d", config.Config.Tap.Proxy.Hub.Port),
},
}, hostConfigFront, nil, nil, nameFront)
if err != nil {
@@ -165,16 +166,16 @@ func createAndStartContainers(
hostConfigHub := &container.HostConfig{
PortBindings: nat.PortMap{
nat.Port(fmt.Sprintf("%d/tcp", config.Config.Tap.Proxy.Hub.DstPort)): []nat.PortBinding{
nat.Port(fmt.Sprintf("%d/tcp", config.Config.Tap.Proxy.Hub.SrvPort)): []nat.PortBinding{
{
HostIP: hostIP,
HostPort: fmt.Sprintf("%d", config.Config.Tap.Proxy.Hub.SrcPort),
HostPort: fmt.Sprintf("%d", config.Config.Tap.Proxy.Hub.Port),
},
},
},
}
cmdHub := []string{"-port", fmt.Sprintf("%d", config.Config.Tap.Proxy.Hub.DstPort)}
cmdHub := []string{"-port", fmt.Sprintf("%d", config.Config.Tap.Proxy.Hub.SrvPort)}
if config.DebugMode {
cmdHub = append(cmdHub, fmt.Sprintf("-%s", config.DebugFlag))
}
@@ -183,13 +184,13 @@ func createAndStartContainers(
Image: imageHub,
Cmd: cmdHub,
Tty: false,
ExposedPorts: nat.PortSet{nat.Port(fmt.Sprintf("%d/tcp", config.Config.Tap.Proxy.Hub.DstPort)): {}},
ExposedPorts: nat.PortSet{nat.Port(fmt.Sprintf("%d/tcp", config.Config.Tap.Proxy.Hub.SrvPort)): {}},
}, hostConfigHub, nil, nil, nameHub)
if err != nil {
return
}
cmdWorker := []string{"-f", "./import", "-port", fmt.Sprintf("%d", config.Config.Tap.Proxy.Worker.DstPort)}
cmdWorker := []string{"-f", "./import", "-port", fmt.Sprintf("%d", config.Config.Tap.Proxy.Worker.SrvPort)}
if config.DebugMode {
cmdWorker = append(cmdWorker, fmt.Sprintf("-%s", config.DebugFlag))
}
@@ -328,7 +329,7 @@ func pcap(tarPath string) {
},
}
connector = connect.NewConnector(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort), connect.DefaultRetries, connect.DefaultTimeout)
connector = connect.NewConnector(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.Port), connect.DefaultRetries, connect.DefaultTimeout)
connector.PostWorkerPodToHub(workerPod)
// License
@@ -337,10 +338,10 @@ func pcap(tarPath string) {
}
log.Info().
Str("url", kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort)).
Str("url", kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.Port)).
Msg(fmt.Sprintf(utils.Green, "Hub is available at:"))
url := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Front.SrcPort)
url := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Front.Port)
log.Info().Str("url", url).Msg(fmt.Sprintf(utils.Green, fmt.Sprintf("%s is available at:", misc.Software)))
if !config.Config.HeadlessMode {

View File

@@ -60,7 +60,7 @@ func tap() {
Str("limit", config.Config.Tap.StorageLimit).
Msg(fmt.Sprintf("%s will store the traffic up to a limit (per node). Oldest TCP/UDP streams will be removed once the limit is reached.", misc.Software))
connector = connect.NewConnector(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort), connect.DefaultRetries, connect.DefaultTimeout)
connector = connect.NewConnector(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.Port), connect.DefaultRetries, connect.DefaultTimeout)
kubernetesProvider, err := getKubernetesProviderForCli(false, false)
if err != nil {
@@ -113,7 +113,10 @@ func tap() {
// block until exit signal or error
utils.WaitForTermination(ctx, cancel)
printProxyCommandSuggestion()
if !config.Config.Tap.Ingress.Enabled {
printProxyCommandSuggestion()
}
}
func printProxyCommandSuggestion() {
@@ -315,7 +318,6 @@ func watchFrontPod(ctx context.Context, kubernetesProvider *kubernetes.Provider,
Str("namespace", config.Config.Tap.SelfNamespace).
Err(err).
Msg("Failed creating pod.")
cancel()
case <-timeAfter:
if !isPodReady {
@@ -409,8 +411,8 @@ func postHubStarted(ctx context.Context, kubernetesProvider *kubernetes.Provider
kubernetes.HubServiceName,
kubernetes.HubPodName,
configStructs.ProxyHubPortLabel,
config.Config.Tap.Proxy.Hub.SrcPort,
config.Config.Tap.Proxy.Hub.DstPort,
config.Config.Tap.Proxy.Hub.Port,
configStructs.ContainerPort,
"/echo",
)
@@ -458,9 +460,9 @@ func postHubStarted(ctx context.Context, kubernetesProvider *kubernetes.Provider
connector.PostScriptDone()
}
if !update {
if !update && !config.Config.Tap.Ingress.Enabled {
// Hub proxy URL
url := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort)
url := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.Port)
log.Info().Str("url", url).Msg(fmt.Sprintf(utils.Green, "Hub is available at:"))
}
@@ -476,12 +478,17 @@ func postFrontStarted(ctx context.Context, kubernetesProvider *kubernetes.Provid
kubernetes.FrontServiceName,
kubernetes.FrontPodName,
configStructs.ProxyFrontPortLabel,
config.Config.Tap.Proxy.Front.SrcPort,
config.Config.Tap.Proxy.Front.DstPort,
config.Config.Tap.Proxy.Front.Port,
configStructs.ContainerPort,
"",
)
url := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Front.SrcPort)
var url string
if config.Config.Tap.Ingress.Enabled {
url = fmt.Sprintf("http://%s", config.Config.Tap.Ingress.Host)
} else {
url = kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Front.Port)
}
log.Info().Str("url", url).Msg(fmt.Sprintf(utils.Green, fmt.Sprintf("%s is available at:", misc.Software)))
if !config.Config.HeadlessMode {

View File

@@ -5,6 +5,7 @@ import (
"regexp"
v1 "k8s.io/api/core/v1"
networking "k8s.io/api/networking/v1"
)
const (
@@ -17,6 +18,7 @@ const (
ProxyHostLabel = "proxy-host"
NamespacesLabel = "namespaces"
SelfNamespaceLabel = "selfnamespace"
PersistentStorageLabel = "persistentstorage"
StorageLimitLabel = "storagelimit"
StorageClassLabel = "storageclass"
DryRunLabel = "dryrun"
@@ -24,7 +26,10 @@ const (
ServiceMeshLabel = "servicemesh"
TlsLabel = "tls"
IgnoreTaintedLabel = "ignoreTainted"
IngressEnabledLabel = "ingress-enabled"
DebugLabel = "debug"
ContainerPort = 80
ContainerPortStr = "80"
)
type ResourceLimits struct {
@@ -43,18 +48,17 @@ type ResourceRequirements struct {
}
type WorkerConfig struct {
SrcPort uint16 `yaml:"port" default:"8897"`
DstPort uint16 `yaml:"srvport" default:"8897"`
SrvPort uint16 `yaml:"srvport" default:"8897"`
}
type HubConfig struct {
SrcPort uint16 `yaml:"port" default:"8898"`
DstPort uint16 `yaml:"srvport" default:"80"`
Port uint16 `yaml:"port" default:"8898"`
SrvPort uint16 `yaml:"srvport" default:"8898"`
}
type FrontConfig struct {
SrcPort uint16 `yaml:"port" default:"8899"`
DstPort uint16 `yaml:"srvport" default:"80"`
Port uint16 `yaml:"port" default:"8899"`
SrvPort uint16 `yaml:"srvport" default:"8899"`
}
type ProxyConfig struct {
@@ -76,12 +80,25 @@ type ResourcesConfig struct {
Hub ResourceRequirements `yaml:"hub"`
}
type AuthConfig struct {
ApprovedDomains []string `yaml:"approvedDomains"`
}
type IngressConfig struct {
Enabled bool `yaml:"enabled" default:"false"`
Host string `yaml:"host" default:"ks.svc.cluster.local"`
TLS []networking.IngressTLS `yaml:"tls"`
Auth AuthConfig `yaml:"auth"`
CertManager string `yaml:"certManager" default:"letsencrypt-prod"`
}
type TapConfig struct {
Docker DockerConfig `yaml:"docker"`
Proxy ProxyConfig `yaml:"proxy"`
PodRegexStr string `yaml:"regex" default:".*"`
Namespaces []string `yaml:"namespaces"`
SelfNamespace string `yaml:"selfnamespace" default:"kubeshark"`
PersistentStorage bool `yaml:"persistentstorage" default:"false"`
StorageLimit string `yaml:"storagelimit" default:"200Mi"`
StorageClass string `yaml:"storageclass" default:"standard"`
DryRun bool `yaml:"dryrun" default:"false"`
@@ -93,6 +110,7 @@ type TapConfig struct {
IgnoreTainted bool `yaml:"ignoreTainted" default:"false"`
ResourceLabels map[string]string `yaml:"resourceLabels" default:"{}"`
NodeSelectorTerms []v1.NodeSelectorTerm `yaml:"nodeSelectorTerms" default:"[]"`
Ingress IngressConfig `yaml:"ingress"`
Debug bool `yaml:"debug" default:"false"`
}

View File

@@ -1,5 +1,5 @@
apiVersion: v2
appVersion: "40.0"
appVersion: "40.4"
description: The API Traffic Analyzer for Kubernetes
home: https://kubeshark.co
keywords:
@@ -22,4 +22,4 @@ name: kubeshark
sources:
- https://github.com/kubeshark/kubeshark/tree/master/helm-chart
type: application
version: "40.0"
version: "40.4"

View File

@@ -51,3 +51,18 @@ kubectl port-forward -n kubeshark service/kubeshark-front 8899:80
```
Visit [localhost:8899](http://localhost:8899)
## Installing with Ingress Enabled
```shell
helm install kubeshark kubeshark/kubeshark \
--set tap.ingress.enabled=true \
--set tap.ingress.host=ks.svc.cluster.local \
--set "tap.ingress.auth.approvedDomains={gmail.com}"
```
## Installing with Persistent Storage Enabled
```shell
helm install kubeshark kubeshark/kubeshark --set tap.persistentstorage=true
```
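After an ingress-enabled install like the one above, a quick sanity check (a sketch using plain kubectl; resource names and namespace come from the manifests in this diff):

```shell
# The chart creates these when tap.ingress.enabled=true.
kubectl get ingressclass kubeshark-ingress-class
kubectl get ingress kubeshark-ingress -n kubeshark

# The front-end is then served on the ingress host instead of a port-forward.
curl -I http://ks.svc.cluster.local/
```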

View File

@@ -15,11 +15,13 @@ rules:
- ""
- extensions
- apps
- networking.k8s.io
resources:
- pods
- services
- endpoints
- persistentvolumeclaims
- ingresses
verbs:
- list
- get

View File

@@ -25,9 +25,14 @@ spec:
value: '{}'
- name: SCRIPTING_SCRIPTS
value: '[]'
- name: AUTH_APPROVED_DOMAINS
value: '{{ gt (len .Values.tap.ingress.auth.approvedDomains) 0 | ternary (join "," .Values.tap.ingress.auth.approvedDomains) "" }}'
image: '{{ .Values.tap.docker.registry }}/hub:{{ .Values.tap.docker.tag }}'
imagePullPolicy: '{{ .Values.tap.docker.imagePullPolicy }}'
imagePullPolicy: '{{ .Values.tap.docker.imagepullpolicy }}'
name: kubeshark-hub
ports:
- containerPort: 80
hostPort: {{ .Values.tap.proxy.hub.srvport }}
resources:
limits:
cpu: '{{ .Values.tap.resources.hub.limits.cpu }}'

View File

@@ -16,6 +16,6 @@ spec:
targetPort: 80
selector:
app: kubeshark-hub
type: ClusterIP
type: NodePort
status:
loadBalancer: {}

View File

@@ -20,8 +20,11 @@ spec:
- name: REACT_APP_HUB_PORT
value: "8898"
image: '{{ .Values.tap.docker.registry }}/front:{{ .Values.tap.docker.tag }}'
imagePullPolicy: '{{ .Values.tap.docker.imagePullPolicy }}'
imagePullPolicy: '{{ .Values.tap.docker.imagepullpolicy }}'
name: kubeshark-front
ports:
- containerPort: 80
hostPort: {{ .Values.tap.proxy.front.srvport }}
readinessProbe:
failureThreshold: 3
periodSeconds: 1

View File

@@ -16,6 +16,6 @@ spec:
targetPort: 80
selector:
app: kubeshark-front
type: ClusterIP
type: NodePort
status:
loadBalancer: {}

View File

@@ -1,5 +1,6 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
{{- if .Values.tap.persistentstorage }}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
@@ -18,3 +19,4 @@ spec:
storage: '{{ .Values.tap.storagelimit }}'
storageClassName: '{{ .Values.tap.storageclass }}'
status: {}
{{- end }}

View File

@@ -32,7 +32,7 @@ spec:
- -i
- any
- -port
- "8897"
- '{{ .Values.tap.proxy.worker.srvport }}'
- -packet-capture
- '{{ .Values.tap.packetcapture }}'
- -servicemesh
@@ -40,8 +40,11 @@ spec:
- -procfs
- /hostproc
image: '{{ .Values.tap.docker.registry }}/worker:{{ .Values.tap.docker.tag }}'
imagePullPolicy: '{{ .Values.tap.docker.imagePullPolicy }}'
imagePullPolicy: '{{ .Values.tap.docker.imagepullpolicy }}'
name: kubeshark-worker-daemon-set
ports:
- containerPort: {{ .Values.tap.proxy.worker.srvport }}
hostPort: {{ .Values.tap.proxy.worker.srvport }}
resources:
limits:
cpu: '{{ .Values.tap.resources.worker.limits.cpu }}'
@@ -67,8 +70,10 @@ spec:
- mountPath: /sys
name: sys
readOnly: true
{{- if .Values.tap.persistentstorage }}
- mountPath: /app/data
name: kubeshark-persistent-volume
{{- end }}
dnsPolicy: ClusterFirstWithHostNet
hostNetwork: true
serviceAccountName: kubeshark-service-account
@@ -85,6 +90,8 @@ spec:
- hostPath:
path: /sys
name: sys
{{- if .Values.tap.persistentstorage }}
- name: kubeshark-persistent-volume
persistentVolumeClaim:
claimName: kubeshark-persistent-volume-claim
{{- end }}

View File

@@ -0,0 +1,16 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
{{- if .Values.tap.ingress.enabled }}
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
creationTimestamp: null
labels:
kubeshark-cli-version: v1
kubeshark-created-by: kubeshark
kubeshark-managed-by: kubeshark
name: kubeshark-ingress-class
namespace: '{{ .Values.tap.selfnamespace }}'
spec:
controller: k8s.io/ingress-nginx
{{- end }}

View File

@@ -0,0 +1,40 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
{{- if .Values.tap.ingress.enabled }}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
annotations:
certmanager.k8s.io/cluster-issuer: '{{ .Values.tap.ingress.certManager }}'
nginx.ingress.kubernetes.io/rewrite-target: /$2
creationTimestamp: null
labels:
kubeshark-cli-version: v1
kubeshark-created-by: kubeshark
kubeshark-managed-by: kubeshark
name: kubeshark-ingress
namespace: '{{ .Values.tap.selfnamespace }}'
spec:
ingressClassName: kubeshark-ingress-class
rules:
- host: '{{ .Values.tap.ingress.host }}'
http:
paths:
- backend:
service:
name: kubeshark-hub
port:
number: 80
path: /api(/|$)(.*)
pathType: Prefix
- backend:
service:
name: kubeshark-front
port:
number: 80
path: /()(.*)
pathType: Prefix
tls: {{ .Values.tap.ingress.tls | toYaml }}
status:
loadBalancer: {}
{{- end }}
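To preview how this renders without installing, `helm template` with the relevant values works; a sketch, assuming the generated chart is checked out locally at ./helm-chart:

```shell
# Render only the ingress template with a host and approved domains set.
helm template kubeshark ./helm-chart \
  --set tap.ingress.enabled=true \
  --set tap.ingress.host=ks.svc.cluster.local \
  --set "tap.ingress.auth.approvedDomains={gmail.com}" \
  --show-only templates/11-ingress.yaml

# The same values drive the AUTH_APPROVED_DOMAINS ternary in the hub pod:
helm template kubeshark ./helm-chart \
  --set "tap.ingress.auth.approvedDomains={gmail.com}" \
  --show-only templates/04-hub-pod.yaml
```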

View File

@@ -6,18 +6,18 @@ tap:
imagepullsecrets: []
proxy:
worker:
port: 8897
srvport: 8897
hub:
port: 8898
srvport: 80
srvport: 8898
front:
port: 8899
srvport: 80
srvport: 8899
host: 127.0.0.1
regex: .*
namespaces: []
selfnamespace: kubeshark
persistentstorage: false
storagelimit: 200Mi
storageclass: standard
dryrun: false
@@ -43,6 +43,13 @@ tap:
ignoreTainted: false
resourceLabels: {}
nodeSelectorTerms: []
ingress:
enabled: false
host: ks.svc.cluster.local
tls: []
auth:
approvedDomains: []
certManager: letsencrypt-prod
debug: false
logs:
file: ""

View File

@@ -151,6 +151,26 @@ func (connector *Connector) PostLicense(license string) {
}
}
func (connector *Connector) PostLicenseSingle(license string) {
postLicenseUrl := fmt.Sprintf("%s/license", connector.url)
payload := postLicenseRequest{
License: license,
}
if payloadMarshalled, err := json.Marshal(payload); err != nil {
log.Error().Err(err).Msg("Failed to marshal the payload:")
} else {
var resp *http.Response
if resp, err = utils.Post(postLicenseUrl, "application/json", bytes.NewBuffer(payloadMarshalled), connector.client); err != nil || resp.StatusCode != http.StatusOK {
log.Warn().Err(err).Msg("Failed sending the license to Hub.")
} else {
log.Debug().Str("license", license).Msg("Reported license to Hub:")
return
}
}
}
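This single-shot variant is what `updateLicense` calls before the retrying `PostLicense` goroutine; a sketch of triggering the flow end to end, assuming the command registered by `proCmd` is `kubeshark pro`:

```shell
# Opens PRO_URL in the browser, receives the key on a local server, writes it
# to the config, and posts it to the Hub once via PostLicenseSingle.
kubeshark pro
```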
func (connector *Connector) PostEnv(env map[string]interface{}) {
if len(env) == 0 {
return

View File

@@ -16,6 +16,8 @@ const (
WorkerPodName = SelfResourcesPrefix + "worker"
PersistentVolumeName = SelfResourcesPrefix + "persistent-volume"
PersistentVolumeClaimName = SelfResourcesPrefix + "persistent-volume-claim"
IngressName = SelfResourcesPrefix + "ingress"
IngressClassName = SelfResourcesPrefix + "ingress-class"
PersistentVolumeHostPath = "/app/data"
MinKubernetesServerVersion = "1.16.0"
)

View File

@@ -20,6 +20,7 @@ import (
"github.com/rs/zerolog/log"
auth "k8s.io/api/authorization/v1"
core "k8s.io/api/core/v1"
networking "k8s.io/api/networking/v1"
rbac "k8s.io/api/rbac/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
@@ -225,8 +226,12 @@ func (provider *Provider) BuildHubPod(opts *PodOptions) (*core.Pod, error) {
containers := []core.Container{
{
Name: opts.PodName,
Image: opts.PodImage,
Name: opts.PodName,
Image: opts.PodImage,
Ports: []core.ContainerPort{{
HostPort: int32(config.Config.Tap.Proxy.Hub.SrvPort),
ContainerPort: configStructs.ContainerPort,
}},
ImagePullPolicy: opts.ImagePullPolicy,
Command: command,
Resources: core.ResourceRequirements{
@@ -250,7 +255,7 @@ func (provider *Provider) BuildHubPod(opts *PodOptions) (*core.Pod, error) {
},
{
Name: "LICENSE",
Value: "",
Value: config.Config.License,
},
{
Name: "SCRIPTING_ENV",
@@ -260,6 +265,10 @@ func (provider *Provider) BuildHubPod(opts *PodOptions) (*core.Pod, error) {
Name: "SCRIPTING_SCRIPTS",
Value: string(scriptsMarshalled),
},
{
Name: "AUTH_APPROVED_DOMAINS",
Value: strings.Join(config.Config.Tap.Ingress.Auth.ApprovedDomains, ","),
},
},
},
}
@@ -320,17 +329,25 @@ func (provider *Provider) BuildFrontPod(opts *PodOptions, hubHost string, hubPor
volumeMounts := []core.VolumeMount{}
volumes := []core.Volume{}
if config.Config.Tap.Ingress.Enabled {
hubPort = "80/api"
}
containers := []core.Container{
{
Name: opts.PodName,
Image: docker.GetFrontImage(),
Name: opts.PodName,
Image: docker.GetFrontImage(),
Ports: []core.ContainerPort{{
HostPort: int32(config.Config.Tap.Proxy.Front.SrvPort),
ContainerPort: configStructs.ContainerPort,
}},
ImagePullPolicy: opts.ImagePullPolicy,
VolumeMounts: volumeMounts,
ReadinessProbe: &core.Probe{
FailureThreshold: 3,
ProbeHandler: core.ProbeHandler{
TCPSocket: &core.TCPSocketAction{
Port: intstr.Parse("80"),
Port: intstr.Parse(configStructs.ContainerPortStr),
},
},
PeriodSeconds: 1,
@@ -419,11 +436,11 @@ func (provider *Provider) BuildHubService(namespace string) *core.Service {
Ports: []core.ServicePort{
{
Name: HubServiceName,
TargetPort: intstr.FromInt(80),
Port: 80,
TargetPort: intstr.FromInt(configStructs.ContainerPort),
Port: configStructs.ContainerPort,
},
},
Type: core.ServiceTypeClusterIP,
Type: core.ServiceTypeNodePort,
Selector: map[string]string{"app": HubServiceName},
},
}
@@ -444,16 +461,24 @@ func (provider *Provider) BuildFrontService(namespace string) *core.Service {
Ports: []core.ServicePort{
{
Name: FrontServiceName,
TargetPort: intstr.FromInt(80),
Port: 80,
TargetPort: intstr.FromInt(configStructs.ContainerPort),
Port: configStructs.ContainerPort,
},
},
Type: core.ServiceTypeClusterIP,
Type: core.ServiceTypeNodePort,
Selector: map[string]string{"app": FrontServiceName},
},
}
}
func (provider *Provider) CreateIngressClass(ctx context.Context, ingressClass *networking.IngressClass) (*networking.IngressClass, error) {
return provider.clientSet.NetworkingV1().IngressClasses().Create(ctx, ingressClass, metav1.CreateOptions{})
}
func (provider *Provider) CreateIngress(ctx context.Context, namespace string, ingress *networking.Ingress) (*networking.Ingress, error) {
return provider.clientSet.NetworkingV1().Ingresses(namespace).Create(ctx, ingress, metav1.CreateOptions{})
}
func (provider *Provider) CreateService(ctx context.Context, namespace string, service *core.Service) (*core.Service, error) {
return provider.clientSet.CoreV1().Services(namespace).Create(ctx, service, metav1.CreateOptions{})
}
@@ -526,6 +551,87 @@ func (provider *Provider) doesResourceExist(resource interface{}, err error) (bo
return resource != nil, nil
}
func (provider *Provider) BuildIngressClass() *networking.IngressClass {
return &networking.IngressClass{
TypeMeta: metav1.TypeMeta{
Kind: "IngressClass",
APIVersion: "networking.k8s.io/v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: IngressClassName,
Namespace: config.Config.Tap.SelfNamespace,
Labels: buildWithDefaultLabels(map[string]string{
fmt.Sprintf("%s-cli-version", misc.Program): misc.RBACVersion,
}, provider),
},
Spec: networking.IngressClassSpec{
Controller: "k8s.io/ingress-nginx",
},
}
}
func (provider *Provider) BuildIngress() *networking.Ingress {
pathTypePrefix := networking.PathTypePrefix
ingressClassName := IngressClassName
return &networking.Ingress{
TypeMeta: metav1.TypeMeta{
Kind: "Ingress",
APIVersion: "networking.k8s.io/v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: IngressName,
Namespace: config.Config.Tap.SelfNamespace,
Labels: buildWithDefaultLabels(map[string]string{
fmt.Sprintf("%s-cli-version", misc.Program): misc.RBACVersion,
}, provider),
Annotations: map[string]string{
"nginx.ingress.kubernetes.io/rewrite-target": "/$2",
"certmanager.k8s.io/cluster-issuer": config.Config.Tap.Ingress.CertManager,
},
},
Spec: networking.IngressSpec{
IngressClassName: &ingressClassName,
TLS: config.Config.Tap.Ingress.TLS,
Rules: []networking.IngressRule{
{
Host: config.Config.Tap.Ingress.Host,
IngressRuleValue: networking.IngressRuleValue{
HTTP: &networking.HTTPIngressRuleValue{
Paths: []networking.HTTPIngressPath{
{
Path: "/api(/|$)(.*)",
PathType: &pathTypePrefix,
Backend: networking.IngressBackend{
Service: &networking.IngressServiceBackend{
Name: HubServiceName,
Port: networking.ServiceBackendPort{
Number: configStructs.ContainerPort,
},
},
},
},
{
Path: "/()(.*)",
PathType: &pathTypePrefix,
Backend: networking.IngressBackend{
Service: &networking.IngressServiceBackend{
Name: FrontServiceName,
Port: networking.ServiceBackendPort{
Number: configStructs.ContainerPort,
},
},
},
},
},
},
},
},
},
},
}
}
func (provider *Provider) BuildServiceAccount() *core.ServiceAccount {
return &core.ServiceAccount{
TypeMeta: metav1.TypeMeta{
@@ -561,12 +667,14 @@ func (provider *Provider) BuildClusterRole() *rbac.ClusterRole {
"",
"extensions",
"apps",
"networking.k8s.io",
},
Resources: []string{
"pods",
"services",
"endpoints",
"persistentvolumeclaims",
"ingresses",
},
Verbs: []string{
"list",
@@ -626,6 +734,11 @@ func (provider *Provider) CreateSelfRBAC(ctx context.Context, namespace string)
return nil
}
func (provider *Provider) RemoveIngressClass(ctx context.Context, name string) error {
err := provider.clientSet.NetworkingV1().IngressClasses().Delete(ctx, name, metav1.DeleteOptions{})
return provider.handleRemovalError(err)
}
func (provider *Provider) RemoveNamespace(ctx context.Context, name string) error {
err := provider.clientSet.CoreV1().Namespaces().Delete(ctx, name, metav1.DeleteOptions{})
return provider.handleRemovalError(err)
@@ -758,7 +871,7 @@ func (provider *Provider) BuildWorkerDaemonSet(
"-i",
"any",
"-port",
"8897",
fmt.Sprintf("%d", config.Config.Tap.Proxy.Worker.SrvPort),
"-packet-capture",
config.Config.Tap.PacketCapture,
}
@@ -855,18 +968,27 @@ func (provider *Provider) BuildWorkerDaemonSet(
MountPath: PersistentVolumeHostPath,
}
// VolumeMount(s)
volumeMounts := []core.VolumeMount{
procfsVolumeMount,
sysfsVolumeMount,
}
if config.Config.Tap.PersistentStorage {
volumeMounts = append(volumeMounts, persistentVolumeMount)
}
// Containers
containers := []core.Container{
{
Name: podName,
Image: podImage,
Name: podName,
Image: podImage,
Ports: []core.ContainerPort{{
HostPort: int32(config.Config.Tap.Proxy.Worker.SrvPort),
ContainerPort: int32(config.Config.Tap.Proxy.Worker.SrvPort),
}},
ImagePullPolicy: imagePullPolicy,
VolumeMounts: []core.VolumeMount{
procfsVolumeMount,
sysfsVolumeMount,
persistentVolumeMount,
},
Command: command,
VolumeMounts: volumeMounts,
Command: command,
Resources: core.ResourceRequirements{
Limits: core.ResourceList{
"cpu": cpuLimit,
@@ -887,6 +1009,15 @@ func (provider *Provider) BuildWorkerDaemonSet(
},
}
// Volume(s)
volumes := []core.Volume{
procfsVolume,
sysfsVolume,
}
if config.Config.Tap.PersistentStorage {
volumes = append(volumes, persistentVolume)
}
// Pod
pod := DaemonSetPod{
ObjectMeta: metav1.ObjectMeta{
@@ -897,14 +1028,10 @@ func (provider *Provider) BuildWorkerDaemonSet(
}, provider),
},
Spec: core.PodSpec{
ServiceAccountName: ServiceAccountName,
HostNetwork: true,
Containers: containers,
Volumes: []core.Volume{
procfsVolume,
sysfsVolume,
persistentVolume,
},
ServiceAccountName: ServiceAccountName,
HostNetwork: true,
Containers: containers,
Volumes: volumes,
DNSPolicy: core.DNSClusterFirstWithHostNet,
TerminationGracePeriodSeconds: new(int64),
Tolerations: provider.BuildTolerations(),

View File

@@ -3,6 +3,7 @@ package kubernetes
import (
"context"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/config/configStructs"
"github.com/kubeshark/kubeshark/docker"
"github.com/rs/zerolog/log"
@@ -21,17 +22,19 @@ func CreateWorkers(
tls bool,
debug bool,
) error {
persistentVolumeClaim, err := kubernetesProvider.BuildPersistentVolumeClaim()
if err != nil {
return err
}
if config.Config.Tap.PersistentStorage {
persistentVolumeClaim, err := kubernetesProvider.BuildPersistentVolumeClaim()
if err != nil {
return err
}
if _, err = kubernetesProvider.CreatePersistentVolumeClaim(
ctx,
namespace,
persistentVolumeClaim,
); err != nil {
return err
if _, err = kubernetesProvider.CreatePersistentVolumeClaim(
ctx,
namespace,
persistentVolumeClaim,
); err != nil {
return err
}
}
image := docker.GetWorkerImage()

View File

@@ -15,11 +15,13 @@ rules:
- ""
- extensions
- apps
- networking.k8s.io
resources:
- pods
- services
- endpoints
- persistentvolumeclaims
- ingresses
verbs:
- list
- get

View File

@@ -23,9 +23,13 @@ spec:
value: '{}'
- name: SCRIPTING_SCRIPTS
value: '[]'
- name: AUTH_APPROVED_DOMAINS
image: docker.io/kubeshark/hub:latest
imagePullPolicy: Always
name: kubeshark-hub
ports:
- containerPort: 80
hostPort: 8898
resources:
limits:
cpu: 750m

View File

@@ -16,6 +16,6 @@ spec:
targetPort: 80
selector:
app: kubeshark-hub
type: ClusterIP
type: NodePort
status:
loadBalancer: {}

View File

@@ -22,6 +22,9 @@ spec:
image: docker.io/kubeshark/front:latest
imagePullPolicy: Always
name: kubeshark-front
ports:
- containerPort: 80
hostPort: 8899
readinessProbe:
failureThreshold: 3
periodSeconds: 1

View File

@@ -16,6 +16,6 @@ spec:
targetPort: 80
selector:
app: kubeshark-front
type: ClusterIP
type: NodePort
status:
loadBalancer: {}

View File

@@ -42,6 +42,9 @@ spec:
image: docker.io/kubeshark/worker:latest
imagePullPolicy: Always
name: kubeshark-worker-daemon-set
ports:
- containerPort: 8897
hostPort: 8897
resources:
limits:
cpu: 750m

View File

@@ -0,0 +1,14 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
creationTimestamp: null
labels:
kubeshark-cli-version: v1
kubeshark-created-by: kubeshark
kubeshark-managed-by: kubeshark
name: kubeshark-ingress-class
namespace: kubeshark
spec:
controller: k8s.io/ingress-nginx

manifests/11-ingress.yaml Normal file
View File

@@ -0,0 +1,37 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
annotations:
certmanager.k8s.io/cluster-issuer: letsencrypt-prod
nginx.ingress.kubernetes.io/rewrite-target: /$2
creationTimestamp: null
labels:
kubeshark-cli-version: v1
kubeshark-created-by: kubeshark
kubeshark-managed-by: kubeshark
name: kubeshark-ingress
namespace: kubeshark
spec:
ingressClassName: kubeshark-ingress-class
rules:
- host: ks.svc.cluster.local
http:
paths:
- backend:
service:
name: kubeshark-hub
port:
number: 80
path: /api(/|$)(.*)
pathType: Prefix
- backend:
service:
name: kubeshark-front
port:
number: 80
path: /()(.*)
pathType: Prefix
status:
loadBalancer: {}

View File

@@ -0,0 +1,12 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: kubeshark-tls
namespace: default
spec:
issuerRef:
name: letsencrypt-prod
kind: ClusterIssuer
secretName: cert-kubeshark
dnsNames:
- ks.svc.cluster.local

View File

@@ -0,0 +1,14 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-prod
spec:
acme:
server: https://acme-v02.api.letsencrypt.org/directory
email: info@kubeshark.com
privateKeySecretRef:
name: letsencrypt-prod-key
solvers:
- http01:
ingress:
class: kubeshark-ingress-class

manifests/tls/run.sh Executable file
View File

@@ -0,0 +1,15 @@
#!/bin/bash
__dir="$(cd -P -- "$(dirname -- "$0")" && pwd -P)"
helm repo add jetstack https://charts.jetstack.io
helm repo update
kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.9.1/cert-manager.crds.yaml
helm install \
cert-manager jetstack/cert-manager \
--namespace cert-manager \
--create-namespace \
--version v1.9.1
kubectl apply -f ${__dir}/cluster-issuer.yaml
kubectl apply -f ${__dir}/certificate.yaml
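The script is a one-time, per-cluster setup before enabling TLS on the ingress; a usage sketch with the paths from this diff:

```shell
# Installs cert-manager v1.9.1, then applies the ClusterIssuer and Certificate.
./manifests/tls/run.sh

# The issued secret (cert-kubeshark, per certificate.yaml above) can then be
# referenced from tap.ingress.tls.
kubectl get certificate kubeshark-tls -n default
```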

View File

@@ -35,6 +35,11 @@ func CleanUpSelfResources(ctx context.Context, cancel context.CancelFunc, kubern
func cleanUpNonRestrictedMode(ctx context.Context, cancel context.CancelFunc, kubernetesProvider *kubernetes.Provider, selfResourcesNamespace string) []string {
leftoverResources := make([]string, 0)
if err := kubernetesProvider.RemoveIngressClass(ctx, kubernetes.IngressClassName); err != nil {
resourceDesc := kubernetes.IngressClassName
handleDeletionError(err, resourceDesc, &leftoverResources)
}
if err := kubernetesProvider.RemoveNamespace(ctx, selfResourcesNamespace); err != nil {
resourceDesc := fmt.Sprintf("Namespace %s", selfResourcesNamespace)
handleDeletionError(err, resourceDesc, &leftoverResources)

View File

@@ -58,21 +58,32 @@ func CreateHubResources(ctx context.Context, kubernetesProvider *kubernetes.Prov
return selfServiceAccountExists, err
}
// TODO: Why the port values need to be 80?
_, err = kubernetesProvider.CreateService(ctx, selfNamespace, kubernetesProvider.BuildHubService(selfNamespace))
if err != nil {
return selfServiceAccountExists, err
}
log.Info().Str("service", kubernetes.HubServiceName).Msg("Successfully created a service.")
_, err = kubernetesProvider.CreateService(ctx, selfNamespace, kubernetesProvider.BuildFrontService(selfNamespace))
if err != nil {
return selfServiceAccountExists, err
}
log.Info().Str("service", kubernetes.FrontServiceName).Msg("Successfully created a service.")
if config.Config.Tap.Ingress.Enabled {
_, err = kubernetesProvider.CreateIngressClass(ctx, kubernetesProvider.BuildIngressClass())
if err != nil {
return selfServiceAccountExists, err
}
log.Info().Str("ingress-class", kubernetes.IngressClassName).Msg("Successfully created an ingress class.")
_, err = kubernetesProvider.CreateIngress(ctx, selfNamespace, kubernetesProvider.BuildIngress())
if err != nil {
return selfServiceAccountExists, err
}
log.Info().Str("ingress", kubernetes.IngressName).Msg("Successfully created an ingress.")
}
return selfServiceAccountExists, nil
}
@@ -94,7 +105,7 @@ func createSelfHubPod(ctx context.Context, kubernetesProvider *kubernetes.Provid
}
func createFrontPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, opts *kubernetes.PodOptions) error {
pod, err := kubernetesProvider.BuildFrontPod(opts, config.Config.Tap.Proxy.Host, fmt.Sprintf("%d", config.Config.Tap.Proxy.Hub.SrcPort))
pod, err := kubernetesProvider.BuildFrontPod(opts, config.Config.Tap.Proxy.Host, fmt.Sprintf("%d", config.Config.Tap.Proxy.Hub.Port))
if err != nil {
return err
}