Compare commits

...

41 Commits
40.0 ... 41.1

Author SHA1 Message Date
M. Mert Yildiran
bada6dae68 🐛 Fix the `<len .Values.tap.namespaces>: error calling len: len of nil pointer` Helm install error 2023-06-20 22:14:06 +03:00
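The error quoted in this commit title is Go's template engine (which Helm uses) refusing to call `len` on a nil value. Below is a minimal Go sketch of that failure mode and a guarded alternative, assuming a nil slice pointer stands in for an unset `tap.namespaces`; the actual chart fix is not shown in this comparison.

```go
package main

import (
	"fmt"
	"os"
	"text/template"
)

// Values mimics an unset tap.namespaces: a nil pointer triggers the same
// "len of nil pointer" error that Helm reports during install.
type Values struct {
	Namespaces *[]string
}

func main() {
	// Fails with: ... error calling len: len of nil pointer
	bad := template.Must(template.New("bad").Parse(`{{ len .Namespaces }}`))
	if err := bad.Execute(os.Stdout, Values{}); err != nil {
		fmt.Println(err)
	}

	// Checking truthiness first never calls len on the nil value.
	good := template.Must(template.New("good").Parse(
		`{{ if .Namespaces }}{{ len .Namespaces }}{{ else }}0{{ end }}`))
	_ = good.Execute(os.Stdout, Values{}) // prints: 0
}
```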
M. Mert Yildiran
8814e08871 ⬆️ Bump the Helm chart version 2023-06-19 23:59:38 +03:00
M. Mert Yildiran
6b7a94a850 Revert values.yaml 2023-06-19 02:40:07 +03:00
M. Mert Yildiran
7b004e7a1f Change GetLocalhostOnPort method to GetProxyOnPort 2023-06-19 02:19:52 +03:00
M. Mert Yildiran
836b87d517 Template the SCRIPTING_ENV env in Hub pod (Helm) 2023-06-19 01:46:51 +03:00
M. Mert Yildiran
646da4810d Allow license key holders to bypass the auth 2023-06-19 01:44:01 +03:00
Alon Girmonsky
a6d349a8fa Update README.md
Changed the announcement part
2023-06-13 10:11:44 -07:00
Alon Girmonsky
9d58c662a8 Update README.md
Announcing Self-hosted Kubeshark
2023-06-13 10:01:28 -07:00
M. Mert Yildiran
e4a09be4e2 Change the PRO_URL constant 2023-06-07 01:09:30 +03:00
Alon Girmonsky
7208ed85d3 Update README.md
Adding a way to get the license where relevant.
2023-06-06 17:43:35 +03:00
M. Mert Yildiran
7a5bf83336 Use the Helm chart in tap command to install Kubeshark (#1362)
*  Use the Helm chart in `tap` command to install Kubeshark

* ⬆️ Set Go version to `1.19` in `go.mod` file

*  Add `Helm` struct, `NewHelm` and `NewHelmDefault` methods

*  Better logging and error return

*  Pass the config as `values.yaml` to Helm install

* 🔥 Remove `helm-chart`, `manifests` and `check` commands

*  Run `go mod tidy`

* 🎨 Move `helm` package into `kubernetes` package

* 🔥 Remove `# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!` notice from the manifests and Helm templates

* 🔥 Remove the unused `GenerateApplyConfiguration` and `buildWithDefaultLabels` methods
2023-06-06 12:16:03 +03:00
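Neither the new `Helm` struct nor `NewHelmDefault` appears in this comparison (the later hunks only call `helm.NewHelmDefault().Install()` and `.Uninstall()`), so the following is only a hedged sketch of the install flow such a wrapper typically drives through the Helm v3 Go SDK; the chart path, release name, namespace and values are illustrative assumptions, not the actual implementation.

```go
package main

import (
	"log"
	"os"

	"helm.sh/helm/v3/pkg/action"
	"helm.sh/helm/v3/pkg/chart/loader"
	"helm.sh/helm/v3/pkg/cli"
)

func main() {
	settings := cli.New()

	// Point the Helm action configuration at the current kubeconfig context.
	cfg := new(action.Configuration)
	if err := cfg.Init(settings.RESTClientGetter(), "kubeshark", os.Getenv("HELM_DRIVER"), log.Printf); err != nil {
		log.Fatal(err)
	}

	// Load the chart from disk (path assumed for illustration).
	chart, err := loader.Load("./helm-chart")
	if err != nil {
		log.Fatal(err)
	}

	install := action.NewInstall(cfg)
	install.ReleaseName = "kubeshark"
	install.Namespace = "kubeshark"
	install.CreateNamespace = true

	// The values map plays the role of the CLI config rendered as values.yaml.
	rel, err := install.Run(chart, map[string]interface{}{
		"tap": map[string]interface{}{"debug": false},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("Installed the Helm release: %s", rel.Name)
}
```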
Alon Girmonsky
87b8a067c9 changed com to co 2023-05-28 20:54:57 -07:00
M. Mert Yildiran
3fe765e072 ⬆️ Bump the Helm chart version 2023-05-26 00:07:46 +03:00
M. Mert Yildiran
a163f9cc0e Change the new release warning 2023-05-25 20:42:57 +03:00
M. Mert Yildiran
2edb987c07 Template REACT_APP_HUB_PORT in the Helm chart 2023-05-25 20:24:29 +03:00
M. Mert Yildiran
c0d7d0fe80 Update Helm README.md 2023-05-25 05:46:10 +03:00
M. Mert Yildiran
be5bd6a372 Template the AUTH_APPROVED_DOMAINS and certmanager.k8s.io/cluster-issuer
Also add `networking.k8s.io` to `apiGroups` in `ClusterRole`
2023-05-25 05:07:42 +03:00
M. Mert Yildiran
42df7aa42f Update the Certificate resource name 2023-05-24 06:32:48 +03:00
M. Mert Yildiran
9a9052198f ⬆️ Bump the Helm chart version 2023-05-24 05:44:18 +03:00
M. Mert Yildiran
2fb83c3642 Fix the Bash script 2023-05-24 04:18:27 +03:00
M. Mert Yildiran
d44674fe86 Update the secret name of certificate 2023-05-24 04:10:46 +03:00
M. Mert Yildiran
c57ed1efd3 Run kubeshark manifests --dump && kubeshark helm-chart 2023-05-24 04:04:34 +03:00
M. Mert Yildiran
c19cd00c77 Add CertManager field to IngressConfig and add an Ingress TLS example 2023-05-24 04:01:45 +03:00
M. Mert Yildiran
39f8d40b76 Revert " Add Refresh-Token to the list of Access-Control-Allow-Headers"
This reverts commit bf731073c8.
2023-05-24 02:10:48 +03:00
M. Mert Yildiran
bf731073c8 Add Refresh-Token to the list of Access-Control-Allow-Headers 2023-05-24 02:04:56 +03:00
M. Mert Yildiran
4bb68afaaf Add AuthConfig struct and pass domains in AUTH_APPROVED_DOMAINS environment variable 2023-05-24 01:50:59 +03:00
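The `AuthConfig` struct itself is not included in this comparison, so the following is a hypothetical sketch of the shape the commit message implies; the field name, tags and helper are assumptions.

```go
package configStructs

import (
	"strings"

	corev1 "k8s.io/api/core/v1"
)

// Hypothetical AuthConfig: only an "approved domains" list is implied by the
// commit message; tags follow the yaml+json convention used in this comparison.
type AuthConfig struct {
	ApprovedDomains []string `yaml:"approveddomains" json:"approveddomains"`
}

// authEnv (illustrative name) shows how the domains could be handed to the
// Hub pod as the AUTH_APPROVED_DOMAINS environment variable.
func authEnv(auth AuthConfig) corev1.EnvVar {
	return corev1.EnvVar{
		Name:  "AUTH_APPROVED_DOMAINS",
		Value: strings.Join(auth.ApprovedDomains, ","),
	}
}
```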
M. Mert Yildiran
2126fc83a7 🐛 Remove the cancel() call 2023-05-22 19:20:39 +03:00
M. Mert Yildiran
d0c1dbcd5e Print and open a different URL when Ingress is enabled 2023-05-17 03:57:27 +03:00
M. Mert Yildiran
ad9dfbce40 Add Ingress (#1357)
*  Add `Ingress`

*  Rewrite the target in `Ingress`

*  Fix the path of front pod in `Ingress`

*  Add `IngressConfig` struct

*  Generate the correct Helm chart based on `tap.ingress` field of `values.yaml`
2023-05-16 19:46:47 +03:00
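The `IngressConfig` struct named in this PR only shows up indirectly in the hunks below (the `tap` command reads `Ingress.Enabled` and `Ingress.Host`, and commit c19cd00c77 later adds a `CertManager` field), so this is a hypothetical sketch of its shape; the exact fields, tags and defaults are assumptions.

```go
package configStructs

// Hypothetical IngressConfig: Enabled and Host are read by the tap command
// hunks further down, CertManager is named by commit c19cd00c77. Tags assumed.
type IngressConfig struct {
	Enabled     bool   `yaml:"enabled" json:"enabled" default:"false"`
	Host        string `yaml:"host" json:"host"`
	CertManager string `yaml:"certmanager" json:"certmanager"`
}
```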
M. Mert Yildiran
139336d4ee Template hostPort(s) in the Helm chart 2023-05-10 14:38:38 +03:00
M. Mert Yildiran
f68fed0de8 🐛 Fix the effect of proxy config port changes 2023-05-10 01:28:43 +03:00
M. Mert Yildiran
1d7d242e6c Generate the missing new line in 08-persistent-volume-claim.yaml 2023-05-09 00:00:07 +03:00
M. Mert Yildiran
aa904e23c7 Add --persistentstorage option to tap command 2023-05-08 23:57:22 +03:00
M. Mert Yildiran
baf0e65337 Template the Helm chart based on persistentstorage value 2023-05-08 23:52:14 +03:00
M. Mert Yildiran
a33a3467fc Add persistentstorage option 2023-05-08 00:50:56 +03:00
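These three storage commits line up with the `persistentstorage`, `storagelimit` and `storageclass` flags added to the `tap` command further down. Here is a hypothetical excerpt of the matching config fields, with names mirroring the flag labels and tags assumed.

```go
package configStructs

// Hypothetical excerpt of TapConfig covering the storage options above;
// only the flag labels are taken from this comparison, the rest is assumed.
type tapStorageFields struct {
	PersistentStorage bool   `yaml:"persistentstorage" json:"persistentstorage" default:"false"`
	StorageLimit      string `yaml:"storagelimit" json:"storagelimit"`
	StorageClass      string `yaml:"storageclass" json:"storageclass"`
}
```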
M. Mert Yildiran
a9b598bc41 ⬆️ Bump the Helm chart version 2023-05-04 21:45:55 +03:00
M. Mert Yildiran
0aee367ad5 Omit the license string in helm-chart and manifests commands 2023-05-04 21:37:55 +03:00
M. Mert Yildiran
8c7d9ea8fd Fix the updateLicense method 2023-05-04 21:33:38 +03:00
M. Mert Yildiran
fab0f713ed 🐛 Pass the license string 2023-05-04 21:18:34 +03:00
M. Mert Yildiran
2563cc1922 🐛 Fix `imagePullPolicy` to `imagepullpolicy` in the `helm-chart` command 2023-04-24 02:03:58 +03:00
Jay R. Wren
26c9f42eba 📚 Remove kubeshark tap -A example from README.md (#1339) 2023-04-21 18:35:24 -07:00
71 changed files with 1016 additions and 3070 deletions

View File

@@ -3,9 +3,6 @@
</p>
<p align="center">
<a href="https://github.com/kubeshark/kubeshark/blob/main/LICENSE">
<img alt="GitHub License" src="https://img.shields.io/github/license/kubeshark/kubeshark?logo=GitHub&style=flat-square">
</a>
<a href="https://github.com/kubeshark/kubeshark/releases/latest">
<img alt="GitHub Latest Release" src="https://img.shields.io/github/v/release/kubeshark/kubeshark?logo=GitHub&style=flat-square">
</a>
@@ -25,9 +22,11 @@
<p align="center">
<b>
<span>NEW: </span><a href="https://github.com/kubeshark/kubeshark/releases/tag/39.4">Version 39.4</a> is out, introducing
<a href="https://docs.kubeshark.co/en/automation_scripting">Scripting</a>,
<a href="https://docs.kubeshark.co/en/automation_hooks">L4/L7 hooks</a>, and so much more...
<span>NEW: Introducing </span>
<a href="https://docs.kubeshark.co/en/self_hosted">self-hosted Kubeshark</a>, with
<a href="https://docs.kubeshark.co/en/install#helm">Helm</a>,
<a href="https://docs.kubeshark.co/en/self_hosted">Ingress & Authentication</a>. Read more about it
<a href="https://kubeshark.co/self-hosting">here</a>.
</b>
</p>
@@ -45,10 +44,6 @@ Download **Kubeshark**'s binary distribution [latest release](https://github.com
kubeshark tap
```
```shell
kubeshark tap -A
```
```shell
kubeshark tap -n sock-shop "(catalo*|front-end*)"
```

View File

@@ -1,21 +0,0 @@
package cmd
import (
"fmt"
"github.com/kubeshark/kubeshark/misc"
"github.com/spf13/cobra"
)
var checkCmd = &cobra.Command{
Use: "check",
Short: fmt.Sprintf("Check the %s resources for potential problems", misc.Software),
RunE: func(cmd *cobra.Command, args []string) error {
runCheck()
return nil
},
}
func init() {
rootCmd.AddCommand(checkCmd)
}

View File

@@ -1,28 +0,0 @@
package check
import (
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/kubernetes"
"github.com/kubeshark/kubeshark/semver"
"github.com/rs/zerolog/log"
)
func KubernetesApi() (*kubernetes.Provider, *semver.SemVersion, bool) {
log.Info().Str("procedure", "kubernetes-api").Msg("Checking:")
kubernetesProvider, err := kubernetes.NewProvider(config.Config.KubeConfigPath(), config.Config.Kube.Context)
if err != nil {
log.Error().Err(err).Msg("Can't initialize the client!")
return nil, nil, false
}
log.Info().Msg("Initialization of the client is passed.")
kubernetesVersion, err := kubernetesProvider.GetKubernetesVersion()
if err != nil {
log.Error().Err(err).Msg("Can't query the Kubernetes API!")
return nil, nil, false
}
log.Info().Msg("Querying the Kubernetes API is passed.")
return kubernetesProvider, kubernetesVersion, true
}

View File

@@ -1,59 +0,0 @@
package check
import (
"context"
"fmt"
"github.com/kubeshark/kubeshark/kubernetes"
"github.com/rs/zerolog/log"
rbac "k8s.io/api/rbac/v1"
)
func KubernetesPermissions(ctx context.Context, kubernetesProvider *kubernetes.Provider) bool {
log.Info().Str("procedure", "kubernetes-permissions").Msg("Checking:")
return checkRulesPermissions(ctx, kubernetesProvider, kubernetesProvider.BuildClusterRole().Rules, "")
}
func checkRulesPermissions(ctx context.Context, kubernetesProvider *kubernetes.Provider, rules []rbac.PolicyRule, namespace string) bool {
permissionsExist := true
for _, rule := range rules {
for _, group := range rule.APIGroups {
for _, resource := range rule.Resources {
for _, verb := range rule.Verbs {
exist, err := kubernetesProvider.CanI(ctx, namespace, resource, verb, group)
permissionsExist = checkPermissionExist(group, resource, verb, namespace, exist, err) && permissionsExist
}
}
}
}
return permissionsExist
}
func checkPermissionExist(group string, resource string, verb string, namespace string, exist bool, err error) bool {
var groupAndNamespace string
if group != "" && namespace != "" {
groupAndNamespace = fmt.Sprintf("in api group '%v' and namespace '%v'", group, namespace)
} else if group != "" {
groupAndNamespace = fmt.Sprintf("in api group '%v'", group)
} else if namespace != "" {
groupAndNamespace = fmt.Sprintf("in namespace '%v'", namespace)
}
if err != nil {
log.Error().
Str("verb", verb).
Str("resource", resource).
Str("group-and-namespace", groupAndNamespace).
Err(err).
Msg("While checking Kubernetes permissions!")
return false
} else if !exist {
log.Error().Msg(fmt.Sprintf("Can't %v %v %v", verb, resource, groupAndNamespace))
return false
}
log.Info().Msg(fmt.Sprintf("Can %v %v %v", verb, resource, groupAndNamespace))
return true
}

View File

@@ -1,118 +0,0 @@
package check
import (
"context"
"fmt"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/kubernetes"
"github.com/rs/zerolog/log"
)
func KubernetesResources(ctx context.Context, kubernetesProvider *kubernetes.Provider) bool {
log.Info().Str("procedure", "k8s-components").Msg("Checking:")
exist, err := kubernetesProvider.DoesNamespaceExist(ctx, config.Config.Tap.SelfNamespace)
allResourcesExist := checkResourceExist(config.Config.Tap.SelfNamespace, "namespace", exist, err)
exist, err = kubernetesProvider.DoesServiceAccountExist(ctx, config.Config.Tap.SelfNamespace, kubernetes.ServiceAccountName)
allResourcesExist = checkResourceExist(kubernetes.ServiceAccountName, "service account", exist, err) && allResourcesExist
if config.Config.IsNsRestrictedMode() {
exist, err = kubernetesProvider.DoesRoleExist(ctx, config.Config.Tap.SelfNamespace, kubernetes.RoleName)
allResourcesExist = checkResourceExist(kubernetes.RoleName, "role", exist, err) && allResourcesExist
exist, err = kubernetesProvider.DoesRoleBindingExist(ctx, config.Config.Tap.SelfNamespace, kubernetes.RoleBindingName)
allResourcesExist = checkResourceExist(kubernetes.RoleBindingName, "role binding", exist, err) && allResourcesExist
} else {
exist, err = kubernetesProvider.DoesClusterRoleExist(ctx, kubernetes.ClusterRoleName)
allResourcesExist = checkResourceExist(kubernetes.ClusterRoleName, "cluster role", exist, err) && allResourcesExist
exist, err = kubernetesProvider.DoesClusterRoleBindingExist(ctx, kubernetes.ClusterRoleBindingName)
allResourcesExist = checkResourceExist(kubernetes.ClusterRoleBindingName, "cluster role binding", exist, err) && allResourcesExist
}
exist, err = kubernetesProvider.DoesServiceExist(ctx, config.Config.Tap.SelfNamespace, kubernetes.HubServiceName)
allResourcesExist = checkResourceExist(kubernetes.HubServiceName, "service", exist, err) && allResourcesExist
allResourcesExist = checkPodResourcesExist(ctx, kubernetesProvider) && allResourcesExist
return allResourcesExist
}
func checkPodResourcesExist(ctx context.Context, kubernetesProvider *kubernetes.Provider) bool {
if pods, err := kubernetesProvider.ListPodsByAppLabel(ctx, config.Config.Tap.SelfNamespace, kubernetes.HubPodName); err != nil {
log.Error().
Str("name", kubernetes.HubPodName).
Err(err).
Msg("While checking if pod is running!")
return false
} else if len(pods) == 0 {
log.Error().
Str("name", kubernetes.HubPodName).
Msg("Pod doesn't exist!")
return false
} else if !kubernetes.IsPodRunning(&pods[0]) {
log.Error().
Str("name", kubernetes.HubPodName).
Msg("Pod is not running!")
return false
}
log.Info().
Str("name", kubernetes.HubPodName).
Msg("Pod is running.")
if pods, err := kubernetesProvider.ListPodsByAppLabel(ctx, config.Config.Tap.SelfNamespace, kubernetes.WorkerPodName); err != nil {
log.Error().
Str("name", kubernetes.WorkerPodName).
Err(err).
Msg("While checking if pods are running!")
return false
} else {
workers := 0
notRunningWorkers := 0
for _, pod := range pods {
workers += 1
if !kubernetes.IsPodRunning(&pod) {
notRunningWorkers += 1
}
}
if notRunningWorkers > 0 {
log.Error().
Str("name", kubernetes.WorkerPodName).
Msg(fmt.Sprintf("%d/%d pods are not running!", notRunningWorkers, workers))
return false
}
log.Info().
Str("name", kubernetes.WorkerPodName).
Msg(fmt.Sprintf("All %d pods are running.", workers))
return true
}
}
func checkResourceExist(resourceName string, resourceType string, exist bool, err error) bool {
if err != nil {
log.Error().
Str("name", resourceName).
Str("type", resourceType).
Err(err).
Msg("Checking if resource exists!")
return false
} else if !exist {
log.Error().
Str("name", resourceName).
Str("type", resourceType).
Msg("Resource doesn't exist!")
return false
}
log.Info().
Str("name", resourceName).
Str("type", resourceType).
Msg("Resource exist.")
return true
}

View File

@@ -1,22 +0,0 @@
package check
import (
"fmt"
"github.com/kubeshark/kubeshark/kubernetes"
"github.com/kubeshark/kubeshark/semver"
"github.com/kubeshark/kubeshark/utils"
"github.com/rs/zerolog/log"
)
func KubernetesVersion(kubernetesVersion *semver.SemVersion) bool {
log.Info().Str("procedure", "kubernetes-version").Msg("Checking:")
if err := kubernetes.ValidateKubernetesVersion(kubernetesVersion); err != nil {
log.Error().Str("k8s-version", string(*kubernetesVersion)).Err(err).Msg(fmt.Sprintf(utils.Red, "The cluster does not have the minimum required Kubernetes API version!"))
return false
}
log.Info().Str("k8s-version", string(*kubernetesVersion)).Msg("Minimum required Kubernetes API version is passed.")
return true
}

View File

@@ -1,40 +0,0 @@
package check
import (
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/internal/connect"
"github.com/kubeshark/kubeshark/kubernetes"
"github.com/rs/zerolog/log"
)
func ServerConnection(kubernetesProvider *kubernetes.Provider) bool {
log.Info().Str("procedure", "server-connectivity").Msg("Checking:")
var connectedToHub, connectedToFront bool
if err := checkProxy(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort), "/echo", kubernetesProvider); err != nil {
log.Error().Err(err).Msg("Couldn't connect to Hub using proxy!")
} else {
connectedToHub = true
log.Info().Msg("Connected successfully to Hub using proxy.")
}
if err := checkProxy(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Front.SrcPort), "", kubernetesProvider); err != nil {
log.Error().Err(err).Msg("Couldn't connect to Front using proxy!")
} else {
connectedToFront = true
log.Info().Msg("Connected successfully to Front using proxy.")
}
return connectedToHub && connectedToFront
}
func checkProxy(serverUrl string, path string, kubernetesProvider *kubernetes.Provider) error {
log.Info().Str("url", serverUrl).Msg("Connecting:")
connector := connect.NewConnector(serverUrl, connect.DefaultRetries, connect.DefaultTimeout)
if err := connector.TestConnection(path); err != nil {
return err
}
return nil
}

View File

@@ -1,47 +0,0 @@
package cmd
import (
"context"
"fmt"
"os"
"github.com/kubeshark/kubeshark/cmd/check"
"github.com/kubeshark/kubeshark/misc"
"github.com/kubeshark/kubeshark/utils"
"github.com/rs/zerolog/log"
)
func runCheck() {
log.Info().Msg(fmt.Sprintf("Checking the %s resources...", misc.Software))
ctx, cancel := context.WithCancel(context.Background())
defer cancel() // cancel will be called when this function exits
kubernetesProvider, kubernetesVersion, checkPassed := check.KubernetesApi()
if checkPassed {
checkPassed = check.KubernetesVersion(kubernetesVersion)
}
if checkPassed {
checkPassed = check.KubernetesPermissions(ctx, kubernetesProvider)
}
if checkPassed {
checkPassed = check.KubernetesResources(ctx, kubernetesProvider)
}
if checkPassed {
checkPassed = check.ServerConnection(kubernetesProvider)
}
if checkPassed {
log.Info().Msg(fmt.Sprintf(utils.Green, "All checks are passed."))
} else {
log.Error().
Str("command1", fmt.Sprintf("%s %s", misc.Program, cleanCmd.Use)).
Str("command2", fmt.Sprintf("%s %s", misc.Program, tapCmd.Use)).
Msg(fmt.Sprintf(utils.Red, fmt.Sprintf("There are issues in your %s resources! Run these commands:", misc.Software)))
os.Exit(1)
}
}

View File

@@ -5,6 +5,7 @@ import (
"github.com/creasty/defaults"
"github.com/kubeshark/kubeshark/config/configStructs"
"github.com/kubeshark/kubeshark/kubernetes/helm"
"github.com/kubeshark/kubeshark/misc"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
@@ -14,7 +15,12 @@ var cleanCmd = &cobra.Command{
Use: "clean",
Short: fmt.Sprintf("Removes all %s resources", misc.Software),
RunE: func(cmd *cobra.Command, args []string) error {
performCleanCommand()
resp, err := helm.NewHelmDefault().Uninstall()
if err != nil {
log.Error().Err(err).Send()
} else {
log.Info().Msgf("Uninstalled the Helm release: %s", resp.Release.Name)
}
return nil
},
}

View File

@@ -1,14 +0,0 @@
package cmd
import (
"github.com/kubeshark/kubeshark/config"
)
func performCleanCommand() {
kubernetesProvider, err := getKubernetesProviderForCli(false, false)
if err != nil {
return
}
finishSelfExecution(kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.Tap.SelfNamespace, false)
}

View File

@@ -14,7 +14,6 @@ import (
"github.com/kubeshark/kubeshark/kubernetes"
"github.com/kubeshark/kubeshark/misc"
"github.com/kubeshark/kubeshark/misc/fsUtils"
"github.com/kubeshark/kubeshark/resources"
"github.com/rs/zerolog/log"
)
@@ -27,7 +26,7 @@ func startProxyReportErrorIfAny(kubernetesProvider *kubernetes.Provider, ctx con
return
}
connector := connect.NewConnector(kubernetes.GetLocalhostOnPort(srcPort), connect.DefaultRetries, connect.DefaultTimeout)
connector := connect.NewConnector(kubernetes.GetProxyOnPort(srcPort), connect.DefaultRetries, connect.DefaultTimeout)
if err := connector.TestConnection(healthCheck); err != nil {
log.Warn().
Str("service", serviceName).
@@ -47,7 +46,7 @@ func startProxyReportErrorIfAny(kubernetesProvider *kubernetes.Provider, ctx con
return
}
connector = connect.NewConnector(kubernetes.GetLocalhostOnPort(srcPort), connect.DefaultRetries, connect.DefaultTimeout)
connector = connect.NewConnector(kubernetes.GetProxyOnPort(srcPort), connect.DefaultRetries, connect.DefaultTimeout)
if err := connector.TestConnection(healthCheck); err != nil {
log.Error().
Str("service", serviceName).
@@ -100,13 +99,10 @@ func handleKubernetesProviderError(err error) {
}
}
func finishSelfExecution(kubernetesProvider *kubernetes.Provider, isNsRestrictedMode bool, selfNamespace string, withoutCleanup bool) {
func finishSelfExecution(kubernetesProvider *kubernetes.Provider, isNsRestrictedMode bool, selfNamespace string) {
removalCtx, cancel := context.WithTimeout(context.Background(), cleanupTimeout)
defer cancel()
dumpLogsIfNeeded(removalCtx, kubernetesProvider)
if !withoutCleanup {
resources.CleanUpSelfResources(removalCtx, cancel, kubernetesProvider, isNsRestrictedMode, selfNamespace)
}
}
func dumpLogsIfNeeded(ctx context.Context, kubernetesProvider *kubernetes.Provider) {

View File

@@ -36,12 +36,12 @@ func init() {
log.Debug().Err(err).Send()
}
consoleCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.SrcPort, "Provide a custom port for the Hub")
consoleCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.Port, "Provide a custom port for the Hub")
consoleCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the Hub")
}
func runConsole() {
hubUrl := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort)
hubUrl := kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Hub.Port)
response, err := http.Get(fmt.Sprintf("%s/echo", hubUrl))
if err != nil || response.StatusCode != 200 {
log.Info().Msg(fmt.Sprintf(utils.Yellow, "Couldn't connect to Hub. Establishing proxy..."))
@@ -51,10 +51,10 @@ func runConsole() {
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
log.Info().Str("host", config.Config.Tap.Proxy.Host).Uint16("port", config.Config.Tap.Proxy.Hub.SrcPort).Msg("Connecting to:")
log.Info().Str("host", config.Config.Tap.Proxy.Host).Uint16("port", config.Config.Tap.Proxy.Hub.Port).Msg("Connecting to:")
u := url.URL{
Scheme: "ws",
Host: fmt.Sprintf("%s:%d", config.Config.Tap.Proxy.Host, config.Config.Tap.Proxy.Hub.SrcPort),
Host: fmt.Sprintf("%s:%d", config.Config.Tap.Proxy.Host, config.Config.Tap.Proxy.Hub.Port),
Path: "/scripts/logs",
}

View File

@@ -1,363 +0,0 @@
package cmd
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"sort"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/kubernetes"
"github.com/kubeshark/kubeshark/misc"
"github.com/kubeshark/kubeshark/misc/fsUtils"
"github.com/kubeshark/kubeshark/utils"
"github.com/ohler55/ojg/jp"
"github.com/ohler55/ojg/oj"
"github.com/otiai10/copy"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
)
var helmChartCmd = &cobra.Command{
Use: "helm-chart",
Short: "Generate Helm chart of Kubeshark",
RunE: func(cmd *cobra.Command, args []string) error {
runHelmChart()
return nil
},
}
// Maintainer describes a Chart maintainer.
type Maintainer struct {
// Name is a user name or organization name
Name string `json:"name,omitempty"`
// Email is an optional email address to contact the named maintainer
Email string `json:"email,omitempty"`
// URL is an optional URL to an address for the named maintainer
URL string `json:"url,omitempty"`
}
// Metadata for a Chart file. This models the structure of a Chart.yaml file.
type Metadata struct {
// The name of the chart. Required.
Name string `json:"name,omitempty"`
// The URL to a relevant project page, git repo, or contact person
Home string `json:"home,omitempty"`
// Source is the URL to the source code of this chart
Sources []string `json:"sources,omitempty"`
// A SemVer 2 conformant version string of the chart. Required.
Version string `json:"version,omitempty"`
// A one-sentence description of the chart
Description string `json:"description,omitempty"`
// A list of string keywords
Keywords []string `json:"keywords,omitempty"`
// A list of name and URL/email address combinations for the maintainer(s)
Maintainers []*Maintainer `json:"maintainers,omitempty"`
// The URL to an icon file.
Icon string `json:"icon,omitempty"`
// The API Version of this chart. Required.
APIVersion string `json:"apiVersion,omitempty"`
// The condition to check to enable chart
Condition string `json:"condition,omitempty"`
// The tags to check to enable chart
Tags string `json:"tags,omitempty"`
// The version of the application enclosed inside of this chart.
AppVersion string `json:"appVersion,omitempty"`
// Whether or not this chart is deprecated
Deprecated bool `json:"deprecated,omitempty"`
// Annotations are additional mappings uninterpreted by Helm,
// made available for inspection by other applications.
Annotations map[string]string `json:"annotations,omitempty"`
// KubeVersion is a SemVer constraint specifying the version of Kubernetes required.
KubeVersion string `json:"kubeVersion,omitempty"`
// Dependencies are a list of dependencies for a chart.
Dependencies []*Dependency `json:"dependencies,omitempty"`
// Specifies the chart type: application or library
Type string `json:"type,omitempty"`
}
// Dependency describes a chart upon which another chart depends.
//
// Dependencies can be used to express developer intent, or to capture the state
// of a chart.
type Dependency struct {
// Name is the name of the dependency.
//
// This must mach the name in the dependency's Chart.yaml.
Name string `json:"name"`
// Version is the version (range) of this chart.
//
// A lock file will always produce a single version, while a dependency
// may contain a semantic version range.
Version string `json:"version,omitempty"`
// The URL to the repository.
//
// Appending `index.yaml` to this string should result in a URL that can be
// used to fetch the repository index.
Repository string `json:"repository"`
// A yaml path that resolves to a boolean, used for enabling/disabling charts (e.g. subchart1.enabled )
Condition string `json:"condition,omitempty"`
// Tags can be used to group charts for enabling/disabling together
Tags []string `json:"tags,omitempty"`
// Enabled bool determines if chart should be loaded
Enabled bool `json:"enabled,omitempty"`
// ImportValues holds the mapping of source values to parent key to be imported. Each item can be a
// string or pair of child/parent sublist items.
ImportValues []interface{} `json:"import-values,omitempty"`
// Alias usable alias to be used for the chart
Alias string `json:"alias,omitempty"`
}
var namespaceMappings = map[string]interface{}{
"metadata.name": "{{ .Values.tap.selfnamespace }}",
}
var serviceAccountMappings = map[string]interface{}{
"metadata.namespace": "{{ .Values.tap.selfnamespace }}",
}
var clusterRoleMappings = serviceAccountMappings
var clusterRoleBindingMappings = map[string]interface{}{
"metadata.namespace": "{{ .Values.tap.selfnamespace }}",
"subjects[0].namespace": "{{ .Values.tap.selfnamespace }}",
}
var hubPodMappings = map[string]interface{}{
"metadata.namespace": "{{ .Values.tap.selfnamespace }}",
"spec.containers[0].env": []map[string]interface{}{
{
"name": "POD_REGEX",
"value": "{{ .Values.tap.regex }}",
},
{
"name": "NAMESPACES",
"value": "{{ gt (len .Values.tap.namespaces) 0 | ternary (join \",\" .Values.tap.namespaces) \"\" }}",
},
{
"name": "LICENSE",
"value": "{{ .Values.license }}",
},
{
"name": "SCRIPTING_ENV",
"value": "{}",
},
{
"name": "SCRIPTING_SCRIPTS",
"value": "[]",
},
},
"spec.containers[0].image": "{{ .Values.tap.docker.registry }}/hub:{{ .Values.tap.docker.tag }}",
"spec.containers[0].imagePullPolicy": "{{ .Values.tap.docker.imagePullPolicy }}",
"spec.containers[0].resources.limits.cpu": "{{ .Values.tap.resources.hub.limits.cpu }}",
"spec.containers[0].resources.limits.memory": "{{ .Values.tap.resources.hub.limits.memory }}",
"spec.containers[0].resources.requests.cpu": "{{ .Values.tap.resources.hub.requests.cpu }}",
"spec.containers[0].resources.requests.memory": "{{ .Values.tap.resources.hub.requests.memory }}",
"spec.containers[0].command[0]": "{{ .Values.tap.debug | ternary \"./hub -debug\" \"./hub\" }}",
}
var hubServiceMappings = serviceAccountMappings
var frontPodMappings = map[string]interface{}{
"metadata.namespace": "{{ .Values.tap.selfnamespace }}",
"spec.containers[0].image": "{{ .Values.tap.docker.registry }}/front:{{ .Values.tap.docker.tag }}",
"spec.containers[0].imagePullPolicy": "{{ .Values.tap.docker.imagePullPolicy }}",
}
var frontServiceMappings = serviceAccountMappings
var persistentVolumeMappings = map[string]interface{}{
"metadata.namespace": "{{ .Values.tap.selfnamespace }}",
"spec.resources.requests.storage": "{{ .Values.tap.storagelimit }}",
"spec.storageClassName": "{{ .Values.tap.storageclass }}",
}
var workerDaemonSetMappings = map[string]interface{}{
"metadata.namespace": "{{ .Values.tap.selfnamespace }}",
"spec.template.spec.containers[0].image": "{{ .Values.tap.docker.registry }}/worker:{{ .Values.tap.docker.tag }}",
"spec.template.spec.containers[0].imagePullPolicy": "{{ .Values.tap.docker.imagePullPolicy }}",
"spec.template.spec.containers[0].resources.limits.cpu": "{{ .Values.tap.resources.worker.limits.cpu }}",
"spec.template.spec.containers[0].resources.limits.memory": "{{ .Values.tap.resources.worker.limits.memory }}",
"spec.template.spec.containers[0].resources.requests.cpu": "{{ .Values.tap.resources.worker.requests.cpu }}",
"spec.template.spec.containers[0].resources.requests.memory": "{{ .Values.tap.resources.worker.requests.memory }}",
"spec.template.spec.containers[0].command[0]": "{{ .Values.tap.debug | ternary \"./worker -debug\" \"./worker\" }}",
"spec.template.spec.containers[0].command[6]": "{{ .Values.tap.packetcapture }}",
}
func init() {
rootCmd.AddCommand(helmChartCmd)
}
func runHelmChart() {
namespace,
serviceAccount,
clusterRole,
clusterRoleBinding,
hubPod,
hubService,
frontPod,
frontService,
persistentVolume,
workerDaemonSet,
err := generateManifests()
if err != nil {
log.Error().Err(err).Send()
return
}
err = dumpHelmChart(map[string]interface{}{
"00-namespace.yaml": template(namespace, namespaceMappings),
"01-service-account.yaml": template(serviceAccount, serviceAccountMappings),
"02-cluster-role.yaml": template(clusterRole, clusterRoleMappings),
"03-cluster-role-binding.yaml": template(clusterRoleBinding, clusterRoleBindingMappings),
"04-hub-pod.yaml": template(hubPod, hubPodMappings),
"05-hub-service.yaml": template(hubService, hubServiceMappings),
"06-front-pod.yaml": template(frontPod, frontPodMappings),
"07-front-service.yaml": template(frontService, frontServiceMappings),
"08-persistent-volume-claim.yaml": template(persistentVolume, persistentVolumeMappings),
"09-worker-daemon-set.yaml": template(workerDaemonSet, workerDaemonSetMappings),
})
if err != nil {
log.Error().Err(err).Send()
return
}
}
func template(object interface{}, mappings map[string]interface{}) (template interface{}) {
var err error
var data []byte
data, err = json.Marshal(object)
if err != nil {
log.Error().Err(err).Send()
return
}
var obj interface{}
obj, err = oj.Parse(data)
if err != nil {
log.Error().Err(err).Send()
return
}
for path, value := range mappings {
var x jp.Expr
x, err = jp.ParseString(path)
if err != nil {
log.Error().Err(err).Send()
return
}
err = x.Set(obj, value)
if err != nil {
log.Error().Err(err).Send()
return
}
}
newJson := oj.JSON(obj)
err = json.Unmarshal([]byte(newJson), &template)
if err != nil {
log.Error().Err(err).Send()
return
}
return
}
func dumpHelmChart(objects map[string]interface{}) error {
folder := filepath.Join(".", "helm-chart")
templatesFolder := filepath.Join(folder, "templates")
err := fsUtils.RemoveFilesByExtension(templatesFolder, "yaml")
if err != nil {
return err
}
err = os.MkdirAll(templatesFolder, os.ModePerm)
if err != nil {
return err
}
// Sort by filenames
filenames := make([]string, 0)
for filename := range objects {
filenames = append(filenames, filename)
}
sort.Strings(filenames)
// Generate templates
for _, filename := range filenames {
manifest, err := utils.PrettyYamlOmitEmpty(objects[filename])
if err != nil {
return err
}
path := filepath.Join(templatesFolder, filename)
err = os.WriteFile(path, []byte(manifestHeader+manifest), 0644)
if err != nil {
return err
}
log.Info().Msgf("Helm chart template generated: %s", path)
}
// Copy LICENSE
licenseSrcPath := filepath.Join(".", "LICENSE")
licenseDstPath := filepath.Join(folder, "LICENSE")
err = copy.Copy(licenseSrcPath, licenseDstPath)
if err != nil {
log.Warn().Err(err).Str("path", licenseSrcPath).Msg("Couldn't find the license:")
} else {
log.Info().Msgf("Helm chart license copied: %s", licenseDstPath)
}
// Generate Chart.yaml
chartMetadata := Metadata{
APIVersion: "v2",
Name: misc.Program,
Description: misc.Description,
Home: misc.Website,
Sources: []string{"https://github.com/kubeshark/kubeshark/tree/master/helm-chart"},
Keywords: []string{
"kubeshark",
"packet capture",
"traffic capture",
"traffic analyzer",
"network sniffer",
"observability",
"devops",
"microservice",
"forensics",
"api",
},
Maintainers: []*Maintainer{
{
Name: misc.Software,
Email: misc.Email,
URL: misc.Website,
},
},
Version: misc.Ver,
AppVersion: misc.Ver,
KubeVersion: fmt.Sprintf(">= %s-0", kubernetes.MinKubernetesServerVersion),
Type: "application",
}
chart, err := utils.PrettyYamlOmitEmpty(chartMetadata)
if err != nil {
return err
}
path := filepath.Join(folder, "Chart.yaml")
err = os.WriteFile(path, []byte(chart), 0644)
if err != nil {
return err
}
log.Info().Msgf("Helm chart Chart.yaml generated: %s", path)
// Generate values.yaml
values, err := utils.PrettyYaml(config.Config)
if err != nil {
return err
}
path = filepath.Join(folder, "values.yaml")
err = os.WriteFile(path, []byte(values), 0644)
if err != nil {
return err
}
log.Info().Msgf("Helm chart values.yaml generated: %s", path)
return nil
}

View File

@@ -1,223 +0,0 @@
package cmd
import (
"fmt"
"os"
"path/filepath"
"sort"
"github.com/creasty/defaults"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/docker"
"github.com/kubeshark/kubeshark/kubernetes"
"github.com/kubeshark/kubeshark/misc/fsUtils"
"github.com/kubeshark/kubeshark/utils"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
v1 "k8s.io/api/core/v1"
rbac "k8s.io/api/rbac/v1"
)
const manifestSeperator = "---"
const manifestHeader = "# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!\n" + manifestSeperator + "\n"
var manifestsCmd = &cobra.Command{
Use: "manifests",
Short: "Generate Kubernetes manifests of Kubeshark",
RunE: func(cmd *cobra.Command, args []string) error {
runManifests()
return nil
},
}
func init() {
rootCmd.AddCommand(manifestsCmd)
defaultManifestsConfig := config.ManifestsConfig{}
if err := defaults.Set(&defaultManifestsConfig); err != nil {
log.Debug().Err(err).Send()
}
manifestsCmd.Flags().Bool("dump", defaultManifestsConfig.Dump, "Enable the debug mode")
}
func runManifests() {
namespace,
serviceAccount,
clusterRole,
clusterRoleBinding,
hubPod,
hubService,
frontPod,
frontService,
persistentVolume,
workerDaemonSet,
err := generateManifests()
if err != nil {
log.Error().Err(err).Send()
return
}
if config.Config.Manifests.Dump {
err = dumpManifests(map[string]interface{}{
"00-namespace.yaml": namespace,
"01-service-account.yaml": serviceAccount,
"02-cluster-role.yaml": clusterRole,
"03-cluster-role-binding.yaml": clusterRoleBinding,
"04-hub-pod.yaml": hubPod,
"05-hub-service.yaml": hubService,
"06-front-pod.yaml": frontPod,
"07-front-service.yaml": frontService,
"08-persistent-volume-claim.yaml": persistentVolume,
"09-worker-daemon-set.yaml": workerDaemonSet,
})
} else {
err = printManifests([]interface{}{
namespace,
serviceAccount,
clusterRole,
clusterRoleBinding,
hubPod,
hubService,
frontPod,
frontService,
workerDaemonSet,
})
}
if err != nil {
log.Error().Err(err).Send()
return
}
}
func generateManifests() (
namespace *v1.Namespace,
serviceAccount *v1.ServiceAccount,
clusterRole *rbac.ClusterRole,
clusterRoleBinding *rbac.ClusterRoleBinding,
hubPod *v1.Pod,
hubService *v1.Service,
frontPod *v1.Pod,
frontService *v1.Service,
persistentVolumeClaim *v1.PersistentVolumeClaim,
workerDaemonSet *kubernetes.DaemonSet,
err error,
) {
var kubernetesProvider *kubernetes.Provider
kubernetesProvider, err = getKubernetesProviderForCli(true, true)
if err != nil {
return
}
namespace = kubernetesProvider.BuildNamespace(config.Config.Tap.SelfNamespace)
serviceAccount = kubernetesProvider.BuildServiceAccount()
clusterRole = kubernetesProvider.BuildClusterRole()
clusterRoleBinding = kubernetesProvider.BuildClusterRoleBinding()
hubPod, err = kubernetesProvider.BuildHubPod(&kubernetes.PodOptions{
Namespace: config.Config.Tap.SelfNamespace,
PodName: kubernetes.HubPodName,
PodImage: docker.GetHubImage(),
ServiceAccountName: kubernetes.ServiceAccountName,
Resources: config.Config.Tap.Resources.Hub,
ImagePullPolicy: config.Config.ImagePullPolicy(),
ImagePullSecrets: config.Config.ImagePullSecrets(),
Debug: config.Config.Tap.Debug,
})
if err != nil {
return
}
hubService = kubernetesProvider.BuildHubService(config.Config.Tap.SelfNamespace)
frontPod, err = kubernetesProvider.BuildFrontPod(&kubernetes.PodOptions{
Namespace: config.Config.Tap.SelfNamespace,
PodName: kubernetes.FrontPodName,
PodImage: docker.GetHubImage(),
ServiceAccountName: kubernetes.ServiceAccountName,
Resources: config.Config.Tap.Resources.Hub,
ImagePullPolicy: config.Config.ImagePullPolicy(),
ImagePullSecrets: config.Config.ImagePullSecrets(),
Debug: config.Config.Tap.Debug,
}, config.Config.Tap.Proxy.Host, fmt.Sprintf("%d", config.Config.Tap.Proxy.Hub.SrcPort))
if err != nil {
return
}
frontService = kubernetesProvider.BuildFrontService(config.Config.Tap.SelfNamespace)
persistentVolumeClaim, err = kubernetesProvider.BuildPersistentVolumeClaim()
if err != nil {
return
}
workerDaemonSet, err = kubernetesProvider.BuildWorkerDaemonSet(
docker.GetWorkerImage(),
kubernetes.WorkerDaemonSetName,
kubernetes.ServiceAccountName,
config.Config.Tap.Resources.Worker,
config.Config.ImagePullPolicy(),
config.Config.ImagePullSecrets(),
config.Config.Tap.ServiceMesh,
config.Config.Tap.Tls,
config.Config.Tap.Debug,
)
if err != nil {
return
}
return
}
func dumpManifests(objects map[string]interface{}) error {
folder := filepath.Join(".", "manifests")
err := fsUtils.RemoveFilesByExtension(folder, "yaml")
if err != nil {
return err
}
err = os.MkdirAll(folder, os.ModePerm)
if err != nil {
return err
}
// Sort by filenames
filenames := make([]string, 0)
for filename := range objects {
filenames = append(filenames, filename)
}
sort.Strings(filenames)
for _, filename := range filenames {
manifest, err := utils.PrettyYamlOmitEmpty(objects[filename])
if err != nil {
return err
}
path := filepath.Join(folder, filename)
err = os.WriteFile(path, []byte(manifestHeader+manifest), 0644)
if err != nil {
return err
}
log.Info().Msgf("Manifest generated: %s", path)
}
return nil
}
func printManifests(objects []interface{}) error {
for _, object := range objects {
manifest, err := utils.PrettyYamlOmitEmpty(object)
if err != nil {
return err
}
fmt.Println(manifestSeperator)
fmt.Println(manifest)
}
return nil
}

View File

@@ -2,7 +2,7 @@ package cmd
import (
"fmt"
"io/ioutil"
"io"
"net/http"
"os"
"time"
@@ -28,7 +28,7 @@ var proCmd = &cobra.Command{
}
const (
PRO_URL = "https://console.kubeshark.co"
PRO_URL = "https://console.kubeshark.co/cli"
PRO_PORT = 5252
)
@@ -40,19 +40,19 @@ func init() {
log.Debug().Err(err).Send()
}
proCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.SrcPort, "Provide a custom port for the Hub")
proCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.Port, "Provide a custom port for the Hub")
proCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the Hub")
}
func acquireLicense() {
hubUrl := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort)
hubUrl := kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Hub.Port)
response, err := http.Get(fmt.Sprintf("%s/echo", hubUrl))
if err != nil || response.StatusCode != 200 {
log.Info().Msg(fmt.Sprintf(utils.Yellow, "Couldn't connect to Hub. Establishing proxy..."))
runProxy(false, true)
}
connector = connect.NewConnector(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort), connect.DefaultRetries, connect.DefaultTimeout)
connector = connect.NewConnector(kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Hub.Port), connect.DefaultRetries, connect.DefaultTimeout)
log.Info().Str("url", PRO_URL).Msg("Opening in the browser:")
utils.OpenBrowser(PRO_URL)
@@ -61,17 +61,19 @@ func acquireLicense() {
}
func updateLicense(licenseKey string) {
log.Info().Str("key", licenseKey).Msg("Received license:")
config.Config.License = licenseKey
err := config.WriteConfig(&config.Config)
if err != nil {
log.Error().Err(err).Send()
}
connector.PostLicenseSingle(config.Config.License)
log.Info().Msg("Updated the license. Exiting.")
go func() {
connector.PostLicense(config.Config.License)
log.Info().Msg("Updated the license. Exiting.")
time.Sleep(2 * time.Second)
os.Exit(0)
}()
@@ -96,7 +98,7 @@ func runLicenseRecieverServer() {
})
ginApp.POST("/", func(c *gin.Context) {
data, err := ioutil.ReadAll(c.Request.Body)
data, err := io.ReadAll(c.Request.Body)
if err != nil {
log.Error().Err(err).Send()
c.AbortWithStatus(http.StatusBadRequest)
@@ -105,8 +107,6 @@ func runLicenseRecieverServer() {
licenseKey := string(data)
log.Info().Str("key", licenseKey).Msg("Received license:")
updateLicense(licenseKey)
})

View File

@@ -24,7 +24,7 @@ func init() {
log.Debug().Err(err).Send()
}
proxyCmd.Flags().Uint16(configStructs.ProxyFrontPortLabel, defaultTapConfig.Proxy.Front.SrcPort, "Provide a custom port for the front-end proxy/port-forward")
proxyCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.SrcPort, "Provide a custom port for the Hub proxy/port-forward")
proxyCmd.Flags().Uint16(configStructs.ProxyFrontPortLabel, defaultTapConfig.Proxy.Front.Port, "Provide a custom port for the front-end proxy/port-forward")
proxyCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.Port, "Provide a custom port for the Hub proxy/port-forward")
proxyCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the proxy/port-forward")
}

View File

@@ -63,12 +63,12 @@ func runProxy(block bool, noBrowser bool) {
var establishedProxy bool
hubUrl := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort)
hubUrl := kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Hub.Port)
response, err := http.Get(fmt.Sprintf("%s/echo", hubUrl))
if err == nil && response.StatusCode == 200 {
log.Info().
Str("service", kubernetes.HubServiceName).
Int("port", int(config.Config.Tap.Proxy.Hub.SrcPort)).
Int("port", int(config.Config.Tap.Proxy.Hub.Port)).
Msg("Found a running service.")
okToOpen("Hub", hubUrl, true)
@@ -79,8 +79,8 @@ func runProxy(block bool, noBrowser bool) {
kubernetes.HubServiceName,
kubernetes.HubPodName,
configStructs.ProxyHubPortLabel,
config.Config.Tap.Proxy.Hub.SrcPort,
config.Config.Tap.Proxy.Hub.DstPort,
config.Config.Tap.Proxy.Hub.Port,
configStructs.ContainerPort,
"/echo",
)
connector := connect.NewConnector(hubUrl, connect.DefaultRetries, connect.DefaultTimeout)
@@ -93,12 +93,12 @@ func runProxy(block bool, noBrowser bool) {
okToOpen("Hub", hubUrl, true)
}
frontUrl := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Front.SrcPort)
frontUrl := kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Front.Port)
response, err = http.Get(fmt.Sprintf("%s/", frontUrl))
if err == nil && response.StatusCode == 200 {
log.Info().
Str("service", kubernetes.FrontServiceName).
Int("port", int(config.Config.Tap.Proxy.Front.SrcPort)).
Int("port", int(config.Config.Tap.Proxy.Front.Port)).
Msg("Found a running service.")
okToOpen("Kubeshark", frontUrl, noBrowser)
@@ -109,8 +109,8 @@ func runProxy(block bool, noBrowser bool) {
kubernetes.FrontServiceName,
kubernetes.FrontPodName,
configStructs.ProxyFrontPortLabel,
config.Config.Tap.Proxy.Front.SrcPort,
config.Config.Tap.Proxy.Front.DstPort,
config.Config.Tap.Proxy.Front.Port,
configStructs.ContainerPort,
"",
)
connector := connect.NewConnector(frontUrl, connect.DefaultRetries, connect.DefaultTimeout)

View File

@@ -34,7 +34,7 @@ func init() {
log.Debug().Err(err).Send()
}
scriptsCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.SrcPort, "Provide a custom port for the Hub")
scriptsCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.Port, "Provide a custom port for the Hub")
scriptsCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the Hub")
}
@@ -44,14 +44,14 @@ func runScripts() {
return
}
hubUrl := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort)
hubUrl := kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Hub.Port)
response, err := http.Get(fmt.Sprintf("%s/echo", hubUrl))
if err != nil || response.StatusCode != 200 {
log.Info().Msg(fmt.Sprintf(utils.Yellow, "Couldn't connect to Hub. Establishing proxy..."))
runProxy(false, true)
}
connector = connect.NewConnector(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort), connect.DefaultRetries, connect.DefaultTimeout)
connector = connect.NewConnector(kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Hub.Port), connect.DefaultRetries, connect.DefaultTimeout)
watchScripts(true)
}

View File

@@ -47,11 +47,12 @@ func init() {
tapCmd.Flags().StringP(configStructs.DockerTagLabel, "t", defaultTapConfig.Docker.Tag, "The tag of the Docker images that are going to be pulled")
tapCmd.Flags().String(configStructs.DockerImagePullPolicy, defaultTapConfig.Docker.ImagePullPolicy, "ImagePullPolicy for the Docker images")
tapCmd.Flags().StringSlice(configStructs.DockerImagePullSecrets, defaultTapConfig.Docker.ImagePullSecrets, "ImagePullSecrets for the Docker images")
tapCmd.Flags().Uint16(configStructs.ProxyFrontPortLabel, defaultTapConfig.Proxy.Front.SrcPort, "Provide a custom port for the front-end proxy/port-forward")
tapCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.SrcPort, "Provide a custom port for the Hub proxy/port-forward")
tapCmd.Flags().Uint16(configStructs.ProxyFrontPortLabel, defaultTapConfig.Proxy.Front.Port, "Provide a custom port for the front-end proxy/port-forward")
tapCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.Port, "Provide a custom port for the Hub proxy/port-forward")
tapCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the proxy/port-forward")
tapCmd.Flags().StringSliceP(configStructs.NamespacesLabel, "n", defaultTapConfig.Namespaces, "Namespaces selector")
tapCmd.Flags().StringP(configStructs.SelfNamespaceLabel, "s", defaultTapConfig.SelfNamespace, "Self-namespace of Kubeshark")
tapCmd.Flags().Bool(configStructs.PersistentStorageLabel, defaultTapConfig.PersistentStorage, "Enable persistent storage (PersistentVolumeClaim)")
tapCmd.Flags().String(configStructs.StorageLimitLabel, defaultTapConfig.StorageLimit, "Override the default storage limit (per node)")
tapCmd.Flags().String(configStructs.StorageClassLabel, defaultTapConfig.StorageClass, "Override the default storage class of the PersistentVolumeClaim (per node)")
tapCmd.Flags().Bool(configStructs.DryRunLabel, defaultTapConfig.DryRun, "Preview of all pods matching the regex, without tapping them")
@@ -59,5 +60,6 @@ func init() {
tapCmd.Flags().Bool(configStructs.ServiceMeshLabel, defaultTapConfig.ServiceMesh, "Capture the encrypted traffic if the cluster is configured with a service mesh and with mTLS")
tapCmd.Flags().Bool(configStructs.TlsLabel, defaultTapConfig.Tls, "Capture the traffic that's encrypted with OpenSSL or Go crypto/tls libraries")
tapCmd.Flags().Bool(configStructs.IgnoreTaintedLabel, defaultTapConfig.IgnoreTainted, "Ignore tainted pods while running Worker DaemonSet")
tapCmd.Flags().Bool(configStructs.IngressEnabledLabel, defaultTapConfig.Ingress.Enabled, "Enable Ingress")
tapCmd.Flags().Bool(configStructs.DebugLabel, defaultTapConfig.Debug, "Enable the debug mode")
}

View File

@@ -13,6 +13,7 @@ import (
"github.com/docker/docker/client"
"github.com/docker/go-connections/nat"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/config/configStructs"
"github.com/kubeshark/kubeshark/docker"
"github.com/kubeshark/kubeshark/internal/connect"
"github.com/kubeshark/kubeshark/kubernetes"
@@ -141,10 +142,10 @@ func createAndStartContainers(
hostConfigFront := &container.HostConfig{
PortBindings: nat.PortMap{
nat.Port(fmt.Sprintf("%d/tcp", config.Config.Tap.Proxy.Front.DstPort)): []nat.PortBinding{
nat.Port(fmt.Sprintf("%d/tcp", configStructs.ContainerPort)): []nat.PortBinding{
{
HostIP: hostIP,
HostPort: fmt.Sprintf("%d", config.Config.Tap.Proxy.Front.SrcPort),
HostPort: fmt.Sprintf("%d", config.Config.Tap.Proxy.Front.Port),
},
},
},
@@ -156,7 +157,7 @@ func createAndStartContainers(
Env: []string{
"REACT_APP_DEFAULT_FILTER= ",
"REACT_APP_HUB_HOST= ",
fmt.Sprintf("REACT_APP_HUB_PORT=%d", config.Config.Tap.Proxy.Hub.SrcPort),
fmt.Sprintf("REACT_APP_HUB_PORT=%d", config.Config.Tap.Proxy.Hub.Port),
},
}, hostConfigFront, nil, nil, nameFront)
if err != nil {
@@ -165,16 +166,16 @@ func createAndStartContainers(
hostConfigHub := &container.HostConfig{
PortBindings: nat.PortMap{
nat.Port(fmt.Sprintf("%d/tcp", config.Config.Tap.Proxy.Hub.DstPort)): []nat.PortBinding{
nat.Port(fmt.Sprintf("%d/tcp", config.Config.Tap.Proxy.Hub.SrvPort)): []nat.PortBinding{
{
HostIP: hostIP,
HostPort: fmt.Sprintf("%d", config.Config.Tap.Proxy.Hub.SrcPort),
HostPort: fmt.Sprintf("%d", config.Config.Tap.Proxy.Hub.Port),
},
},
},
}
cmdHub := []string{"-port", fmt.Sprintf("%d", config.Config.Tap.Proxy.Hub.DstPort)}
cmdHub := []string{"-port", fmt.Sprintf("%d", config.Config.Tap.Proxy.Hub.SrvPort)}
if config.DebugMode {
cmdHub = append(cmdHub, fmt.Sprintf("-%s", config.DebugFlag))
}
@@ -183,13 +184,13 @@ func createAndStartContainers(
Image: imageHub,
Cmd: cmdHub,
Tty: false,
ExposedPorts: nat.PortSet{nat.Port(fmt.Sprintf("%d/tcp", config.Config.Tap.Proxy.Hub.DstPort)): {}},
ExposedPorts: nat.PortSet{nat.Port(fmt.Sprintf("%d/tcp", config.Config.Tap.Proxy.Hub.SrvPort)): {}},
}, hostConfigHub, nil, nil, nameHub)
if err != nil {
return
}
cmdWorker := []string{"-f", "./import", "-port", fmt.Sprintf("%d", config.Config.Tap.Proxy.Worker.DstPort)}
cmdWorker := []string{"-f", "./import", "-port", fmt.Sprintf("%d", config.Config.Tap.Proxy.Worker.SrvPort)}
if config.DebugMode {
cmdWorker = append(cmdWorker, fmt.Sprintf("-%s", config.DebugFlag))
}
@@ -328,7 +329,7 @@ func pcap(tarPath string) {
},
}
connector = connect.NewConnector(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort), connect.DefaultRetries, connect.DefaultTimeout)
connector = connect.NewConnector(kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Hub.Port), connect.DefaultRetries, connect.DefaultTimeout)
connector.PostWorkerPodToHub(workerPod)
// License
@@ -337,10 +338,10 @@ func pcap(tarPath string) {
}
log.Info().
Str("url", kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort)).
Str("url", kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Hub.Port)).
Msg(fmt.Sprintf(utils.Green, "Hub is available at:"))
url := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Front.SrcPort)
url := kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Front.Port)
log.Info().Str("url", url).Msg(fmt.Sprintf(utils.Green, fmt.Sprintf("%s is available at:", misc.Software)))
if !config.Config.HeadlessMode {

View File

@@ -2,7 +2,6 @@ package cmd
import (
"context"
"errors"
"fmt"
"regexp"
"sync"
@@ -10,13 +9,11 @@ import (
"github.com/kubeshark/kubeshark/docker"
"github.com/kubeshark/kubeshark/internal/connect"
"github.com/kubeshark/kubeshark/kubernetes/helm"
"github.com/kubeshark/kubeshark/misc"
"github.com/kubeshark/kubeshark/resources"
"github.com/kubeshark/kubeshark/utils"
core "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/config/configStructs"
@@ -28,9 +25,8 @@ import (
const cleanupTimeout = time.Minute
type tapState struct {
startTime time.Time
targetNamespaces []string
selfServiceAccountExists bool
startTime time.Time
targetNamespaces []string
}
var state tapState
@@ -60,7 +56,7 @@ func tap() {
Str("limit", config.Config.Tap.StorageLimit).
Msg(fmt.Sprintf("%s will store the traffic up to a limit (per node). Oldest TCP/UDP streams will be removed once the limit is reached.", misc.Software))
connector = connect.NewConnector(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort), connect.DefaultRetries, connect.DefaultTimeout)
connector = connect.NewConnector(kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Hub.Port), connect.DefaultRetries, connect.DefaultTimeout)
kubernetesProvider, err := getKubernetesProviderForCli(false, false)
if err != nil {
@@ -90,19 +86,13 @@ func tap() {
}
log.Info().Msg(fmt.Sprintf("Waiting for the creation of %s resources...", misc.Software))
if state.selfServiceAccountExists, err = resources.CreateHubResources(ctx, kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.Tap.SelfNamespace, config.Config.Tap.Resources.Hub, config.Config.ImagePullPolicy(), config.Config.ImagePullSecrets(), config.Config.Tap.Debug); err != nil {
var statusError *k8serrors.StatusError
if errors.As(err, &statusError) && (statusError.ErrStatus.Reason == metav1.StatusReasonAlreadyExists) {
log.Info().Msg(fmt.Sprintf("%s is already running in this namespace, change the `selfnamespace` configuration or run `%s clean` to remove the currently running %s instance.", misc.Software, misc.Program, misc.Software))
postHubStarted(ctx, kubernetesProvider, cancel, true)
log.Info().Msg("Updated Hub about the changes in the config. Exiting.")
printProxyCommandSuggestion()
} else {
defer resources.CleanUpSelfResources(ctx, cancel, kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.Tap.SelfNamespace)
log.Error().Err(errormessage.FormatError(err)).Msg("Error creating resources!")
}
rel, err := helm.NewHelmDefault().Install()
if err != nil {
log.Error().Err(err).Send()
return
} else {
log.Info().Msgf("Installed the Helm release: %s", rel.Name)
}
defer finishTapExecution(kubernetesProvider)
@@ -113,7 +103,10 @@ func tap() {
// block until exit signal or error
utils.WaitForTermination(ctx, cancel)
printProxyCommandSuggestion()
if !config.Config.Tap.Ingress.Enabled {
printProxyCommandSuggestion()
}
}
func printProxyCommandSuggestion() {
@@ -123,7 +116,7 @@ func printProxyCommandSuggestion() {
}
func finishTapExecution(kubernetesProvider *kubernetes.Provider) {
finishSelfExecution(kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.Tap.SelfNamespace, true)
finishSelfExecution(kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.Tap.SelfNamespace)
}
/*
@@ -315,7 +308,6 @@ func watchFrontPod(ctx context.Context, kubernetesProvider *kubernetes.Provider,
Str("namespace", config.Config.Tap.SelfNamespace).
Err(err).
Msg("Failed creating pod.")
cancel()
case <-timeAfter:
if !isPodReady {
@@ -409,29 +401,12 @@ func postHubStarted(ctx context.Context, kubernetesProvider *kubernetes.Provider
kubernetes.HubServiceName,
kubernetes.HubPodName,
configStructs.ProxyHubPortLabel,
config.Config.Tap.Proxy.Hub.SrcPort,
config.Config.Tap.Proxy.Hub.DstPort,
config.Config.Tap.Proxy.Hub.Port,
configStructs.ContainerPort,
"/echo",
)
if !update {
// Create workers
err := kubernetes.CreateWorkers(
kubernetesProvider,
state.selfServiceAccountExists,
ctx,
config.Config.Tap.SelfNamespace,
config.Config.Tap.Resources.Worker,
config.Config.ImagePullPolicy(),
config.Config.ImagePullSecrets(),
config.Config.Tap.ServiceMesh,
config.Config.Tap.Tls,
config.Config.Tap.Debug,
)
if err != nil {
log.Error().Err(err).Send()
}
} else {
if update {
// Pod regex
connector.PostRegexToHub(config.Config.Tap.PodRegexStr, state.targetNamespaces)
@@ -458,9 +433,9 @@ func postHubStarted(ctx context.Context, kubernetesProvider *kubernetes.Provider
connector.PostScriptDone()
}
if !update {
if !update && !config.Config.Tap.Ingress.Enabled {
// Hub proxy URL
url := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort)
url := kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Hub.Port)
log.Info().Str("url", url).Msg(fmt.Sprintf(utils.Green, "Hub is available at:"))
}
@@ -476,12 +451,17 @@ func postFrontStarted(ctx context.Context, kubernetesProvider *kubernetes.Provid
kubernetes.FrontServiceName,
kubernetes.FrontPodName,
configStructs.ProxyFrontPortLabel,
config.Config.Tap.Proxy.Front.SrcPort,
config.Config.Tap.Proxy.Front.DstPort,
config.Config.Tap.Proxy.Front.Port,
configStructs.ContainerPort,
"",
)
url := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Front.SrcPort)
var url string
if config.Config.Tap.Ingress.Enabled {
url = fmt.Sprintf("http://%s", config.Config.Tap.Ingress.Host)
} else {
url = kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Front.Port)
}
log.Info().Str("url", url).Msg(fmt.Sprintf(utils.Green, fmt.Sprintf("%s is available at:", misc.Software)))
if !config.Config.HeadlessMode {

View File

@@ -19,24 +19,24 @@ func CreateDefaultConfig() ConfigStruct {
}
type KubeConfig struct {
ConfigPathStr string `yaml:"configpath"`
Context string `yaml:"context"`
ConfigPathStr string `yaml:"configpath" json:"configpath"`
Context string `yaml:"context" json:"context"`
}
type ManifestsConfig struct {
Dump bool `yaml:"dump"`
Dump bool `yaml:"dump" json:"dump"`
}
type ConfigStruct struct {
Tap configStructs.TapConfig `yaml:"tap"`
Logs configStructs.LogsConfig `yaml:"logs"`
Config configStructs.ConfigConfig `yaml:"config,omitempty"`
Kube KubeConfig `yaml:"kube"`
DumpLogs bool `yaml:"dumplogs" default:"false"`
HeadlessMode bool `yaml:"headless" default:"false"`
License string `yaml:"license" default:""`
Scripting configStructs.ScriptingConfig `yaml:"scripting"`
Manifests ManifestsConfig `yaml:"manifests,omitempty"`
Tap configStructs.TapConfig `yaml:"tap" json:"tap"`
Logs configStructs.LogsConfig `yaml:"logs" json:"logs"`
Config configStructs.ConfigConfig `yaml:"config,omitempty" json:"config,omitempty"`
Kube KubeConfig `yaml:"kube" json:"kube"`
DumpLogs bool `yaml:"dumplogs" json:"dumplogs" default:"false"`
HeadlessMode bool `yaml:"headless" json:"headless" default:"false"`
License string `yaml:"license" json:"license" default:""`
Scripting configStructs.ScriptingConfig `yaml:"scripting" json:"scripting"`
Manifests ManifestsConfig `yaml:"manifests,omitempty" json:"manifests,omitempty"`
}
func (config *ConfigStruct) ImagePullPolicy() v1.PullPolicy {
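The newly added `json` tags are what make this struct usable as Helm values: the `tap` command now serializes the whole config and hands it to the chart (see `helm.go` further down). A minimal sketch of that conversion, using only the fields shown above and a hypothetical helper name:

```go
package config

import "encoding/json"

// valuesFromConfig mirrors what helm.go does below: marshal the CLI config
// via its json tags, then unmarshal it into the generic map that Helm's
// install client accepts as chart values.
func valuesFromConfig(cfg ConfigStruct) (map[string]interface{}, error) {
	raw, err := json.Marshal(cfg)
	if err != nil {
		return nil, err
	}
	var values map[string]interface{}
	if err := json.Unmarshal(raw, &values); err != nil {
		return nil, err
	}
	return values, nil // e.g. values["tap"], values["license"], ...
}
```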


@@ -5,5 +5,5 @@ const (
)
type ConfigConfig struct {
Regenerate bool `yaml:"regenerate,omitempty" default:"false" readonly:""`
Regenerate bool `yaml:"regenerate,omitempty" json:"regenerate,omitempty" default:"false" readonly:""`
}


@@ -13,7 +13,7 @@ const (
)
type LogsConfig struct {
FileStr string `yaml:"file"`
FileStr string `yaml:"file" json:"file"`
}
func (config *LogsConfig) Validate() error {


@@ -2,7 +2,7 @@ package configStructs
import (
"io/fs"
"io/ioutil"
"os"
"path/filepath"
"github.com/kubeshark/kubeshark/misc"
@@ -10,9 +10,9 @@ import (
)
type ScriptingConfig struct {
Env map[string]interface{} `yaml:"env"`
Source string `yaml:"source" default:""`
WatchScripts bool `yaml:"watchScripts" default:"true"`
Env map[string]interface{} `yaml:"env" json:"env"`
Source string `yaml:"source" json:"source" default:""`
WatchScripts bool `yaml:"watchScripts" json:"watchScripts" default:"true"`
}
func (config *ScriptingConfig) GetScripts() (scripts []*misc.Script, err error) {
@@ -20,8 +20,8 @@ func (config *ScriptingConfig) GetScripts() (scripts []*misc.Script, err error)
return
}
var files []fs.FileInfo
files, err = ioutil.ReadDir(config.Source)
var files []fs.DirEntry
files, err = os.ReadDir(config.Source)
if err != nil {
return
}


@@ -5,6 +5,7 @@ import (
"regexp"
v1 "k8s.io/api/core/v1"
networking "k8s.io/api/networking/v1"
)
const (
@@ -17,6 +18,7 @@ const (
ProxyHostLabel = "proxy-host"
NamespacesLabel = "namespaces"
SelfNamespaceLabel = "selfnamespace"
PersistentStorageLabel = "persistentstorage"
StorageLimitLabel = "storagelimit"
StorageClassLabel = "storageclass"
DryRunLabel = "dryrun"
@@ -24,51 +26,53 @@ const (
ServiceMeshLabel = "servicemesh"
TlsLabel = "tls"
IgnoreTaintedLabel = "ignoreTainted"
IngressEnabledLabel = "ingress-enabled"
DebugLabel = "debug"
ContainerPort = 80
ContainerPortStr = "80"
)
type ResourceLimits struct {
CPU string `yaml:"cpu" default:"750m"`
Memory string `yaml:"memory" default:"1Gi"`
CPU string `yaml:"cpu" json:"cpu" default:"750m"`
Memory string `yaml:"memory" json:"memory" default:"1Gi"`
}
type ResourceRequests struct {
CPU string `yaml:"cpu" default:"50m"`
Memory string `yaml:"memory" default:"50Mi"`
CPU string `yaml:"cpu" json:"cpu" default:"50m"`
Memory string `yaml:"memory" json:"memory" default:"50Mi"`
}
type ResourceRequirements struct {
Limits ResourceLimits `json:"limits"`
Requests ResourceRequests `json:"requests"`
Limits ResourceLimits `yaml:"limits" json:"limits"`
Requests ResourceRequests `yaml:"requests" json:"requests"`
}
type WorkerConfig struct {
SrcPort uint16 `yaml:"port" default:"8897"`
DstPort uint16 `yaml:"srvport" default:"8897"`
SrvPort uint16 `yaml:"srvport" json:"srvport" default:"8897"`
}
type HubConfig struct {
SrcPort uint16 `yaml:"port" default:"8898"`
DstPort uint16 `yaml:"srvport" default:"80"`
Port uint16 `yaml:"port" json:"port" default:"8898"`
SrvPort uint16 `yaml:"srvport" json:"srvport" default:"8898"`
}
type FrontConfig struct {
SrcPort uint16 `yaml:"port" default:"8899"`
DstPort uint16 `yaml:"srvport" default:"80"`
Port uint16 `yaml:"port" json:"port" default:"8899"`
SrvPort uint16 `yaml:"srvport" json:"srvport" default:"8899"`
}
type ProxyConfig struct {
Worker WorkerConfig `yaml:"worker"`
Hub HubConfig `yaml:"hub"`
Front FrontConfig `yaml:"front"`
Host string `yaml:"host" default:"127.0.0.1"`
Worker WorkerConfig `yaml:"worker" json:"worker"`
Hub HubConfig `yaml:"hub" json:"hub"`
Front FrontConfig `yaml:"front" json:"front"`
Host string `yaml:"host" json:"host" default:"127.0.0.1"`
}
type DockerConfig struct {
Registry string `yaml:"registry" default:"docker.io/kubeshark"`
Tag string `yaml:"tag" default:"latest"`
ImagePullPolicy string `yaml:"imagepullpolicy" default:"Always"`
ImagePullSecrets []string `yaml:"imagepullsecrets"`
Registry string `yaml:"registry" json:"registry" default:"docker.io/kubeshark"`
Tag string `yaml:"tag" json:"tag" default:"latest"`
ImagePullPolicy string `yaml:"imagepullpolicy" json:"imagepullpolicy" default:"Always"`
ImagePullSecrets []string `yaml:"imagepullsecrets" json:"imagepullsecrets"`
}
type ResourcesConfig struct {
@@ -76,24 +80,38 @@ type ResourcesConfig struct {
Hub ResourceRequirements `yaml:"hub"`
}
type AuthConfig struct {
ApprovedDomains []string `yaml:"approvedDomains" json:"approvedDomains" default:"[]"`
}
type IngressConfig struct {
Enabled bool `yaml:"enabled" json:"enabled" default:"false"`
Host string `yaml:"host" json:"host" default:"ks.svc.cluster.local"`
TLS []networking.IngressTLS `yaml:"tls" json:"tls"`
Auth AuthConfig `yaml:"auth" json:"auth"`
CertManager string `yaml:"certManager" json:"certManager" default:"letsencrypt-prod"`
}
type TapConfig struct {
Docker DockerConfig `yaml:"docker"`
Proxy ProxyConfig `yaml:"proxy"`
PodRegexStr string `yaml:"regex" default:".*"`
Namespaces []string `yaml:"namespaces"`
SelfNamespace string `yaml:"selfnamespace" default:"kubeshark"`
StorageLimit string `yaml:"storagelimit" default:"200Mi"`
StorageClass string `yaml:"storageclass" default:"standard"`
DryRun bool `yaml:"dryrun" default:"false"`
Pcap string `yaml:"pcap" default:""`
Resources ResourcesConfig `yaml:"resources"`
ServiceMesh bool `yaml:"servicemesh" default:"true"`
Tls bool `yaml:"tls" default:"true"`
PacketCapture string `yaml:"packetcapture" default:"libpcap"`
IgnoreTainted bool `yaml:"ignoreTainted" default:"false"`
ResourceLabels map[string]string `yaml:"resourceLabels" default:"{}"`
NodeSelectorTerms []v1.NodeSelectorTerm `yaml:"nodeSelectorTerms" default:"[]"`
Debug bool `yaml:"debug" default:"false"`
Docker DockerConfig `yaml:"docker" json:"docker"`
Proxy ProxyConfig `yaml:"proxy" json:"proxy"`
PodRegexStr string `yaml:"regex" json:"regex" default:".*"`
Namespaces []string `yaml:"namespaces" json:"namespaces" default:"[]"`
SelfNamespace string `yaml:"selfnamespace" json:"selfnamespace" default:"kubeshark"`
PersistentStorage bool `yaml:"persistentstorage" json:"persistentstorage" default:"false"`
StorageLimit string `yaml:"storagelimit" json:"storagelimit" default:"200Mi"`
StorageClass string `yaml:"storageclass" json:"storageclass" default:"standard"`
DryRun bool `yaml:"dryrun" json:"dryrun" default:"false"`
Pcap string `yaml:"pcap" json:"pcap" default:""`
Resources ResourcesConfig `yaml:"resources" json:"resources"`
ServiceMesh bool `yaml:"servicemesh" json:"servicemesh" default:"true"`
Tls bool `yaml:"tls" json:"tls" default:"true"`
PacketCapture string `yaml:"packetcapture" json:"packetcapture" default:"libpcap"`
IgnoreTainted bool `yaml:"ignoreTainted" json:"ignoreTainted" default:"false"`
ResourceLabels map[string]string `yaml:"resourceLabels" json:"resourceLabels" default:"{}"`
NodeSelectorTerms []v1.NodeSelectorTerm `yaml:"nodeSelectorTerms" json:"nodeSelectorTerms" default:"[]"`
Ingress IngressConfig `yaml:"ingress" json:"ingress"`
Debug bool `yaml:"debug" json:"debug" default:"false"`
}
func (config *TapConfig) PodRegex() *regexp.Regexp {
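The `default:` tags on the new `IngressConfig` and `PersistentStorage` fields are resolved by the creasty/defaults package already listed in go.mod. A hypothetical sketch of how those defaults materialize (the actual call site lives elsewhere in the CLI):

```go
package main

import (
	"fmt"

	"github.com/creasty/defaults"
	"github.com/kubeshark/kubeshark/config/configStructs"
)

func main() {
	var ingress configStructs.IngressConfig
	// Fill zero-value fields from the `default:` tags, e.g. Host becomes
	// "ks.svc.cluster.local" and CertManager "letsencrypt-prod".
	if err := defaults.Set(&ingress); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", ingress)
}
```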

go.mod

@@ -1,118 +1,157 @@
module github.com/kubeshark/kubeshark
go 1.17
go 1.19
require (
github.com/creasty/defaults v1.5.2
github.com/docker/docker v20.10.24+incompatible
github.com/docker/go-connections v0.4.0
github.com/fsnotify/fsnotify v1.5.1
github.com/fsnotify/fsnotify v1.6.0
github.com/gin-gonic/gin v1.7.7
github.com/google/go-github/v37 v37.0.0
github.com/gorilla/websocket v1.4.2
github.com/ohler55/ojg v1.14.5
github.com/otiai10/copy v1.10.0
github.com/pkg/errors v0.9.1
github.com/robertkrimen/otto v0.2.1
github.com/rs/zerolog v1.28.0
github.com/spf13/cobra v1.3.0
github.com/spf13/cobra v1.6.1
github.com/spf13/pflag v1.0.5
gopkg.in/yaml.v3 v3.0.1
k8s.io/api v0.23.3
k8s.io/apimachinery v0.23.3
k8s.io/client-go v0.23.3
k8s.io/kubectl v0.23.3
helm.sh/helm/v3 v3.12.0
k8s.io/api v0.27.1
k8s.io/apimachinery v0.27.1
k8s.io/client-go v0.27.1
k8s.io/kubectl v0.27.1
)
require (
cloud.google.com/go/compute v1.2.0 // indirect
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1 // indirect
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
github.com/Azure/go-autorest/autorest v0.11.24 // indirect
github.com/Azure/go-autorest/autorest/adal v0.9.18 // indirect
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
github.com/Azure/go-autorest/logger v0.2.1 // indirect
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
github.com/BurntSushi/toml v1.2.1 // indirect
github.com/MakeNowJust/heredoc v1.0.0 // indirect
github.com/Masterminds/goutils v1.1.1 // indirect
github.com/Masterminds/semver/v3 v3.2.0 // indirect
github.com/Masterminds/sprig/v3 v3.2.3 // indirect
github.com/Masterminds/squirrel v1.5.3 // indirect
github.com/Microsoft/go-winio v0.6.0 // indirect
github.com/PuerkitoBio/purell v1.1.1 // indirect
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5 // indirect
github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/chai2010/gettext-go v1.0.2 // indirect
github.com/containerd/containerd v1.7.0 // indirect
github.com/cyphar/filepath-securejoin v0.2.3 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/docker/distribution v2.8.0+incompatible // indirect
github.com/docker/go-units v0.4.0 // indirect
github.com/docker/cli v20.10.21+incompatible // indirect
github.com/docker/distribution v2.8.1+incompatible // indirect
github.com/docker/docker-credential-helpers v0.7.0 // indirect
github.com/docker/go-metrics v0.0.1 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/emicklei/go-restful/v3 v3.10.1 // indirect
github.com/evanphx/json-patch v5.6.0+incompatible // indirect
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
github.com/fvbommel/sortorder v1.0.2 // indirect
github.com/fatih/color v1.13.0 // indirect
github.com/gin-contrib/sse v0.1.0 // indirect
github.com/go-errors/errors v1.4.2 // indirect
github.com/go-gorp/gorp/v3 v3.0.5 // indirect
github.com/go-logr/logr v1.2.3 // indirect
github.com/go-openapi/jsonpointer v0.19.5 // indirect
github.com/go-openapi/jsonreference v0.19.6 // indirect
github.com/go-openapi/swag v0.21.1 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-openapi/jsonpointer v0.19.6 // indirect
github.com/go-openapi/jsonreference v0.20.1 // indirect
github.com/go-openapi/swag v0.22.3 // indirect
github.com/go-playground/locales v0.13.0 // indirect
github.com/go-playground/universal-translator v0.17.0 // indirect
github.com/go-playground/validator/v10 v10.4.1 // indirect
github.com/gobwas/glob v0.2.3 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v4 v4.2.0 // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/google/btree v1.0.1 // indirect
github.com/google/go-cmp v0.5.7 // indirect
github.com/google/gnostic v0.5.7-v3refs // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/go-querystring v1.1.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/googleapis/gnostic v0.5.5 // indirect
github.com/gorilla/mux v1.8.0 // indirect
github.com/gosuri/uitable v0.0.4 // indirect
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
github.com/imdario/mergo v0.3.12 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/huandu/xstrings v1.4.0 // indirect
github.com/imdario/mergo v0.3.13 // indirect
github.com/inconshreveable/mousetrap v1.0.1 // indirect
github.com/jmoiron/sqlx v1.3.5 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.16.0 // indirect
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
github.com/leodido/go-urn v1.2.0 // indirect
github.com/lib/pq v1.10.7 // indirect
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.16 // indirect
github.com/mattn/go-isatty v0.0.17 // indirect
github.com/mattn/go-runewidth v0.0.9 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/go-wordwrap v1.0.1 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/moby/locker v1.0.1 // indirect
github.com/moby/spdystream v0.2.0 // indirect
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
github.com/moby/term v0.0.0-20221205130635-1aeaba878587 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.0.2 // indirect
github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b // indirect
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/russross/blackfriday v1.6.0 // indirect
github.com/sirupsen/logrus v1.7.0 // indirect
github.com/stretchr/testify v1.8.1 // indirect
github.com/prometheus/client_golang v1.14.0 // indirect
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/common v0.37.0 // indirect
github.com/prometheus/procfs v0.8.0 // indirect
github.com/rubenv/sql-migrate v1.3.1 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/shopspring/decimal v1.3.1 // indirect
github.com/sirupsen/logrus v1.9.0 // indirect
github.com/spf13/cast v1.5.0 // indirect
github.com/ugorji/go/codec v1.1.7 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
github.com/xlab/treeprint v1.1.0 // indirect
go.opentelemetry.io/otel v1.14.0 // indirect
go.opentelemetry.io/otel/trace v1.14.0 // indirect
go.starlark.net v0.0.0-20220203230714-bb14e151c28f // indirect
golang.org/x/crypto v0.1.0 // indirect
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
golang.org/x/net v0.7.0 // indirect
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect
golang.org/x/sys v0.5.0 // indirect
golang.org/x/term v0.5.0 // indirect
golang.org/x/text v0.7.0 // indirect
golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11 // indirect
golang.org/x/tools v0.1.12 // indirect
golang.org/x/crypto v0.5.0 // indirect
golang.org/x/mod v0.9.0 // indirect
golang.org/x/net v0.8.0 // indirect
golang.org/x/oauth2 v0.4.0 // indirect
golang.org/x/sync v0.1.0 // indirect
golang.org/x/sys v0.6.0 // indirect
golang.org/x/term v0.6.0 // indirect
golang.org/x/text v0.9.0 // indirect
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
golang.org/x/tools v0.7.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/protobuf v1.27.1 // indirect
google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect
google.golang.org/grpc v1.53.0 // indirect
google.golang.org/protobuf v1.28.1 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/sourcemap.v1 v1.0.5 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
k8s.io/cli-runtime v0.23.3 // indirect
k8s.io/component-base v0.23.3 // indirect
k8s.io/klog/v2 v2.40.1 // indirect
k8s.io/kube-openapi v0.0.0-20220124234850-424119656bbf // indirect
k8s.io/utils v0.0.0-20220127004650-9b3446523e65 // indirect
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect
sigs.k8s.io/kustomize/api v0.11.1 // indirect
sigs.k8s.io/kustomize/kyaml v0.13.3 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
k8s.io/apiextensions-apiserver v0.27.1 // indirect
k8s.io/apiserver v0.27.1 // indirect
k8s.io/cli-runtime v0.27.1 // indirect
k8s.io/component-base v0.27.1 // indirect
k8s.io/klog/v2 v2.90.1 // indirect
k8s.io/kube-openapi v0.0.0-20230308215209-15aac26d736a // indirect
k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5 // indirect
oras.land/oras-go v1.2.2 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/kustomize/api v0.13.2 // indirect
sigs.k8s.io/kustomize/kyaml v0.14.1 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
sigs.k8s.io/yaml v1.3.0 // indirect
)

go.sum

File diff suppressed because it is too large.


@@ -1,5 +1,5 @@
apiVersion: v2
appVersion: "40.0"
appVersion: "41.0"
description: The API Traffic Analyzer for Kubernetes
home: https://kubeshark.co
keywords:
@@ -22,4 +22,4 @@ name: kubeshark
sources:
- https://github.com/kubeshark/kubeshark/tree/master/helm-chart
type: application
version: "40.0"
version: "41.0"


@@ -51,3 +51,23 @@ kubectl port-forward -n kubeshark service/kubeshark-front 8899:80
```
Visit [localhost:8899](http://localhost:8899)
## Installing with Ingress Enabled
```shell
helm install kubeshark kubeshark/kubeshark \
--set tap.ingress.enabled=true \
--set tap.ingress.host=ks.svc.cluster.local \
--set "tap.ingress.auth.approvedDomains={gmail.com}" \
--set license=LICENSE_GOES_HERE
```
You can get your license [here](https://console.kubeshark.co/).
## Installing with Persistent Storage Enabled
```shell
helm install kubeshark kubeshark/kubeshark \
--set tap.persistentstorage=true \
--set license=LICENSE_GOES_HERE
```
You can get your license [here](https://console.kubeshark.co/).


@@ -1,4 +1,3 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: v1
kind: Namespace


@@ -1,4 +1,3 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: v1
kind: ServiceAccount


@@ -1,4 +1,3 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
@@ -15,11 +14,13 @@ rules:
- ""
- extensions
- apps
- networking.k8s.io
resources:
- pods
- services
- endpoints
- persistentvolumeclaims
- ingresses
verbs:
- list
- get


@@ -1,4 +1,3 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding


@@ -1,4 +1,3 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: v1
kind: Pod
@@ -22,12 +21,17 @@ spec:
- name: LICENSE
value: '{{ .Values.license }}'
- name: SCRIPTING_ENV
value: '{}'
value: '{{ .Values.scripting.env | toJson }}'
- name: SCRIPTING_SCRIPTS
value: '[]'
- name: AUTH_APPROVED_DOMAINS
value: '{{ gt (len .Values.tap.ingress.auth.approvedDomains) 0 | ternary (join "," .Values.tap.ingress.auth.approvedDomains) "" }}'
image: '{{ .Values.tap.docker.registry }}/hub:{{ .Values.tap.docker.tag }}'
imagePullPolicy: '{{ .Values.tap.docker.imagePullPolicy }}'
imagePullPolicy: '{{ .Values.tap.docker.imagepullpolicy }}'
name: kubeshark-hub
ports:
- containerPort: 80
hostPort: {{ .Values.tap.proxy.hub.srvport }}
resources:
limits:
cpu: '{{ .Values.tap.resources.hub.limits.cpu }}'
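The `AUTH_APPROVED_DOMAINS` expression above joins the `approvedDomains` list with commas, or yields an empty string when the list is empty. A small Go equivalent of that template logic, purely for illustration:

```go
package main

import (
	"fmt"
	"strings"
)

// approvedDomainsEnv mirrors `gt (len ...) 0 | ternary (join "," ...) ""`.
func approvedDomainsEnv(domains []string) string {
	if len(domains) > 0 {
		return strings.Join(domains, ",")
	}
	return ""
}

func main() {
	fmt.Println(approvedDomainsEnv([]string{"gmail.com", "example.com"})) // gmail.com,example.com
	fmt.Println(approvedDomainsEnv(nil) == "")                            // true
}
```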


@@ -1,4 +1,3 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: v1
kind: Service
@@ -16,6 +15,6 @@ spec:
targetPort: 80
selector:
app: kubeshark-hub
type: ClusterIP
type: NodePort
status:
loadBalancer: {}


@@ -1,4 +1,3 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: v1
kind: Pod
@@ -18,10 +17,13 @@ spec:
- name: REACT_APP_HUB_HOST
value: ' '
- name: REACT_APP_HUB_PORT
value: "8898"
value: '{{ .Values.tap.ingress.enabled | ternary "80/api" "8898" }}'
image: '{{ .Values.tap.docker.registry }}/front:{{ .Values.tap.docker.tag }}'
imagePullPolicy: '{{ .Values.tap.docker.imagePullPolicy }}'
imagePullPolicy: '{{ .Values.tap.docker.imagepullpolicy }}'
name: kubeshark-front
ports:
- containerPort: 80
hostPort: {{ .Values.tap.proxy.front.srvport }}
readinessProbe:
failureThreshold: 3
periodSeconds: 1


@@ -1,4 +1,3 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: v1
kind: Service
@@ -16,6 +15,6 @@ spec:
targetPort: 80
selector:
app: kubeshark-front
type: ClusterIP
type: NodePort
status:
loadBalancer: {}


@@ -1,5 +1,5 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
{{- if .Values.tap.persistentstorage }}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
@@ -18,3 +18,4 @@ spec:
storage: '{{ .Values.tap.storagelimit }}'
storageClassName: '{{ .Values.tap.storageclass }}'
status: {}
{{- end }}


@@ -1,4 +1,3 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: apps/v1
kind: DaemonSet
@@ -32,7 +31,7 @@ spec:
- -i
- any
- -port
- "8897"
- '{{ .Values.tap.proxy.worker.srvport }}'
- -packet-capture
- '{{ .Values.tap.packetcapture }}'
- -servicemesh
@@ -40,8 +39,11 @@ spec:
- -procfs
- /hostproc
image: '{{ .Values.tap.docker.registry }}/worker:{{ .Values.tap.docker.tag }}'
imagePullPolicy: '{{ .Values.tap.docker.imagePullPolicy }}'
imagePullPolicy: '{{ .Values.tap.docker.imagepullpolicy }}'
name: kubeshark-worker-daemon-set
ports:
- containerPort: {{ .Values.tap.proxy.worker.srvport }}
hostPort: {{ .Values.tap.proxy.worker.srvport }}
resources:
limits:
cpu: '{{ .Values.tap.resources.worker.limits.cpu }}'
@@ -67,8 +69,10 @@ spec:
- mountPath: /sys
name: sys
readOnly: true
{{- if .Values.tap.persistentstorage }}
- mountPath: /app/data
name: kubeshark-persistent-volume
{{- end }}
dnsPolicy: ClusterFirstWithHostNet
hostNetwork: true
serviceAccountName: kubeshark-service-account
@@ -85,6 +89,8 @@ spec:
- hostPath:
path: /sys
name: sys
{{- if .Values.tap.persistentstorage }}
- name: kubeshark-persistent-volume
persistentVolumeClaim:
claimName: kubeshark-persistent-volume-claim
{{- end }}


@@ -0,0 +1,15 @@
---
{{- if .Values.tap.ingress.enabled }}
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
creationTimestamp: null
labels:
kubeshark-cli-version: v1
kubeshark-created-by: kubeshark
kubeshark-managed-by: kubeshark
name: kubeshark-ingress-class
namespace: '{{ .Values.tap.selfnamespace }}'
spec:
controller: k8s.io/ingress-nginx
{{- end }}


@@ -0,0 +1,39 @@
---
{{- if .Values.tap.ingress.enabled }}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
annotations:
certmanager.k8s.io/cluster-issuer: '{{ .Values.tap.ingress.certManager }}'
nginx.ingress.kubernetes.io/rewrite-target: /$2
creationTimestamp: null
labels:
kubeshark-cli-version: v1
kubeshark-created-by: kubeshark
kubeshark-managed-by: kubeshark
name: kubeshark-ingress
namespace: '{{ .Values.tap.selfnamespace }}'
spec:
ingressClassName: kubeshark-ingress-class
rules:
- host: '{{ .Values.tap.ingress.host }}'
http:
paths:
- backend:
service:
name: kubeshark-hub
port:
number: 80
path: /api(/|$)(.*)
pathType: Prefix
- backend:
service:
name: kubeshark-front
port:
number: 80
path: /()(.*)
pathType: Prefix
tls: {{ .Values.tap.ingress.tls | toYaml }}
status:
loadBalancer: {}
{{- end }}
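The `rewrite-target: /$2` annotation paired with the `/api(/|$)(.*)` path means ingress-nginx strips the `/api` prefix before forwarding to the Hub service, while everything else falls through to the front. A short Go sketch of what those capture groups match (illustrative only; the rewriting itself is done by the controller):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern as the Hub backend path in the Ingress above.
	api := regexp.MustCompile(`^/api(/|$)(.*)`)
	for _, p := range []string{"/api/echo", "/api", "/"} {
		if m := api.FindStringSubmatch(p); m != nil {
			fmt.Printf("%-10s -> hub as /%s\n", p, m[2]) // $2 becomes the new path
		} else {
			fmt.Printf("%-10s -> front\n", p)
		}
	}
}
```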


@@ -6,18 +6,18 @@ tap:
imagepullsecrets: []
proxy:
worker:
port: 8897
srvport: 8897
hub:
port: 8898
srvport: 80
srvport: 8898
front:
port: 8899
srvport: 80
srvport: 8899
host: 127.0.0.1
regex: .*
namespaces: []
selfnamespace: kubeshark
persistentstorage: false
storagelimit: 200Mi
storageclass: standard
dryrun: false
@@ -43,6 +43,13 @@ tap:
ignoreTainted: false
resourceLabels: {}
nodeSelectorTerms: []
ingress:
enabled: false
host: ks.svc.cluster.local
tls: []
auth:
approvedDomains: []
certManager: letsencrypt-prod
debug: false
logs:
file: ""


@@ -9,6 +9,7 @@ import (
"net/url"
"time"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/misc"
"github.com/kubeshark/kubeshark/utils"
@@ -73,7 +74,7 @@ func (connector *Connector) PostWorkerPodToHub(pod *v1.Pod) {
ok := false
for !ok {
var resp *http.Response
if resp, err = utils.Post(postWorkerUrl, "application/json", bytes.NewBuffer(podMarshalled), connector.client); err != nil || resp.StatusCode != http.StatusOK {
if resp, err = utils.Post(postWorkerUrl, "application/json", bytes.NewBuffer(podMarshalled), connector.client, config.Config.License); err != nil || resp.StatusCode != http.StatusOK {
if _, ok := err.(*url.Error); ok {
break
}
@@ -106,7 +107,7 @@ func (connector *Connector) PostRegexToHub(regex string, namespaces []string) {
ok := false
for !ok {
var resp *http.Response
if resp, err = utils.Post(postRegexUrl, "application/json", bytes.NewBuffer(payloadMarshalled), connector.client); err != nil || resp.StatusCode != http.StatusOK {
if resp, err = utils.Post(postRegexUrl, "application/json", bytes.NewBuffer(payloadMarshalled), connector.client, config.Config.License); err != nil || resp.StatusCode != http.StatusOK {
if _, ok := err.(*url.Error); ok {
break
}
@@ -137,7 +138,7 @@ func (connector *Connector) PostLicense(license string) {
ok := false
for !ok {
var resp *http.Response
if resp, err = utils.Post(postLicenseUrl, "application/json", bytes.NewBuffer(payloadMarshalled), connector.client); err != nil || resp.StatusCode != http.StatusOK {
if resp, err = utils.Post(postLicenseUrl, "application/json", bytes.NewBuffer(payloadMarshalled), connector.client, config.Config.License); err != nil || resp.StatusCode != http.StatusOK {
if _, ok := err.(*url.Error); ok {
break
}
@@ -151,6 +152,26 @@ func (connector *Connector) PostLicense(license string) {
}
}
func (connector *Connector) PostLicenseSingle(license string) {
postLicenseUrl := fmt.Sprintf("%s/license", connector.url)
payload := postLicenseRequest{
License: license,
}
if payloadMarshalled, err := json.Marshal(payload); err != nil {
log.Error().Err(err).Msg("Failed to marshal the payload:")
} else {
var resp *http.Response
if resp, err = utils.Post(postLicenseUrl, "application/json", bytes.NewBuffer(payloadMarshalled), connector.client, config.Config.License); err != nil || resp.StatusCode != http.StatusOK {
log.Warn().Err(err).Msg("Failed sending the license to Hub.")
} else {
log.Debug().Str("license", license).Msg("Reported license to Hub:")
return
}
}
}
func (connector *Connector) PostEnv(env map[string]interface{}) {
if len(env) == 0 {
return
@@ -164,7 +185,7 @@ func (connector *Connector) PostEnv(env map[string]interface{}) {
ok := false
for !ok {
var resp *http.Response
if resp, err = utils.Post(postEnvUrl, "application/json", bytes.NewBuffer(envMarshalled), connector.client); err != nil || resp.StatusCode != http.StatusOK {
if resp, err = utils.Post(postEnvUrl, "application/json", bytes.NewBuffer(envMarshalled), connector.client, config.Config.License); err != nil || resp.StatusCode != http.StatusOK {
if _, ok := err.(*url.Error); ok {
break
}
@@ -188,7 +209,7 @@ func (connector *Connector) PostScript(script *misc.Script) (index int64, err er
ok := false
for !ok {
var resp *http.Response
if resp, err = utils.Post(postScriptUrl, "application/json", bytes.NewBuffer(scriptMarshalled), connector.client); err != nil || resp.StatusCode != http.StatusOK {
if resp, err = utils.Post(postScriptUrl, "application/json", bytes.NewBuffer(scriptMarshalled), connector.client, config.Config.License); err != nil || resp.StatusCode != http.StatusOK {
if _, ok := err.(*url.Error); ok {
break
}
@@ -237,6 +258,7 @@ func (connector *Connector) PutScript(script *misc.Script, index int64) (err err
return
}
req.Header.Set("Content-Type", "application/json")
req.Header.Set("License-Key", config.Config.License)
var resp *http.Response
resp, err = client.Do(req)
@@ -275,6 +297,7 @@ func (connector *Connector) DeleteScript(index int64) (err error) {
return
}
req.Header.Set("Content-Type", "application/json")
req.Header.Set("License-Key", config.Config.License)
var resp *http.Response
resp, err = client.Do(req)
@@ -305,7 +328,7 @@ func (connector *Connector) PostScriptDone() {
var err error
for !ok {
var resp *http.Response
if resp, err = utils.Post(postScripDonetUrl, "application/json", nil, connector.client); err != nil || resp.StatusCode != http.StatusOK {
if resp, err = utils.Post(postScripDonetUrl, "application/json", nil, connector.client, config.Config.License); err != nil || resp.StatusCode != http.StatusOK {
if _, ok := err.(*url.Error); ok {
break
}
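Every `utils.Post` call now carries `config.Config.License` as an extra argument. The helper itself is outside this diff, but the explicit `req.Header.Set("License-Key", ...)` lines added to `PutScript` and `DeleteScript` suggest it forwards the license the same way; a hypothetical sketch under that assumption:

```go
package utils

import (
	"io"
	"net/http"
)

// Post is a hypothetical version of the updated helper: a plain POST that
// also attaches the license as a License-Key header when one is provided.
func Post(url, contentType string, body io.Reader, client *http.Client, license string) (*http.Response, error) {
	req, err := http.NewRequest(http.MethodPost, url, body)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", contentType)
	if license != "" {
		req.Header.Set("License-Key", license)
	}
	return client.Do(req)
}
```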


@@ -16,6 +16,8 @@ const (
WorkerPodName = SelfResourcesPrefix + "worker"
PersistentVolumeName = SelfResourcesPrefix + "persistent-volume"
PersistentVolumeClaimName = SelfResourcesPrefix + "persistent-volume-claim"
IngressName = SelfResourcesPrefix + "ingress"
IngressClassName = SelfResourcesPrefix + "ingress-class"
PersistentVolumeHostPath = "/app/data"
MinKubernetesServerVersion = "1.16.0"
)

kubernetes/helm/helm.go (new file)

@@ -0,0 +1,187 @@
package helm
import (
"encoding/json"
"os"
"path/filepath"
"regexp"
"github.com/kubeshark/kubeshark/config"
"github.com/pkg/errors"
"github.com/rs/zerolog/log"
"helm.sh/helm/v3/pkg/action"
"helm.sh/helm/v3/pkg/chart"
"helm.sh/helm/v3/pkg/chart/loader"
"helm.sh/helm/v3/pkg/cli"
"helm.sh/helm/v3/pkg/downloader"
"helm.sh/helm/v3/pkg/getter"
"helm.sh/helm/v3/pkg/kube"
"helm.sh/helm/v3/pkg/registry"
"helm.sh/helm/v3/pkg/release"
"helm.sh/helm/v3/pkg/repo"
)
const ENV_HELM_DRIVER = "HELM_DRIVER"
var settings = cli.New()
type Helm struct {
repo string
releaseName string
releaseNamespace string
}
func NewHelm(repo string, releaseName string, releaseNamespace string) *Helm {
return &Helm{
repo: repo,
releaseName: releaseName,
releaseNamespace: releaseNamespace,
}
}
func NewHelmDefault() *Helm {
return &Helm{
repo: "https://helm.kubeshark.co",
releaseName: "kubeshark",
releaseNamespace: "default",
}
}
func parseOCIRef(chartRef string) (string, string, error) {
refTagRegexp := regexp.MustCompile(`^(oci://[^:]+(:[0-9]{1,5})?[^:]+):(.*)$`)
caps := refTagRegexp.FindStringSubmatch(chartRef)
if len(caps) != 4 {
return "", "", errors.Errorf("improperly formatted oci chart reference: %s", chartRef)
}
chartRef = caps[1]
tag := caps[3]
return chartRef, tag, nil
}
func (h *Helm) Install() (rel *release.Release, err error) {
kubeConfigPath := config.Config.KubeConfigPath()
actionConfig := new(action.Configuration)
if err = actionConfig.Init(kube.GetConfig(kubeConfigPath, "", h.releaseNamespace), h.releaseNamespace, os.Getenv(ENV_HELM_DRIVER), func(format string, v ...interface{}) {
log.Info().Msgf(format, v...)
}); err != nil {
return
}
client := action.NewInstall(actionConfig)
client.Namespace = h.releaseNamespace
client.ReleaseName = h.releaseName
var chartURL string
chartURL, err = repo.FindChartInRepoURL(h.repo, h.releaseName, "", "", "", "", getter.All(&cli.EnvSettings{}))
if err != nil {
return
}
var cp string
cp, err = client.ChartPathOptions.LocateChart(chartURL, settings)
if err != nil {
return
}
m := &downloader.Manager{
Out: os.Stdout,
ChartPath: cp,
Keyring: client.ChartPathOptions.Keyring,
SkipUpdate: false,
Getters: getter.All(settings),
RepositoryConfig: settings.RepositoryConfig,
RepositoryCache: settings.RepositoryCache,
Debug: settings.Debug,
}
dl := downloader.ChartDownloader{
Out: m.Out,
Verify: m.Verify,
Keyring: m.Keyring,
RepositoryConfig: m.RepositoryConfig,
RepositoryCache: m.RepositoryCache,
RegistryClient: m.RegistryClient,
Getters: m.Getters,
Options: []getter.Option{
getter.WithInsecureSkipVerifyTLS(false),
},
}
repoPath := filepath.Dir(m.ChartPath)
err = os.MkdirAll(repoPath, os.ModePerm)
if err != nil {
return
}
version := ""
if registry.IsOCI(chartURL) {
chartURL, version, err = parseOCIRef(chartURL)
if err != nil {
return
}
dl.Options = append(dl.Options,
getter.WithRegistryClient(m.RegistryClient),
getter.WithTagName(version))
}
log.Info().
Str("url", chartURL).
Str("repo-path", repoPath).
Msg("Downloading Helm chart:")
if _, _, err = dl.DownloadTo(chartURL, version, repoPath); err != nil {
return
}
var chart *chart.Chart
chart, err = loader.Load(m.ChartPath)
if err != nil {
return
}
log.Info().
Str("release", chart.Metadata.Name).
Str("version", chart.Metadata.Version).
Strs("source", chart.Metadata.Sources).
Str("kube-version", chart.Metadata.KubeVersion).
Msg("Installing using Helm:")
var configMarshalled []byte
configMarshalled, err = json.Marshal(config.Config)
if err != nil {
return
}
var configUnmarshalled map[string]interface{}
err = json.Unmarshal(configMarshalled, &configUnmarshalled)
if err != nil {
return
}
rel, err = client.Run(chart, configUnmarshalled)
if err != nil {
return
}
return
}
func (h *Helm) Uninstall() (resp *release.UninstallReleaseResponse, err error) {
kubeConfigPath := config.Config.KubeConfigPath()
actionConfig := new(action.Configuration)
if err = actionConfig.Init(kube.GetConfig(kubeConfigPath, "", h.releaseNamespace), h.releaseNamespace, os.Getenv(ENV_HELM_DRIVER), func(format string, v ...interface{}) {
log.Info().Msgf(format, v...)
}); err != nil {
return
}
client := action.NewUninstall(actionConfig)
resp, err = client.Run(h.releaseName)
if err != nil {
return
}
return
}
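For orientation, this is roughly how the `tap` command is expected to drive the new package (a minimal usage sketch assuming the `kubernetes/helm` import path shown above; flags and cleanup omitted):

```go
package main

import (
	"github.com/kubeshark/kubeshark/kubernetes/helm"
	"github.com/rs/zerolog/log"
)

func main() {
	// Install the kubeshark chart from https://helm.kubeshark.co into the
	// "default" namespace; Install marshals config.Config into chart values.
	rel, err := helm.NewHelmDefault().Install()
	if err != nil {
		log.Fatal().Err(err).Send()
	}
	log.Info().Str("release", rel.Name).Str("namespace", rel.Namespace).Msg("Installed:")
}
```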


@@ -3,37 +3,26 @@ package kubernetes
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/url"
"path/filepath"
"regexp"
"strings"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/config/configStructs"
"github.com/kubeshark/kubeshark/docker"
"github.com/kubeshark/kubeshark/misc"
"github.com/kubeshark/kubeshark/semver"
"github.com/kubeshark/kubeshark/utils"
"github.com/rs/zerolog/log"
auth "k8s.io/api/authorization/v1"
core "k8s.io/api/core/v1"
rbac "k8s.io/api/rbac/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/version"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes"
_ "k8s.io/client-go/plugin/pkg/client/auth"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/clientcmd"
watchtools "k8s.io/client-go/tools/watch"
)
type Provider struct {
@@ -44,14 +33,6 @@ type Provider struct {
createdBy string
}
const (
fieldManagerName = "kubeshark-manager"
procfsVolumeName = "proc"
procfsMountPath = "/hostproc"
sysfsVolumeName = "sys"
sysfsMountPath = "/sys"
)
func NewProvider(kubeConfigPath string, contextName string) (*Provider, error) {
kubernetesConfig := loadKubernetesConfiguration(kubeConfigPath, contextName)
restClientConfig, err := kubernetesConfig.ClientConfig()
@@ -90,374 +71,6 @@ func NewProvider(kubeConfigPath string, contextName string) (*Provider, error) {
}, nil
}
//NewProviderInCluster Used in another repo that calls this function
func NewProviderInCluster() (*Provider, error) {
restClientConfig, err := rest.InClusterConfig()
if err != nil {
return nil, err
}
clientSet, err := getClientSet(restClientConfig)
if err != nil {
return nil, err
}
return &Provider{
clientSet: clientSet,
kubernetesConfig: nil, // not relevant in cluster
clientConfig: *restClientConfig,
managedBy: misc.Program,
createdBy: misc.Program,
}, nil
}
func (provider *Provider) WaitUtilNamespaceDeleted(ctx context.Context, name string) error {
fieldSelector := fmt.Sprintf("metadata.name=%s", name)
var limit int64 = 1
lw := &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
options.FieldSelector = fieldSelector
options.Limit = limit
return provider.clientSet.CoreV1().Namespaces().List(ctx, options)
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
options.FieldSelector = fieldSelector
options.Limit = limit
return provider.clientSet.CoreV1().Namespaces().Watch(ctx, options)
},
}
var preconditionFunc watchtools.PreconditionFunc = func(store cache.Store) (bool, error) {
_, exists, err := store.Get(&core.Namespace{ObjectMeta: metav1.ObjectMeta{Name: name}})
if err != nil {
return false, err
}
if exists {
return false, nil
}
return true, nil
}
conditionFunc := func(e watch.Event) (bool, error) {
if e.Type == watch.Deleted {
return true, nil
}
return false, nil
}
obj := &core.Namespace{}
_, err := watchtools.UntilWithSync(ctx, lw, obj, preconditionFunc, conditionFunc)
return err
}
func (provider *Provider) BuildNamespace(name string) *core.Namespace {
return &core.Namespace{
TypeMeta: metav1.TypeMeta{
Kind: "Namespace",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: buildWithDefaultLabels(map[string]string{}, provider),
},
}
}
func (provider *Provider) CreateNamespace(ctx context.Context, namespace *core.Namespace) (*core.Namespace, error) {
return provider.clientSet.CoreV1().Namespaces().Create(ctx, namespace, metav1.CreateOptions{})
}
type PodOptions struct {
Namespace string
PodName string
PodImage string
ServiceAccountName string
Resources configStructs.ResourceRequirements
ImagePullPolicy core.PullPolicy
ImagePullSecrets []core.LocalObjectReference
Debug bool
}
func (provider *Provider) BuildHubPod(opts *PodOptions) (*core.Pod, error) {
cpuLimit, err := resource.ParseQuantity(opts.Resources.Limits.CPU)
if err != nil {
return nil, fmt.Errorf("invalid cpu limit for %s pod", opts.PodName)
}
memLimit, err := resource.ParseQuantity(opts.Resources.Limits.Memory)
if err != nil {
return nil, fmt.Errorf("invalid memory limit for %s pod", opts.PodName)
}
cpuRequests, err := resource.ParseQuantity(opts.Resources.Requests.CPU)
if err != nil {
return nil, fmt.Errorf("invalid cpu request for %s pod", opts.PodName)
}
memRequests, err := resource.ParseQuantity(opts.Resources.Requests.Memory)
if err != nil {
return nil, fmt.Errorf("invalid memory request for %s pod", opts.PodName)
}
command := []string{
"./hub",
}
if opts.Debug {
command = append(command, "-debug")
}
// Scripting environment variables
scriptingEnvMarshalled, err := json.Marshal(config.Config.Scripting.Env)
if err != nil {
return nil, err
}
// Scripting scripts
scripts, err := config.Config.Scripting.GetScripts()
if err != nil {
return nil, err
}
if scripts == nil {
scripts = []*misc.Script{}
}
scriptsMarshalled, err := json.Marshal(scripts)
if err != nil {
return nil, err
}
containers := []core.Container{
{
Name: opts.PodName,
Image: opts.PodImage,
ImagePullPolicy: opts.ImagePullPolicy,
Command: command,
Resources: core.ResourceRequirements{
Limits: core.ResourceList{
"cpu": cpuLimit,
"memory": memLimit,
},
Requests: core.ResourceList{
"cpu": cpuRequests,
"memory": memRequests,
},
},
Env: []core.EnvVar{
{
Name: "POD_REGEX",
Value: config.Config.Tap.PodRegexStr,
},
{
Name: "NAMESPACES",
Value: strings.Join(provider.GetNamespaces(), ","),
},
{
Name: "LICENSE",
Value: "",
},
{
Name: "SCRIPTING_ENV",
Value: string(scriptingEnvMarshalled),
},
{
Name: "SCRIPTING_SCRIPTS",
Value: string(scriptsMarshalled),
},
},
},
}
pod := &core.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: opts.PodName,
Namespace: config.Config.Tap.SelfNamespace,
Labels: buildWithDefaultLabels(map[string]string{
"app": opts.PodName,
}, provider),
},
Spec: core.PodSpec{
ServiceAccountName: ServiceAccountName,
Containers: containers,
DNSPolicy: core.DNSClusterFirstWithHostNet,
TerminationGracePeriodSeconds: new(int64),
Tolerations: provider.BuildTolerations(),
ImagePullSecrets: opts.ImagePullSecrets,
},
}
if len(config.Config.Tap.NodeSelectorTerms) > 0 {
pod.Spec.Affinity = &core.Affinity{
NodeAffinity: &core.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &core.NodeSelector{
NodeSelectorTerms: config.Config.Tap.NodeSelectorTerms,
},
},
}
}
return pod, nil
}
func (provider *Provider) BuildFrontPod(opts *PodOptions, hubHost string, hubPort string) (*core.Pod, error) {
cpuLimit, err := resource.ParseQuantity(opts.Resources.Limits.CPU)
if err != nil {
return nil, fmt.Errorf("invalid cpu limit for %s pod", opts.PodName)
}
memLimit, err := resource.ParseQuantity(opts.Resources.Limits.Memory)
if err != nil {
return nil, fmt.Errorf("invalid memory limit for %s pod", opts.PodName)
}
cpuRequests, err := resource.ParseQuantity(opts.Resources.Requests.CPU)
if err != nil {
return nil, fmt.Errorf("invalid cpu request for %s pod", opts.PodName)
}
memRequests, err := resource.ParseQuantity(opts.Resources.Requests.Memory)
if err != nil {
return nil, fmt.Errorf("invalid memory request for %s pod", opts.PodName)
}
volumeMounts := []core.VolumeMount{}
volumes := []core.Volume{}
containers := []core.Container{
{
Name: opts.PodName,
Image: docker.GetFrontImage(),
ImagePullPolicy: opts.ImagePullPolicy,
VolumeMounts: volumeMounts,
ReadinessProbe: &core.Probe{
FailureThreshold: 3,
ProbeHandler: core.ProbeHandler{
TCPSocket: &core.TCPSocketAction{
Port: intstr.Parse("80"),
},
},
PeriodSeconds: 1,
SuccessThreshold: 1,
TimeoutSeconds: 1,
},
Resources: core.ResourceRequirements{
Limits: core.ResourceList{
"cpu": cpuLimit,
"memory": memLimit,
},
Requests: core.ResourceList{
"cpu": cpuRequests,
"memory": memRequests,
},
},
Env: []core.EnvVar{
{
Name: "REACT_APP_DEFAULT_FILTER",
Value: " ",
},
{
Name: "REACT_APP_HUB_HOST",
Value: " ",
},
{
Name: "REACT_APP_HUB_PORT",
Value: hubPort,
},
},
},
}
pod := &core.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: opts.PodName,
Namespace: config.Config.Tap.SelfNamespace,
Labels: buildWithDefaultLabels(map[string]string{
"app": opts.PodName,
}, provider),
},
Spec: core.PodSpec{
ServiceAccountName: ServiceAccountName,
Containers: containers,
Volumes: volumes,
DNSPolicy: core.DNSClusterFirstWithHostNet,
TerminationGracePeriodSeconds: new(int64),
Tolerations: provider.BuildTolerations(),
ImagePullSecrets: opts.ImagePullSecrets,
},
}
if len(config.Config.Tap.NodeSelectorTerms) > 0 {
pod.Spec.Affinity = &core.Affinity{
NodeAffinity: &core.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &core.NodeSelector{
NodeSelectorTerms: config.Config.Tap.NodeSelectorTerms,
},
},
}
}
return pod, nil
}
func (provider *Provider) CreatePod(ctx context.Context, namespace string, podSpec *core.Pod) (*core.Pod, error) {
return provider.clientSet.CoreV1().Pods(namespace).Create(ctx, podSpec, metav1.CreateOptions{})
}
func (provider *Provider) BuildHubService(namespace string) *core.Service {
return &core.Service{
TypeMeta: metav1.TypeMeta{
Kind: "Service",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: HubServiceName,
Namespace: config.Config.Tap.SelfNamespace,
Labels: buildWithDefaultLabels(map[string]string{}, provider),
},
Spec: core.ServiceSpec{
Ports: []core.ServicePort{
{
Name: HubServiceName,
TargetPort: intstr.FromInt(80),
Port: 80,
},
},
Type: core.ServiceTypeClusterIP,
Selector: map[string]string{"app": HubServiceName},
},
}
}
func (provider *Provider) BuildFrontService(namespace string) *core.Service {
return &core.Service{
TypeMeta: metav1.TypeMeta{
Kind: "Service",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: FrontServiceName,
Namespace: config.Config.Tap.SelfNamespace,
Labels: buildWithDefaultLabels(map[string]string{}, provider),
},
Spec: core.ServiceSpec{
Ports: []core.ServicePort{
{
Name: FrontServiceName,
TargetPort: intstr.FromInt(80),
Port: 80,
},
},
Type: core.ServiceTypeClusterIP,
Selector: map[string]string{"app": FrontServiceName},
},
}
}
func (provider *Provider) CreateService(ctx context.Context, namespace string, service *core.Service) (*core.Service, error) {
return provider.clientSet.CoreV1().Services(namespace).Create(ctx, service, metav1.CreateOptions{})
}
func (provider *Provider) CanI(ctx context.Context, namespace string, resource string, verb string, group string) (bool, error) {
selfSubjectAccessReview := &auth.SelfSubjectAccessReview{
Spec: auth.SelfSubjectAccessReviewSpec{
@@ -526,501 +139,6 @@ func (provider *Provider) doesResourceExist(resource interface{}, err error) (bo
return resource != nil, nil
}
func (provider *Provider) BuildServiceAccount() *core.ServiceAccount {
return &core.ServiceAccount{
TypeMeta: metav1.TypeMeta{
Kind: "ServiceAccount",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: ServiceAccountName,
Namespace: config.Config.Tap.SelfNamespace,
Labels: buildWithDefaultLabels(map[string]string{
fmt.Sprintf("%s-cli-version", misc.Program): misc.RBACVersion,
}, provider),
},
}
}
func (provider *Provider) BuildClusterRole() *rbac.ClusterRole {
return &rbac.ClusterRole{
TypeMeta: metav1.TypeMeta{
Kind: "ClusterRole",
APIVersion: "rbac.authorization.k8s.io/v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: ClusterRoleName,
Namespace: config.Config.Tap.SelfNamespace,
Labels: buildWithDefaultLabels(map[string]string{
fmt.Sprintf("%s-cli-version", misc.Program): misc.RBACVersion,
}, provider),
},
Rules: []rbac.PolicyRule{
{
APIGroups: []string{
"",
"extensions",
"apps",
},
Resources: []string{
"pods",
"services",
"endpoints",
"persistentvolumeclaims",
},
Verbs: []string{
"list",
"get",
"watch",
},
},
},
}
}
func (provider *Provider) BuildClusterRoleBinding() *rbac.ClusterRoleBinding {
return &rbac.ClusterRoleBinding{
TypeMeta: metav1.TypeMeta{
Kind: "ClusterRoleBinding",
APIVersion: "rbac.authorization.k8s.io/v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: ClusterRoleBindingName,
Namespace: config.Config.Tap.SelfNamespace,
Labels: buildWithDefaultLabels(map[string]string{
fmt.Sprintf("%s-cli-version", misc.Program): misc.RBACVersion,
}, provider),
},
RoleRef: rbac.RoleRef{
Name: ClusterRoleName,
Kind: "ClusterRole",
APIGroup: "rbac.authorization.k8s.io",
},
Subjects: []rbac.Subject{
{
Kind: "ServiceAccount",
Name: ServiceAccountName,
Namespace: config.Config.Tap.SelfNamespace,
},
},
}
}
func (provider *Provider) CreateSelfRBAC(ctx context.Context, namespace string) error {
serviceAccount := provider.BuildServiceAccount()
clusterRole := provider.BuildClusterRole()
clusterRoleBinding := provider.BuildClusterRoleBinding()
_, err := provider.clientSet.CoreV1().ServiceAccounts(namespace).Create(ctx, serviceAccount, metav1.CreateOptions{})
if err != nil && !k8serrors.IsAlreadyExists(err) {
return err
}
_, err = provider.clientSet.RbacV1().ClusterRoles().Create(ctx, clusterRole, metav1.CreateOptions{})
if err != nil && !k8serrors.IsAlreadyExists(err) {
return err
}
_, err = provider.clientSet.RbacV1().ClusterRoleBindings().Create(ctx, clusterRoleBinding, metav1.CreateOptions{})
if err != nil && !k8serrors.IsAlreadyExists(err) {
return err
}
return nil
}
func (provider *Provider) RemoveNamespace(ctx context.Context, name string) error {
err := provider.clientSet.CoreV1().Namespaces().Delete(ctx, name, metav1.DeleteOptions{})
return provider.handleRemovalError(err)
}
func (provider *Provider) RemoveClusterRole(ctx context.Context, name string) error {
err := provider.clientSet.RbacV1().ClusterRoles().Delete(ctx, name, metav1.DeleteOptions{})
return provider.handleRemovalError(err)
}
func (provider *Provider) RemoveClusterRoleBinding(ctx context.Context, name string) error {
err := provider.clientSet.RbacV1().ClusterRoleBindings().Delete(ctx, name, metav1.DeleteOptions{})
return provider.handleRemovalError(err)
}
func (provider *Provider) RemoveRoleBinding(ctx context.Context, namespace string, name string) error {
err := provider.clientSet.RbacV1().RoleBindings(namespace).Delete(ctx, name, metav1.DeleteOptions{})
return provider.handleRemovalError(err)
}
func (provider *Provider) RemoveRole(ctx context.Context, namespace string, name string) error {
err := provider.clientSet.RbacV1().Roles(namespace).Delete(ctx, name, metav1.DeleteOptions{})
return provider.handleRemovalError(err)
}
func (provider *Provider) RemoveServiceAccount(ctx context.Context, namespace string, name string) error {
err := provider.clientSet.CoreV1().ServiceAccounts(namespace).Delete(ctx, name, metav1.DeleteOptions{})
return provider.handleRemovalError(err)
}
func (provider *Provider) RemovePod(ctx context.Context, namespace string, podName string) error {
err := provider.clientSet.CoreV1().Pods(namespace).Delete(ctx, podName, metav1.DeleteOptions{})
return provider.handleRemovalError(err)
}
func (provider *Provider) RemoveConfigMap(ctx context.Context, namespace string, configMapName string) error {
err := provider.clientSet.CoreV1().ConfigMaps(namespace).Delete(ctx, configMapName, metav1.DeleteOptions{})
return provider.handleRemovalError(err)
}
func (provider *Provider) RemoveService(ctx context.Context, namespace string, serviceName string) error {
err := provider.clientSet.CoreV1().Services(namespace).Delete(ctx, serviceName, metav1.DeleteOptions{})
return provider.handleRemovalError(err)
}
func (provider *Provider) RemovePersistentVolumeClaim(ctx context.Context, namespace string, persistentVolumeClaimName string) error {
err := provider.clientSet.CoreV1().PersistentVolumeClaims(namespace).Delete(ctx, persistentVolumeClaimName, metav1.DeleteOptions{})
return provider.handleRemovalError(err)
}
func (provider *Provider) RemoveDaemonSet(ctx context.Context, namespace string, daemonSetName string) error {
err := provider.clientSet.AppsV1().DaemonSets(namespace).Delete(ctx, daemonSetName, metav1.DeleteOptions{})
return provider.handleRemovalError(err)
}
func (provider *Provider) handleRemovalError(err error) error {
// Ignore NotFound - There is nothing to delete.
// Ignore Forbidden - Assume that a user could not have created the resource in the first place.
if k8serrors.IsNotFound(err) || k8serrors.IsForbidden(err) {
return nil
}
return err
}
func (provider *Provider) BuildPersistentVolumeClaim() (*core.PersistentVolumeClaim, error) {
capacity, err := resource.ParseQuantity(config.Config.Tap.StorageLimit)
if err != nil {
return nil, fmt.Errorf("invalid capacity for the workers: %s", config.Config.Tap.StorageLimit)
}
storageClassName := config.Config.Tap.StorageClass
return &core.PersistentVolumeClaim{
TypeMeta: metav1.TypeMeta{
Kind: "PersistentVolumeClaim",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: PersistentVolumeClaimName,
Namespace: config.Config.Tap.SelfNamespace,
Labels: buildWithDefaultLabels(map[string]string{
fmt.Sprintf("%s-cli-version", misc.Program): misc.RBACVersion,
}, provider),
},
Spec: core.PersistentVolumeClaimSpec{
Resources: core.ResourceRequirements{
Requests: core.ResourceList{
core.ResourceStorage: capacity,
},
},
AccessModes: []core.PersistentVolumeAccessMode{core.ReadWriteMany},
StorageClassName: &storageClassName,
},
}, nil
}
func (provider *Provider) BuildWorkerDaemonSet(
podImage string,
podName string,
serviceAccountName string,
resources configStructs.ResourceRequirements,
imagePullPolicy core.PullPolicy,
imagePullSecrets []core.LocalObjectReference,
serviceMesh bool,
tls bool,
debug bool,
) (*DaemonSet, error) {
// Resource limits
cpuLimit, err := resource.ParseQuantity(resources.Limits.CPU)
if err != nil {
return nil, fmt.Errorf("invalid cpu limit for %s pod", podName)
}
memLimit, err := resource.ParseQuantity(resources.Limits.Memory)
if err != nil {
return nil, fmt.Errorf("invalid memory limit for %s pod", podName)
}
cpuRequests, err := resource.ParseQuantity(resources.Requests.CPU)
if err != nil {
return nil, fmt.Errorf("invalid cpu request for %s pod", podName)
}
memRequests, err := resource.ParseQuantity(resources.Requests.Memory)
if err != nil {
return nil, fmt.Errorf("invalid memory request for %s pod", podName)
}
// Command
command := []string{
"./worker",
"-i",
"any",
"-port",
"8897",
"-packet-capture",
config.Config.Tap.PacketCapture,
}
if debug {
command = append(command, "-debug")
}
if serviceMesh {
command = append(command, "-servicemesh")
}
if tls {
command = append(command, "-tls")
}
if serviceMesh || tls {
command = append(command, "-procfs", procfsMountPath)
}
// Linux capabilities
dropCaps := []core.Capability{"ALL"}
addCaps := []core.Capability{"NET_RAW", "NET_ADMIN"} // to listen to traffic using libpcap
if serviceMesh || tls {
addCaps = append(addCaps, "SYS_ADMIN") // to read /proc/PID/net/ns + to install eBPF programs (kernel < 5.8)
addCaps = append(addCaps, "SYS_PTRACE") // to set netns to other process + to open libssl.so of other process
if serviceMesh {
addCaps = append(addCaps, "DAC_OVERRIDE") // to read /proc/PID/environ
}
if tls {
addCaps = append(addCaps, "SYS_RESOURCE") // to change rlimits for eBPF
}
}
// Environment variables
var env []core.EnvVar
if debug {
env = append(env, core.EnvVar{
Name: "MEMORY_PROFILING_ENABLED",
Value: "true",
})
env = append(env, core.EnvVar{
Name: "MEMORY_PROFILING_INTERVAL_SECONDS",
Value: "10",
})
env = append(env, core.EnvVar{
Name: "MEMORY_USAGE_INTERVAL_MILLISECONDS",
Value: "500",
})
}
// Volumes and volume mounts
// Host procfs is needed inside the container because we need access to
// the network namespaces of processes on the machine.
procfsVolume := core.Volume{
Name: procfsVolumeName,
VolumeSource: core.VolumeSource{
HostPath: &core.HostPathVolumeSource{
Path: "/proc",
},
},
}
procfsVolumeMount := core.VolumeMount{
Name: procfsVolumeName,
MountPath: procfsMountPath,
ReadOnly: true,
}
// We need access to /sys in order to install certain eBPF tracepoints
sysfsVolume := core.Volume{
Name: sysfsVolumeName,
VolumeSource: core.VolumeSource{
HostPath: &core.HostPathVolumeSource{
Path: "/sys",
},
},
}
sysfsVolumeMount := core.VolumeMount{
Name: sysfsVolumeName,
MountPath: sysfsMountPath,
ReadOnly: true,
}
// Persistent volume and its mount
persistentVolume := core.Volume{
Name: PersistentVolumeName,
VolumeSource: core.VolumeSource{
PersistentVolumeClaim: &core.PersistentVolumeClaimVolumeSource{
ClaimName: PersistentVolumeClaimName,
},
},
}
persistentVolumeMount := core.VolumeMount{
Name: PersistentVolumeName,
MountPath: PersistentVolumeHostPath,
}
// Containers
containers := []core.Container{
{
Name: podName,
Image: podImage,
ImagePullPolicy: imagePullPolicy,
VolumeMounts: []core.VolumeMount{
procfsVolumeMount,
sysfsVolumeMount,
persistentVolumeMount,
},
Command: command,
Resources: core.ResourceRequirements{
Limits: core.ResourceList{
"cpu": cpuLimit,
"memory": memLimit,
},
Requests: core.ResourceList{
"cpu": cpuRequests,
"memory": memRequests,
},
},
SecurityContext: &core.SecurityContext{
Capabilities: &core.Capabilities{
Add: addCaps,
Drop: dropCaps,
},
},
Env: env,
},
}
// Pod
pod := DaemonSetPod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Namespace: config.Config.Tap.SelfNamespace,
Labels: buildWithDefaultLabels(map[string]string{
"app": podName,
}, provider),
},
Spec: core.PodSpec{
ServiceAccountName: ServiceAccountName,
HostNetwork: true,
Containers: containers,
Volumes: []core.Volume{
procfsVolume,
sysfsVolume,
persistentVolume,
},
DNSPolicy: core.DNSClusterFirstWithHostNet,
TerminationGracePeriodSeconds: new(int64),
Tolerations: provider.BuildTolerations(),
ImagePullSecrets: imagePullSecrets,
},
}
if len(config.Config.Tap.NodeSelectorTerms) > 0 {
pod.Spec.Affinity = &core.Affinity{
NodeAffinity: &core.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &core.NodeSelector{
NodeSelectorTerms: config.Config.Tap.NodeSelectorTerms,
},
},
}
}
return &DaemonSet{
TypeMeta: metav1.TypeMeta{
Kind: "DaemonSet",
APIVersion: "apps/v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Namespace: config.Config.Tap.SelfNamespace,
Labels: buildWithDefaultLabels(map[string]string{
"app": podName,
}, provider),
},
Spec: DaemonSetSpec{
Selector: metav1.LabelSelector{
MatchLabels: buildWithDefaultLabels(map[string]string{
"app": podName,
}, provider),
},
Template: pod,
},
}, nil
}
func (provider *Provider) BuildTolerations() []core.Toleration {
tolerations := []core.Toleration{
{
Operator: core.TolerationOpExists,
Effect: core.TaintEffectNoExecute,
},
}
if !config.Config.Tap.IgnoreTainted {
tolerations = append(tolerations, core.Toleration{
Operator: core.TolerationOpExists,
Effect: core.TaintEffectNoSchedule,
})
}
return tolerations
}
func (provider *Provider) CreatePersistentVolumeClaim(ctx context.Context, namespace string, persistentVolumeClaim *core.PersistentVolumeClaim) (*core.PersistentVolumeClaim, error) {
return provider.clientSet.CoreV1().PersistentVolumeClaims(namespace).Create(ctx, persistentVolumeClaim, metav1.CreateOptions{})
}
func (provider *Provider) ApplyWorkerDaemonSet(
ctx context.Context,
namespace string,
daemonSetName string,
podImage string,
podName string,
serviceAccountName string,
resources configStructs.ResourceRequirements,
imagePullPolicy core.PullPolicy,
imagePullSecrets []core.LocalObjectReference,
serviceMesh bool,
tls bool,
debug bool,
) error {
log.Debug().
Str("namespace", namespace).
Str("daemonset-name", daemonSetName).
Str("image", podImage).
Str("pod", podName).
Msg("Applying worker DaemonSets.")
daemonSet, err := provider.BuildWorkerDaemonSet(
podImage,
podName,
serviceAccountName,
resources,
imagePullPolicy,
imagePullSecrets,
serviceMesh,
tls,
debug,
)
if err != nil {
return err
}
applyOptions := metav1.ApplyOptions{
Force: true,
FieldManager: fieldManagerName,
}
_, err = provider.clientSet.AppsV1().DaemonSets(namespace).Apply(
ctx,
daemonSet.GenerateApplyConfiguration(
daemonSetName,
namespace,
podName,
provider,
),
applyOptions,
)
return err
}
func (provider *Provider) listPodsImpl(ctx context.Context, regex *regexp.Regexp, namespaces []string, listOptions metav1.ListOptions) ([]core.Pod, error) {
var pods []core.Pod
for _, namespace := range namespaces {
@@ -1045,10 +163,6 @@ func (provider *Provider) ListAllPodsMatchingRegex(ctx context.Context, regex *r
return provider.listPodsImpl(ctx, regex, namespaces, metav1.ListOptions{})
}
func (provider *Provider) GetPod(ctx context.Context, namespaces string, podName string) (*core.Pod, error) {
return provider.clientSet.CoreV1().Pods(namespaces).Get(ctx, podName, metav1.GetOptions{})
}
func (provider *Provider) ListAllRunningPodsMatchingRegex(ctx context.Context, regex *regexp.Regexp, namespaces []string) ([]core.Pod, error) {
pods, err := provider.ListAllPodsMatchingRegex(ctx, regex, namespaces)
if err != nil {
@@ -1107,41 +221,6 @@ func (provider *Provider) GetNamespaceEvents(ctx context.Context, namespace stri
return eventList.String(), nil
}
func (provider *Provider) ListManagedServiceAccounts(ctx context.Context, namespace string) (*core.ServiceAccountList, error) {
listOptions := metav1.ListOptions{
LabelSelector: fmt.Sprintf("%s=%s", LabelManagedBy, provider.managedBy),
}
return provider.clientSet.CoreV1().ServiceAccounts(namespace).List(ctx, listOptions)
}
func (provider *Provider) ListManagedClusterRoles(ctx context.Context) (*rbac.ClusterRoleList, error) {
listOptions := metav1.ListOptions{
LabelSelector: fmt.Sprintf("%s=%s", LabelManagedBy, provider.managedBy),
}
return provider.clientSet.RbacV1().ClusterRoles().List(ctx, listOptions)
}
func (provider *Provider) ListManagedClusterRoleBindings(ctx context.Context) (*rbac.ClusterRoleBindingList, error) {
listOptions := metav1.ListOptions{
LabelSelector: fmt.Sprintf("%s=%s", LabelManagedBy, provider.managedBy),
}
return provider.clientSet.RbacV1().ClusterRoleBindings().List(ctx, listOptions)
}
func (provider *Provider) ListManagedRoles(ctx context.Context, namespace string) (*rbac.RoleList, error) {
listOptions := metav1.ListOptions{
LabelSelector: fmt.Sprintf("%s=%s", LabelManagedBy, provider.managedBy),
}
return provider.clientSet.RbacV1().Roles(namespace).List(ctx, listOptions)
}
func (provider *Provider) ListManagedRoleBindings(ctx context.Context, namespace string) (*rbac.RoleBindingList, error) {
listOptions := metav1.ListOptions{
LabelSelector: fmt.Sprintf("%s=%s", LabelManagedBy, provider.managedBy),
}
return provider.clientSet.RbacV1().RoleBindings(namespace).List(ctx, listOptions)
}
// ValidateNotProxy We added this after a customer tried to run kubeshark from Lens, which used Lens's kube config, whose cluster server configuration points to Lens's local proxy.
// The workaround was to use the user's local default kube config.
// For now we are blocking the option to run kubeshark through a proxy to the k8s server.


@@ -11,6 +11,7 @@ import (
"strings"
"time"
"github.com/kubeshark/kubeshark/config"
"github.com/rs/zerolog/log"
"k8s.io/apimachinery/pkg/util/httpstream"
"k8s.io/client-go/tools/portforward"
@@ -66,8 +67,8 @@ func getSelfHubProxiedHostAndPath(selfNamespace string, selfServiceName string)
return fmt.Sprintf("/api/v1/namespaces/%s/services/%s:%d/proxy", selfNamespace, selfServiceName, selfServicePort)
}
-func GetLocalhostOnPort(port uint16) string {
-return fmt.Sprintf("http://localhost:%d", port)
+func GetProxyOnPort(port uint16) string {
+return fmt.Sprintf("http://%s:%d", config.Config.Tap.Proxy.Host, port)
}
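
A small usage sketch for the new GetProxyOnPort helper; the host and port values are illustrative assumptions standing in for config.Config.Tap.Proxy.Host and the configured front port, and proxyOnPort is a local copy of the function above so the sketch is self-contained.

package main

import (
	"fmt"
	"net/http"
)

// Local copy of GetProxyOnPort above.
func proxyOnPort(host string, port uint16) string {
	return fmt.Sprintf("http://%s:%d", host, port)
}

func main() {
	// Illustrative values; in the CLI these come from the tap proxy config.
	url := proxyOnPort("127.0.0.1", 8899)

	resp, err := http.Get(url)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}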
func getRerouteHttpHandlerSelfAPI(proxyHandler http.Handler, selfNamespace string, selfServiceName string) http.Handler {


@@ -1,147 +0,0 @@
package kubernetes
import (
core "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
applyconfapp "k8s.io/client-go/applyconfigurations/apps/v1"
applyconfcore "k8s.io/client-go/applyconfigurations/core/v1"
v1 "k8s.io/client-go/applyconfigurations/core/v1"
applyconfmeta "k8s.io/client-go/applyconfigurations/meta/v1"
)
type DaemonSetPod struct {
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
Spec core.PodSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
}
type DaemonSetSpec struct {
Selector metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,1,opt,name=selector"`
Template DaemonSetPod `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"`
}
type DaemonSet struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
Spec DaemonSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
}
func (d *DaemonSet) GenerateApplyConfiguration(name string, namespace string, podName string, provider *Provider) *applyconfapp.DaemonSetApplyConfiguration {
// Pod
p := d.Spec.Template.Spec
podSpec := applyconfcore.PodSpec()
podSpec.WithHostNetwork(p.HostNetwork)
podSpec.WithDNSPolicy(p.DNSPolicy)
podSpec.WithTerminationGracePeriodSeconds(*p.TerminationGracePeriodSeconds)
podSpec.WithServiceAccountName(p.ServiceAccountName)
// Containers
for _, c := range d.Spec.Template.Spec.Containers {
// Common
container := applyconfcore.Container()
container.WithName(c.Name)
container.WithImage(c.Image)
container.WithImagePullPolicy(c.ImagePullPolicy)
container.WithCommand(c.Command...)
// Linux capabilities
caps := applyconfcore.Capabilities().WithAdd(c.SecurityContext.Capabilities.Add...).WithDrop(c.SecurityContext.Capabilities.Drop...)
container.WithSecurityContext(applyconfcore.SecurityContext().WithCapabilities(caps))
// Environment variables
var envvars []*v1.EnvVarApplyConfiguration
for _, e := range c.Env {
envvars = append(envvars, applyconfcore.EnvVar().WithName(e.Name).WithValue(e.Value))
}
container.WithEnv(envvars...)
// Resource limits
resources := applyconfcore.ResourceRequirements().WithRequests(c.Resources.Requests).WithLimits(c.Resources.Limits)
container.WithResources(resources)
// Volume mounts
for _, m := range c.VolumeMounts {
volumeMount := applyconfcore.VolumeMount().WithName(m.Name).WithMountPath(m.MountPath).WithReadOnly(m.ReadOnly)
container.WithVolumeMounts(volumeMount)
}
podSpec.WithContainers(container)
}
// Node affinity (RequiredDuringSchedulingIgnoredDuringExecution only)
if p.Affinity != nil {
nodeSelector := applyconfcore.NodeSelector()
for _, term := range p.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms {
nodeSelectorTerm := applyconfcore.NodeSelectorTerm()
for _, selector := range term.MatchExpressions {
nodeSelectorRequirement := applyconfcore.NodeSelectorRequirement()
nodeSelectorRequirement.WithKey(selector.Key)
nodeSelectorRequirement.WithOperator(selector.Operator)
nodeSelectorRequirement.WithValues(selector.Values...)
nodeSelectorTerm.WithMatchExpressions(nodeSelectorRequirement)
}
for _, selector := range term.MatchFields {
nodeSelectorRequirement := applyconfcore.NodeSelectorRequirement()
nodeSelectorRequirement.WithKey(selector.Key)
nodeSelectorRequirement.WithOperator(selector.Operator)
nodeSelectorRequirement.WithValues(selector.Values...)
nodeSelectorTerm.WithMatchFields(nodeSelectorRequirement)
}
nodeSelector.WithNodeSelectorTerms(nodeSelectorTerm)
}
nodeAffinity := applyconfcore.NodeAffinity()
nodeAffinity.WithRequiredDuringSchedulingIgnoredDuringExecution(nodeSelector)
affinity := applyconfcore.Affinity()
affinity.WithNodeAffinity(nodeAffinity)
podSpec.WithAffinity(affinity)
}
// Tolerations
for _, t := range p.Tolerations {
toleration := applyconfcore.Toleration()
toleration.WithKey(t.Key)
toleration.WithOperator(t.Operator)
toleration.WithValue(t.Value)
toleration.WithEffect(t.Effect)
if t.TolerationSeconds != nil {
toleration.WithTolerationSeconds(*t.TolerationSeconds)
}
podSpec.WithTolerations(toleration)
}
// Volumes
for _, v := range p.Volumes {
volume := applyconfcore.Volume()
if v.HostPath != nil {
volume.WithName(v.Name).WithHostPath(applyconfcore.HostPathVolumeSource().WithPath(v.HostPath.Path))
}
if v.PersistentVolumeClaim != nil {
volume.WithName(v.Name).WithPersistentVolumeClaim(applyconfcore.PersistentVolumeClaimVolumeSource().WithClaimName(v.PersistentVolumeClaim.ClaimName))
}
podSpec.WithVolumes(volume)
}
// Image pull secrets
if len(p.ImagePullSecrets) > 0 {
localObjectReference := applyconfcore.LocalObjectReference()
for _, o := range p.ImagePullSecrets {
localObjectReference.WithName(o.Name)
}
podSpec.WithImagePullSecrets(localObjectReference)
}
podTemplate := applyconfcore.PodTemplateSpec()
podTemplate.WithLabels(buildWithDefaultLabels(map[string]string{
"app": podName,
}, provider))
podTemplate.WithSpec(podSpec)
labelSelector := applyconfmeta.LabelSelector()
labelSelector.WithMatchLabels(map[string]string{"app": podName})
daemonSet := applyconfapp.DaemonSet(name, namespace)
daemonSet.
WithLabels(buildWithDefaultLabels(map[string]string{}, provider)).
WithSpec(applyconfapp.DaemonSetSpec().WithSelector(labelSelector).WithTemplate(podTemplate))
return daemonSet
}


@@ -1,16 +0,0 @@
package kubernetes
import (
"github.com/kubeshark/kubeshark/config"
)
func buildWithDefaultLabels(labels map[string]string, provider *Provider) map[string]string {
labels[LabelManagedBy] = provider.managedBy
labels[LabelCreatedBy] = provider.createdBy
for k, v := range config.Config.Tap.ResourceLabels {
labels[k] = v
}
return labels
}
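
To illustrate the label map this helper produces, here is a stand-alone sketch; the managedBy/createdBy values mirror the kubeshark-managed-by and kubeshark-created-by labels seen in the manifests below, and the extra resource label is a hypothetical user-supplied entry.

package main

import "fmt"

// Simplified stand-in for buildWithDefaultLabels: the provider fields and
// config.Config.Tap.ResourceLabels are passed in explicitly here.
func withDefaultLabels(labels map[string]string, managedBy, createdBy string, resourceLabels map[string]string) map[string]string {
	labels["kubeshark-managed-by"] = managedBy
	labels["kubeshark-created-by"] = createdBy
	for k, v := range resourceLabels {
		labels[k] = v
	}
	return labels
}

func main() {
	labels := withDefaultLabels(
		map[string]string{"app": "kubeshark-worker-daemon-set"},
		"kubeshark",
		"kubeshark",
		map[string]string{"team": "observability"}, // hypothetical extra label
	)
	fmt.Println(labels)
}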


@@ -1,68 +0,0 @@
package kubernetes
import (
"context"
"github.com/kubeshark/kubeshark/config/configStructs"
"github.com/kubeshark/kubeshark/docker"
"github.com/rs/zerolog/log"
core "k8s.io/api/core/v1"
)
func CreateWorkers(
kubernetesProvider *Provider,
selfServiceAccountExists bool,
ctx context.Context,
namespace string,
resources configStructs.ResourceRequirements,
imagePullPolicy core.PullPolicy,
imagePullSecrets []core.LocalObjectReference,
serviceMesh bool,
tls bool,
debug bool,
) error {
persistentVolumeClaim, err := kubernetesProvider.BuildPersistentVolumeClaim()
if err != nil {
return err
}
if _, err = kubernetesProvider.CreatePersistentVolumeClaim(
ctx,
namespace,
persistentVolumeClaim,
); err != nil {
return err
}
image := docker.GetWorkerImage()
var serviceAccountName string
if selfServiceAccountExists {
serviceAccountName = ServiceAccountName
} else {
serviceAccountName = ""
}
log.Info().Msg("Creating the worker DaemonSet...")
if err := kubernetesProvider.ApplyWorkerDaemonSet(
ctx,
namespace,
WorkerDaemonSetName,
image,
WorkerPodName,
serviceAccountName,
resources,
imagePullPolicy,
imagePullSecrets,
serviceMesh,
tls,
debug,
); err != nil {
return err
}
log.Info().Msg("Successfully created the worker DaemonSet.")
return nil
}


@@ -1,4 +1,3 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: v1
kind: Namespace


@@ -1,4 +1,3 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: v1
kind: ServiceAccount


@@ -1,4 +1,3 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
@@ -15,11 +14,13 @@ rules:
- ""
- extensions
- apps
- networking.k8s.io
resources:
- pods
- services
- endpoints
- persistentvolumeclaims
- ingresses
verbs:
- list
- get


@@ -1,4 +1,3 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding


@@ -1,4 +1,3 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: v1
kind: Pod
@@ -23,9 +22,13 @@ spec:
value: '{}'
- name: SCRIPTING_SCRIPTS
value: '[]'
- name: AUTH_APPROVED_DOMAINS
image: docker.io/kubeshark/hub:latest
imagePullPolicy: Always
name: kubeshark-hub
ports:
- containerPort: 80
hostPort: 8898
resources:
limits:
cpu: 750m


@@ -1,4 +1,3 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: v1
kind: Service
@@ -16,6 +15,6 @@ spec:
targetPort: 80
selector:
app: kubeshark-hub
-type: ClusterIP
+type: NodePort
status:
loadBalancer: {}


@@ -1,4 +1,3 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: v1
kind: Pod
@@ -22,6 +21,9 @@ spec:
image: docker.io/kubeshark/front:latest
imagePullPolicy: Always
name: kubeshark-front
ports:
- containerPort: 80
hostPort: 8899
readinessProbe:
failureThreshold: 3
periodSeconds: 1


@@ -1,4 +1,3 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: v1
kind: Service
@@ -16,6 +15,6 @@ spec:
targetPort: 80
selector:
app: kubeshark-front
-type: ClusterIP
+type: NodePort
status:
loadBalancer: {}


@@ -1,4 +1,3 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: v1
kind: PersistentVolumeClaim


@@ -1,4 +1,3 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: apps/v1
kind: DaemonSet
@@ -42,6 +41,9 @@ spec:
image: docker.io/kubeshark/worker:latest
imagePullPolicy: Always
name: kubeshark-worker-daemon-set
ports:
- containerPort: 8897
hostPort: 8897
resources:
limits:
cpu: 750m


@@ -0,0 +1,13 @@
---
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
creationTimestamp: null
labels:
kubeshark-cli-version: v1
kubeshark-created-by: kubeshark
kubeshark-managed-by: kubeshark
name: kubeshark-ingress-class
namespace: kubeshark
spec:
controller: k8s.io/ingress-nginx

manifests/11-ingress.yaml Normal file

@@ -0,0 +1,36 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
annotations:
certmanager.k8s.io/cluster-issuer: letsencrypt-prod
nginx.ingress.kubernetes.io/rewrite-target: /$2
creationTimestamp: null
labels:
kubeshark-cli-version: v1
kubeshark-created-by: kubeshark
kubeshark-managed-by: kubeshark
name: kubeshark-ingress
namespace: kubeshark
spec:
ingressClassName: kubeshark-ingress-class
rules:
- host: ks.svc.cluster.local
http:
paths:
- backend:
service:
name: kubeshark-hub
port:
number: 80
path: /api(/|$)(.*)
pathType: Prefix
- backend:
service:
name: kubeshark-front
port:
number: 80
path: /()(.*)
pathType: Prefix
status:
loadBalancer: {}
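
A rough sketch of how the two paths above route once the Ingress is live, assuming the controller answers at ks.svc.cluster.local: the nginx rewrite-target annotation (/$2) forwards only the second capture group, so requests under /api reach kubeshark-hub with the /api prefix stripped, while everything else goes to kubeshark-front.

package main

import (
	"fmt"
	"net/http"
)

func main() {
	// /api(/|$)(.*) -> service kubeshark-hub:80   (the /api prefix is stripped by the rewrite)
	// /()(.*)       -> service kubeshark-front:80 (the path is forwarded essentially as-is)
	for _, path := range []string{"/api/pods", "/"} {
		resp, err := http.Get("https://ks.svc.cluster.local" + path)
		if err != nil {
			fmt.Println(path, "->", err)
			continue
		}
		resp.Body.Close()
		fmt.Println(path, "->", resp.Status)
	}
}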


@@ -0,0 +1,12 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: kubeshark-tls
namespace: default
spec:
issuerRef:
name: letsencrypt-prod
kind: ClusterIssuer
secretName: cert-kubeshark
dnsNames:
- ks.svc.cluster.local


@@ -0,0 +1,14 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-prod
spec:
acme:
server: https://acme-v02.api.letsencrypt.org/directory
email: info@kubeshark.co
privateKeySecretRef:
name: letsencrypt-prod-key
solvers:
- http01:
ingress:
class: kubeshark-ingress-class

manifests/tls/run.sh Executable file

@@ -0,0 +1,15 @@
#!/bin/bash
__dir="$(cd -P -- "$(dirname -- "$0")" && pwd -P)"
helm repo add jetstack https://charts.jetstack.io
helm repo update
kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.9.1/cert-manager.crds.yaml
helm install \
cert-manager jetstack/cert-manager \
--namespace cert-manager \
--create-namespace \
--version v1.9.1
kubectl apply -f ${__dir}/cluster-issuer.yaml
kubectl apply -f ${__dir}/certificate.yaml


@@ -44,7 +44,7 @@ func CheckNewerVersion() {
} else {
downloadCommand = fmt.Sprintf("sh <(curl -Ls %s/install)", misc.Website)
}
msg := fmt.Sprintf("There is a new release! %v -> %v run:", misc.Ver, latestVersion)
msg := fmt.Sprintf("There is a new release! %v -> %v Please upgrade to the latest release, as new releases are not always backward compatible. Run:", misc.Ver, latestVersion)
log.Warn().Str("command", downloadCommand).Msg(fmt.Sprintf(utils.Yellow, msg))
}
}


@@ -1,173 +0,0 @@
package resources
import (
"context"
"fmt"
"github.com/kubeshark/kubeshark/errormessage"
"github.com/kubeshark/kubeshark/kubernetes"
"github.com/kubeshark/kubeshark/misc"
"github.com/kubeshark/kubeshark/utils"
"github.com/rs/zerolog/log"
"k8s.io/apimachinery/pkg/util/wait"
)
func CleanUpSelfResources(ctx context.Context, cancel context.CancelFunc, kubernetesProvider *kubernetes.Provider, isNsRestrictedMode bool, selfResourcesNamespace string) {
log.Warn().Msg(fmt.Sprintf("Removing %s resources...", misc.Software))
var leftoverResources []string
if isNsRestrictedMode {
leftoverResources = cleanUpRestrictedMode(ctx, kubernetesProvider, selfResourcesNamespace)
} else {
leftoverResources = cleanUpNonRestrictedMode(ctx, cancel, kubernetesProvider, selfResourcesNamespace)
}
if len(leftoverResources) > 0 {
errMsg := "Failed to remove the following resources."
for _, resource := range leftoverResources {
errMsg += "\n- " + resource
}
log.Error().Msg(fmt.Sprintf(utils.Red, errMsg))
}
}
func cleanUpNonRestrictedMode(ctx context.Context, cancel context.CancelFunc, kubernetesProvider *kubernetes.Provider, selfResourcesNamespace string) []string {
leftoverResources := make([]string, 0)
if err := kubernetesProvider.RemoveNamespace(ctx, selfResourcesNamespace); err != nil {
resourceDesc := fmt.Sprintf("Namespace %s", selfResourcesNamespace)
handleDeletionError(err, resourceDesc, &leftoverResources)
} else {
defer waitUntilNamespaceDeleted(ctx, cancel, kubernetesProvider, selfResourcesNamespace)
}
if resources, err := kubernetesProvider.ListManagedClusterRoles(ctx); err != nil {
resourceDesc := "ClusterRoles"
handleDeletionError(err, resourceDesc, &leftoverResources)
} else {
for _, resource := range resources.Items {
if err := kubernetesProvider.RemoveClusterRole(ctx, resource.Name); err != nil {
resourceDesc := fmt.Sprintf("ClusterRole %s", resource.Name)
handleDeletionError(err, resourceDesc, &leftoverResources)
}
}
}
if resources, err := kubernetesProvider.ListManagedClusterRoleBindings(ctx); err != nil {
resourceDesc := "ClusterRoleBindings"
handleDeletionError(err, resourceDesc, &leftoverResources)
} else {
for _, resource := range resources.Items {
if err := kubernetesProvider.RemoveClusterRoleBinding(ctx, resource.Name); err != nil {
resourceDesc := fmt.Sprintf("ClusterRoleBinding %s", resource.Name)
handleDeletionError(err, resourceDesc, &leftoverResources)
}
}
}
return leftoverResources
}
func waitUntilNamespaceDeleted(ctx context.Context, cancel context.CancelFunc, kubernetesProvider *kubernetes.Provider, selfResourcesNamespace string) {
// Call cancel if a terminating signal was received. Allows user to skip the wait.
go func() {
utils.WaitForTermination(ctx, cancel)
}()
if err := kubernetesProvider.WaitUtilNamespaceDeleted(ctx, selfResourcesNamespace); err != nil {
switch {
case ctx.Err() == context.Canceled:
log.Printf("Do nothing. User interrupted the wait")
log.Warn().
Str("namespace", selfResourcesNamespace).
Msg("Did nothing. User interrupted the wait.")
case err == wait.ErrWaitTimeout:
log.Warn().
Str("namespace", selfResourcesNamespace).
Msg("Timed out while deleting the namespace.")
default:
log.Warn().
Err(errormessage.FormatError(err)).
Str("namespace", selfResourcesNamespace).
Msg("Unknown error while deleting the namespace.")
}
}
}
func cleanUpRestrictedMode(ctx context.Context, kubernetesProvider *kubernetes.Provider, selfResourcesNamespace string) []string {
leftoverResources := make([]string, 0)
if err := kubernetesProvider.RemoveService(ctx, selfResourcesNamespace, kubernetes.FrontServiceName); err != nil {
resourceDesc := fmt.Sprintf("Service %s in namespace %s", kubernetes.FrontServiceName, selfResourcesNamespace)
handleDeletionError(err, resourceDesc, &leftoverResources)
}
if err := kubernetesProvider.RemoveService(ctx, selfResourcesNamespace, kubernetes.HubServiceName); err != nil {
resourceDesc := fmt.Sprintf("Service %s in namespace %s", kubernetes.HubServiceName, selfResourcesNamespace)
handleDeletionError(err, resourceDesc, &leftoverResources)
}
if err := kubernetesProvider.RemovePersistentVolumeClaim(ctx, selfResourcesNamespace, kubernetes.PersistentVolumeClaimName); err != nil {
resourceDesc := fmt.Sprintf("Persistent Volume %s in namespace %s", kubernetes.PersistentVolumeClaimName, selfResourcesNamespace)
handleDeletionError(err, resourceDesc, &leftoverResources)
}
if err := kubernetesProvider.RemoveDaemonSet(ctx, selfResourcesNamespace, kubernetes.WorkerDaemonSetName); err != nil {
resourceDesc := fmt.Sprintf("DaemonSet %s in namespace %s", kubernetes.WorkerDaemonSetName, selfResourcesNamespace)
handleDeletionError(err, resourceDesc, &leftoverResources)
}
if resources, err := kubernetesProvider.ListManagedServiceAccounts(ctx, selfResourcesNamespace); err != nil {
resourceDesc := fmt.Sprintf("ServiceAccounts in namespace %s", selfResourcesNamespace)
handleDeletionError(err, resourceDesc, &leftoverResources)
} else {
for _, resource := range resources.Items {
if err := kubernetesProvider.RemoveServiceAccount(ctx, selfResourcesNamespace, resource.Name); err != nil {
resourceDesc := fmt.Sprintf("ServiceAccount %s in namespace %s", resource.Name, selfResourcesNamespace)
handleDeletionError(err, resourceDesc, &leftoverResources)
}
}
}
if resources, err := kubernetesProvider.ListManagedRoles(ctx, selfResourcesNamespace); err != nil {
resourceDesc := fmt.Sprintf("Roles in namespace %s", selfResourcesNamespace)
handleDeletionError(err, resourceDesc, &leftoverResources)
} else {
for _, resource := range resources.Items {
if err := kubernetesProvider.RemoveRole(ctx, selfResourcesNamespace, resource.Name); err != nil {
resourceDesc := fmt.Sprintf("Role %s in namespace %s", resource.Name, selfResourcesNamespace)
handleDeletionError(err, resourceDesc, &leftoverResources)
}
}
}
if resources, err := kubernetesProvider.ListManagedRoleBindings(ctx, selfResourcesNamespace); err != nil {
resourceDesc := fmt.Sprintf("RoleBindings in namespace %s", selfResourcesNamespace)
handleDeletionError(err, resourceDesc, &leftoverResources)
} else {
for _, resource := range resources.Items {
if err := kubernetesProvider.RemoveRoleBinding(ctx, selfResourcesNamespace, resource.Name); err != nil {
resourceDesc := fmt.Sprintf("RoleBinding %s in namespace %s", resource.Name, selfResourcesNamespace)
handleDeletionError(err, resourceDesc, &leftoverResources)
}
}
}
if err := kubernetesProvider.RemovePod(ctx, selfResourcesNamespace, kubernetes.HubPodName); err != nil {
resourceDesc := fmt.Sprintf("Pod %s in namespace %s", kubernetes.HubPodName, selfResourcesNamespace)
handleDeletionError(err, resourceDesc, &leftoverResources)
}
if err := kubernetesProvider.RemovePod(ctx, selfResourcesNamespace, kubernetes.FrontPodName); err != nil {
resourceDesc := fmt.Sprintf("Pod %s in namespace %s", kubernetes.FrontPodName, selfResourcesNamespace)
handleDeletionError(err, resourceDesc, &leftoverResources)
}
return leftoverResources
}
func handleDeletionError(err error, resourceDesc string, leftoverResources *[]string) {
log.Warn().Err(errormessage.FormatError(err)).Msg(fmt.Sprintf("Error while removing %s", resourceDesc))
*leftoverResources = append(*leftoverResources, resourceDesc)
}


@@ -1,106 +0,0 @@
package resources
import (
"context"
"fmt"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/config/configStructs"
"github.com/kubeshark/kubeshark/docker"
"github.com/kubeshark/kubeshark/errormessage"
"github.com/kubeshark/kubeshark/kubernetes"
"github.com/kubeshark/kubeshark/misc"
"github.com/rs/zerolog/log"
core "k8s.io/api/core/v1"
)
func CreateHubResources(ctx context.Context, kubernetesProvider *kubernetes.Provider, isNsRestrictedMode bool, selfNamespace string, hubResources configStructs.ResourceRequirements, imagePullPolicy core.PullPolicy, imagePullSecrets []core.LocalObjectReference, debug bool) (bool, error) {
if !isNsRestrictedMode {
if err := createSelfNamespace(ctx, kubernetesProvider, selfNamespace); err != nil {
log.Debug().Err(err).Send()
}
}
err := kubernetesProvider.CreateSelfRBAC(ctx, selfNamespace)
var selfServiceAccountExists bool
if err != nil {
selfServiceAccountExists = true
log.Warn().Err(errormessage.FormatError(err)).Msg(fmt.Sprintf("Failed to ensure the resources required for IP resolving. %s will not resolve target IPs to names.", misc.Software))
}
hubOpts := &kubernetes.PodOptions{
Namespace: selfNamespace,
PodName: kubernetes.HubPodName,
PodImage: docker.GetHubImage(),
ServiceAccountName: kubernetes.ServiceAccountName,
Resources: hubResources,
ImagePullPolicy: imagePullPolicy,
ImagePullSecrets: imagePullSecrets,
Debug: debug,
}
frontOpts := &kubernetes.PodOptions{
Namespace: selfNamespace,
PodName: kubernetes.FrontPodName,
PodImage: docker.GetWorkerImage(),
ServiceAccountName: kubernetes.ServiceAccountName,
Resources: hubResources,
ImagePullPolicy: imagePullPolicy,
ImagePullSecrets: imagePullSecrets,
Debug: debug,
}
if err := createSelfHubPod(ctx, kubernetesProvider, hubOpts); err != nil {
return selfServiceAccountExists, err
}
if err := createFrontPod(ctx, kubernetesProvider, frontOpts); err != nil {
return selfServiceAccountExists, err
}
// TODO: Why the port values need to be 80?
_, err = kubernetesProvider.CreateService(ctx, selfNamespace, kubernetesProvider.BuildHubService(selfNamespace))
if err != nil {
return selfServiceAccountExists, err
}
log.Info().Str("service", kubernetes.HubServiceName).Msg("Successfully created a service.")
_, err = kubernetesProvider.CreateService(ctx, selfNamespace, kubernetesProvider.BuildFrontService(selfNamespace))
if err != nil {
return selfServiceAccountExists, err
}
log.Info().Str("service", kubernetes.FrontServiceName).Msg("Successfully created a service.")
return selfServiceAccountExists, nil
}
func createSelfNamespace(ctx context.Context, kubernetesProvider *kubernetes.Provider, selfNamespace string) error {
_, err := kubernetesProvider.CreateNamespace(ctx, kubernetesProvider.BuildNamespace(selfNamespace))
return err
}
func createSelfHubPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, opts *kubernetes.PodOptions) error {
pod, err := kubernetesProvider.BuildHubPod(opts)
if err != nil {
return err
}
if _, err = kubernetesProvider.CreatePod(ctx, opts.Namespace, pod); err != nil {
return err
}
log.Info().Str("pod", pod.Name).Msg("Successfully created a pod.")
return nil
}
func createFrontPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, opts *kubernetes.PodOptions) error {
pod, err := kubernetesProvider.BuildFrontPod(opts, config.Config.Tap.Proxy.Host, fmt.Sprintf("%d", config.Config.Tap.Proxy.Hub.SrcPort))
if err != nil {
return err
}
if _, err = kubernetesProvider.CreatePod(ctx, opts.Namespace, pod); err != nil {
return err
}
log.Info().Str("pod", pod.Name).Msg("Successfully created a pod.")
return nil
}


@@ -16,8 +16,15 @@ func Get(url string, client *http.Client) (*http.Response, error) {
// Post - When err is nil, resp always contains a non-nil resp.Body.
// Caller should close resp.Body when done reading from it.
-func Post(url, contentType string, body io.Reader, client *http.Client) (*http.Response, error) {
-return checkError(client.Post(url, contentType, body))
+func Post(url, contentType string, body io.Reader, client *http.Client, licenseKey string) (*http.Response, error) {
+req, err := http.NewRequest(http.MethodPost, url, body)
+if err != nil {
+return nil, err
+}
+req.Header.Set("Content-Type", "application/json")
+req.Header.Set("License-Key", licenseKey)
+return checkError(client.Do(req))
}
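
A usage sketch for the updated Post helper showing the new License-Key header; postWithLicense below is a self-contained stand-in for the function above, and the URL and license key are placeholders.

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

// Stand-in mirroring the updated Post above: build the request explicitly so
// the License-Key header can be attached alongside Content-Type.
func postWithLicense(url, contentType string, body io.Reader, client *http.Client, licenseKey string) (*http.Response, error) {
	req, err := http.NewRequest(http.MethodPost, url, body)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", contentType)
	req.Header.Set("License-Key", licenseKey)
	return client.Do(req)
}

func main() {
	body := bytes.NewBufferString(`{"example": true}`)
	resp, err := postWithLicense("https://api.example.com/endpoint", "application/json", body, &http.Client{}, "LICENSE-KEY-PLACEHOLDER")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}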
// Do - When err is nil, resp always contains a non-nil resp.Body.


@@ -2,7 +2,6 @@ package utils
import (
"bytes"
"encoding/json"
"gopkg.in/yaml.v3"
)
@@ -12,18 +11,6 @@ const (
tab = "\t"
)
func PrettyJson(data interface{}) (string, error) {
buffer := new(bytes.Buffer)
encoder := json.NewEncoder(buffer)
encoder.SetIndent(empty, tab)
err := encoder.Encode(data)
if err != nil {
return empty, err
}
return buffer.String(), nil
}
func PrettyYaml(data interface{}) (string, error) {
buffer := new(bytes.Buffer)
encoder := yaml.NewEncoder(buffer)
@@ -35,18 +22,3 @@ func PrettyYaml(data interface{}) (string, error) {
}
return buffer.String(), nil
}
func PrettyYamlOmitEmpty(data interface{}) (string, error) {
d, err := json.Marshal(data)
if err != nil {
return empty, err
}
var cleanData map[string]interface{}
err = json.Unmarshal(d, &cleanData)
if err != nil {
return empty, err
}
return PrettyYaml(cleanData)
}
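
For context on the removed PrettyYamlOmitEmpty helper: it dropped empty fields by round-tripping the value through JSON (so omitempty struct tags apply) before YAML-encoding the resulting map. A minimal sketch of that technique follows, with a hypothetical Example struct.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"

	"gopkg.in/yaml.v3"
)

type Example struct {
	Name  string   `json:"name,omitempty"`
	Notes []string `json:"notes,omitempty"`
}

func prettyYamlOmitEmpty(data interface{}) (string, error) {
	// JSON round-trip: fields tagged omitempty disappear here...
	d, err := json.Marshal(data)
	if err != nil {
		return "", err
	}
	var clean map[string]interface{}
	if err := json.Unmarshal(d, &clean); err != nil {
		return "", err
	}
	// ...so the YAML encoder only sees the populated keys.
	buf := new(bytes.Buffer)
	enc := yaml.NewEncoder(buf)
	if err := enc.Encode(clean); err != nil {
		return "", err
	}
	return buf.String(), nil
}

func main() {
	out, _ := prettyYamlOmitEmpty(Example{Name: "kubeshark"}) // Notes is empty and omitted
	fmt.Print(out)
}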