Compare commits


89 Commits
39.3 ... 40.5

Author SHA1 Message Date
M. Mert Yildiran
a163f9cc0e Change the new release warning 2023-05-25 20:42:57 +03:00
M. Mert Yildiran
2edb987c07 Template REACT_APP_HUB_PORT in the Helm chart 2023-05-25 20:24:29 +03:00
M. Mert Yildiran
c0d7d0fe80 Update Helm README.md 2023-05-25 05:46:10 +03:00
M. Mert Yildiran
be5bd6a372 Template the AUTH_APPROVED_DOMAINS and certmanager.k8s.io/cluster-issuer
Also add `networking.k8s.io` to `apiGroups` in `ClusterRole`
2023-05-25 05:07:42 +03:00
M. Mert Yildiran
42df7aa42f Update the Certificate resource name 2023-05-24 06:32:48 +03:00
M. Mert Yildiran
9a9052198f ⬆️ Bump the Helm chart version 2023-05-24 05:44:18 +03:00
M. Mert Yildiran
2fb83c3642 Fix the Bash script 2023-05-24 04:18:27 +03:00
M. Mert Yildiran
d44674fe86 Update the secret name of certificate 2023-05-24 04:10:46 +03:00
M. Mert Yildiran
c57ed1efd3 Run kubeshark manifests --dump && kubeshark helm-chart 2023-05-24 04:04:34 +03:00
M. Mert Yildiran
c19cd00c77 Add CertManager field to IngressConfig and add an Ingress TLS example 2023-05-24 04:01:45 +03:00
M. Mert Yildiran
39f8d40b76 Revert " Add Refresh-Token to the list of Access-Control-Allow-Headers"
This reverts commit bf731073c8.
2023-05-24 02:10:48 +03:00
M. Mert Yildiran
bf731073c8 Add Refresh-Token to the list of Access-Control-Allow-Headers 2023-05-24 02:04:56 +03:00
M. Mert Yildiran
4bb68afaaf Add AuthConfig struct and pass domains in AUTH_APPROVED_DOMAINS environment variable 2023-05-24 01:50:59 +03:00
M. Mert Yildiran
2126fc83a7 🐛 Remove the cancel() call 2023-05-22 19:20:39 +03:00
M. Mert Yildiran
d0c1dbcd5e Print and open a different URL in case of Ingress is enabled 2023-05-17 03:57:27 +03:00
M. Mert Yildiran
ad9dfbce40 Add Ingress (#1357)
*  Add `Ingress`

*  Rewrite the target in `Ingress`

*  Fix the path of front pod in `Ingress`

*  Add `IngressConfig` struct

*  Generate the correct Helm chart based on `tap.ingress` field of `values.yaml`
2023-05-16 19:46:47 +03:00
M. Mert Yildiran
139336d4ee Template hostPort(s) in the Helm chart 2023-05-10 14:38:38 +03:00
M. Mert Yildiran
f68fed0de8 🐛 Fix the effect of proxy config port changes 2023-05-10 01:28:43 +03:00
M. Mert Yildiran
1d7d242e6c Generate the missing new line in 08-persistent-volume-claim.yaml 2023-05-09 00:00:07 +03:00
M. Mert Yildiran
aa904e23c7 Add --persistentstorage option to tap command 2023-05-08 23:57:22 +03:00
M. Mert Yildiran
baf0e65337 Template the Helm chart based on persistentstorage value 2023-05-08 23:52:14 +03:00
M. Mert Yildiran
a33a3467fc Add persistentstorage option 2023-05-08 00:50:56 +03:00
M. Mert Yildiran
a9b598bc41 ⬆️ Bump the Helm chart version 2023-05-04 21:45:55 +03:00
M. Mert Yildiran
0aee367ad5 Omit the license string in helm-chart and manifests commands 2023-05-04 21:37:55 +03:00
M. Mert Yildiran
8c7d9ea8fd Fix the updateLicense method 2023-05-04 21:33:38 +03:00
M. Mert Yildiran
fab0f713ed 🐛 Pass the license string 2023-05-04 21:18:34 +03:00
M. Mert Yildiran
2563cc1922 🐛 Fix the imagePullPolicy to imagepullpolicy in helm-chart command 2023-04-24 02:03:58 +03:00
Jay R. Wren
26c9f42eba 📚 Remove kubeshark tap -A example from README.md (#1339) 2023-04-21 18:35:24 -07:00
M. Mert Yildiran
00dd3a93df Update the Helm chart version 2023-04-20 21:20:50 +03:00
M. Mert Yildiran
d02293ab55 Add a workflow for publishing Helm chart 2023-04-20 21:20:07 +03:00
M. Mert Yildiran
60cfa92efb Apply the same Kubernetes tolerations to all pods 2023-04-20 20:27:22 +03:00
M. Mert Yildiran
01b187aaa3 Update a log message 2023-04-20 20:22:33 +03:00
M. Mert Yildiran
38d121556c Add storageclass option to config.yaml 2023-04-20 20:20:24 +03:00
M. Mert Yildiran
2d73b46b44 Change the Docker Hub repository in the badges to kubeshark/worker 2023-04-20 20:10:52 +03:00
M. Mert Yildiran
466b9099bd Ignore the Kubernetes version check in certain commands while creating the Kubernetes provider 2023-04-20 20:09:19 +03:00
M. Mert Yildiran
bbe3338c3c Rename 08-persistent-volume.yaml to 08-persistent-volume-claim.yaml 2023-04-20 20:04:47 +03:00
M. Mert Yildiran
2780791068 Clean YAML files before generating the new ones in manifests and helm-chart command 2023-04-20 04:00:37 +03:00
M. Mert Yildiran
e65656c1df 🔥 Delete permissionFiles folder 2023-04-20 03:52:15 +03:00
M. Mert Yildiran
df7d1ac10c Give the permission of listing or watching the persistentvolumeclaims to the ClusterRole 2023-04-20 03:01:25 +03:00
M. Mert Yildiran
c342885cae Set the default storage limit to 200Mi 2023-04-20 02:48:18 +03:00
M. Mert Yildiran
44adb397c1 🔥 Remove the old DaemonSet manifests 2023-04-20 00:26:01 +03:00
M. Mert Yildiran
657ea8570c Add PersistentVolumeClaim and mount it to worker DaemonSet 2023-04-20 00:09:22 +03:00
M. Mert Yildiran
686dd5fba1 🔥 Remove the -A flag and allnamespaces field from config.yaml 2023-04-19 20:52:28 +03:00
M. Mert Yildiran
90e6e99386 Run the manifests --dump and helm-chart commands 2023-04-19 20:30:11 +03:00
M. Mert Yildiran
aa9109df12 Update helm-chart command 2023-04-19 20:29:17 +03:00
M. Mert Yildiran
9a37781355 🔥 Remove the grace period 2023-04-19 05:18:59 +03:00
M. Mert Yildiran
5ce10b626f Pass every config through environment variables and don't make HTTP calls in first tap command 2023-04-18 03:21:23 +03:00
M. Mert Yildiran
26d75da588 Set the Helm chart version to 39.6 2023-04-13 01:57:26 +03:00
M. Mert Yildiran
95edac9f8f 📚 Remove the WIP notice in the Helm chart README.md 2023-04-13 01:53:04 +03:00
M. Mert Yildiran
f6c4d43eb1 Update a README.md(s) 2023-04-12 03:12:32 +03:00
M. Mert Yildiran
47b9cd0c8d Update the README.md(s) 2023-04-12 03:10:36 +03:00
M. Mert Yildiran
fb06545887 Add a header comment to generated manifests and Helm chart templates 2023-04-12 03:10:23 +03:00
M. Mert Yildiran
ea594ea70a Update helm-chart/README.md 2023-04-12 02:51:34 +03:00
M. Mert Yildiran
3cc543827a Fix all of the remaining issues in the Helm chart 2023-04-12 02:50:12 +03:00
M. Mert Yildiran
18addbb980 Fix the issues in Helm chart such that helm template succeeds 2023-04-12 02:12:12 +03:00
M. Mert Yildiran
d2b9bddf78 Do more Helm templating 2023-04-12 01:33:41 +03:00
M. Mert Yildiran
3ebf816a68 Generate Helm chart templates 2023-04-12 01:06:39 +03:00
M. Mert Yildiran
504ecc4f83 Generate values.yaml out of config.yaml 2023-04-11 23:01:29 +03:00
dependabot[bot]
562dff0d6c ⬆️ Bump github.com/docker/docker (#1333)
Bumps [github.com/docker/docker](https://github.com/docker/docker) from 20.10.22+incompatible to 20.10.24+incompatible.
- [Release notes](https://github.com/docker/docker/releases)
- [Commits](https://github.com/docker/docker/compare/v20.10.22...v20.10.24)

---
updated-dependencies:
- dependency-name: github.com/docker/docker
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: M. Mert Yildiran <me@mertyildiran.com>
2023-04-11 23:00:11 +03:00
M. Mert Yildiran
02990912b7 Move ResourceLabels and NodeSelectorTerms fields into TapConfig 2023-04-11 22:37:29 +03:00
M. Mert Yildiran
0aedc023aa Add README.md to helm-chart directory 2023-04-11 22:31:48 +03:00
M. Mert Yildiran
0801ea8c74 Generate Chart.yaml 2023-04-11 22:17:37 +03:00
M. Mert Yildiran
83be3558ed Copy the license into Helm chart 2023-04-11 19:29:19 +03:00
M. Mert Yildiran
100b397cdf Run kubeshark helm-chart 2023-04-11 19:21:04 +03:00
M. Mert Yildiran
c2cad11e0a Add helm-chart command 2023-04-11 19:20:04 +03:00
M. Mert Yildiran
c42481deb8 Add POD_REGEX, NAMESPACES, STORAGE_LIMIT and LICENSE environment variables to Hub 2023-04-11 18:40:34 +03:00
M. Mert Yildiran
39d1b77045 Fix the issues in worker DaemonSet 2023-04-11 02:33:17 +03:00
M. Mert Yildiran
f19db77228 Fix more issues in manifests command 2023-04-11 02:18:23 +03:00
M. Mert Yildiran
077fc6c126 Set the apiVersion in the manifests 2023-04-11 02:09:03 +03:00
M. Mert Yildiran
aeeb9e6c9f Run kubeshark manifests --dump and add the manifest files 2023-04-11 02:02:29 +03:00
M. Mert Yildiran
384ed4e16b Set YAML indent to 2 2023-04-11 01:57:04 +03:00
M. Mert Yildiran
5dafc015bb Add manifests command to generate Kubernetes manifests 2023-04-11 01:54:06 +03:00
M. Mert Yildiran
d1b17d4534 Build worker DaemonSet separately then apply it by converting it to ApplyConfiguration 2023-04-10 22:24:54 +03:00
M. Mert Yildiran
b9333e4d67 🔧 Add some useful kubectl commands to Makefile 2023-04-10 01:09:34 +03:00
M. Mert Yildiran
c962864d0b 🐛 Fix the clean command cause leftover ClusterRole and ClusterRoleBinding 2023-04-10 00:48:22 +03:00
M. Mert Yildiran
07b080e97a 🔥 Remove the unused methods in the kubernetes package 2023-04-01 21:36:48 +03:00
M. Mert Yildiran
e4684a10af Add --ignoreTainted flag to tap command 2023-03-27 16:26:09 +03:00
dependabot[bot]
8b5e55d53a ⬆️ Bump golang.org/x/crypto (#1326)
Bumps [golang.org/x/crypto](https://github.com/golang/crypto) from 0.0.0-20220208050332-20e1d8d225ab to 0.1.0.
- [Release notes](https://github.com/golang/crypto/releases)
- [Commits](https://github.com/golang/crypto/commits/v0.1.0)

---
updated-dependencies:
- dependency-name: golang.org/x/crypto
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-03-26 23:36:04 +03:00
dependabot[bot]
ee968adec5 ⬆️ Bump golang.org/x/net from 0.2.0 to 0.7.0 (#1325)
Bumps [golang.org/x/net](https://github.com/golang/net) from 0.2.0 to 0.7.0.
- [Release notes](https://github.com/golang/net/releases)
- [Commits](https://github.com/golang/net/compare/v0.2.0...v0.7.0)

---
updated-dependencies:
- dependency-name: golang.org/x/net
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-03-26 23:34:34 +03:00
dependabot[bot]
2e7fad77a9 ⬆️ Bump github.com/docker/distribution (#1324)
Bumps [github.com/docker/distribution](https://github.com/docker/distribution) from 2.7.1+incompatible to 2.8.0+incompatible.
- [Release notes](https://github.com/docker/distribution/releases)
- [Commits](https://github.com/docker/distribution/compare/v2.7.1...v2.8.0)

---
updated-dependencies:
- dependency-name: github.com/docker/distribution
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-03-26 23:32:40 +03:00
M. Mert Yildiran
7f6f710b3f 🐛 Fix selfnamespace issue by changing its location in the config and adding --selfnamespace flag to tap and clean commands 2023-03-26 23:26:35 +03:00
M. Mert Yildiran
261df8261f 🐛 Use tap config for console, pro, proxy and scripts commands 2023-03-26 21:40:49 +03:00
M. Mert Yildiran
e14bc8064b 🐛 Don't stop watching in case of an error occurs in scripts command 2023-03-24 02:58:49 +03:00
M. Mert Yildiran
6f3b2c6755 Update README.md 2023-03-19 21:41:26 +03:00
M. Mert Yildiran
bdaa626f30 🐛 Fix release.yml GitHub workflow 2023-03-19 21:20:01 +03:00
M. Mert Yildiran
e1ada1768d 🐛 Remove configpath field from the config and call os.MkdirAll before regenerating config 2023-03-19 20:58:20 +03:00
Christopher Redwine
b05dbed71e Upgrade github actions versions to latest and update deprecated options (#1319)
Co-authored-by: M. Mert Yildiran <me@mertyildiran.com>
2023-03-19 20:20:47 +03:00
M. Mert Yildiran
9b66600602 Update README.md 2023-03-19 20:09:49 +03:00
M. Mert Yildiran
d826b95687 Update README.md 2023-03-19 20:08:37 +03:00
85 changed files with 3244 additions and 989 deletions

.github/workflows/helm.yml (new file)

@@ -0,0 +1,35 @@
on:
  push:
    tags:
      - '*'

name: Release Helm Charts

jobs:
  release:
    # depending on default permission settings for your org (contents being read-only or read-write for workloads), you will have to add permissions
    # see: https://docs.github.com/en/actions/security-guides/automatic-token-authentication#modifying-the-permissions-for-the-github_token
    permissions:
      contents: write
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 0
      - name: Configure Git
        run: |
          git config user.name "$GITHUB_ACTOR"
          git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
      - name: Install Helm
        uses: azure/setup-helm@v3
      - name: Run chart-releaser
        uses: helm/chart-releaser-action@v1.5.0
        with:
          charts_dir: .
          charts_repo_url: https://kubeshark.github.io/kubeshark
        env:
          CR_TOKEN: "${{ secrets.HELM_TOKEN }}"
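The workflow above wires `helm/chart-releaser-action` to every pushed tag and serves the chart index from the GitHub Pages URL given in `charts_repo_url`. Once a release has been published there, installing the chart should look roughly like this sketch; the `kubeshark` chart and release names are assumptions based on the repository name, not confirmed by the diff:

```shell
# Add the chart repository published by the workflow above; the URL is taken
# from charts_repo_url (availability once GitHub Pages is enabled is assumed).
helm repo add kubeshark https://kubeshark.github.io/kubeshark
helm repo update

# Install the chart; "kubeshark" as chart and release name is an assumption.
helm install kubeshark kubeshark/kubeshark
```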


@@ -16,16 +16,18 @@ jobs:
name: Golint
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: actions/setup-go@v2
- name: Set up Go
uses: actions/setup-go@v4
with:
go-version: '^1.17'
go-version-file: 'go.mod'
- name: Go lint
uses: golangci/golangci-lint-action@v2
uses: golangci/golangci-lint-action@v3
with:
version: latest
args: --timeout=10m


@@ -14,13 +14,13 @@ jobs:
name: Build and publish a new release
runs-on: ubuntu-latest
steps:
- name: Set up Go 1.17
uses: actions/setup-go@v2
with:
go-version: '1.17'
- name: Check out the repo
uses: actions/checkout@v2
uses: actions/checkout@v3
- name: Set up Go
uses: actions/setup-go@v4
with:
go-version-file: 'go.mod'
- name: Version
id: version
@@ -55,7 +55,7 @@ jobs:
needs: [release]
steps:
- name: Checkout
uses: actions/checkout@v2
uses: actions/checkout@v3
with:
fetch-depth: 0
@@ -73,16 +73,16 @@ jobs:
run: git fetch --force --tags
- name: Set up Go
uses: actions/setup-go@v2
uses: actions/setup-go@v4
with:
go-version: 1.17
go-version-file: 'go.mod'
- name: Run GoReleaser
uses: goreleaser/goreleaser-action@v2
uses: goreleaser/goreleaser-action@v4
with:
distribution: goreleaser
version: ${{ env.GITHUB_REF_NAME }}
args: release --rm-dist
args: release --clean
env:
GITHUB_TOKEN: ${{ secrets.HOMEBREW_TOKEN }}
VER: ${{ steps.version.outputs.tag }}


@@ -15,17 +15,17 @@ jobs:
timeout-minutes: 20
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@v2
uses: actions/checkout@v3
with:
fetch-depth: 2
- name: Set up Go 1.17
uses: actions/setup-go@v2
- name: Set up Go
uses: actions/setup-go@v4
with:
go-version: '^1.17'
go-version-file: 'go.mod'
- name: Test
run: make test
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v2
uses: codecov/codecov-action@v3


@@ -62,3 +62,9 @@ test: ## Run cli tests.
lint: ## Lint the source code.
	golangci-lint run

kubectl-view-all-resources: ## This command outputs all Kubernetes resources using YAML format and pipes it to VS Code
	./kubectl.sh view-all-resources

kubectl-view-kubeshark-resources: ## This command outputs all Kubernetes resources in "kubeshark" namespace using YAML format and pipes it to VS Code
	./kubectl.sh view-kubeshark-resources


@@ -9,10 +9,10 @@
<a href="https://github.com/kubeshark/kubeshark/releases/latest">
<img alt="GitHub Latest Release" src="https://img.shields.io/github/v/release/kubeshark/kubeshark?logo=GitHub&style=flat-square">
</a>
<a href="https://hub.docker.com/r/kubeshark/kubeshark">
<a href="https://hub.docker.com/r/kubeshark/worker">
<img alt="Docker pulls" src="https://img.shields.io/docker/pulls/kubeshark/kubeshark?color=%23099cec&logo=Docker&style=flat-square">
</a>
<a href="https://hub.docker.com/r/kubeshark/kubeshark">
<a href="https://hub.docker.com/r/kubeshark/worker">
<img alt="Image size" src="https://img.shields.io/docker/image-size/kubeshark/kubeshark/latest?logo=Docker&style=flat-square">
</a>
<a href="https://discord.gg/WkvRGMUcx7">
@@ -25,9 +25,9 @@
<p align="center">
<b>
<span>NEW: </span><a href="https://github.com/kubeshark/kubeshark/releases/tag/39.2">Version 39.2</a> is out, introducing
<span>NEW: </span><a href="https://github.com/kubeshark/kubeshark/releases/tag/39.4">Version 39.4</a> is out, introducing
<a href="https://docs.kubeshark.co/en/automation_scripting">Scripting</a>,
<a href="https://docs.kubeshark.co/en/automation_hooks">L4/L7 hooks</a>, and so much more ..
<a href="https://docs.kubeshark.co/en/automation_hooks">L4/L7 hooks</a>, and so much more...
</b>
</p>
@@ -45,10 +45,6 @@ Download **Kubeshark**'s binary distribution [latest release](https://github.com
kubeshark tap
```
```shell
kubeshark tap -A
```
```shell
kubeshark tap -n sock-shop "(catalo*|front-end*)"
```


@@ -2,48 +2,16 @@ package check
import (
"context"
"embed"
"fmt"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/kubernetes"
"github.com/rs/zerolog/log"
rbac "k8s.io/api/rbac/v1"
"k8s.io/client-go/kubernetes/scheme"
)
func KubernetesPermissions(ctx context.Context, embedFS embed.FS, kubernetesProvider *kubernetes.Provider) bool {
func KubernetesPermissions(ctx context.Context, kubernetesProvider *kubernetes.Provider) bool {
log.Info().Str("procedure", "kubernetes-permissions").Msg("Checking:")
var filePath string
if config.Config.IsNsRestrictedMode() {
filePath = "permissionFiles/permissions-ns-tap.yaml"
} else {
filePath = "permissionFiles/permissions-all-namespaces-tap.yaml"
}
data, err := embedFS.ReadFile(filePath)
if err != nil {
log.Error().Err(err).Msg("While checking Kubernetes permissions!")
return false
}
decode := scheme.Codecs.UniversalDeserializer().Decode
obj, _, err := decode(data, nil, nil)
if err != nil {
log.Error().Err(err).Msg("While checking Kubernetes permissions!")
return false
}
switch resource := obj.(type) {
case *rbac.Role:
return checkRulesPermissions(ctx, kubernetesProvider, resource.Rules, config.Config.SelfNamespace)
case *rbac.ClusterRole:
return checkRulesPermissions(ctx, kubernetesProvider, resource.Rules, "")
}
log.Error().Msg("While checking Kubernetes permissions! Resource of types 'Role' or 'ClusterRole' are not found in permission files.")
return false
return checkRulesPermissions(ctx, kubernetesProvider, kubernetesProvider.BuildClusterRole().Rules, "")
}
func checkRulesPermissions(ctx context.Context, kubernetesProvider *kubernetes.Provider, rules []rbac.PolicyRule, namespace string) bool {


@@ -12,17 +12,17 @@ import (
func KubernetesResources(ctx context.Context, kubernetesProvider *kubernetes.Provider) bool {
log.Info().Str("procedure", "k8s-components").Msg("Checking:")
exist, err := kubernetesProvider.DoesNamespaceExist(ctx, config.Config.SelfNamespace)
allResourcesExist := checkResourceExist(config.Config.SelfNamespace, "namespace", exist, err)
exist, err := kubernetesProvider.DoesNamespaceExist(ctx, config.Config.Tap.SelfNamespace)
allResourcesExist := checkResourceExist(config.Config.Tap.SelfNamespace, "namespace", exist, err)
exist, err = kubernetesProvider.DoesServiceAccountExist(ctx, config.Config.SelfNamespace, kubernetes.ServiceAccountName)
exist, err = kubernetesProvider.DoesServiceAccountExist(ctx, config.Config.Tap.SelfNamespace, kubernetes.ServiceAccountName)
allResourcesExist = checkResourceExist(kubernetes.ServiceAccountName, "service account", exist, err) && allResourcesExist
if config.Config.IsNsRestrictedMode() {
exist, err = kubernetesProvider.DoesRoleExist(ctx, config.Config.SelfNamespace, kubernetes.RoleName)
exist, err = kubernetesProvider.DoesRoleExist(ctx, config.Config.Tap.SelfNamespace, kubernetes.RoleName)
allResourcesExist = checkResourceExist(kubernetes.RoleName, "role", exist, err) && allResourcesExist
exist, err = kubernetesProvider.DoesRoleBindingExist(ctx, config.Config.SelfNamespace, kubernetes.RoleBindingName)
exist, err = kubernetesProvider.DoesRoleBindingExist(ctx, config.Config.Tap.SelfNamespace, kubernetes.RoleBindingName)
allResourcesExist = checkResourceExist(kubernetes.RoleBindingName, "role binding", exist, err) && allResourcesExist
} else {
exist, err = kubernetesProvider.DoesClusterRoleExist(ctx, kubernetes.ClusterRoleName)
@@ -32,7 +32,7 @@ func KubernetesResources(ctx context.Context, kubernetesProvider *kubernetes.Pro
allResourcesExist = checkResourceExist(kubernetes.ClusterRoleBindingName, "cluster role binding", exist, err) && allResourcesExist
}
exist, err = kubernetesProvider.DoesServiceExist(ctx, config.Config.SelfNamespace, kubernetes.HubServiceName)
exist, err = kubernetesProvider.DoesServiceExist(ctx, config.Config.Tap.SelfNamespace, kubernetes.HubServiceName)
allResourcesExist = checkResourceExist(kubernetes.HubServiceName, "service", exist, err) && allResourcesExist
allResourcesExist = checkPodResourcesExist(ctx, kubernetesProvider) && allResourcesExist
@@ -41,7 +41,7 @@ func KubernetesResources(ctx context.Context, kubernetesProvider *kubernetes.Pro
}
func checkPodResourcesExist(ctx context.Context, kubernetesProvider *kubernetes.Provider) bool {
if pods, err := kubernetesProvider.ListPodsByAppLabel(ctx, config.Config.SelfNamespace, kubernetes.HubPodName); err != nil {
if pods, err := kubernetesProvider.ListPodsByAppLabel(ctx, config.Config.Tap.SelfNamespace, kubernetes.HubPodName); err != nil {
log.Error().
Str("name", kubernetes.HubPodName).
Err(err).
@@ -63,7 +63,7 @@ func checkPodResourcesExist(ctx context.Context, kubernetesProvider *kubernetes.
Str("name", kubernetes.HubPodName).
Msg("Pod is running.")
if pods, err := kubernetesProvider.ListPodsByAppLabel(ctx, config.Config.SelfNamespace, kubernetes.WorkerPodName); err != nil {
if pods, err := kubernetesProvider.ListPodsByAppLabel(ctx, config.Config.Tap.SelfNamespace, kubernetes.WorkerPodName); err != nil {
log.Error().
Str("name", kubernetes.WorkerPodName).
Err(err).


@@ -12,14 +12,14 @@ func ServerConnection(kubernetesProvider *kubernetes.Provider) bool {
var connectedToHub, connectedToFront bool
if err := checkProxy(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort), "/echo", kubernetesProvider); err != nil {
if err := checkProxy(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.Port), "/echo", kubernetesProvider); err != nil {
log.Error().Err(err).Msg("Couldn't connect to Hub using proxy!")
} else {
connectedToHub = true
log.Info().Msg("Connected successfully to Hub using proxy.")
}
if err := checkProxy(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Front.SrcPort), "", kubernetesProvider); err != nil {
if err := checkProxy(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Front.Port), "", kubernetesProvider); err != nil {
log.Error().Err(err).Msg("Couldn't connect to Front using proxy!")
} else {
connectedToFront = true


@@ -2,7 +2,6 @@ package cmd
import (
"context"
"embed"
"fmt"
"os"
@@ -12,11 +11,6 @@ import (
"github.com/rs/zerolog/log"
)
var (
//go:embed permissionFiles
embedFS embed.FS
)
func runCheck() {
log.Info().Msg(fmt.Sprintf("Checking the %s resources...", misc.Software))
@@ -30,7 +24,7 @@ func runCheck() {
}
if checkPassed {
checkPassed = check.KubernetesPermissions(ctx, embedFS, kubernetesProvider)
checkPassed = check.KubernetesPermissions(ctx, kubernetesProvider)
}
if checkPassed {


@@ -3,7 +3,10 @@ package cmd
import (
"fmt"
"github.com/creasty/defaults"
"github.com/kubeshark/kubeshark/config/configStructs"
"github.com/kubeshark/kubeshark/misc"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
)
@@ -18,4 +21,11 @@ var cleanCmd = &cobra.Command{
func init() {
rootCmd.AddCommand(cleanCmd)
defaultTapConfig := configStructs.TapConfig{}
if err := defaults.Set(&defaultTapConfig); err != nil {
log.Debug().Err(err).Send()
}
cleanCmd.Flags().StringP(configStructs.SelfNamespaceLabel, "s", defaultTapConfig.SelfNamespace, "Self-namespace of Kubeshark")
}
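Registering `SelfNamespaceLabel` on `cleanCmd` means a deployment created in a non-default namespace can now be torn down from the CLI. A minimal sketch, assuming the label resolves to the `--selfnamespace` flag mentioned in commit 7f6f710b3f, with a placeholder namespace:

```shell
# Remove Kubeshark resources from a custom self-namespace;
# "my-namespace" is illustrative.
kubeshark clean --selfnamespace my-namespace
```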


@@ -5,10 +5,10 @@ import (
)
func performCleanCommand() {
kubernetesProvider, err := getKubernetesProviderForCli()
kubernetesProvider, err := getKubernetesProviderForCli(false, false)
if err != nil {
return
}
finishSelfExecution(kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.SelfNamespace, false)
finishSelfExecution(kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.Tap.SelfNamespace, false)
}


@@ -19,7 +19,7 @@ import (
)
func startProxyReportErrorIfAny(kubernetesProvider *kubernetes.Provider, ctx context.Context, serviceName string, podName string, proxyPortLabel string, srcPort uint16, dstPort uint16, healthCheck string) {
httpServer, err := kubernetes.StartProxy(kubernetesProvider, config.Config.Tap.Proxy.Host, srcPort, config.Config.SelfNamespace, serviceName)
httpServer, err := kubernetes.StartProxy(kubernetesProvider, config.Config.Tap.Proxy.Host, srcPort, config.Config.Tap.SelfNamespace, serviceName)
if err != nil {
log.Error().
Err(errormessage.FormatError(err)).
@@ -39,7 +39,7 @@ func startProxyReportErrorIfAny(kubernetesProvider *kubernetes.Provider, ctx con
}
podRegex, _ := regexp.Compile(podName)
if _, err := kubernetes.NewPortForward(kubernetesProvider, config.Config.SelfNamespace, podRegex, srcPort, dstPort, ctx); err != nil {
if _, err := kubernetes.NewPortForward(kubernetesProvider, config.Config.Tap.SelfNamespace, podRegex, srcPort, dstPort, ctx); err != nil {
log.Error().
Str("pod-regex", podRegex.String()).
Err(errormessage.FormatError(err)).
@@ -58,7 +58,7 @@ func startProxyReportErrorIfAny(kubernetesProvider *kubernetes.Provider, ctx con
}
}
func getKubernetesProviderForCli() (*kubernetes.Provider, error) {
func getKubernetesProviderForCli(silent bool, dontCheckVersion bool) (*kubernetes.Provider, error) {
kubeConfigPath := config.Config.KubeConfigPath()
kubernetesProvider, err := kubernetes.NewProvider(kubeConfigPath, config.Config.Kube.Context)
if err != nil {
@@ -66,22 +66,26 @@ func getKubernetesProviderForCli() (*kubernetes.Provider, error) {
return nil, err
}
log.Info().Str("path", kubeConfigPath).Msg("Using kubeconfig:")
if !silent {
log.Info().Str("path", kubeConfigPath).Msg("Using kubeconfig:")
}
if err := kubernetesProvider.ValidateNotProxy(); err != nil {
handleKubernetesProviderError(err)
return nil, err
}
kubernetesVersion, err := kubernetesProvider.GetKubernetesVersion()
if err != nil {
handleKubernetesProviderError(err)
return nil, err
}
if !dontCheckVersion {
kubernetesVersion, err := kubernetesProvider.GetKubernetesVersion()
if err != nil {
handleKubernetesProviderError(err)
return nil, err
}
if err := kubernetes.ValidateKubernetesVersion(kubernetesVersion); err != nil {
handleKubernetesProviderError(err)
return nil, err
if err := kubernetes.ValidateKubernetesVersion(kubernetesVersion); err != nil {
handleKubernetesProviderError(err)
return nil, err
}
}
return kubernetesProvider, nil


@@ -2,6 +2,7 @@ package cmd
import (
"fmt"
"path"
"github.com/creasty/defaults"
"github.com/kubeshark/kubeshark/config"
@@ -28,7 +29,7 @@ var configCmd = &cobra.Command{
return nil
}
log.Info().Str("config-path", config.Config.ConfigFilePath).Msg("Template file written to config path.")
log.Info().Str("config-path", config.ConfigFilePath).Msg("Template file written to config path.")
} else {
template, err := utils.PrettyYaml(configWithDefaults)
if err != nil {
@@ -52,5 +53,5 @@ func init() {
log.Debug().Err(err).Send()
}
configCmd.Flags().BoolP(configStructs.RegenerateConfigName, "r", defaultConfig.Config.Regenerate, fmt.Sprintf("Regenerate the config file with default values to path %s or to chosen path using --%s", defaultConfig.ConfigFilePath, config.ConfigFilePathCommandName))
configCmd.Flags().BoolP(configStructs.RegenerateConfigName, "r", defaultConfig.Config.Regenerate, fmt.Sprintf("Regenerate the config file with default values to path %s", path.Join(misc.GetDotFolderPath(), "config.yaml")))
}


@@ -36,12 +36,12 @@ func init() {
log.Debug().Err(err).Send()
}
consoleCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.SrcPort, "Provide a custom port for the Hub")
consoleCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.Port, "Provide a custom port for the Hub")
consoleCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the Hub")
}
func runConsole() {
hubUrl := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort)
hubUrl := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.Port)
response, err := http.Get(fmt.Sprintf("%s/echo", hubUrl))
if err != nil || response.StatusCode != 200 {
log.Info().Msg(fmt.Sprintf(utils.Yellow, "Couldn't connect to Hub. Establishing proxy..."))
@@ -51,10 +51,10 @@ func runConsole() {
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
log.Info().Str("host", config.Config.Tap.Proxy.Host).Uint16("port", config.Config.Tap.Proxy.Hub.SrcPort).Msg("Connecting to:")
log.Info().Str("host", config.Config.Tap.Proxy.Host).Uint16("port", config.Config.Tap.Proxy.Hub.Port).Msg("Connecting to:")
u := url.URL{
Scheme: "ws",
Host: fmt.Sprintf("%s:%d", config.Config.Tap.Proxy.Host, config.Config.Tap.Proxy.Hub.SrcPort),
Host: fmt.Sprintf("%s:%d", config.Config.Tap.Proxy.Host, config.Config.Tap.Proxy.Hub.Port),
Path: "/scripts/logs",
}

cmd/helmChart.go (new file)

@@ -0,0 +1,488 @@
package cmd
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"sort"
"strings"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/kubernetes"
"github.com/kubeshark/kubeshark/misc"
"github.com/kubeshark/kubeshark/misc/fsUtils"
"github.com/kubeshark/kubeshark/utils"
"github.com/ohler55/ojg/jp"
"github.com/ohler55/ojg/oj"
"github.com/otiai10/copy"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
)
var helmChartCmd = &cobra.Command{
Use: "helm-chart",
Short: "Generate Helm chart of Kubeshark",
RunE: func(cmd *cobra.Command, args []string) error {
runHelmChart()
return nil
},
}
// Maintainer describes a Chart maintainer.
type Maintainer struct {
// Name is a user name or organization name
Name string `json:"name,omitempty"`
// Email is an optional email address to contact the named maintainer
Email string `json:"email,omitempty"`
// URL is an optional URL to an address for the named maintainer
URL string `json:"url,omitempty"`
}
// Metadata for a Chart file. This models the structure of a Chart.yaml file.
type Metadata struct {
// The name of the chart. Required.
Name string `json:"name,omitempty"`
// The URL to a relevant project page, git repo, or contact person
Home string `json:"home,omitempty"`
// Source is the URL to the source code of this chart
Sources []string `json:"sources,omitempty"`
// A SemVer 2 conformant version string of the chart. Required.
Version string `json:"version,omitempty"`
// A one-sentence description of the chart
Description string `json:"description,omitempty"`
// A list of string keywords
Keywords []string `json:"keywords,omitempty"`
// A list of name and URL/email address combinations for the maintainer(s)
Maintainers []*Maintainer `json:"maintainers,omitempty"`
// The URL to an icon file.
Icon string `json:"icon,omitempty"`
// The API Version of this chart. Required.
APIVersion string `json:"apiVersion,omitempty"`
// The condition to check to enable chart
Condition string `json:"condition,omitempty"`
// The tags to check to enable chart
Tags string `json:"tags,omitempty"`
// The version of the application enclosed inside of this chart.
AppVersion string `json:"appVersion,omitempty"`
// Whether or not this chart is deprecated
Deprecated bool `json:"deprecated,omitempty"`
// Annotations are additional mappings uninterpreted by Helm,
// made available for inspection by other applications.
Annotations map[string]string `json:"annotations,omitempty"`
// KubeVersion is a SemVer constraint specifying the version of Kubernetes required.
KubeVersion string `json:"kubeVersion,omitempty"`
// Dependencies are a list of dependencies for a chart.
Dependencies []*Dependency `json:"dependencies,omitempty"`
// Specifies the chart type: application or library
Type string `json:"type,omitempty"`
}
// Dependency describes a chart upon which another chart depends.
//
// Dependencies can be used to express developer intent, or to capture the state
// of a chart.
type Dependency struct {
// Name is the name of the dependency.
//
// This must match the name in the dependency's Chart.yaml.
Name string `json:"name"`
// Version is the version (range) of this chart.
//
// A lock file will always produce a single version, while a dependency
// may contain a semantic version range.
Version string `json:"version,omitempty"`
// The URL to the repository.
//
// Appending `index.yaml` to this string should result in a URL that can be
// used to fetch the repository index.
Repository string `json:"repository"`
// A yaml path that resolves to a boolean, used for enabling/disabling charts (e.g. subchart1.enabled )
Condition string `json:"condition,omitempty"`
// Tags can be used to group charts for enabling/disabling together
Tags []string `json:"tags,omitempty"`
// Enabled bool determines if chart should be loaded
Enabled bool `json:"enabled,omitempty"`
// ImportValues holds the mapping of source values to parent key to be imported. Each item can be a
// string or pair of child/parent sublist items.
ImportValues []interface{} `json:"import-values,omitempty"`
// Alias usable alias to be used for the chart
Alias string `json:"alias,omitempty"`
}
var namespaceMappings = map[string]interface{}{
"metadata.name": "{{ .Values.tap.selfnamespace }}",
}
var serviceAccountMappings = map[string]interface{}{
"metadata.namespace": "{{ .Values.tap.selfnamespace }}",
}
var clusterRoleMappings = serviceAccountMappings
var clusterRoleBindingMappings = map[string]interface{}{
"metadata.namespace": "{{ .Values.tap.selfnamespace }}",
"subjects[0].namespace": "{{ .Values.tap.selfnamespace }}",
}
var hubPodMappings = map[string]interface{}{
"metadata.namespace": "{{ .Values.tap.selfnamespace }}",
"spec.containers[0].env": []map[string]interface{}{
{
"name": "POD_REGEX",
"value": "{{ .Values.tap.regex }}",
},
{
"name": "NAMESPACES",
"value": "{{ gt (len .Values.tap.namespaces) 0 | ternary (join \",\" .Values.tap.namespaces) \"\" }}",
},
{
"name": "LICENSE",
"value": "{{ .Values.license }}",
},
{
"name": "SCRIPTING_ENV",
"value": "{}",
},
{
"name": "SCRIPTING_SCRIPTS",
"value": "[]",
},
{
"name": "AUTH_APPROVED_DOMAINS",
"value": "{{ gt (len .Values.tap.ingress.auth.approvedDomains) 0 | ternary (join \",\" .Values.tap.ingress.auth.approvedDomains) \"\" }}",
},
},
"spec.containers[0].image": "{{ .Values.tap.docker.registry }}/hub:{{ .Values.tap.docker.tag }}",
"spec.containers[0].imagePullPolicy": "{{ .Values.tap.docker.imagepullpolicy }}",
"spec.containers[0].resources.limits.cpu": "{{ .Values.tap.resources.hub.limits.cpu }}",
"spec.containers[0].resources.limits.memory": "{{ .Values.tap.resources.hub.limits.memory }}",
"spec.containers[0].resources.requests.cpu": "{{ .Values.tap.resources.hub.requests.cpu }}",
"spec.containers[0].resources.requests.memory": "{{ .Values.tap.resources.hub.requests.memory }}",
"spec.containers[0].command[0]": "{{ .Values.tap.debug | ternary \"./hub -debug\" \"./hub\" }}",
}
var hubServiceMappings = serviceAccountMappings
var frontPodMappings = map[string]interface{}{
"metadata.namespace": "{{ .Values.tap.selfnamespace }}",
"spec.containers[0].image": "{{ .Values.tap.docker.registry }}/front:{{ .Values.tap.docker.tag }}",
"spec.containers[0].imagePullPolicy": "{{ .Values.tap.docker.imagepullpolicy }}",
"spec.containers[0].env": []map[string]interface{}{
{
"name": "REACT_APP_DEFAULT_FILTER",
"value": " ",
},
{
"name": "REACT_APP_HUB_HOST",
"value": " ",
},
{
"name": "REACT_APP_HUB_PORT",
"value": "{{ .Values.tap.ingress.enabled | ternary \"80/api\" \"8898\" }}",
},
},
}
var frontServiceMappings = serviceAccountMappings
var persistentVolumeMappings = map[string]interface{}{
"metadata.namespace": "{{ .Values.tap.selfnamespace }}",
"spec.resources.requests.storage": "{{ .Values.tap.storagelimit }}",
"spec.storageClassName": "{{ .Values.tap.storageclass }}",
}
var workerDaemonSetMappings = map[string]interface{}{
"metadata.namespace": "{{ .Values.tap.selfnamespace }}",
"spec.template.spec.containers[0].image": "{{ .Values.tap.docker.registry }}/worker:{{ .Values.tap.docker.tag }}",
"spec.template.spec.containers[0].imagePullPolicy": "{{ .Values.tap.docker.imagepullpolicy }}",
"spec.template.spec.containers[0].resources.limits.cpu": "{{ .Values.tap.resources.worker.limits.cpu }}",
"spec.template.spec.containers[0].resources.limits.memory": "{{ .Values.tap.resources.worker.limits.memory }}",
"spec.template.spec.containers[0].resources.requests.cpu": "{{ .Values.tap.resources.worker.requests.cpu }}",
"spec.template.spec.containers[0].resources.requests.memory": "{{ .Values.tap.resources.worker.requests.memory }}",
"spec.template.spec.containers[0].command[0]": "{{ .Values.tap.debug | ternary \"./worker -debug\" \"./worker\" }}",
"spec.template.spec.containers[0].command[4]": "{{ .Values.tap.proxy.worker.srvport }}",
"spec.template.spec.containers[0].command[6]": "{{ .Values.tap.packetcapture }}",
}
var ingressClassMappings = serviceAccountMappings
var ingressMappings = map[string]interface{}{
"metadata.namespace": "{{ .Values.tap.selfnamespace }}",
"metadata.annotations[\"certmanager.k8s.io/cluster-issuer\"]": "{{ .Values.tap.ingress.certManager }}",
"spec.rules[0].host": "{{ .Values.tap.ingress.host }}",
"spec.tls": "{{ .Values.tap.ingress.tls | toYaml }}",
}
func init() {
rootCmd.AddCommand(helmChartCmd)
}
func runHelmChart() {
namespace,
serviceAccount,
clusterRole,
clusterRoleBinding,
hubPod,
hubService,
frontPod,
frontService,
persistentVolume,
workerDaemonSet,
ingressClass,
ingress,
err := generateManifests()
if err != nil {
log.Error().Err(err).Send()
return
}
err = dumpHelmChart(map[string]interface{}{
"00-namespace.yaml": template(namespace, namespaceMappings),
"01-service-account.yaml": template(serviceAccount, serviceAccountMappings),
"02-cluster-role.yaml": template(clusterRole, clusterRoleMappings),
"03-cluster-role-binding.yaml": template(clusterRoleBinding, clusterRoleBindingMappings),
"04-hub-pod.yaml": template(hubPod, hubPodMappings),
"05-hub-service.yaml": template(hubService, hubServiceMappings),
"06-front-pod.yaml": template(frontPod, frontPodMappings),
"07-front-service.yaml": template(frontService, frontServiceMappings),
"08-persistent-volume-claim.yaml": template(persistentVolume, persistentVolumeMappings),
"09-worker-daemon-set.yaml": template(workerDaemonSet, workerDaemonSetMappings),
"10-ingress-class.yaml": template(ingressClass, ingressClassMappings),
"11-ingress.yaml": template(ingress, ingressMappings),
})
if err != nil {
log.Error().Err(err).Send()
return
}
}
func template(object interface{}, mappings map[string]interface{}) (template interface{}) {
var err error
var data []byte
data, err = json.Marshal(object)
if err != nil {
log.Error().Err(err).Send()
return
}
var obj interface{}
obj, err = oj.Parse(data)
if err != nil {
log.Error().Err(err).Send()
return
}
for path, value := range mappings {
var x jp.Expr
x, err = jp.ParseString(path)
if err != nil {
log.Error().Err(err).Send()
return
}
err = x.Set(obj, value)
if err != nil {
log.Error().Err(err).Send()
return
}
}
newJson := oj.JSON(obj)
err = json.Unmarshal([]byte(newJson), &template)
if err != nil {
log.Error().Err(err).Send()
return
}
return
}
func handleHubPod(manifest string) string {
lines := strings.Split(manifest, "\n")
for i, line := range lines {
if strings.HasPrefix(strings.TrimSpace(line), "hostPort:") {
lines[i] = " hostPort: {{ .Values.tap.proxy.hub.srvport }}"
}
}
return strings.Join(lines, "\n")
}
func handleFrontPod(manifest string) string {
lines := strings.Split(manifest, "\n")
for i, line := range lines {
if strings.HasPrefix(strings.TrimSpace(line), "hostPort:") {
lines[i] = " hostPort: {{ .Values.tap.proxy.front.srvport }}"
}
}
return strings.Join(lines, "\n")
}
func handlePVCManifest(manifest string) string {
return fmt.Sprintf("{{- if .Values.tap.persistentstorage }}\n%s{{- end }}\n", manifest)
}
func handleDaemonSetManifest(manifest string) string {
lines := strings.Split(manifest, "\n")
for i, line := range lines {
if strings.TrimSpace(line) == "- mountPath: /app/data" {
lines[i] = fmt.Sprintf("{{- if .Values.tap.persistentstorage }}\n%s", line)
}
if strings.TrimSpace(line) == "name: kubeshark-persistent-volume" {
lines[i] = fmt.Sprintf("%s\n{{- end }}", line)
}
if strings.TrimSpace(line) == "- name: kubeshark-persistent-volume" {
lines[i] = fmt.Sprintf("{{- if .Values.tap.persistentstorage }}\n%s", line)
}
if strings.TrimSpace(line) == "claimName: kubeshark-persistent-volume-claim" {
lines[i] = fmt.Sprintf("%s\n{{- end }}", line)
}
if strings.HasPrefix(strings.TrimSpace(line), "- containerPort:") {
lines[i] = " - containerPort: {{ .Values.tap.proxy.worker.srvport }}"
}
if strings.HasPrefix(strings.TrimSpace(line), "hostPort:") {
lines[i] = " hostPort: {{ .Values.tap.proxy.worker.srvport }}"
}
}
return strings.Join(lines, "\n")
}
func handleIngressClass(manifest string) string {
return fmt.Sprintf("{{- if .Values.tap.ingress.enabled }}\n%s{{- end }}\n", manifest)
}
func handleIngress(manifest string) string {
manifest = strings.Replace(manifest, "'{{ .Values.tap.ingress.tls | toYaml }}'", "{{ .Values.tap.ingress.tls | toYaml }}", 1)
return handleIngressClass(manifest)
}
func dumpHelmChart(objects map[string]interface{}) error {
folder := filepath.Join(".", "helm-chart")
templatesFolder := filepath.Join(folder, "templates")
err := fsUtils.RemoveFilesByExtension(templatesFolder, "yaml")
if err != nil {
return err
}
err = os.MkdirAll(templatesFolder, os.ModePerm)
if err != nil {
return err
}
// Sort by filenames
filenames := make([]string, 0)
for filename := range objects {
filenames = append(filenames, filename)
}
sort.Strings(filenames)
// Generate templates
for _, filename := range filenames {
manifest, err := utils.PrettyYamlOmitEmpty(objects[filename])
if err != nil {
return err
}
if filename == "04-hub-pod.yaml" {
manifest = handleHubPod(manifest)
}
if filename == "06-front-pod.yaml" {
manifest = handleFrontPod(manifest)
}
if filename == "08-persistent-volume-claim.yaml" {
manifest = handlePVCManifest(manifest)
}
if filename == "09-worker-daemon-set.yaml" {
manifest = handleDaemonSetManifest(manifest)
}
if filename == "10-ingress-class.yaml" {
manifest = handleIngressClass(manifest)
}
if filename == "11-ingress.yaml" {
manifest = handleIngress(manifest)
}
path := filepath.Join(templatesFolder, filename)
err = os.WriteFile(path, []byte(manifestHeader+manifest), 0644)
if err != nil {
return err
}
log.Info().Msgf("Helm chart template generated: %s", path)
}
// Copy LICENSE
licenseSrcPath := filepath.Join(".", "LICENSE")
licenseDstPath := filepath.Join(folder, "LICENSE")
err = copy.Copy(licenseSrcPath, licenseDstPath)
if err != nil {
log.Warn().Err(err).Str("path", licenseSrcPath).Msg("Couldn't find the license:")
} else {
log.Info().Msgf("Helm chart license copied: %s", licenseDstPath)
}
// Generate Chart.yaml
chartMetadata := Metadata{
APIVersion: "v2",
Name: misc.Program,
Description: misc.Description,
Home: misc.Website,
Sources: []string{"https://github.com/kubeshark/kubeshark/tree/master/helm-chart"},
Keywords: []string{
"kubeshark",
"packet capture",
"traffic capture",
"traffic analyzer",
"network sniffer",
"observability",
"devops",
"microservice",
"forensics",
"api",
},
Maintainers: []*Maintainer{
{
Name: misc.Software,
Email: misc.Email,
URL: misc.Website,
},
},
Version: misc.Ver,
AppVersion: misc.Ver,
KubeVersion: fmt.Sprintf(">= %s-0", kubernetes.MinKubernetesServerVersion),
Type: "application",
}
chart, err := utils.PrettyYamlOmitEmpty(chartMetadata)
if err != nil {
return err
}
path := filepath.Join(folder, "Chart.yaml")
err = os.WriteFile(path, []byte(chart), 0644)
if err != nil {
return err
}
log.Info().Msgf("Helm chart Chart.yaml generated: %s", path)
// Generate values.yaml
values, err := utils.PrettyYaml(config.Config)
if err != nil {
return err
}
path = filepath.Join(folder, "values.yaml")
err = os.WriteFile(path, []byte(values), 0644)
if err != nil {
return err
}
log.Info().Msgf("Helm chart values.yaml generated: %s", path)
return nil
}
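Taken together, `runHelmChart` renders the same objects as the `manifests` command, rewrites selected fields into Helm template expressions through the JSONPath mappings above, and writes the chart under `./helm-chart`. A quick way to sanity-check the output, mirroring the "helm template succeeds" commit (18addbb980); paths follow `dumpHelmChart`:

```shell
# Regenerate the chart from the CLI's built-in manifests
kubeshark helm-chart

# Render the generated templates locally to verify they are valid
helm template ./helm-chart
```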


@@ -18,7 +18,7 @@ var logsCmd = &cobra.Command{
Use: "logs",
Short: "Create a ZIP file with logs for GitHub issues or troubleshooting",
RunE: func(cmd *cobra.Command, args []string) error {
kubernetesProvider, err := getKubernetesProviderForCli()
kubernetesProvider, err := getKubernetesProviderForCli(false, false)
if err != nil {
return nil
}

cmd/manifests.go (new file)

@@ -0,0 +1,239 @@
package cmd
import (
"fmt"
"os"
"path/filepath"
"sort"
"github.com/creasty/defaults"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/docker"
"github.com/kubeshark/kubeshark/kubernetes"
"github.com/kubeshark/kubeshark/misc/fsUtils"
"github.com/kubeshark/kubeshark/utils"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
v1 "k8s.io/api/core/v1"
networking "k8s.io/api/networking/v1"
rbac "k8s.io/api/rbac/v1"
)
const manifestSeperator = "---"
const manifestHeader = "# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!\n" + manifestSeperator + "\n"
var manifestsCmd = &cobra.Command{
Use: "manifests",
Short: "Generate Kubernetes manifests of Kubeshark",
RunE: func(cmd *cobra.Command, args []string) error {
runManifests()
return nil
},
}
func init() {
rootCmd.AddCommand(manifestsCmd)
defaultManifestsConfig := config.ManifestsConfig{}
if err := defaults.Set(&defaultManifestsConfig); err != nil {
log.Debug().Err(err).Send()
}
manifestsCmd.Flags().Bool("dump", defaultManifestsConfig.Dump, "Dump the manifests into the ./manifests folder instead of printing them")
}
func runManifests() {
namespace,
serviceAccount,
clusterRole,
clusterRoleBinding,
hubPod,
hubService,
frontPod,
frontService,
persistentVolume,
workerDaemonSet,
ingressClass,
ingress,
err := generateManifests()
if err != nil {
log.Error().Err(err).Send()
return
}
if config.Config.Manifests.Dump {
err = dumpManifests(map[string]interface{}{
"00-namespace.yaml": namespace,
"01-service-account.yaml": serviceAccount,
"02-cluster-role.yaml": clusterRole,
"03-cluster-role-binding.yaml": clusterRoleBinding,
"04-hub-pod.yaml": hubPod,
"05-hub-service.yaml": hubService,
"06-front-pod.yaml": frontPod,
"07-front-service.yaml": frontService,
"08-persistent-volume-claim.yaml": persistentVolume,
"09-worker-daemon-set.yaml": workerDaemonSet,
"10-ingress-class.yaml": ingressClass,
"11-ingress.yaml": ingress,
})
} else {
err = printManifests([]interface{}{
namespace,
serviceAccount,
clusterRole,
clusterRoleBinding,
hubPod,
hubService,
frontPod,
frontService,
workerDaemonSet,
})
}
if err != nil {
log.Error().Err(err).Send()
return
}
}
func generateManifests() (
namespace *v1.Namespace,
serviceAccount *v1.ServiceAccount,
clusterRole *rbac.ClusterRole,
clusterRoleBinding *rbac.ClusterRoleBinding,
hubPod *v1.Pod,
hubService *v1.Service,
frontPod *v1.Pod,
frontService *v1.Service,
persistentVolumeClaim *v1.PersistentVolumeClaim,
workerDaemonSet *kubernetes.DaemonSet,
ingressClass *networking.IngressClass,
ingress *networking.Ingress,
err error,
) {
config.Config.License = ""
persistentStorage := config.Config.Tap.PersistentStorage
config.Config.Tap.PersistentStorage = true
var kubernetesProvider *kubernetes.Provider
kubernetesProvider, err = getKubernetesProviderForCli(true, true)
if err != nil {
return
}
namespace = kubernetesProvider.BuildNamespace(config.Config.Tap.SelfNamespace)
serviceAccount = kubernetesProvider.BuildServiceAccount()
clusterRole = kubernetesProvider.BuildClusterRole()
clusterRoleBinding = kubernetesProvider.BuildClusterRoleBinding()
hubPod, err = kubernetesProvider.BuildHubPod(&kubernetes.PodOptions{
Namespace: config.Config.Tap.SelfNamespace,
PodName: kubernetes.HubPodName,
PodImage: docker.GetHubImage(),
ServiceAccountName: kubernetes.ServiceAccountName,
Resources: config.Config.Tap.Resources.Hub,
ImagePullPolicy: config.Config.ImagePullPolicy(),
ImagePullSecrets: config.Config.ImagePullSecrets(),
Debug: config.Config.Tap.Debug,
})
if err != nil {
return
}
hubService = kubernetesProvider.BuildHubService(config.Config.Tap.SelfNamespace)
frontPod, err = kubernetesProvider.BuildFrontPod(&kubernetes.PodOptions{
Namespace: config.Config.Tap.SelfNamespace,
PodName: kubernetes.FrontPodName,
PodImage: docker.GetHubImage(),
ServiceAccountName: kubernetes.ServiceAccountName,
Resources: config.Config.Tap.Resources.Hub,
ImagePullPolicy: config.Config.ImagePullPolicy(),
ImagePullSecrets: config.Config.ImagePullSecrets(),
Debug: config.Config.Tap.Debug,
}, config.Config.Tap.Proxy.Host, fmt.Sprintf("%d", config.Config.Tap.Proxy.Hub.Port))
if err != nil {
return
}
frontService = kubernetesProvider.BuildFrontService(config.Config.Tap.SelfNamespace)
persistentVolumeClaim, err = kubernetesProvider.BuildPersistentVolumeClaim()
if err != nil {
return
}
workerDaemonSet, err = kubernetesProvider.BuildWorkerDaemonSet(
docker.GetWorkerImage(),
kubernetes.WorkerDaemonSetName,
kubernetes.ServiceAccountName,
config.Config.Tap.Resources.Worker,
config.Config.ImagePullPolicy(),
config.Config.ImagePullSecrets(),
config.Config.Tap.ServiceMesh,
config.Config.Tap.Tls,
config.Config.Tap.Debug,
)
if err != nil {
return
}
ingressClass = kubernetesProvider.BuildIngressClass()
ingress = kubernetesProvider.BuildIngress()
config.Config.Tap.PersistentStorage = persistentStorage
return
}
func dumpManifests(objects map[string]interface{}) error {
folder := filepath.Join(".", "manifests")
err := fsUtils.RemoveFilesByExtension(folder, "yaml")
if err != nil {
return err
}
err = os.MkdirAll(folder, os.ModePerm)
if err != nil {
return err
}
// Sort by filenames
filenames := make([]string, 0)
for filename := range objects {
filenames = append(filenames, filename)
}
sort.Strings(filenames)
for _, filename := range filenames {
manifest, err := utils.PrettyYamlOmitEmpty(objects[filename])
if err != nil {
return err
}
path := filepath.Join(folder, filename)
err = os.WriteFile(path, []byte(manifestHeader+manifest), 0644)
if err != nil {
return err
}
log.Info().Msgf("Manifest generated: %s", path)
}
return nil
}
func printManifests(objects []interface{}) error {
for _, object := range objects {
manifest, err := utils.PrettyYamlOmitEmpty(object)
if err != nil {
return err
}
fmt.Println(manifestSeperator)
fmt.Println(manifest)
}
return nil
}
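The command therefore has two modes: `--dump` writes the numbered YAML files under `./manifests`, while the default path prints every manifest to stdout separated by `---`. A sketch of both; piping into `kubectl apply` is an assumed usage pattern, not something the command does itself:

```shell
# Write manifests to ./manifests/*.yaml
kubeshark manifests --dump

# Or print them and apply directly (assumes log output goes to stderr,
# leaving clean YAML on stdout)
kubeshark manifests | kubectl apply -f -
```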


@@ -1,25 +0,0 @@
# This example shows permissions that enrich the logs with additional info
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubeshark-runner-debug-clusterrole
rules:
  - apiGroups: ["events.k8s.io"]
    resources: ["events"]
    verbs: ["watch"]
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["get"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubeshark-runner-debug-clusterrolebindings
subjects:
  - kind: User
    name: user-with-clusterwide-access
    apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: kubeshark-runner-debug-clusterrole
  apiGroup: rbac.authorization.k8s.io


@@ -1,37 +0,0 @@
# This example shows permissions that are required for Kubeshark to resolve IPs to service names
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubeshark-resolver-clusterrole
rules:
  - apiGroups: [""]
    resources: ["serviceaccounts"]
    verbs: ["get", "create"]
  - apiGroups: ["rbac.authorization.k8s.io"]
    resources: ["clusterroles"]
    verbs: ["get", "list", "create", "delete"]
  - apiGroups: ["rbac.authorization.k8s.io"]
    resources: ["clusterrolebindings"]
    verbs: ["get", "list", "create", "delete"]
  - apiGroups: ["", "apps", "extensions"]
    resources: ["pods"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["", "apps", "extensions"]
    resources: ["services"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["", "apps", "extensions"]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubeshark-resolver-clusterrolebindings
subjects:
  - kind: User
    name: user-with-clusterwide-access
    apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: kubeshark-resolver-clusterrole
  apiGroup: rbac.authorization.k8s.io


@@ -1,40 +0,0 @@
# This example shows the permissions that are required in order to run the `kubeshark tap` command
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubeshark-runner-clusterrole
rules:
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["list", "watch", "create"]
  - apiGroups: [""]
    resources: ["services"]
    verbs: ["get", "create"]
  - apiGroups: ["apps"]
    resources: ["daemonsets"]
    verbs: ["create", "patch"]
  - apiGroups: [""]
    resources: ["namespaces"]
    verbs: ["list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["services/proxy"]
    verbs: ["get", "create"]
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["create"]
  - apiGroups: [""]
    resources: ["pods/log"]
    verbs: ["get"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubeshark-runner-clusterrolebindings
subjects:
  - kind: User
    name: user-with-clusterwide-access
    apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: kubeshark-runner-clusterrole
  apiGroup: rbac.authorization.k8s.io


@@ -1,25 +0,0 @@
# This example shows permissions that enrich the logs with additional info in namespace-restricted mode
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubeshark-runner-debug-role
rules:
  - apiGroups: ["events.k8s.io"]
    resources: ["events"]
    verbs: ["watch"]
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["get"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubeshark-runner-debug-rolebindings
subjects:
  - kind: User
    name: user-with-restricted-access
    apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: Role
  name: kubeshark-runner-debug-role
  apiGroup: rbac.authorization.k8s.io


@@ -1,37 +0,0 @@
# This example shows permissions that are required for Kubeshark to resolve IPs to service names in namespace-restricted mode
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubeshark-resolver-role
rules:
  - apiGroups: [""]
    resources: ["serviceaccounts"]
    verbs: ["get", "list", "create", "delete"]
  - apiGroups: ["rbac.authorization.k8s.io"]
    resources: ["roles"]
    verbs: ["get", "list", "create", "delete"]
  - apiGroups: ["rbac.authorization.k8s.io"]
    resources: ["rolebindings"]
    verbs: ["get", "list", "create", "delete"]
  - apiGroups: ["", "apps", "extensions"]
    resources: ["pods"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["", "apps", "extensions"]
    resources: ["services"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["", "apps", "extensions"]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubeshark-resolver-rolebindings
subjects:
  - kind: User
    name: user-with-restricted-access
    apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: Role
  name: kubeshark-resolver-role
  apiGroup: rbac.authorization.k8s.io


@@ -1,37 +0,0 @@
# This example shows the permissions that are required in order to run the `kubeshark tap` command in namespace-restricted mode
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubeshark-runner-role
rules:
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["list", "watch", "create"]
  - apiGroups: [""]
    resources: ["services"]
    verbs: ["get", "create", "delete"]
  - apiGroups: ["apps"]
    resources: ["daemonsets"]
    verbs: ["create", "patch", "delete"]
  - apiGroups: [""]
    resources: ["services/proxy"]
    verbs: ["get", "create", "delete"]
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["create", "delete"]
  - apiGroups: [""]
    resources: ["pods/log"]
    verbs: ["get"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubeshark-runner-rolebindings
subjects:
  - kind: User
    name: user-with-restricted-access
    apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: Role
  name: kubeshark-runner-role
  apiGroup: rbac.authorization.k8s.io


@@ -40,19 +40,19 @@ func init() {
log.Debug().Err(err).Send()
}
proCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.SrcPort, "Provide a custom port for the Hub")
proCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.Port, "Provide a custom port for the Hub")
proCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the Hub")
}
func acquireLicense() {
hubUrl := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort)
hubUrl := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.Port)
response, err := http.Get(fmt.Sprintf("%s/echo", hubUrl))
if err != nil || response.StatusCode != 200 {
log.Info().Msg(fmt.Sprintf(utils.Yellow, "Couldn't connect to Hub. Establishing proxy..."))
runProxy(false, true)
}
connector = connect.NewConnector(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort), connect.DefaultRetries, connect.DefaultTimeout)
connector = connect.NewConnector(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.Port), connect.DefaultRetries, connect.DefaultTimeout)
log.Info().Str("url", PRO_URL).Msg("Opening in the browser:")
utils.OpenBrowser(PRO_URL)
@@ -61,17 +61,19 @@ func acquireLicense() {
}
func updateLicense(licenseKey string) {
log.Info().Str("key", licenseKey).Msg("Received license:")
config.Config.License = licenseKey
err := config.WriteConfig(&config.Config)
if err != nil {
log.Error().Err(err).Send()
}
connector.PostLicenseSingle(config.Config.License)
log.Info().Msg("Updated the license. Exiting.")
go func() {
connector.PostLicense(config.Config.License)
log.Info().Msg("Updated the license. Exiting.")
time.Sleep(2 * time.Second)
os.Exit(0)
}()
@@ -105,8 +107,6 @@ func runLicenseRecieverServer() {
licenseKey := string(data)
log.Info().Str("key", licenseKey).Msg("Received license:")
updateLicense(licenseKey)
})
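
Note: the hunk above moves `PostLicense` into a goroutine that sleeps two seconds before `os.Exit(0)`. A minimal sketch of that deferred-exit pattern — the assumption being that the delay lets the HTTP handler which invoked `updateLicense` flush its response before the process terminates (port and path below are hypothetical):

package main

import (
	"fmt"
	"net/http"
	"os"
	"time"
)

func main() {
	http.HandleFunc("/license", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "license accepted") // respond first...
		go func() {
			time.Sleep(2 * time.Second) // ...then give the response time to reach the client
			os.Exit(0)
		}()
	})
	_ = http.ListenAndServe("localhost:8080", nil)
}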


@@ -24,7 +24,7 @@ func init() {
log.Debug().Err(err).Send()
}
proxyCmd.Flags().Uint16(configStructs.ProxyFrontPortLabel, defaultTapConfig.Proxy.Front.SrcPort, "Provide a custom port for the front-end proxy/port-forward")
proxyCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.SrcPort, "Provide a custom port for the Hub proxy/port-forward")
proxyCmd.Flags().Uint16(configStructs.ProxyFrontPortLabel, defaultTapConfig.Proxy.Front.Port, "Provide a custom port for the front-end proxy/port-forward")
proxyCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.Port, "Provide a custom port for the Hub proxy/port-forward")
proxyCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the proxy/port-forward")
}


@@ -15,7 +15,7 @@ import (
)
func runProxy(block bool, noBrowser bool) {
kubernetesProvider, err := getKubernetesProviderForCli()
kubernetesProvider, err := getKubernetesProviderForCli(false, false)
if err != nil {
return
}
@@ -23,7 +23,7 @@ func runProxy(block bool, noBrowser bool) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
exists, err := kubernetesProvider.DoesServiceExist(ctx, config.Config.SelfNamespace, kubernetes.FrontServiceName)
exists, err := kubernetesProvider.DoesServiceExist(ctx, config.Config.Tap.SelfNamespace, kubernetes.FrontServiceName)
if err != nil {
log.Error().
Str("service", kubernetes.FrontServiceName).
@@ -42,7 +42,7 @@ func runProxy(block bool, noBrowser bool) {
return
}
exists, err = kubernetesProvider.DoesServiceExist(ctx, config.Config.SelfNamespace, kubernetes.HubServiceName)
exists, err = kubernetesProvider.DoesServiceExist(ctx, config.Config.Tap.SelfNamespace, kubernetes.HubServiceName)
if err != nil {
log.Error().
Str("service", kubernetes.HubServiceName).
@@ -63,12 +63,12 @@ func runProxy(block bool, noBrowser bool) {
var establishedProxy bool
hubUrl := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort)
hubUrl := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.Port)
response, err := http.Get(fmt.Sprintf("%s/echo", hubUrl))
if err == nil && response.StatusCode == 200 {
log.Info().
Str("service", kubernetes.HubServiceName).
Int("port", int(config.Config.Tap.Proxy.Hub.SrcPort)).
Int("port", int(config.Config.Tap.Proxy.Hub.Port)).
Msg("Found a running service.")
okToOpen("Hub", hubUrl, true)
@@ -79,8 +79,8 @@ func runProxy(block bool, noBrowser bool) {
kubernetes.HubServiceName,
kubernetes.HubPodName,
configStructs.ProxyHubPortLabel,
config.Config.Tap.Proxy.Hub.SrcPort,
config.Config.Tap.Proxy.Hub.DstPort,
config.Config.Tap.Proxy.Hub.Port,
configStructs.ContainerPort,
"/echo",
)
connector := connect.NewConnector(hubUrl, connect.DefaultRetries, connect.DefaultTimeout)
@@ -93,12 +93,12 @@ func runProxy(block bool, noBrowser bool) {
okToOpen("Hub", hubUrl, true)
}
frontUrl := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Front.SrcPort)
frontUrl := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Front.Port)
response, err = http.Get(fmt.Sprintf("%s/", frontUrl))
if err == nil && response.StatusCode == 200 {
log.Info().
Str("service", kubernetes.FrontServiceName).
Int("port", int(config.Config.Tap.Proxy.Front.SrcPort)).
Int("port", int(config.Config.Tap.Proxy.Front.Port)).
Msg("Found a running service.")
okToOpen("Kubeshark", frontUrl, noBrowser)
@@ -109,8 +109,8 @@ func runProxy(block bool, noBrowser bool) {
kubernetes.FrontServiceName,
kubernetes.FrontPodName,
configStructs.ProxyFrontPortLabel,
config.Config.Tap.Proxy.Front.SrcPort,
config.Config.Tap.Proxy.Front.DstPort,
config.Config.Tap.Proxy.Front.Port,
configStructs.ContainerPort,
"",
)
connector := connect.NewConnector(frontUrl, connect.DefaultRetries, connect.DefaultTimeout)
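
Note: this hunk repeats a probe-then-proxy pattern for both Hub and front: GET the service on its local port and only start a proxy/port-forward when the probe fails. A self-contained sketch, assuming `kubernetes.GetLocalhostOnPort` merely formats a localhost URL:

package main

import (
	"fmt"
	"net/http"
)

// getLocalhostOnPort mirrors the assumed behavior of kubernetes.GetLocalhostOnPort.
func getLocalhostOnPort(port uint16) string {
	return fmt.Sprintf("http://localhost:%d", port)
}

// ensureReachable probes a health path and falls back to the supplied proxy starter.
func ensureReachable(port uint16, healthPath string, startProxy func()) {
	url := getLocalhostOnPort(port)
	if response, err := http.Get(url + healthPath); err == nil {
		response.Body.Close()
		if response.StatusCode == 200 {
			return // something already answers on this port; reuse it
		}
	}
	startProxy() // otherwise create the proxy/port-forward
}

func main() {
	ensureReachable(8898, "/echo", func() { fmt.Println("would start port-forward") })
}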


@@ -12,10 +12,10 @@ import (
var rootCmd = &cobra.Command{
Use: "kubeshark",
Short: fmt.Sprintf("%s: The API Traffic Analyzer for Kubernetes", misc.Software),
Long: fmt.Sprintf(`%s: The API Traffic Analyzer for Kubernetes
Short: fmt.Sprintf("%s: %s", misc.Software, misc.Description),
Long: fmt.Sprintf(`%s: %s
An extensible Kubernetes-aware network sniffer and kernel tracer.
For more info: %s`, misc.Software, misc.Website),
For more info: %s`, misc.Software, misc.Description, misc.Website),
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
if err := config.InitConfig(cmd); err != nil {
log.Fatal().Err(err).Send()
@@ -32,7 +32,6 @@ func init() {
}
rootCmd.PersistentFlags().StringSlice(config.SetCommandName, []string{}, fmt.Sprintf("Override values using --%s", config.SetCommandName))
rootCmd.PersistentFlags().String(config.ConfigFilePathCommandName, defaultConfig.ConfigFilePath, fmt.Sprintf("Override config file path using --%s", config.ConfigFilePathCommandName))
rootCmd.PersistentFlags().BoolP(config.DebugFlag, "d", false, "Enable debug mode")
}
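
Note: the hunk adjusts how the persistent `--debug`/`-d` flag is registered on the root command. One plausible wiring for such a flag — an assumption, since the actual handling lives in `config.InitConfig` — is flipping zerolog's global level in a pre-run hook:

package main

import (
	"github.com/rs/zerolog"
	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{
		Use: "kubeshark",
		PersistentPreRun: func(cmd *cobra.Command, args []string) {
			// Flags() includes persistent flags declared on this command.
			if debug, _ := cmd.Flags().GetBool("debug"); debug {
				zerolog.SetGlobalLevel(zerolog.DebugLevel)
			}
		},
		Run: func(cmd *cobra.Command, args []string) {},
	}
	cmd.PersistentFlags().BoolP("debug", "d", false, "Enable debug mode")
	_ = cmd.Execute()
}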


@@ -34,7 +34,7 @@ func init() {
log.Debug().Err(err).Send()
}
scriptsCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.SrcPort, "Provide a custom port for the Hub")
scriptsCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.Port, "Provide a custom port for the Hub")
scriptsCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the Hub")
}
@@ -44,14 +44,14 @@ func runScripts() {
return
}
hubUrl := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort)
hubUrl := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.Port)
response, err := http.Get(fmt.Sprintf("%s/echo", hubUrl))
if err != nil || response.StatusCode != 200 {
log.Info().Msg(fmt.Sprintf(utils.Yellow, "Couldn't connect to Hub. Establishing proxy..."))
runProxy(false, true)
}
connector = connect.NewConnector(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort), connect.DefaultRetries, connect.DefaultTimeout)
connector = connect.NewConnector(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.Port), connect.DefaultRetries, connect.DefaultTimeout)
watchScripts(true)
}
@@ -94,13 +94,13 @@ func watchScripts(block bool) {
script, err := misc.ReadScriptFile(event.Name)
if err != nil {
log.Error().Err(err).Send()
return
continue
}
index, err := connector.PostScript(script)
if err != nil {
log.Error().Err(err).Send()
return
continue
}
files[script.Path] = index
@@ -110,13 +110,13 @@ func watchScripts(block bool) {
script, err := misc.ReadScriptFile(event.Name)
if err != nil {
log.Error().Err(err).Send()
return
continue
}
err = connector.PutScript(script, index)
if err != nil {
log.Error().Err(err).Send()
return
continue
}
case fsnotify.Rename:
@@ -124,7 +124,7 @@ func watchScripts(block bool) {
err := connector.DeleteScript(index)
if err != nil {
log.Error().Err(err).Send()
return
continue
}
default:
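
Note: the `return` → `continue` changes above are the substance of this hunk — a single unreadable or rejected script should skip one fsnotify event, not terminate the whole watcher. A minimal sketch of the loop shape (watched path and logging are illustrative only):

package main

import (
	"log"
	"os"

	"github.com/fsnotify/fsnotify"
)

func main() {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer watcher.Close()
	if err := watcher.Add("."); err != nil {
		log.Fatal(err)
	}

	for event := range watcher.Events {
		data, err := os.ReadFile(event.Name)
		if err != nil {
			log.Println(err)
			continue // with `return`, one failed read would end the watch loop entirely
		}
		log.Printf("%s: %d bytes", event.Name, len(data))
	}
}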


@@ -47,15 +47,19 @@ func init() {
tapCmd.Flags().StringP(configStructs.DockerTagLabel, "t", defaultTapConfig.Docker.Tag, "The tag of the Docker images that are going to be pulled")
tapCmd.Flags().String(configStructs.DockerImagePullPolicy, defaultTapConfig.Docker.ImagePullPolicy, "ImagePullPolicy for the Docker images")
tapCmd.Flags().StringSlice(configStructs.DockerImagePullSecrets, defaultTapConfig.Docker.ImagePullSecrets, "ImagePullSecrets for the Docker images")
tapCmd.Flags().Uint16(configStructs.ProxyFrontPortLabel, defaultTapConfig.Proxy.Front.SrcPort, "Provide a custom port for the front-end proxy/port-forward")
tapCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.SrcPort, "Provide a custom port for the Hub proxy/port-forward")
tapCmd.Flags().Uint16(configStructs.ProxyFrontPortLabel, defaultTapConfig.Proxy.Front.Port, "Provide a custom port for the front-end proxy/port-forward")
tapCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.Port, "Provide a custom port for the Hub proxy/port-forward")
tapCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the proxy/port-forward")
tapCmd.Flags().StringSliceP(configStructs.NamespacesLabel, "n", defaultTapConfig.Namespaces, "Namespaces selector")
tapCmd.Flags().BoolP(configStructs.AllNamespacesLabel, "A", defaultTapConfig.AllNamespaces, "Tap all namespaces")
tapCmd.Flags().String(configStructs.StorageLimitLabel, defaultTapConfig.StorageLimit, "Override the default storage limit. (per node)")
tapCmd.Flags().StringP(configStructs.SelfNamespaceLabel, "s", defaultTapConfig.SelfNamespace, "Self-namespace of Kubeshark")
tapCmd.Flags().Bool(configStructs.PersistentStorageLabel, defaultTapConfig.PersistentStorage, "Enable persistent storage (PersistentVolumeClaim)")
tapCmd.Flags().String(configStructs.StorageLimitLabel, defaultTapConfig.StorageLimit, "Override the default storage limit (per node)")
tapCmd.Flags().String(configStructs.StorageClassLabel, defaultTapConfig.StorageClass, "Override the default storage class of the PersistentVolumeClaim (per node)")
tapCmd.Flags().Bool(configStructs.DryRunLabel, defaultTapConfig.DryRun, "Preview of all pods matching the regex, without tapping them")
tapCmd.Flags().StringP(configStructs.PcapLabel, "p", defaultTapConfig.Pcap, fmt.Sprintf("Capture from a PCAP snapshot of %s (.tar.gz) using your Docker Daemon instead of Kubernetes", misc.Software))
tapCmd.Flags().Bool(configStructs.ServiceMeshLabel, defaultTapConfig.ServiceMesh, "Capture the encrypted traffic if the cluster is configured with a service mesh and with mTLS")
tapCmd.Flags().Bool(configStructs.TlsLabel, defaultTapConfig.Tls, "Capture the traffic that's encrypted with OpenSSL or Go crypto/tls libraries")
tapCmd.Flags().Bool(configStructs.IgnoreTaintedLabel, defaultTapConfig.IgnoreTainted, "Ignore tainted pods while running Worker DaemonSet")
tapCmd.Flags().Bool(configStructs.IngressEnabledLabel, defaultTapConfig.Ingress.Enabled, "Enable Ingress")
tapCmd.Flags().Bool(configStructs.DebugLabel, defaultTapConfig.Debug, "Enable the debug mode")
}


@@ -13,6 +13,7 @@ import (
"github.com/docker/docker/client"
"github.com/docker/go-connections/nat"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/config/configStructs"
"github.com/kubeshark/kubeshark/docker"
"github.com/kubeshark/kubeshark/internal/connect"
"github.com/kubeshark/kubeshark/kubernetes"
@@ -141,10 +142,10 @@ func createAndStartContainers(
hostConfigFront := &container.HostConfig{
PortBindings: nat.PortMap{
nat.Port(fmt.Sprintf("%d/tcp", config.Config.Tap.Proxy.Front.DstPort)): []nat.PortBinding{
nat.Port(fmt.Sprintf("%d/tcp", configStructs.ContainerPort)): []nat.PortBinding{
{
HostIP: hostIP,
HostPort: fmt.Sprintf("%d", config.Config.Tap.Proxy.Front.SrcPort),
HostPort: fmt.Sprintf("%d", config.Config.Tap.Proxy.Front.Port),
},
},
},
@@ -156,7 +157,7 @@ func createAndStartContainers(
Env: []string{
"REACT_APP_DEFAULT_FILTER= ",
"REACT_APP_HUB_HOST= ",
fmt.Sprintf("REACT_APP_HUB_PORT=%d", config.Config.Tap.Proxy.Hub.SrcPort),
fmt.Sprintf("REACT_APP_HUB_PORT=%d", config.Config.Tap.Proxy.Hub.Port),
},
}, hostConfigFront, nil, nil, nameFront)
if err != nil {
@@ -165,16 +166,16 @@ func createAndStartContainers(
hostConfigHub := &container.HostConfig{
PortBindings: nat.PortMap{
nat.Port(fmt.Sprintf("%d/tcp", config.Config.Tap.Proxy.Hub.DstPort)): []nat.PortBinding{
nat.Port(fmt.Sprintf("%d/tcp", config.Config.Tap.Proxy.Hub.SrvPort)): []nat.PortBinding{
{
HostIP: hostIP,
HostPort: fmt.Sprintf("%d", config.Config.Tap.Proxy.Hub.SrcPort),
HostPort: fmt.Sprintf("%d", config.Config.Tap.Proxy.Hub.Port),
},
},
},
}
cmdHub := []string{"-port", fmt.Sprintf("%d", config.Config.Tap.Proxy.Hub.DstPort)}
cmdHub := []string{"-port", fmt.Sprintf("%d", config.Config.Tap.Proxy.Hub.SrvPort)}
if config.DebugMode {
cmdHub = append(cmdHub, fmt.Sprintf("-%s", config.DebugFlag))
}
@@ -183,13 +184,13 @@ func createAndStartContainers(
Image: imageHub,
Cmd: cmdHub,
Tty: false,
ExposedPorts: nat.PortSet{nat.Port(fmt.Sprintf("%d/tcp", config.Config.Tap.Proxy.Hub.DstPort)): {}},
ExposedPorts: nat.PortSet{nat.Port(fmt.Sprintf("%d/tcp", config.Config.Tap.Proxy.Hub.SrvPort)): {}},
}, hostConfigHub, nil, nil, nameHub)
if err != nil {
return
}
cmdWorker := []string{"-f", "./import", "-port", fmt.Sprintf("%d", config.Config.Tap.Proxy.Worker.DstPort)}
cmdWorker := []string{"-f", "./import", "-port", fmt.Sprintf("%d", config.Config.Tap.Proxy.Worker.SrvPort)}
if config.DebugMode {
cmdWorker = append(cmdWorker, fmt.Sprintf("-%s", config.DebugFlag))
}
@@ -328,14 +329,19 @@ func pcap(tarPath string) {
},
}
connector = connect.NewConnector(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort), connect.DefaultRetries, connect.DefaultTimeout)
connector = connect.NewConnector(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.Port), connect.DefaultRetries, connect.DefaultTimeout)
connector.PostWorkerPodToHub(workerPod)
// License
if config.Config.License != "" {
connector.PostLicense(config.Config.License)
}
log.Info().
Str("url", kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort)).
Str("url", kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.Port)).
Msg(fmt.Sprintf(utils.Green, "Hub is available at:"))
url := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Front.SrcPort)
url := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Front.Port)
log.Info().Str("url", url).Msg(fmt.Sprintf(utils.Green, fmt.Sprintf("%s is available at:", misc.Software)))
if !config.Config.HeadlessMode {
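
Note: after the rename, the container side of each binding is the fixed `configStructs.ContainerPort` (80) and only the host side (`tap.proxy.front.port`, default 8899) stays configurable. A sketch of the front container's binding with those concrete values:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/go-connections/nat"
)

func main() {
	// Container listens on 80/tcp; the host publishes it on the configured port.
	hostConfig := &container.HostConfig{
		PortBindings: nat.PortMap{
			nat.Port("80/tcp"): []nat.PortBinding{
				{HostIP: "127.0.0.1", HostPort: "8899"},
			},
		},
	}
	fmt.Printf("%+v\n", hostConfig.PortBindings)
}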


@@ -58,11 +58,11 @@ func tap() {
log.Info().
Str("limit", config.Config.Tap.StorageLimit).
Msg(fmt.Sprintf("%s will store the traffic up to a limit (per node). Oldest TCP streams will be removed once the limit is reached.", misc.Software))
Msg(fmt.Sprintf("%s will store the traffic up to a limit (per node). Oldest TCP/UDP streams will be removed once the limit is reached.", misc.Software))
connector = connect.NewConnector(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort), connect.DefaultRetries, connect.DefaultTimeout)
connector = connect.NewConnector(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.Port), connect.DefaultRetries, connect.DefaultTimeout)
kubernetesProvider, err := getKubernetesProviderForCli()
kubernetesProvider, err := getKubernetesProviderForCli(false, false)
if err != nil {
return
}
@@ -70,11 +70,11 @@ func tap() {
ctx, cancel := context.WithCancel(context.Background())
defer cancel() // cancel will be called when this function exits
state.targetNamespaces = getNamespaces(kubernetesProvider)
state.targetNamespaces = kubernetesProvider.GetNamespaces()
if config.Config.IsNsRestrictedMode() {
if len(state.targetNamespaces) != 1 || !utils.Contains(state.targetNamespaces, config.Config.SelfNamespace) {
log.Error().Msg(fmt.Sprintf("%s can't resolve IPs in other namespaces when running in namespace restricted mode. You can use the same namespace for --%s and --%s", misc.Software, configStructs.NamespacesLabel, config.SelfNamespaceConfigName))
if len(state.targetNamespaces) != 1 || !utils.Contains(state.targetNamespaces, config.Config.Tap.SelfNamespace) {
log.Error().Msg(fmt.Sprintf("%s can't resolve IPs in other namespaces when running in namespace restricted mode. You can use the same namespace for --%s and --%s", misc.Software, configStructs.NamespacesLabel, configStructs.SelfNamespaceLabel))
return
}
}
@@ -90,7 +90,7 @@ func tap() {
}
log.Info().Msg(fmt.Sprintf("Waiting for the creation of %s resources...", misc.Software))
if state.selfServiceAccountExists, err = resources.CreateHubResources(ctx, kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.SelfNamespace, config.Config.Tap.Resources.Hub, config.Config.ImagePullPolicy(), config.Config.ImagePullSecrets(), config.Config.Tap.Debug); err != nil {
if state.selfServiceAccountExists, err = resources.CreateHubResources(ctx, kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.Tap.SelfNamespace, config.Config.Tap.Resources.Hub, config.Config.ImagePullPolicy(), config.Config.ImagePullSecrets(), config.Config.Tap.Debug); err != nil {
var statusError *k8serrors.StatusError
if errors.As(err, &statusError) && (statusError.ErrStatus.Reason == metav1.StatusReasonAlreadyExists) {
log.Info().Msg(fmt.Sprintf("%s is already running in this namespace, change the `selfnamespace` configuration or run `%s clean` to remove the currently running %s instance.", misc.Software, misc.Program, misc.Software))
@@ -98,7 +98,7 @@ func tap() {
log.Info().Msg("Updated Hub about the changes in the config. Exiting.")
printProxyCommandSuggestion()
} else {
defer resources.CleanUpSelfResources(ctx, cancel, kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.SelfNamespace)
defer resources.CleanUpSelfResources(ctx, cancel, kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.Tap.SelfNamespace)
log.Error().Err(errormessage.FormatError(err)).Msg("Error creating resources!")
}
@@ -113,7 +113,10 @@ func tap() {
// block until exit signal or error
utils.WaitForTermination(ctx, cancel)
printProxyCommandSuggestion()
if !config.Config.Tap.Ingress.Enabled {
printProxyCommandSuggestion()
}
}
func printProxyCommandSuggestion() {
@@ -123,7 +126,7 @@ func printProxyCommandSuggestion() {
}
func finishTapExecution(kubernetesProvider *kubernetes.Provider) {
finishSelfExecution(kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.SelfNamespace, true)
finishSelfExecution(kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.Tap.SelfNamespace, true)
}
/*
@@ -156,7 +159,7 @@ func printNoPodsFoundSuggestion(targetNamespaces []string) {
func watchHubPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc) {
podExactRegex := regexp.MustCompile(fmt.Sprintf("^%s$", kubernetes.HubPodName))
podWatchHelper := kubernetes.NewPodWatchHelper(kubernetesProvider, podExactRegex)
eventChan, errorChan := kubernetes.FilteredWatch(ctx, podWatchHelper, []string{config.Config.SelfNamespace}, podWatchHelper)
eventChan, errorChan := kubernetes.FilteredWatch(ctx, podWatchHelper, []string{config.Config.Tap.SelfNamespace}, podWatchHelper)
isPodReady := false
timeAfter := time.After(120 * time.Second)
@@ -223,7 +226,7 @@ func watchHubPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, c
log.Error().
Str("pod", kubernetes.HubPodName).
Str("namespace", config.Config.SelfNamespace).
Str("namespace", config.Config.Tap.SelfNamespace).
Err(err).
Msg("Failed creating pod.")
cancel()
@@ -247,7 +250,7 @@ func watchHubPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, c
func watchFrontPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc) {
podExactRegex := regexp.MustCompile(fmt.Sprintf("^%s$", kubernetes.FrontPodName))
podWatchHelper := kubernetes.NewPodWatchHelper(kubernetesProvider, podExactRegex)
eventChan, errorChan := kubernetes.FilteredWatch(ctx, podWatchHelper, []string{config.Config.SelfNamespace}, podWatchHelper)
eventChan, errorChan := kubernetes.FilteredWatch(ctx, podWatchHelper, []string{config.Config.Tap.SelfNamespace}, podWatchHelper)
isPodReady := false
timeAfter := time.After(120 * time.Second)
@@ -312,10 +315,9 @@ func watchFrontPod(ctx context.Context, kubernetesProvider *kubernetes.Provider,
log.Error().
Str("pod", kubernetes.FrontPodName).
Str("namespace", config.Config.SelfNamespace).
Str("namespace", config.Config.Tap.SelfNamespace).
Err(err).
Msg("Failed creating pod.")
cancel()
case <-timeAfter:
if !isPodReady {
@@ -336,7 +338,7 @@ func watchFrontPod(ctx context.Context, kubernetesProvider *kubernetes.Provider,
func watchHubEvents(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc) {
podExactRegex := regexp.MustCompile(fmt.Sprintf("^%s", kubernetes.HubPodName))
eventWatchHelper := kubernetes.NewEventWatchHelper(kubernetesProvider, podExactRegex, "pod")
eventChan, errorChan := kubernetes.FilteredWatch(ctx, eventWatchHelper, []string{config.Config.SelfNamespace}, eventWatchHelper)
eventChan, errorChan := kubernetes.FilteredWatch(ctx, eventWatchHelper, []string{config.Config.Tap.SelfNamespace}, eventWatchHelper)
for {
select {
case wEvent, ok := <-eventChan:
@@ -409,8 +411,8 @@ func postHubStarted(ctx context.Context, kubernetesProvider *kubernetes.Provider
kubernetes.HubServiceName,
kubernetes.HubPodName,
configStructs.ProxyHubPortLabel,
config.Config.Tap.Proxy.Hub.SrcPort,
config.Config.Tap.Proxy.Hub.DstPort,
config.Config.Tap.Proxy.Hub.Port,
configStructs.ContainerPort,
"/echo",
)
@@ -420,7 +422,7 @@ func postHubStarted(ctx context.Context, kubernetesProvider *kubernetes.Provider
kubernetesProvider,
state.selfServiceAccountExists,
ctx,
config.Config.SelfNamespace,
config.Config.Tap.SelfNamespace,
config.Config.Tap.Resources.Worker,
config.Config.ImagePullPolicy(),
config.Config.ImagePullSecrets(),
@@ -431,43 +433,36 @@ func postHubStarted(ctx context.Context, kubernetesProvider *kubernetes.Provider
if err != nil {
log.Error().Err(err).Send()
}
} else {
// Pod regex
connector.PostRegexToHub(config.Config.Tap.PodRegexStr, state.targetNamespaces)
// Grace period
log.Info().Msg("Waiting for worker containers...")
time.Sleep(5 * time.Second)
}
// License
if config.Config.License != "" {
connector.PostLicense(config.Config.License)
}
// Storage limit
connector.PostStorageLimitToHub(config.Config.Tap.StorageLimitBytes())
// Scripting
connector.PostEnv(config.Config.Scripting.Env)
// Pod regex
connector.PostRegexToHub(config.Config.Tap.PodRegexStr, state.targetNamespaces)
// License
if config.Config.License != "" {
connector.PostLicense(config.Config.License)
}
// Scripting
connector.PostEnv(config.Config.Scripting.Env)
scripts, err := config.Config.Scripting.GetScripts()
if err != nil {
log.Error().Err(err).Send()
}
for _, script := range scripts {
_, err = connector.PostScript(script)
scripts, err := config.Config.Scripting.GetScripts()
if err != nil {
log.Error().Err(err).Send()
}
for _, script := range scripts {
_, err = connector.PostScript(script)
if err != nil {
log.Error().Err(err).Send()
}
}
connector.PostScriptDone()
}
connector.PostScriptDone()
if !update {
if !update && !config.Config.Tap.Ingress.Enabled {
// Hub proxy URL
url := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort)
url := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.Port)
log.Info().Str("url", url).Msg(fmt.Sprintf(utils.Green, "Hub is available at:"))
}
@@ -483,29 +478,20 @@ func postFrontStarted(ctx context.Context, kubernetesProvider *kubernetes.Provid
kubernetes.FrontServiceName,
kubernetes.FrontPodName,
configStructs.ProxyFrontPortLabel,
config.Config.Tap.Proxy.Front.SrcPort,
config.Config.Tap.Proxy.Front.DstPort,
config.Config.Tap.Proxy.Front.Port,
configStructs.ContainerPort,
"",
)
url := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Front.SrcPort)
var url string
if config.Config.Tap.Ingress.Enabled {
url = fmt.Sprintf("http://%s", config.Config.Tap.Ingress.Host)
} else {
url = kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Front.Port)
}
log.Info().Str("url", url).Msg(fmt.Sprintf(utils.Green, fmt.Sprintf("%s is available at:", misc.Software)))
if !config.Config.HeadlessMode {
utils.OpenBrowser(url)
}
}
func getNamespaces(kubernetesProvider *kubernetes.Provider) []string {
if config.Config.Tap.AllNamespaces {
return []string{kubernetes.K8sAllNamespaces}
} else if len(config.Config.Tap.Namespaces) > 0 {
return utils.Unique(config.Config.Tap.Namespaces)
} else {
currentNamespace, err := kubernetesProvider.CurrentNamespace()
if err != nil {
log.Fatal().Err(err).Msg("Error getting current namespace!")
}
return []string{currentNamespace}
}
}
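
Note: the CLI-side `getNamespaces` helper deleted above is replaced by `kubernetesProvider.GetNamespaces()`. Assuming the logic moved into the provider largely as-is, a standalone rendering with its dependencies inlined looks like this:

package main

import "fmt"

// getNamespaces mirrors the removed helper: all namespaces, an explicit
// deduplicated list, or the kubeconfig's current namespace as a fallback.
func getNamespaces(all bool, selected []string, current string) []string {
	if all {
		return []string{""} // K8sAllNamespaces is presumably the empty selector
	}
	if len(selected) > 0 {
		return unique(selected)
	}
	return []string{current}
}

func unique(values []string) []string {
	seen := map[string]bool{}
	var out []string
	for _, v := range values {
		if !seen[v] {
			seen[v] = true
			out = append(out, v)
		}
	}
	return out
}

func main() {
	fmt.Println(getNamespaces(false, []string{"default", "default"}, "kubeshark"))
}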


@@ -5,6 +5,7 @@ import (
"fmt"
"io"
"os"
"path"
"path/filepath"
"reflect"
"strconv"
@@ -51,21 +52,35 @@ func InitConfig(cmd *cobra.Command) error {
return nil
}
if cmd.Use != "console" && cmd.Use != "pro" {
if !utils.Contains([]string{
"console",
"pro",
"manifests",
}, cmd.Use) {
go version.CheckNewerVersion()
}
Config = CreateDefaultConfig()
cmdName = cmd.Name()
if utils.Contains([]string{
"clean",
"console",
"pro",
"proxy",
"scripts",
}, cmdName) {
cmdName = "tap"
}
if err := defaults.Set(&Config); err != nil {
return err
}
configFilePathFlag := cmd.Flags().Lookup(ConfigFilePathCommandName)
ConfigFilePath = configFilePathFlag.Value.String()
if err := loadConfigFile(&Config); err != nil {
if configFilePathFlag.Changed || !os.IsNotExist(err) {
ConfigFilePath = path.Join(misc.GetDotFolderPath(), "config.yaml")
if err := loadConfigFile(&Config, utils.Contains([]string{
"manifests",
}, cmd.Use)); err != nil {
if !os.IsNotExist(err) {
return fmt.Errorf("invalid config, %w\n"+
"you can regenerate the file by removing it (%v) and using `kubeshark config -r`", err, ConfigFilePath)
}
@@ -97,6 +112,14 @@ func WriteConfig(config *ConfigStruct) error {
}
data := []byte(template)
if _, err := os.Stat(ConfigFilePath); os.IsNotExist(err) {
err = os.MkdirAll(filepath.Dir(ConfigFilePath), 0700)
if err != nil {
return fmt.Errorf("failed creating directories, err: %v", err)
}
}
if err := os.WriteFile(ConfigFilePath, data, 0644); err != nil {
return fmt.Errorf("failed writing config, err: %v", err)
}
@@ -104,7 +127,7 @@ func WriteConfig(config *ConfigStruct) error {
return nil
}
func loadConfigFile(config *ConfigStruct) error {
func loadConfigFile(config *ConfigStruct, silent bool) error {
cwd, err := os.Getwd()
if err != nil {
return err
@@ -130,7 +153,9 @@ func loadConfigFile(config *ConfigStruct) error {
return err
}
log.Info().Str("path", ConfigFilePath).Msg("Found config file!")
if !silent {
log.Info().Str("path", ConfigFilePath).Msg("Found config file!")
}
return nil
}
@@ -139,9 +164,7 @@ func initFlag(f *pflag.Flag) {
configElemValue := reflect.ValueOf(&Config).Elem()
var flagPath []string
if !utils.Contains([]string{ConfigFilePathCommandName}, f.Name) {
flagPath = append(flagPath, cmdName)
}
flagPath = append(flagPath, cmdName)
flagPath = append(flagPath, strings.Split(f.Name, "-")...)
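
Note: with the command-name aliasing added earlier in this hunk (clean/console/pro/proxy/scripts all map to "tap") and `flagPath` now always prefixed with `cmdName`, a flag such as `--proxy-hub-port` on `kubeshark proxy` resolves to the config path `tap.proxy.hub.port`. A sketch of that mapping:

package main

import (
	"fmt"
	"strings"
)

// flagPath reproduces the new flag-to-config-path derivation shown above.
func flagPath(cmdName, flagName string) []string {
	aliased := map[string]bool{"clean": true, "console": true, "pro": true, "proxy": true, "scripts": true}
	if aliased[cmdName] {
		cmdName = "tap"
	}
	return append([]string{cmdName}, strings.Split(flagName, "-")...)
}

func main() {
	fmt.Println(flagPath("proxy", "proxy-hub-port")) // [tap proxy hub port]
}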


@@ -2,7 +2,6 @@ package config
import (
"os"
"path"
"path/filepath"
"github.com/kubeshark/kubeshark/config/configStructs"
@@ -12,9 +11,7 @@ import (
)
const (
SelfNamespaceConfigName = "selfnamespace"
ConfigFilePathCommandName = "configpath"
KubeConfigPathConfigName = "kube-configpath"
KubeConfigPathConfigName = "kube-configpath"
)
func CreateDefaultConfig() ConfigStruct {
@@ -26,22 +23,20 @@ type KubeConfig struct {
Context string `yaml:"context"`
}
type ConfigStruct struct {
Tap configStructs.TapConfig `yaml:"tap"`
Logs configStructs.LogsConfig `yaml:"logs"`
Config configStructs.ConfigConfig `yaml:"config,omitempty"`
Kube KubeConfig `yaml:"kube"`
SelfNamespace string `yaml:"selfnamespace" default:"kubeshark"`
DumpLogs bool `yaml:"dumplogs" default:"false"`
ConfigFilePath string `yaml:"configpath,omitempty" readonly:""`
HeadlessMode bool `yaml:"headless" default:"false"`
License string `yaml:"license" default:""`
Scripting configStructs.ScriptingConfig `yaml:"scripting"`
ResourceLabels map[string]string `yaml:"resourceLabels" default:"{}"`
type ManifestsConfig struct {
Dump bool `yaml:"dump"`
}
func (config *ConfigStruct) SetDefaults() {
config.ConfigFilePath = path.Join(misc.GetDotFolderPath(), "config.yaml")
type ConfigStruct struct {
Tap configStructs.TapConfig `yaml:"tap"`
Logs configStructs.LogsConfig `yaml:"logs"`
Config configStructs.ConfigConfig `yaml:"config,omitempty"`
Kube KubeConfig `yaml:"kube"`
DumpLogs bool `yaml:"dumplogs" default:"false"`
HeadlessMode bool `yaml:"headless" default:"false"`
License string `yaml:"license" default:""`
Scripting configStructs.ScriptingConfig `yaml:"scripting"`
Manifests ManifestsConfig `yaml:"manifests,omitempty"`
}
func (config *ConfigStruct) ImagePullPolicy() v1.PullPolicy {
@@ -58,7 +53,7 @@ func (config *ConfigStruct) ImagePullSecrets() []v1.LocalObjectReference {
}
func (config *ConfigStruct) IsNsRestrictedMode() bool {
return config.SelfNamespace != misc.Program // Notice "kubeshark" string must match the default SelfNamespace
return config.Tap.SelfNamespace != misc.Program // Notice "kubeshark" string must match the default SelfNamespace
}
func (config *ConfigStruct) KubeConfigPath() string {


@@ -4,8 +4,8 @@ import (
"fmt"
"regexp"
"github.com/kubeshark/kubeshark/utils"
"github.com/rs/zerolog/log"
v1 "k8s.io/api/core/v1"
networking "k8s.io/api/networking/v1"
)
const (
@@ -17,35 +17,48 @@ const (
ProxyHubPortLabel = "proxy-hub-port"
ProxyHostLabel = "proxy-host"
NamespacesLabel = "namespaces"
AllNamespacesLabel = "allnamespaces"
SelfNamespaceLabel = "selfnamespace"
PersistentStorageLabel = "persistentstorage"
StorageLimitLabel = "storagelimit"
StorageClassLabel = "storageclass"
DryRunLabel = "dryrun"
PcapLabel = "pcap"
ServiceMeshLabel = "servicemesh"
TlsLabel = "tls"
IgnoreTaintedLabel = "ignoreTainted"
IngressEnabledLabel = "ingress-enabled"
DebugLabel = "debug"
ContainerPort = 80
ContainerPortStr = "80"
)
type Resources struct {
CpuLimit string `yaml:"cpu-limit" default:"750m"`
MemoryLimit string `yaml:"memory-limit" default:"1Gi"`
CpuRequests string `yaml:"cpu-requests" default:"50m"`
MemoryRequests string `yaml:"memory-requests" default:"50Mi"`
type ResourceLimits struct {
CPU string `yaml:"cpu" default:"750m"`
Memory string `yaml:"memory" default:"1Gi"`
}
type ResourceRequests struct {
CPU string `yaml:"cpu" default:"50m"`
Memory string `yaml:"memory" default:"50Mi"`
}
type ResourceRequirements struct {
Limits ResourceLimits `json:"limits"`
Requests ResourceRequests `json:"requests"`
}
type WorkerConfig struct {
SrcPort uint16 `yaml:"port" default:"8897"`
DstPort uint16 `yaml:"srvport" default:"8897"`
SrvPort uint16 `yaml:"srvport" default:"8897"`
}
type HubConfig struct {
SrcPort uint16 `yaml:"port" default:"8898"`
DstPort uint16 `yaml:"srvport" default:"80"`
Port uint16 `yaml:"port" default:"8898"`
SrvPort uint16 `yaml:"srvport" default:"8898"`
}
type FrontConfig struct {
SrcPort uint16 `yaml:"port" default:"8899"`
DstPort uint16 `yaml:"srvport" default:"80"`
Port uint16 `yaml:"port" default:"8899"`
SrvPort uint16 `yaml:"srvport" default:"8899"`
}
type ProxyConfig struct {
@@ -63,24 +76,42 @@ type DockerConfig struct {
}
type ResourcesConfig struct {
Worker Resources `yaml:"worker"`
Hub Resources `yaml:"hub"`
Worker ResourceRequirements `yaml:"worker"`
Hub ResourceRequirements `yaml:"hub"`
}
type AuthConfig struct {
ApprovedDomains []string `yaml:"approvedDomains"`
}
type IngressConfig struct {
Enabled bool `yaml:"enabled" default:"false"`
Host string `yaml:"host" default:"ks.svc.cluster.local"`
TLS []networking.IngressTLS `yaml:"tls"`
Auth AuthConfig `yaml:"auth"`
CertManager string `yaml:"certManager" default:"letsencrypt-prod"`
}
type TapConfig struct {
Docker DockerConfig `yaml:"docker"`
Proxy ProxyConfig `yaml:"proxy"`
PodRegexStr string `yaml:"regex" default:".*"`
Namespaces []string `yaml:"namespaces"`
AllNamespaces bool `yaml:"allnamespaces" default:"false"`
StorageLimit string `yaml:"storagelimit" default:"200MB"`
DryRun bool `yaml:"dryrun" default:"false"`
Pcap string `yaml:"pcap" default:""`
Resources ResourcesConfig `yaml:"resources"`
ServiceMesh bool `yaml:"servicemesh" default:"true"`
Tls bool `yaml:"tls" default:"true"`
PacketCapture string `yaml:"packetcapture" default:"libpcap"`
Debug bool `yaml:"debug" default:"false"`
Docker DockerConfig `yaml:"docker"`
Proxy ProxyConfig `yaml:"proxy"`
PodRegexStr string `yaml:"regex" default:".*"`
Namespaces []string `yaml:"namespaces"`
SelfNamespace string `yaml:"selfnamespace" default:"kubeshark"`
PersistentStorage bool `yaml:"persistentstorage" default:"false"`
StorageLimit string `yaml:"storagelimit" default:"200Mi"`
StorageClass string `yaml:"storageclass" default:"standard"`
DryRun bool `yaml:"dryrun" default:"false"`
Pcap string `yaml:"pcap" default:""`
Resources ResourcesConfig `yaml:"resources"`
ServiceMesh bool `yaml:"servicemesh" default:"true"`
Tls bool `yaml:"tls" default:"true"`
PacketCapture string `yaml:"packetcapture" default:"libpcap"`
IgnoreTainted bool `yaml:"ignoreTainted" default:"false"`
ResourceLabels map[string]string `yaml:"resourceLabels" default:"{}"`
NodeSelectorTerms []v1.NodeSelectorTerm `yaml:"nodeSelectorTerms" default:"[]"`
Ingress IngressConfig `yaml:"ingress"`
Debug bool `yaml:"debug" default:"false"`
}
func (config *TapConfig) PodRegex() *regexp.Regexp {
@@ -88,24 +119,11 @@ func (config *TapConfig) PodRegex() *regexp.Regexp {
return podRegex
}
func (config *TapConfig) StorageLimitBytes() int64 {
storageLimitBytes, err := utils.HumanReadableToBytes(config.StorageLimit)
if err != nil {
log.Fatal().Err(err).Send()
}
return storageLimitBytes
}
func (config *TapConfig) Validate() error {
_, compileErr := regexp.Compile(config.PodRegexStr)
if compileErr != nil {
return fmt.Errorf("%s is not a valid regex %s", config.PodRegexStr, compileErr)
}
_, parseHumanDataSizeErr := utils.HumanReadableToBytes(config.StorageLimit)
if parseHumanDataSizeErr != nil {
return fmt.Errorf("Could not parse --%s value %s", StorageLimitLabel, config.StorageLimit)
}
return nil
}
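
Note: the reshaped `TapConfig` moves `selfnamespace` under `tap`, adds `persistentstorage`/`storageclass`, changes the storage-limit default from 200MB to 200Mi, and introduces the `ingress` block. A sketch of how the new keys read from YAML, using a simplified local mirror of the struct and gopkg.in/yaml.v3 purely for illustration:

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

type ingressConfig struct {
	Enabled     bool   `yaml:"enabled"`
	Host        string `yaml:"host"`
	CertManager string `yaml:"certManager"`
}

// tapConfig trims the real TapConfig to the fields this hunk changes.
type tapConfig struct {
	SelfNamespace     string        `yaml:"selfnamespace"`
	PersistentStorage bool          `yaml:"persistentstorage"`
	StorageLimit      string        `yaml:"storagelimit"`
	StorageClass      string        `yaml:"storageclass"`
	Ingress           ingressConfig `yaml:"ingress"`
}

func main() {
	raw := `
selfnamespace: kubeshark
persistentstorage: true
storagelimit: 200Mi
storageclass: standard
ingress:
  enabled: true
  host: ks.svc.cluster.local
  certManager: letsencrypt-prod
`
	var cfg tapConfig
	if err := yaml.Unmarshal([]byte(raw), &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg)
}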


@@ -6,6 +6,7 @@ import (
regexpsyntax "regexp/syntax"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/config/configStructs"
"github.com/kubeshark/kubeshark/misc"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
@@ -21,9 +22,9 @@ func FormatError(err error) error {
"in the config file or setting the targeted namespace with --%s %s=<NAMEPSACE>",
err,
misc.Software,
config.SelfNamespaceConfigName,
configStructs.SelfNamespaceLabel,
config.SetCommandName,
config.SelfNamespaceConfigName)
configStructs.SelfNamespaceLabel)
} else if syntaxError, isSyntaxError := asRegexSyntaxError(err); isSyntaxError {
errorNew = fmt.Errorf("regex %s is invalid: %w", syntaxError.Expr, err)
} else {

go.mod

@@ -4,14 +4,14 @@ go 1.17
require (
github.com/creasty/defaults v1.5.2
github.com/docker/docker v20.10.22+incompatible
github.com/docker/docker v20.10.24+incompatible
github.com/docker/go-connections v0.4.0
github.com/docker/go-units v0.4.0
github.com/fsnotify/fsnotify v1.5.1
github.com/gin-gonic/gin v1.7.7
github.com/google/go-github/v37 v37.0.0
github.com/gorilla/websocket v1.4.2
github.com/kubeshark/base v0.6.3
github.com/ohler55/ojg v1.14.5
github.com/otiai10/copy v1.10.0
github.com/robertkrimen/otto v0.2.1
github.com/rs/zerolog v1.28.0
github.com/spf13/cobra v1.3.0
@@ -38,7 +38,8 @@ require (
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/docker/distribution v2.7.1+incompatible // indirect
github.com/docker/distribution v2.8.0+incompatible // indirect
github.com/docker/go-units v0.4.0 // indirect
github.com/evanphx/json-patch v5.6.0+incompatible // indirect
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
github.com/fvbommel/sortorder v1.0.2 // indirect
@@ -90,13 +91,13 @@ require (
github.com/ugorji/go/codec v1.1.7 // indirect
github.com/xlab/treeprint v1.1.0 // indirect
go.starlark.net v0.0.0-20220203230714-bb14e151c28f // indirect
golang.org/x/crypto v0.0.0-20220208050332-20e1d8d225ab // indirect
golang.org/x/crypto v0.1.0 // indirect
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
golang.org/x/net v0.2.0 // indirect
golang.org/x/net v0.7.0 // indirect
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect
golang.org/x/sys v0.2.0 // indirect
golang.org/x/term v0.2.0 // indirect
golang.org/x/text v0.4.0 // indirect
golang.org/x/sys v0.5.0 // indirect
golang.org/x/term v0.5.0 // indirect
golang.org/x/text v0.7.0 // indirect
golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11 // indirect
golang.org/x/tools v0.1.12 // indirect
google.golang.org/appengine v1.6.7 // indirect

go.sum

@@ -27,6 +27,7 @@ cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW
cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
cloud.google.com/go v0.98.0/go.mod h1:ua6Ush4NALrHk5QXDWnjvZHN93OuF0HfuEPq9I1X0cM=
cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
cloud.google.com/go v0.100.2 h1:t9Iw5QH5v4XtlEQaCtUY7x6sCABps8sW0acw7e2WQ6Y=
cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
@@ -113,8 +114,10 @@ github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL
github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5 h1:7aWHqerlJ41y6FOsEUvknqgXnGmJyJSbjhAWq5pO4F8=
github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
@@ -149,10 +152,11 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v20.10.22+incompatible h1:6jX4yB+NtcbldT90k7vBSaWJDB3i+zkVJT9BEK8kQkk=
github.com/docker/docker v20.10.22+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/distribution v2.8.0+incompatible h1:l9EaZDICImO1ngI+uTifW+ZYvvz7fKISBAKpg+MbWbY=
github.com/docker/distribution v2.8.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v20.10.24+incompatible h1:Ugvxm7a8+Gz6vqQYQQ2W7GYq5EUPaAiuPgIfVyI3dYE=
github.com/docker/docker v20.10.24+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
@@ -427,8 +431,6 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kubeshark/base v0.6.3 h1:4MpADgqG51cM4unGSe5J8HBHy9PSvzpw/kcUO5cHMG4=
github.com/kubeshark/base v0.6.3/go.mod h1:/ZzBY+5KLaC7J6QUVXtZ0HZALhMcEDrU6Waux5/bHQc=
github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y=
github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0=
@@ -503,6 +505,8 @@ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWb
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/ohler55/ojg v1.14.5 h1:xCX2oyh/ZaoesbLH6fwVHStSJpk4o4eJs8ttXutzdg0=
github.com/ohler55/ojg v1.14.5/go.mod h1:7Ghirupn8NC8hSSDpI0gcjorPxj+vSVIONDWfliHR1k=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA=
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
@@ -518,6 +522,10 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/otiai10/copy v1.10.0 h1:znyI7l134wNg/wDktoVQPxPkgvhDfGCYUasey+h0rDQ=
github.com/otiai10/copy v1.10.0/go.mod h1:rSaLseMUsZFFbsFGc7wCJnnkTAvdc5L6VWxPE4308Ww=
github.com/otiai10/mint v1.5.1 h1:XaPLeE+9vGbuyEHem1JNk3bYc7KKqyI/na0/mLd/Kks=
github.com/otiai10/mint v1.5.1/go.mod h1:MJm72SBthJjz8qhefc4z1PYEieWmy8Bku7CjcAqyUSM=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
@@ -627,6 +635,7 @@ github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo=
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs=
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
@@ -641,6 +650,7 @@ github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
@@ -690,8 +700,8 @@ golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220208050332-20e1d8d225ab h1:lnZ4LoV0UMdibeCUfIB2a4uFwRu491WX/VB2reB8xNc=
golang.org/x/crypto v0.0.0-20220208050332-20e1d8d225ab/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -782,8 +792,10 @@ golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.2.0 h1:sZfSu1wtKLGlWI4ZZayP0ck9Y73K1ynO6gqzTdBVdPU=
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -814,6 +826,7 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -897,13 +910,19 @@ golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.2.0 h1:z85xZCsEl7bi/KwbNADeBYoOP0++7W1ipu+aGnpwzRM=
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -913,8 +932,9 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -1154,6 +1174,7 @@ gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/readline.v1 v1.0.0-20160726135117-62c6fe619375/go.mod h1:lNEQeAhU009zbRxng+XOj5ITVgY24WcbNnQopyfKoYQ=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/sourcemap.v1 v1.0.5 h1:inv58fC9f9J3TK2Y2R1NPntXEn3/wjWHkonhIUODNTI=
gopkg.in/sourcemap.v1 v1.0.5/go.mod h1:2RlvNNSMglmRrcvhfuzp4hQHwOtjxlbjX7UPY/GXb78=

helm-chart/Chart.yaml (new file)

@@ -0,0 +1,25 @@
apiVersion: v2
appVersion: "40.4"
description: The API Traffic Analyzer for Kubernetes
home: https://kubeshark.co
keywords:
- kubeshark
- packet capture
- traffic capture
- traffic analyzer
- network sniffer
- observability
- devops
- microservice
- forensics
- api
kubeVersion: '>= 1.16.0-0'
maintainers:
- email: info@kubeshark.co
name: Kubeshark
url: https://kubeshark.co
name: kubeshark
sources:
- https://github.com/kubeshark/kubeshark/tree/master/helm-chart
type: application
version: "40.4"
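
Note: with the chart metadata in place, installing from a checkout presumably comes down to pointing Helm at this directory (for example `helm install kubeshark ./helm-chart`); the exact published-repository flow is outside this diff.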

helm-chart/LICENSE (new file)

@@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
https://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2022 Kubeshark
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

helm-chart/README.md Normal file

@@ -0,0 +1,71 @@
# Helm Chart of Kubeshark
## From the Helm Repo
Add the Helm repo for Kubeshark:
```shell
helm repo add kubeshark https://helm.kubeshark.co
```
Then install Kubeshark:
```shell
helm install kubeshark kubeshark/kubeshark
```
## From a Local Clone
Clone the repo:
```shell
git clone git@github.com:kubeshark/kubeshark.git --depth 1
cd kubeshark/helm-chart
```
Render the templates:
```shell
helm template .
```
Install Kubeshark:
```shell
helm install kubeshark .
```
Uninstall Kubeshark:
```shell
helm uninstall kubeshark
```
## Accessing
Set up port forwarding:
```shell
kubectl port-forward -n kubeshark service/kubeshark-hub 8898:80 & \
kubectl port-forward -n kubeshark service/kubeshark-front 8899:80
```
Visit [localhost:8899](http://localhost:8899)
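A quick sanity check that the front-end is answering through the forwarded port (assuming the defaults above):
```shell
# Expect an HTTP 200 status line from the front-end service
curl -sI http://localhost:8899 | head -n 1
```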
## Installing with Ingress Enabled
```shell
helm install kubeshark kubeshark/kubeshark \
--set tap.ingress.enabled=true \
--set tap.ingress.host=ks.svc.cluster.local \
--set "tap.ingress.auth.approvedDomains={gmail.com}" \
--set license=LICENSE_GOES_HERE
```
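With Ingress enabled, the dashboard is served through the Ingress host instead of a port-forward. The chart creates an Ingress named `kubeshark-ingress` in the `kubeshark` namespace, so the assigned address can be inspected with:
```shell
kubectl get ingress kubeshark-ingress -n kubeshark
```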
## Installing with Persistent Storage Enabled
```shell
helm install kubeshark kubeshark/kubeshark \
--set tap.persistentstorage=true \
--set license=LICENSE_GOES_HERE
```
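The claim size and storage class map to the `tap.storagelimit` and `tap.storageclass` keys in `values.yaml` (defaults: `200Mi` and `standard`), so both can be overridden at install time, for example:
```shell
helm install kubeshark kubeshark/kubeshark \
  --set tap.persistentstorage=true \
  --set tap.storagelimit=500Mi \
  --set tap.storageclass=standard \
  --set license=LICENSE_GOES_HERE
```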


@@ -0,0 +1,12 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: v1
kind: Namespace
metadata:
creationTimestamp: null
labels:
kubeshark-created-by: kubeshark
kubeshark-managed-by: kubeshark
name: '{{ .Values.tap.selfnamespace }}'
spec: {}
status: {}


@@ -0,0 +1,12 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
kubeshark-cli-version: v1
kubeshark-created-by: kubeshark
kubeshark-managed-by: kubeshark
name: kubeshark-service-account
namespace: '{{ .Values.tap.selfnamespace }}'


@@ -0,0 +1,28 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
kubeshark-cli-version: v1
kubeshark-created-by: kubeshark
kubeshark-managed-by: kubeshark
name: kubeshark-cluster-role
namespace: '{{ .Values.tap.selfnamespace }}'
rules:
- apiGroups:
- ""
- extensions
- apps
- networking.k8s.io
resources:
- pods
- services
- endpoints
- persistentvolumeclaims
- ingresses
verbs:
- list
- get
- watch


@@ -0,0 +1,20 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
kubeshark-cli-version: v1
kubeshark-created-by: kubeshark
kubeshark-managed-by: kubeshark
name: kubeshark-cluster-role-binding
namespace: '{{ .Values.tap.selfnamespace }}'
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubeshark-cluster-role
subjects:
- kind: ServiceAccount
name: kubeshark-service-account
namespace: '{{ .Values.tap.selfnamespace }}'


@@ -0,0 +1,51 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: null
labels:
app: kubeshark-hub
kubeshark-created-by: kubeshark
kubeshark-managed-by: kubeshark
name: kubeshark-hub
namespace: '{{ .Values.tap.selfnamespace }}'
spec:
containers:
- command:
- '{{ .Values.tap.debug | ternary "./hub -debug" "./hub" }}'
env:
- name: POD_REGEX
value: '{{ .Values.tap.regex }}'
- name: NAMESPACES
value: '{{ gt (len .Values.tap.namespaces) 0 | ternary (join "," .Values.tap.namespaces) "" }}'
- name: LICENSE
value: '{{ .Values.license }}'
- name: SCRIPTING_ENV
value: '{}'
- name: SCRIPTING_SCRIPTS
value: '[]'
- name: AUTH_APPROVED_DOMAINS
value: '{{ gt (len .Values.tap.ingress.auth.approvedDomains) 0 | ternary (join "," .Values.tap.ingress.auth.approvedDomains) "" }}'
image: '{{ .Values.tap.docker.registry }}/hub:{{ .Values.tap.docker.tag }}'
imagePullPolicy: '{{ .Values.tap.docker.imagepullpolicy }}'
name: kubeshark-hub
ports:
- containerPort: 80
hostPort: {{ .Values.tap.proxy.hub.srvport }}
resources:
limits:
cpu: '{{ .Values.tap.resources.hub.limits.cpu }}'
memory: '{{ .Values.tap.resources.hub.limits.memory }}'
requests:
cpu: '{{ .Values.tap.resources.hub.requests.cpu }}'
memory: '{{ .Values.tap.resources.hub.requests.memory }}'
dnsPolicy: ClusterFirstWithHostNet
serviceAccountName: kubeshark-service-account
terminationGracePeriodSeconds: 0
tolerations:
- effect: NoExecute
operator: Exists
- effect: NoSchedule
operator: Exists
status: {}
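The `ternary`/`join` pipelines above flatten list values such as `tap.ingress.auth.approvedDomains` into the comma-separated strings the Hub expects in its environment. A minimal sketch of verifying the rendering from the chart directory (output shown is approximate):
```shell
helm template . \
  --set "tap.ingress.auth.approvedDomains={gmail.com,example.com}" \
  | grep -A 1 AUTH_APPROVED_DOMAINS
# Expected, roughly:
#   - name: AUTH_APPROVED_DOMAINS
#     value: gmail.com,example.com
```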


@@ -0,0 +1,21 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: v1
kind: Service
metadata:
creationTimestamp: null
labels:
kubeshark-created-by: kubeshark
kubeshark-managed-by: kubeshark
name: kubeshark-hub
namespace: '{{ .Values.tap.selfnamespace }}'
spec:
ports:
- name: kubeshark-hub
port: 80
targetPort: 80
selector:
app: kubeshark-hub
type: NodePort
status:
loadBalancer: {}


@@ -0,0 +1,50 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: null
labels:
app: kubeshark-front
kubeshark-created-by: kubeshark
kubeshark-managed-by: kubeshark
name: kubeshark-front
namespace: '{{ .Values.tap.selfnamespace }}'
spec:
containers:
- env:
- name: REACT_APP_DEFAULT_FILTER
value: ' '
- name: REACT_APP_HUB_HOST
value: ' '
- name: REACT_APP_HUB_PORT
value: '{{ .Values.tap.ingress.enabled | ternary "80/api" "8898" }}'
image: '{{ .Values.tap.docker.registry }}/front:{{ .Values.tap.docker.tag }}'
imagePullPolicy: '{{ .Values.tap.docker.imagepullpolicy }}'
name: kubeshark-front
ports:
- containerPort: 80
hostPort: {{ .Values.tap.proxy.front.srvport }}
readinessProbe:
failureThreshold: 3
periodSeconds: 1
successThreshold: 1
tcpSocket:
port: 80
timeoutSeconds: 1
resources:
limits:
cpu: 750m
memory: 1Gi
requests:
cpu: 50m
memory: 50Mi
dnsPolicy: ClusterFirstWithHostNet
serviceAccountName: kubeshark-service-account
terminationGracePeriodSeconds: 0
tolerations:
- effect: NoExecute
operator: Exists
- effect: NoSchedule
operator: Exists
status: {}


@@ -0,0 +1,21 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: v1
kind: Service
metadata:
creationTimestamp: null
labels:
kubeshark-created-by: kubeshark
kubeshark-managed-by: kubeshark
name: kubeshark-front
namespace: '{{ .Values.tap.selfnamespace }}'
spec:
ports:
- name: kubeshark-front
port: 80
targetPort: 80
selector:
app: kubeshark-front
type: NodePort
status:
loadBalancer: {}


@@ -0,0 +1,22 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
{{- if .Values.tap.persistentstorage }}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
creationTimestamp: null
labels:
kubeshark-cli-version: v1
kubeshark-created-by: kubeshark
kubeshark-managed-by: kubeshark
name: kubeshark-persistent-volume-claim
namespace: '{{ .Values.tap.selfnamespace }}'
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: '{{ .Values.tap.storagelimit }}'
storageClassName: '{{ .Values.tap.storageclass }}'
status: {}
{{- end }}
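Note the `ReadWriteMany` access mode: the same claim is mounted by the worker DaemonSet on every node (see the worker file below), so whatever class is set via `tap.storageclass` must support RWX volumes. To list the classes your cluster offers:
```shell
kubectl get storageclass
```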


@@ -0,0 +1,97 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
creationTimestamp: null
labels:
app: kubeshark-worker-daemon-set
kubeshark-created-by: kubeshark
kubeshark-managed-by: kubeshark
name: kubeshark-worker-daemon-set
namespace: '{{ .Values.tap.selfnamespace }}'
spec:
selector:
matchLabels:
app: kubeshark-worker-daemon-set
kubeshark-created-by: kubeshark
kubeshark-managed-by: kubeshark
template:
metadata:
creationTimestamp: null
labels:
app: kubeshark-worker-daemon-set
kubeshark-created-by: kubeshark
kubeshark-managed-by: kubeshark
name: kubeshark-worker-daemon-set
namespace: '{{ .Values.tap.selfnamespace }}'
spec:
containers:
- command:
- '{{ .Values.tap.debug | ternary "./worker -debug" "./worker" }}'
- -i
- any
- -port
- '{{ .Values.tap.proxy.worker.srvport }}'
- -packet-capture
- '{{ .Values.tap.packetcapture }}'
- -servicemesh
- -tls
- -procfs
- /hostproc
image: '{{ .Values.tap.docker.registry }}/worker:{{ .Values.tap.docker.tag }}'
imagePullPolicy: '{{ .Values.tap.docker.imagepullpolicy }}'
name: kubeshark-worker-daemon-set
ports:
- containerPort: {{ .Values.tap.proxy.worker.srvport }}
hostPort: {{ .Values.tap.proxy.worker.srvport }}
resources:
limits:
cpu: '{{ .Values.tap.resources.worker.limits.cpu }}'
memory: '{{ .Values.tap.resources.worker.limits.memory }}'
requests:
cpu: '{{ .Values.tap.resources.worker.requests.cpu }}'
memory: '{{ .Values.tap.resources.worker.requests.memory }}'
securityContext:
capabilities:
add:
- NET_RAW
- NET_ADMIN
- SYS_ADMIN
- SYS_PTRACE
- DAC_OVERRIDE
- SYS_RESOURCE
drop:
- ALL
volumeMounts:
- mountPath: /hostproc
name: proc
readOnly: true
- mountPath: /sys
name: sys
readOnly: true
{{- if .Values.tap.persistentstorage }}
- mountPath: /app/data
name: kubeshark-persistent-volume
{{- end }}
dnsPolicy: ClusterFirstWithHostNet
hostNetwork: true
serviceAccountName: kubeshark-service-account
terminationGracePeriodSeconds: 0
tolerations:
- effect: NoExecute
operator: Exists
- effect: NoSchedule
operator: Exists
volumes:
- hostPath:
path: /proc
name: proc
- hostPath:
path: /sys
name: sys
{{- if .Values.tap.persistentstorage }}
- name: kubeshark-persistent-volume
persistentVolumeClaim:
claimName: kubeshark-persistent-volume-claim
{{- end }}
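Because the `/app/data` mount and its volume are guarded by `{{- if .Values.tap.persistentstorage }}`, rendering the chart with the toggle flipped both ways is a quick way to confirm the conditional (a local sketch, run from the chart directory):
```shell
# Expect several matches with the flag on, none with it off
helm template . --set tap.persistentstorage=true  | grep -c kubeshark-persistent-volume
helm template . --set tap.persistentstorage=false | grep -c kubeshark-persistent-volume
```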


@@ -0,0 +1,16 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
{{- if .Values.tap.ingress.enabled }}
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
creationTimestamp: null
labels:
kubeshark-cli-version: v1
kubeshark-created-by: kubeshark
kubeshark-managed-by: kubeshark
name: kubeshark-ingress-class
namespace: '{{ .Values.tap.selfnamespace }}'
spec:
controller: k8s.io/ingress-nginx
{{- end }}


@@ -0,0 +1,40 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
{{- if .Values.tap.ingress.enabled }}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
annotations:
certmanager.k8s.io/cluster-issuer: '{{ .Values.tap.ingress.certManager }}'
nginx.ingress.kubernetes.io/rewrite-target: /$2
creationTimestamp: null
labels:
kubeshark-cli-version: v1
kubeshark-created-by: kubeshark
kubeshark-managed-by: kubeshark
name: kubeshark-ingress
namespace: '{{ .Values.tap.selfnamespace }}'
spec:
ingressClassName: kubeshark-ingress-class
rules:
- host: '{{ .Values.tap.ingress.host }}'
http:
paths:
- backend:
service:
name: kubeshark-hub
port:
number: 80
path: /api(/|$)(.*)
pathType: Prefix
- backend:
service:
name: kubeshark-front
port:
number: 80
path: /()(.*)
pathType: Prefix
tls: {{ .Values.tap.ingress.tls | toYaml }}
status:
loadBalancer: {}
{{- end }}
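The `tap.ingress.tls` value is passed through `toYaml` verbatim, so it takes the standard `networking.k8s.io/v1` Ingress TLS shape (`hosts` plus `secretName`). A sketch, assuming cert-manager has already issued the `cert-kubeshark` secret as in `manifests/tls/certificate.yaml` further down (the values file name is illustrative):
```shell
cat > kubeshark-tls-values.yaml <<'EOF'
tap:
  ingress:
    enabled: true
    host: ks.svc.cluster.local
    tls:
      - hosts:
          - ks.svc.cluster.local
        secretName: cert-kubeshark
EOF
helm install kubeshark kubeshark/kubeshark -f kubeshark-tls-values.yaml
```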

helm-chart/values.yaml Normal file

@@ -0,0 +1,65 @@
tap:
docker:
registry: docker.io/kubeshark
tag: latest
imagepullpolicy: Always
imagepullsecrets: []
proxy:
worker:
srvport: 8897
hub:
port: 8898
srvport: 8898
front:
port: 8899
srvport: 8899
host: 127.0.0.1
regex: .*
namespaces: []
selfnamespace: kubeshark
persistentstorage: false
storagelimit: 200Mi
storageclass: standard
dryrun: false
pcap: ""
resources:
worker:
limits:
cpu: 750m
memory: 1Gi
requests:
cpu: 50m
memory: 50Mi
hub:
limits:
cpu: 750m
memory: 1Gi
requests:
cpu: 50m
memory: 50Mi
servicemesh: true
tls: true
packetcapture: libpcap
ignoreTainted: false
resourceLabels: {}
nodeSelectorTerms: []
ingress:
enabled: false
host: ks.svc.cluster.local
tls: []
auth:
approvedDomains: []
certManager: letsencrypt-prod
debug: false
logs:
file: ""
kube:
configpath: ""
context: ""
dumplogs: false
headless: false
license: ""
scripting:
env: {}
source: ""
watchScripts: true
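Any of these defaults can be overridden per install with `--set` flags or a custom values file. One approach (file name illustrative) is to dump the published defaults, edit them, and install from the result:
```shell
helm show values kubeshark/kubeshark > my-values.yaml
# edit my-values.yaml, then:
helm install kubeshark kubeshark/kubeshark -f my-values.yaml
```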


@@ -87,36 +87,6 @@ func (connector *Connector) PostWorkerPodToHub(pod *v1.Pod) {
}
}
type postStorageLimit struct {
Limit int64 `json:"limit"`
}
func (connector *Connector) PostStorageLimitToHub(limit int64) {
payload := &postStorageLimit{
Limit: limit,
}
postStorageLimitUrl := fmt.Sprintf("%s/pcaps/set-storage-limit", connector.url)
if payloadMarshalled, err := json.Marshal(payload); err != nil {
log.Error().Err(err).Msg("Failed to marshal the storage limit:")
} else {
ok := false
for !ok {
var resp *http.Response
if resp, err = utils.Post(postStorageLimitUrl, "application/json", bytes.NewBuffer(payloadMarshalled), connector.client); err != nil || resp.StatusCode != http.StatusOK {
if _, ok := err.(*url.Error); ok {
break
}
log.Warn().Err(err).Msg("Failed sending the storage limit to Hub. Retrying...")
} else {
log.Debug().Int("limit", int(limit)).Msg("Reported storage limit to Hub:")
return
}
time.Sleep(DefaultSleep)
}
}
}
type postRegexRequest struct {
Regex string `json:"regex"`
Namespaces []string `json:"namespaces"`
@@ -181,6 +151,26 @@ func (connector *Connector) PostLicense(license string) {
}
}
func (connector *Connector) PostLicenseSingle(license string) {
postLicenseUrl := fmt.Sprintf("%s/license", connector.url)
payload := postLicenseRequest{
License: license,
}
if payloadMarshalled, err := json.Marshal(payload); err != nil {
log.Error().Err(err).Msg("Failed to marshal the payload:")
} else {
var resp *http.Response
if resp, err = utils.Post(postLicenseUrl, "application/json", bytes.NewBuffer(payloadMarshalled), connector.client); err != nil || resp.StatusCode != http.StatusOK {
log.Warn().Err(err).Msg("Failed sending the license to Hub.")
} else {
log.Debug().Str("license", license).Msg("Reported license to Hub:")
return
}
}
}
func (connector *Connector) PostEnv(env map[string]interface{}) {
if len(env) == 0 {
return

kubectl.sh Executable file

@@ -0,0 +1,13 @@
#!/bin/bash
# Useful kubectl commands for Kubeshark development
# This command outputs all Kubernetes resources as YAML and pipes them to VS Code
if [[ "$1" = "view-all-resources" ]] ; then
kubectl get $(kubectl api-resources | awk '{print $1}' | tail -n +2 | tr '\n' ',' | sed s/,\$//) -o yaml | code -
fi
# This command outputs all Kubernetes resources in the "kubeshark" namespace as YAML and pipes them to VS Code
if [[ "$1" = "view-kubeshark-resources" ]] ; then
kubectl get $(kubectl api-resources | awk '{print $1}' | tail -n +2 | tr '\n' ',' | sed s/,\$//) -n kubeshark -o yaml | code -
fi
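Usage is one subcommand per invocation, for example:
```shell
./kubectl.sh view-kubeshark-resources
```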


@@ -14,11 +14,15 @@ const (
ServiceAccountName = SelfResourcesPrefix + "service-account"
WorkerDaemonSetName = SelfResourcesPrefix + "worker-daemon-set"
WorkerPodName = SelfResourcesPrefix + "worker"
PersistentVolumeName = SelfResourcesPrefix + "persistent-volume"
PersistentVolumeClaimName = SelfResourcesPrefix + "persistent-volume-claim"
IngressName = SelfResourcesPrefix + "ingress"
IngressClassName = SelfResourcesPrefix + "ingress-class"
PersistentVolumeHostPath = "/app/data"
MinKubernetesServerVersion = "1.16.0"
)
const (
LabelPrefixApp = "app.kubernetes.io/"
LabelManagedBy = LabelPrefixApp + "managed-by"
LabelCreatedBy = LabelPrefixApp + "created-by"
LabelManagedBy = SelfResourcesPrefix + "managed-by"
LabelCreatedBy = SelfResourcesPrefix + "created-by"
)

File diff suppressed because it is too large.

kubernetes/types.go Normal file

@@ -0,0 +1,147 @@
package kubernetes
import (
core "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
applyconfapp "k8s.io/client-go/applyconfigurations/apps/v1"
applyconfcore "k8s.io/client-go/applyconfigurations/core/v1"
v1 "k8s.io/client-go/applyconfigurations/core/v1"
applyconfmeta "k8s.io/client-go/applyconfigurations/meta/v1"
)
type DaemonSetPod struct {
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
Spec core.PodSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
}
type DaemonSetSpec struct {
Selector metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,1,opt,name=selector"`
Template DaemonSetPod `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"`
}
type DaemonSet struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
Spec DaemonSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
}
func (d *DaemonSet) GenerateApplyConfiguration(name string, namespace string, podName string, provider *Provider) *applyconfapp.DaemonSetApplyConfiguration {
// Pod
p := d.Spec.Template.Spec
podSpec := applyconfcore.PodSpec()
podSpec.WithHostNetwork(p.HostNetwork)
podSpec.WithDNSPolicy(p.DNSPolicy)
podSpec.WithTerminationGracePeriodSeconds(*p.TerminationGracePeriodSeconds)
podSpec.WithServiceAccountName(p.ServiceAccountName)
// Containers
for _, c := range d.Spec.Template.Spec.Containers {
// Common
container := applyconfcore.Container()
container.WithName(c.Name)
container.WithImage(c.Image)
container.WithImagePullPolicy(c.ImagePullPolicy)
container.WithCommand(c.Command...)
// Linux capabilities
caps := applyconfcore.Capabilities().WithAdd(c.SecurityContext.Capabilities.Add...).WithDrop(c.SecurityContext.Capabilities.Drop...)
container.WithSecurityContext(applyconfcore.SecurityContext().WithCapabilities(caps))
// Environment variables
var envvars []*v1.EnvVarApplyConfiguration
for _, e := range c.Env {
envvars = append(envvars, applyconfcore.EnvVar().WithName(e.Name).WithValue(e.Value))
}
container.WithEnv(envvars...)
// Resource limits
resources := applyconfcore.ResourceRequirements().WithRequests(c.Resources.Requests).WithLimits(c.Resources.Limits)
container.WithResources(resources)
// Volume mounts
for _, m := range c.VolumeMounts {
volumeMount := applyconfcore.VolumeMount().WithName(m.Name).WithMountPath(m.MountPath).WithReadOnly(m.ReadOnly)
container.WithVolumeMounts(volumeMount)
}
podSpec.WithContainers(container)
}
// Node affinity (RequiredDuringSchedulingIgnoredDuringExecution only)
if p.Affinity != nil {
nodeSelector := applyconfcore.NodeSelector()
for _, term := range p.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms {
nodeSelectorTerm := applyconfcore.NodeSelectorTerm()
for _, selector := range term.MatchExpressions {
nodeSelectorRequirement := applyconfcore.NodeSelectorRequirement()
nodeSelectorRequirement.WithKey(selector.Key)
nodeSelectorRequirement.WithOperator(selector.Operator)
nodeSelectorRequirement.WithValues(selector.Values...)
nodeSelectorTerm.WithMatchExpressions(nodeSelectorRequirement)
}
for _, selector := range term.MatchFields {
nodeSelectorRequirement := applyconfcore.NodeSelectorRequirement()
nodeSelectorRequirement.WithKey(selector.Key)
nodeSelectorRequirement.WithOperator(selector.Operator)
nodeSelectorRequirement.WithValues(selector.Values...)
nodeSelectorTerm.WithMatchFields(nodeSelectorRequirement)
}
nodeSelector.WithNodeSelectorTerms(nodeSelectorTerm)
}
nodeAffinity := applyconfcore.NodeAffinity()
nodeAffinity.WithRequiredDuringSchedulingIgnoredDuringExecution(nodeSelector)
affinity := applyconfcore.Affinity()
affinity.WithNodeAffinity(nodeAffinity)
podSpec.WithAffinity(affinity)
}
// Tolerations
for _, t := range p.Tolerations {
toleration := applyconfcore.Toleration()
toleration.WithKey(t.Key)
toleration.WithOperator(t.Operator)
toleration.WithValue(t.Value)
toleration.WithEffect(t.Effect)
if t.TolerationSeconds != nil {
toleration.WithTolerationSeconds(*t.TolerationSeconds)
}
podSpec.WithTolerations(toleration)
}
// Volumes
for _, v := range p.Volumes {
volume := applyconfcore.Volume()
if v.HostPath != nil {
volume.WithName(v.Name).WithHostPath(applyconfcore.HostPathVolumeSource().WithPath(v.HostPath.Path))
}
if v.PersistentVolumeClaim != nil {
volume.WithName(v.Name).WithPersistentVolumeClaim(applyconfcore.PersistentVolumeClaimVolumeSource().WithClaimName(v.PersistentVolumeClaim.ClaimName))
}
podSpec.WithVolumes(volume)
}
// Image pull secrets
if len(p.ImagePullSecrets) > 0 {
localObjectReference := applyconfcore.LocalObjectReference()
for _, o := range p.ImagePullSecrets {
localObjectReference.WithName(o.Name)
}
podSpec.WithImagePullSecrets(localObjectReference)
}
podTemplate := applyconfcore.PodTemplateSpec()
podTemplate.WithLabels(buildWithDefaultLabels(map[string]string{
"app": podName,
}, provider))
podTemplate.WithSpec(podSpec)
labelSelector := applyconfmeta.LabelSelector()
labelSelector.WithMatchLabels(map[string]string{"app": podName})
daemonSet := applyconfapp.DaemonSet(name, namespace)
daemonSet.
WithLabels(buildWithDefaultLabels(map[string]string{}, provider)).
WithSpec(applyconfapp.DaemonSetSpec().WithSelector(labelSelector).WithTemplate(podTemplate))
return daemonSet
}


@@ -1,65 +1,14 @@
package kubernetes
import (
"github.com/kubeshark/base/pkg/models"
"github.com/kubeshark/kubeshark/config"
core "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func GetNodeHostToTargetedPodsMap(targetedPods []core.Pod) models.NodeToPodsMap {
nodeToTargetedPodsMap := make(models.NodeToPodsMap)
for _, pod := range targetedPods {
minimizedPod := getMinimizedPod(pod)
existingList := nodeToTargetedPodsMap[pod.Spec.NodeName]
if existingList == nil {
nodeToTargetedPodsMap[pod.Spec.NodeName] = []core.Pod{minimizedPod}
} else {
nodeToTargetedPodsMap[pod.Spec.NodeName] = append(nodeToTargetedPodsMap[pod.Spec.NodeName], minimizedPod)
}
}
return nodeToTargetedPodsMap
}
func getMinimizedPod(fullPod core.Pod) core.Pod {
return core.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: fullPod.Name,
Namespace: fullPod.Namespace,
},
Status: core.PodStatus{
PodIP: fullPod.Status.PodIP,
ContainerStatuses: getMinimizedContainerStatuses(fullPod),
},
}
}
func getMinimizedContainerStatuses(fullPod core.Pod) []core.ContainerStatus {
result := make([]core.ContainerStatus, len(fullPod.Status.ContainerStatuses))
for i, container := range fullPod.Status.ContainerStatuses {
result[i] = core.ContainerStatus{
ContainerID: container.ContainerID,
}
}
return result
}
func GetPodInfosForPods(pods []core.Pod) []*models.PodInfo {
podInfos := make([]*models.PodInfo, 0)
for _, pod := range pods {
podInfos = append(podInfos, &models.PodInfo{Name: pod.Name, Namespace: pod.Namespace, NodeName: pod.Spec.NodeName})
}
return podInfos
}
func buildWithDefaultLabels(labels map[string]string, provider *Provider) map[string]string {
labels["LabelManagedBy"] = provider.managedBy
labels["LabelCreatedBy"] = provider.createdBy
labels[LabelManagedBy] = provider.managedBy
labels[LabelCreatedBy] = provider.createdBy
for k, v := range config.Config.ResourceLabels {
for k, v := range config.Config.Tap.ResourceLabels {
labels[k] = v
}


@@ -3,6 +3,7 @@ package kubernetes
import (
"context"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/config/configStructs"
"github.com/kubeshark/kubeshark/docker"
"github.com/rs/zerolog/log"
@@ -14,13 +15,28 @@ func CreateWorkers(
selfServiceAccountExists bool,
ctx context.Context,
namespace string,
resources configStructs.Resources,
resources configStructs.ResourceRequirements,
imagePullPolicy core.PullPolicy,
imagePullSecrets []core.LocalObjectReference,
serviceMesh bool,
tls bool,
debug bool,
) error {
if config.Config.Tap.PersistentStorage {
persistentVolumeClaim, err := kubernetesProvider.BuildPersistentVolumeClaim()
if err != nil {
return err
}
if _, err = kubernetesProvider.CreatePersistentVolumeClaim(
ctx,
namespace,
persistentVolumeClaim,
); err != nil {
return err
}
}
image := docker.GetWorkerImage()
var serviceAccountName string


@@ -0,0 +1,12 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: v1
kind: Namespace
metadata:
creationTimestamp: null
labels:
kubeshark-created-by: kubeshark
kubeshark-managed-by: kubeshark
name: kubeshark
spec: {}
status: {}


@@ -0,0 +1,12 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
kubeshark-cli-version: v1
kubeshark-created-by: kubeshark
kubeshark-managed-by: kubeshark
name: kubeshark-service-account
namespace: kubeshark


@@ -0,0 +1,28 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
kubeshark-cli-version: v1
kubeshark-created-by: kubeshark
kubeshark-managed-by: kubeshark
name: kubeshark-cluster-role
namespace: kubeshark
rules:
- apiGroups:
- ""
- extensions
- apps
- networking.k8s.io
resources:
- pods
- services
- endpoints
- persistentvolumeclaims
- ingresses
verbs:
- list
- get
- watch


@@ -0,0 +1,20 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
kubeshark-cli-version: v1
kubeshark-created-by: kubeshark
kubeshark-managed-by: kubeshark
name: kubeshark-cluster-role-binding
namespace: kubeshark
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubeshark-cluster-role
subjects:
- kind: ServiceAccount
name: kubeshark-service-account
namespace: kubeshark

manifests/04-hub-pod.yaml Normal file

@@ -0,0 +1,48 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: null
labels:
app: kubeshark-hub
kubeshark-created-by: kubeshark
kubeshark-managed-by: kubeshark
name: kubeshark-hub
namespace: kubeshark
spec:
containers:
- command:
- ./hub
env:
- name: POD_REGEX
value: .*
- name: NAMESPACES
- name: LICENSE
- name: SCRIPTING_ENV
value: '{}'
- name: SCRIPTING_SCRIPTS
value: '[]'
- name: AUTH_APPROVED_DOMAINS
image: docker.io/kubeshark/hub:latest
imagePullPolicy: Always
name: kubeshark-hub
ports:
- containerPort: 80
hostPort: 8898
resources:
limits:
cpu: 750m
memory: 1Gi
requests:
cpu: 50m
memory: 50Mi
dnsPolicy: ClusterFirstWithHostNet
serviceAccountName: kubeshark-service-account
terminationGracePeriodSeconds: 0
tolerations:
- effect: NoExecute
operator: Exists
- effect: NoSchedule
operator: Exists
status: {}


@@ -0,0 +1,21 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: v1
kind: Service
metadata:
creationTimestamp: null
labels:
kubeshark-created-by: kubeshark
kubeshark-managed-by: kubeshark
name: kubeshark-hub
namespace: kubeshark
spec:
ports:
- name: kubeshark-hub
port: 80
targetPort: 80
selector:
app: kubeshark-hub
type: NodePort
status:
loadBalancer: {}


@@ -0,0 +1,50 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: null
labels:
app: kubeshark-front
kubeshark-created-by: kubeshark
kubeshark-managed-by: kubeshark
name: kubeshark-front
namespace: kubeshark
spec:
containers:
- env:
- name: REACT_APP_DEFAULT_FILTER
value: ' '
- name: REACT_APP_HUB_HOST
value: ' '
- name: REACT_APP_HUB_PORT
value: "8898"
image: docker.io/kubeshark/front:latest
imagePullPolicy: Always
name: kubeshark-front
ports:
- containerPort: 80
hostPort: 8899
readinessProbe:
failureThreshold: 3
periodSeconds: 1
successThreshold: 1
tcpSocket:
port: 80
timeoutSeconds: 1
resources:
limits:
cpu: 750m
memory: 1Gi
requests:
cpu: 50m
memory: 50Mi
dnsPolicy: ClusterFirstWithHostNet
serviceAccountName: kubeshark-service-account
terminationGracePeriodSeconds: 0
tolerations:
- effect: NoExecute
operator: Exists
- effect: NoSchedule
operator: Exists
status: {}


@@ -0,0 +1,21 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: v1
kind: Service
metadata:
creationTimestamp: null
labels:
kubeshark-created-by: kubeshark
kubeshark-managed-by: kubeshark
name: kubeshark-front
namespace: kubeshark
spec:
ports:
- name: kubeshark-front
port: 80
targetPort: 80
selector:
app: kubeshark-front
type: NodePort
status:
loadBalancer: {}


@@ -0,0 +1,20 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
creationTimestamp: null
labels:
kubeshark-cli-version: v1
kubeshark-created-by: kubeshark
kubeshark-managed-by: kubeshark
name: kubeshark-persistent-volume-claim
namespace: kubeshark
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 200Mi
storageClassName: standard
status: {}


@@ -0,0 +1,93 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
creationTimestamp: null
labels:
app: kubeshark-worker-daemon-set
kubeshark-created-by: kubeshark
kubeshark-managed-by: kubeshark
name: kubeshark-worker-daemon-set
namespace: kubeshark
spec:
selector:
matchLabels:
app: kubeshark-worker-daemon-set
kubeshark-created-by: kubeshark
kubeshark-managed-by: kubeshark
template:
metadata:
creationTimestamp: null
labels:
app: kubeshark-worker-daemon-set
kubeshark-created-by: kubeshark
kubeshark-managed-by: kubeshark
name: kubeshark-worker-daemon-set
namespace: kubeshark
spec:
containers:
- command:
- ./worker
- -i
- any
- -port
- "8897"
- -packet-capture
- libpcap
- -servicemesh
- -tls
- -procfs
- /hostproc
image: docker.io/kubeshark/worker:latest
imagePullPolicy: Always
name: kubeshark-worker-daemon-set
ports:
- containerPort: 8897
hostPort: 8897
resources:
limits:
cpu: 750m
memory: 1Gi
requests:
cpu: 50m
memory: 50Mi
securityContext:
capabilities:
add:
- NET_RAW
- NET_ADMIN
- SYS_ADMIN
- SYS_PTRACE
- DAC_OVERRIDE
- SYS_RESOURCE
drop:
- ALL
volumeMounts:
- mountPath: /hostproc
name: proc
readOnly: true
- mountPath: /sys
name: sys
readOnly: true
- mountPath: /app/data
name: kubeshark-persistent-volume
dnsPolicy: ClusterFirstWithHostNet
hostNetwork: true
serviceAccountName: kubeshark-service-account
terminationGracePeriodSeconds: 0
tolerations:
- effect: NoExecute
operator: Exists
- effect: NoSchedule
operator: Exists
volumes:
- hostPath:
path: /proc
name: proc
- hostPath:
path: /sys
name: sys
- name: kubeshark-persistent-volume
persistentVolumeClaim:
claimName: kubeshark-persistent-volume-claim


@@ -0,0 +1,14 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
creationTimestamp: null
labels:
kubeshark-cli-version: v1
kubeshark-created-by: kubeshark
kubeshark-managed-by: kubeshark
name: kubeshark-ingress-class
namespace: kubeshark
spec:
controller: k8s.io/ingress-nginx

manifests/11-ingress.yaml Normal file

@@ -0,0 +1,37 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
annotations:
certmanager.k8s.io/cluster-issuer: letsencrypt-prod
nginx.ingress.kubernetes.io/rewrite-target: /$2
creationTimestamp: null
labels:
kubeshark-cli-version: v1
kubeshark-created-by: kubeshark
kubeshark-managed-by: kubeshark
name: kubeshark-ingress
namespace: kubeshark
spec:
ingressClassName: kubeshark-ingress-class
rules:
- host: ks.svc.cluster.local
http:
paths:
- backend:
service:
name: kubeshark-hub
port:
number: 80
path: /api(/|$)(.*)
pathType: Prefix
- backend:
service:
name: kubeshark-front
port:
number: 80
path: /()(.*)
pathType: Prefix
status:
loadBalancer: {}

manifests/README.md Normal file

@@ -0,0 +1,35 @@
# Manifests
## Apply
Clone the repo:
```shell
git clone git@github.com:kubeshark/kubeshark.git --depth 1
cd kubeshark/manifests
```
To apply the manifests, run:
```shell
kubectl apply -f .
```
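Once applied, the hub pod, the front pod, and one worker pod per node should appear in the `kubeshark` namespace:
```shell
kubectl get pods -n kubeshark
```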
To clean up:
```shell
kubectl delete namespace kubeshark
kubectl delete clusterrolebinding kubeshark-cluster-role-binding
kubectl delete clusterrole kubeshark-cluster-role
```
## Accessing
Set up port forwarding:
```shell
kubectl port-forward -n kubeshark service/kubeshark-hub 8898:80 & \
kubectl port-forward -n kubeshark service/kubeshark-front 8899:80
```
Visit [localhost:8899](http://localhost:8899)


@@ -0,0 +1,12 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: kubeshark-tls
namespace: default
spec:
issuerRef:
name: letsencrypt-prod
kind: ClusterIssuer
secretName: cert-kubeshark
dnsNames:
- ks.svc.cluster.local


@@ -0,0 +1,14 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-prod
spec:
acme:
server: https://acme-v02.api.letsencrypt.org/directory
email: info@kubeshark.com
privateKeySecretRef:
name: letsencrypt-prod-key
solvers:
- http01:
ingress:
class: kubeshark-ingress-class

manifests/tls/run.sh Executable file

@@ -0,0 +1,15 @@
#!/bin/bash
__dir="$(cd -P -- "$(dirname -- "$0")" && pwd -P)"
helm repo add jetstack https://charts.jetstack.io
helm repo update
kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.9.1/cert-manager.crds.yaml
helm install \
cert-manager jetstack/cert-manager \
--namespace cert-manager \
--create-namespace \
--version v1.9.1
kubectl apply -f ${__dir}/cluster-issuer.yaml
kubectl apply -f ${__dir}/certificate.yaml
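After the script finishes, cert-manager should reconcile the issuer and the certificate; a quick check (these resource types exist once the cert-manager CRDs are installed):
```shell
kubectl get clusterissuer letsencrypt-prod
kubectl get certificate kubeshark-tls -n default
```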


@@ -9,7 +9,9 @@ import (
var (
Software = "Kubeshark"
Program = "kubeshark"
Description = "The API Traffic Analyzer for Kubernetes"
Website = "https://kubeshark.co"
Email = "info@kubeshark.co"
Ver = "0.0"
Branch = "develop"
GitCommitHash = "" // this var is overridden using ldflags in makefile when building

misc/fsUtils/globUtils.go Normal file

@@ -0,0 +1,22 @@
package fsUtils
import (
"fmt"
"os"
"path/filepath"
)
func RemoveFilesByExtension(dirPath string, ext string) error {
files, err := filepath.Glob(filepath.Join(dirPath, fmt.Sprintf("/*.%s", ext)))
if err != nil {
return err
}
for _, f := range files {
if err := os.Remove(f); err != nil {
return err
}
}
return nil
}


@@ -15,13 +15,13 @@ import (
func DumpLogs(ctx context.Context, provider *kubernetes.Provider, filePath string) error {
podExactRegex := regexp.MustCompile("^" + kubernetes.SelfResourcesPrefix)
pods, err := provider.ListAllPodsMatchingRegex(ctx, podExactRegex, []string{config.Config.SelfNamespace})
pods, err := provider.ListAllPodsMatchingRegex(ctx, podExactRegex, []string{config.Config.Tap.SelfNamespace})
if err != nil {
return err
}
if len(pods) == 0 {
return fmt.Errorf("No %s pods found in namespace %s", misc.Software, config.Config.SelfNamespace)
return fmt.Errorf("No %s pods found in namespace %s", misc.Software, config.Config.Tap.SelfNamespace)
}
newZipFile, err := os.Create(filePath)
@@ -60,23 +60,23 @@ func DumpLogs(ctx context.Context, provider *kubernetes.Provider, filePath strin
}
}
events, err := provider.GetNamespaceEvents(ctx, config.Config.SelfNamespace)
events, err := provider.GetNamespaceEvents(ctx, config.Config.Tap.SelfNamespace)
if err != nil {
log.Error().Err(err).Msg("Failed to get k8b events!")
} else {
log.Debug().Str("namespace", config.Config.SelfNamespace).Msg("Successfully read events.")
log.Debug().Str("namespace", config.Config.Tap.SelfNamespace).Msg("Successfully read events.")
}
if err := AddStrToZip(zipWriter, events, fmt.Sprintf("%s_events.log", config.Config.SelfNamespace)); err != nil {
if err := AddStrToZip(zipWriter, events, fmt.Sprintf("%s_events.log", config.Config.Tap.SelfNamespace)); err != nil {
log.Error().Err(err).Msg("Failed write logs!")
} else {
log.Debug().Str("namespace", config.Config.SelfNamespace).Msg("Successfully added events.")
log.Debug().Str("namespace", config.Config.Tap.SelfNamespace).Msg("Successfully added events.")
}
if err := AddFileToZip(zipWriter, config.Config.ConfigFilePath); err != nil {
if err := AddFileToZip(zipWriter, config.ConfigFilePath); err != nil {
log.Error().Err(err).Msg("Failed write file!")
} else {
log.Debug().Str("file-path", config.Config.ConfigFilePath).Msg("Successfully added file.")
log.Debug().Str("file-path", config.ConfigFilePath).Msg("Successfully added file.")
}
log.Info().Str("path", filePath).Msg("You can find the ZIP file with all logs at:")


@@ -44,7 +44,7 @@ func CheckNewerVersion() {
} else {
downloadCommand = fmt.Sprintf("sh <(curl -Ls %s/install)", misc.Website)
}
msg := fmt.Sprintf("There is a new release! %v -> %v run:", misc.Ver, latestVersion)
msg := fmt.Sprintf("There is a new release! %v -> %v Please upgrade to the latest release, as new releases are not always backward compatible. Run:", misc.Ver, latestVersion)
log.Warn().Str("command", downloadCommand).Msg(fmt.Sprintf(utils.Yellow, msg))
}
}


@@ -35,6 +35,11 @@ func CleanUpSelfResources(ctx context.Context, cancel context.CancelFunc, kubern
func cleanUpNonRestrictedMode(ctx context.Context, cancel context.CancelFunc, kubernetesProvider *kubernetes.Provider, selfResourcesNamespace string) []string {
leftoverResources := make([]string, 0)
if err := kubernetesProvider.RemoveIngressClass(ctx, kubernetes.IngressClassName); err != nil {
resourceDesc := kubernetes.IngressClassName
handleDeletionError(err, resourceDesc, &leftoverResources)
}
if err := kubernetesProvider.RemoveNamespace(ctx, selfResourcesNamespace); err != nil {
resourceDesc := fmt.Sprintf("Namespace %s", selfResourcesNamespace)
handleDeletionError(err, resourceDesc, &leftoverResources)
@@ -108,6 +113,11 @@ func cleanUpRestrictedMode(ctx context.Context, kubernetesProvider *kubernetes.P
handleDeletionError(err, resourceDesc, &leftoverResources)
}
if err := kubernetesProvider.RemovePersistentVolumeClaim(ctx, selfResourcesNamespace, kubernetes.PersistentVolumeClaimName); err != nil {
resourceDesc := fmt.Sprintf("Persistent Volume %s in namespace %s", kubernetes.PersistentVolumeClaimName, selfResourcesNamespace)
handleDeletionError(err, resourceDesc, &leftoverResources)
}
if err := kubernetesProvider.RemoveDaemonSet(ctx, selfResourcesNamespace, kubernetes.WorkerDaemonSetName); err != nil {
resourceDesc := fmt.Sprintf("DaemonSet %s in namespace %s", kubernetes.WorkerDaemonSetName, selfResourcesNamespace)
handleDeletionError(err, resourceDesc, &leftoverResources)


@@ -14,30 +14,25 @@ import (
core "k8s.io/api/core/v1"
)
func CreateHubResources(ctx context.Context, kubernetesProvider *kubernetes.Provider, isNsRestrictedMode bool, selfNamespace string, hubResources configStructs.Resources, imagePullPolicy core.PullPolicy, imagePullSecrets []core.LocalObjectReference, debug bool) (bool, error) {
func CreateHubResources(ctx context.Context, kubernetesProvider *kubernetes.Provider, isNsRestrictedMode bool, selfNamespace string, hubResources configStructs.ResourceRequirements, imagePullPolicy core.PullPolicy, imagePullSecrets []core.LocalObjectReference, debug bool) (bool, error) {
if !isNsRestrictedMode {
if err := createSelfNamespace(ctx, kubernetesProvider, selfNamespace); err != nil {
log.Debug().Err(err).Send()
}
}
selfServiceAccountExists, err := createRBACIfNecessary(ctx, kubernetesProvider, isNsRestrictedMode, selfNamespace, []string{"pods", "services", "endpoints"})
err := kubernetesProvider.CreateSelfRBAC(ctx, selfNamespace)
var selfServiceAccountExists bool
if err != nil {
selfServiceAccountExists = true
log.Warn().Err(errormessage.FormatError(err)).Msg(fmt.Sprintf("Failed to ensure the resources required for IP resolving. %s will not resolve target IPs to names.", misc.Software))
}
var serviceAccountName string
if selfServiceAccountExists {
serviceAccountName = kubernetes.ServiceAccountName
} else {
serviceAccountName = ""
}
opts := &kubernetes.PodOptions{
hubOpts := &kubernetes.PodOptions{
Namespace: selfNamespace,
PodName: kubernetes.HubPodName,
PodImage: docker.GetHubImage(),
ServiceAccountName: serviceAccountName,
ServiceAccountName: kubernetes.ServiceAccountName,
Resources: hubResources,
ImagePullPolicy: imagePullPolicy,
ImagePullSecrets: imagePullSecrets,
@@ -48,14 +43,14 @@ func CreateHubResources(ctx context.Context, kubernetesProvider *kubernetes.Prov
Namespace: selfNamespace,
PodName: kubernetes.FrontPodName,
PodImage: docker.GetWorkerImage(),
ServiceAccountName: serviceAccountName,
ServiceAccountName: kubernetes.ServiceAccountName,
Resources: hubResources,
ImagePullPolicy: imagePullPolicy,
ImagePullSecrets: imagePullSecrets,
Debug: debug,
}
if err := createSelfHubPod(ctx, kubernetesProvider, opts); err != nil {
if err := createSelfHubPod(ctx, kubernetesProvider, hubOpts); err != nil {
return selfServiceAccountExists, err
}
@@ -63,43 +58,40 @@ func CreateHubResources(ctx context.Context, kubernetesProvider *kubernetes.Prov
return selfServiceAccountExists, err
}
// TODO: Why the port values need to be 80?
_, err = kubernetesProvider.CreateService(ctx, selfNamespace, kubernetes.HubServiceName, kubernetes.HubServiceName, 80, 80)
_, err = kubernetesProvider.CreateService(ctx, selfNamespace, kubernetesProvider.BuildHubService(selfNamespace))
if err != nil {
return selfServiceAccountExists, err
}
log.Info().Str("service", kubernetes.HubServiceName).Msg("Successfully created a service.")
_, err = kubernetesProvider.CreateService(ctx, selfNamespace, kubernetes.FrontServiceName, kubernetes.FrontServiceName, 80, int32(config.Config.Tap.Proxy.Front.DstPort))
_, err = kubernetesProvider.CreateService(ctx, selfNamespace, kubernetesProvider.BuildFrontService(selfNamespace))
if err != nil {
return selfServiceAccountExists, err
}
log.Info().Str("service", kubernetes.FrontServiceName).Msg("Successfully created a service.")
if config.Config.Tap.Ingress.Enabled {
_, err = kubernetesProvider.CreateIngressClass(ctx, kubernetesProvider.BuildIngressClass())
if err != nil {
return selfServiceAccountExists, err
}
log.Info().Str("ingress-class", kubernetes.IngressClassName).Msg("Successfully created an ingress class.")
_, err = kubernetesProvider.CreateIngress(ctx, selfNamespace, kubernetesProvider.BuildIngress())
if err != nil {
return selfServiceAccountExists, err
}
log.Info().Str("ingress", kubernetes.IngressName).Msg("Successfully created an ingress.")
}
return selfServiceAccountExists, nil
}
func createSelfNamespace(ctx context.Context, kubernetesProvider *kubernetes.Provider, selfNamespace string) error {
_, err := kubernetesProvider.CreateNamespace(ctx, selfNamespace)
_, err := kubernetesProvider.CreateNamespace(ctx, kubernetesProvider.BuildNamespace(selfNamespace))
return err
}
func createRBACIfNecessary(ctx context.Context, kubernetesProvider *kubernetes.Provider, isNsRestrictedMode bool, selfNamespace string, resources []string) (bool, error) {
if !isNsRestrictedMode {
if err := kubernetesProvider.CreateSelfRBAC(ctx, selfNamespace, kubernetes.ServiceAccountName, kubernetes.ClusterRoleName, kubernetes.ClusterRoleBindingName, misc.RBACVersion, resources); err != nil {
return false, err
}
} else {
if err := kubernetesProvider.CreateSelfRBACNamespaceRestricted(ctx, selfNamespace, kubernetes.ServiceAccountName, kubernetes.RoleName, kubernetes.RoleBindingName, misc.RBACVersion); err != nil {
return false, err
}
}
return true, nil
}
func createSelfHubPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, opts *kubernetes.PodOptions) error {
pod, err := kubernetesProvider.BuildHubPod(opts)
if err != nil {
@@ -113,7 +105,7 @@ func createSelfHubPod(ctx context.Context, kubernetesProvider *kubernetes.Provid
}
func createFrontPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, opts *kubernetes.PodOptions) error {
pod, err := kubernetesProvider.BuildFrontPod(opts, config.Config.Tap.Proxy.Host, fmt.Sprintf("%d", config.Config.Tap.Proxy.Hub.SrcPort))
pod, err := kubernetesProvider.BuildFrontPod(opts, config.Config.Tap.Proxy.Host, fmt.Sprintf("%d", config.Config.Tap.Proxy.Hub.Port))
if err != nil {
return err
}


@@ -27,7 +27,7 @@ func PrettyJson(data interface{}) (string, error) {
func PrettyYaml(data interface{}) (string, error) {
buffer := new(bytes.Buffer)
encoder := yaml.NewEncoder(buffer)
encoder.SetIndent(0)
encoder.SetIndent(2)
err := encoder.Encode(data)
if err != nil {
@@ -35,3 +35,18 @@ func PrettyYaml(data interface{}) (string, error) {
}
return buffer.String(), nil
}
func PrettyYamlOmitEmpty(data interface{}) (string, error) {
d, err := json.Marshal(data)
if err != nil {
return empty, err
}
var cleanData map[string]interface{}
err = json.Unmarshal(d, &cleanData)
if err != nil {
return empty, err
}
return PrettyYaml(cleanData)
}


@@ -1,7 +0,0 @@
package utils
import "github.com/docker/go-units"
func HumanReadableToBytes(humanReadableSize string) (int64, error) {
return units.FromHumanSize(humanReadableSize)
}