Compare commits


160 Commits
40.0 ... 50.0

Author SHA1 Message Date
M. Mert Yildiran
d00d2eafa7 🔖 Bump the Helm chart version to 50.0 2023-08-22 23:25:48 +03:00
M. Mert Yildiran
63eb39b451 🚑 Fix the pod regex in the watch function for the recent changes related to pod names 2023-08-22 23:24:40 +03:00
M. Mert Yildiran
149a8b7efe 🔧 Remove the KMM related Makefile rules 2023-08-22 19:02:39 +03:00
M. Mert Yildiran
247fbc1291 🔥 Delete the module loader Dockerfile 2023-08-22 19:02:22 +03:00
M. Mert Yildiran
0e74238e56 🚀 Rename some of the recently added Kubernetes resources 2023-08-22 19:00:22 +03:00
M. Mert Yildiran
05ecef557f 🔧 Run make generate-manifests 2023-08-22 18:54:25 +03:00
Luiz Oliveira
63325ec890 🚀 Add readiness and liveness probes to worker DaemonSet (#1414)
Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>
2023-08-19 20:32:49 +03:00
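For reference, a minimal sketch of readiness and liveness probes on a DaemonSet container; the probe type, port, and timing values below are illustrative assumptions, not the values used by the chart:

```yaml
# Sketch only — port and timings are placeholders, not taken from the Kubeshark chart.
containers:
  - name: worker
    livenessProbe:
      tcpSocket:
        port: 8897           # assumed worker port
      initialDelaySeconds: 5
      periodSeconds: 10
    readinessProbe:
      tcpSocket:
        port: 8897           # assumed worker port
      periodSeconds: 5
```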
M. Mert Yildiran
579cb47ecf 🔥 Remove networking.k8s.io from apiGroups and ingresses from resources in ClusterRole 2023-08-17 17:29:54 +03:00
M. Mert Yildiran
7ed4088b4b Load the environment variables from kubeshark-hub-secret in worker DaemonSet 2023-08-17 00:56:16 +03:00
Luiz Oliveira
f95db49317 🚀 Change Hub's and Front's resource type from Pod to Deployment (#1412)
* change services to ClusterIP and update selector labels

Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>

* replace kind of hub and front to Deployments

Pod -> Deployments
hub config -> Uses a config-map
license -> Uses a secret

Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>

* uses map of labels to select pods and services

Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>

* remove ListAllNamespaces method

Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>

* include livenessProbe and readinessProbe for deployments

Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>

---------

Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>
2023-08-16 02:35:31 +03:00
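A rough illustration of the Deployment + ClusterIP Service pattern this PR describes (names, labels, image, and ports are hypothetical, not the chart's exact manifests):

```yaml
# Hypothetical sketch of a Deployment selected by a map of labels and a ClusterIP Service.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kubeshark-hub
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: kubeshark
      app.kubernetes.io/component: hub
  template:
    metadata:
      labels:
        app.kubernetes.io/name: kubeshark
        app.kubernetes.io/component: hub
    spec:
      containers:
        - name: hub
          image: docker.io/kubeshark/hub:latest   # placeholder image/tag
---
apiVersion: v1
kind: Service
metadata:
  name: kubeshark-hub
spec:
  type: ClusterIP
  selector:
    app.kubernetes.io/name: kubeshark
    app.kubernetes.io/component: hub
  ports:
    - port: 80              # placeholder port
      targetPort: 8080      # placeholder port
```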
M. Mert Yildiran
749b19512e Bring back the app labels 2023-08-15 18:33:00 +03:00
M. Mert Yildiran
746eff1e23 🔥 Remove the dead code in kubernetes package 2023-08-15 17:46:23 +03:00
M. Mert Yildiran
b7a8d9a41a Fix the label order 2023-08-15 17:44:39 +03:00
Luiz Oliveira
995fb96f24 🎨 Rename worker labels to the same pattern just like the other resources (#1410)
* rename worker labels to the same pattern from other kubeshark components

Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>

* update matchLabels from daemonsets

Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>

---------

Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>
2023-08-15 16:56:43 +03:00
M. Mert Yildiran
5d4557d1dd Add SYS_MODULE Linux capability to the worker DaemonSet 2023-08-14 17:49:14 +03:00
M. Mert Yildiran
78c1c02fe6 🔥 Delete the recently added KMM related resources 2023-08-14 17:43:44 +03:00
M. Mert Yildiran
742a56272b 👕 Fix the linter error 2023-08-12 03:36:01 +03:00
M. Mert Yildiran
b7b3603e57 Add cert-manager Helm dependency 2023-08-12 03:29:12 +03:00
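Declaring cert-manager as a chart dependency happens in Chart.yaml; a sketch, with the version and condition flag assumed for illustration:

```yaml
# Chart.yaml (illustrative — version and condition are assumptions)
dependencies:
  - name: cert-manager
    version: "v1.12.0"                  # assumed version
    repository: https://charts.jetstack.io
    condition: cert-manager.enabled     # assumed enable flag
```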
M. Mert Yildiran
54c5da2fcb Add a default NodeSelectorTerm that's matching Linux OS 2023-08-12 03:28:33 +03:00
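The default NodeSelectorTerm matching Linux nodes relies on the standard kubernetes.io/os node label; roughly:

```yaml
affinity:
  nodeAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
      nodeSelectorTerms:
        - matchExpressions:
            - key: kubernetes.io/os
              operator: In
              values:
                - linux
```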
M. Mert Yildiran
a5efb6b625 Fix the indentation 2023-08-12 03:09:37 +03:00
M. Mert Yildiran
7dcb2d23a0 Use the nodeselectorterms from values.yaml in the kmm-operator-controller-manager deployment 2023-08-12 02:44:35 +03:00
M. Mert Yildiran
f4ff4d4dd6 Add KMMConfig struct to TapConfig 2023-08-12 02:41:29 +03:00
M. Mert Yildiran
dd5761f112 🎨 Add a new line character at the end of values.yaml 2023-08-12 02:38:25 +03:00
M. Mert Yildiran
854836056d 🔨 Rename kernel-module-management.yaml to 15-kernel-module-management.yaml 2023-08-12 02:37:29 +03:00
Luiz Oliveira
090368295c Include kernel module management operator (#1409)
Files generated from https://github.com/kubernetes-sigs/kernel-module-management/tree/main/config/default
using kubectl kustomize; kubeshark labels and checks were added on top.

Attention, KMM requires cert-manager.

Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>
2023-08-12 02:36:30 +03:00
M. Mert Yildiran
67038e324b 🔧 Add logs-kmm-loader Makefile rule 2023-08-11 21:49:46 +03:00
M. Mert Yildiran
a5fb7e0474 Add .Capabilities.APIVersions.Has "kmm.sigs.x-k8s.io/v1beta1" check to module loader related Helm templates 2023-08-11 21:49:01 +03:00
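The capability check lets KMM-dependent templates render only when the cluster exposes the kmm.sigs.x-k8s.io/v1beta1 API; a sketch of the guard (the resource name and spec are assumed/omitted):

```yaml
{{- if .Capabilities.APIVersions.Has "kmm.sigs.x-k8s.io/v1beta1" }}
apiVersion: kmm.sigs.x-k8s.io/v1beta1
kind: Module
metadata:
  name: kubeshark-pf-ring   # assumed name
# ... module spec omitted ...
{{- end }}
```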
M. Mert Yildiran
1a0625d37c Change the key from Dockerfile to dockerfile in module loader ConfigMap 2023-08-11 17:15:12 +03:00
M. Mert Yildiran
7ec1f595a1 Change the selector in module loader 2023-08-11 00:20:47 +03:00
M. Mert Yildiran
3998485944 🔨 Rename 12-nginx-config.yaml to 12-nginx-config-map.yaml 2023-08-11 00:15:41 +03:00
M. Mert Yildiran
e5de984acd 🔧 Add ssh-node Makefile rule 2023-08-11 00:14:04 +03:00
M. Mert Yildiran
18d6345e80 🔧 Add logs-kmm Makefile rule 2023-08-11 00:06:17 +03:00
M. Mert Yildiran
661e17ace9 Add 14-module-loader-config-map.yaml and a Makefile rule that generates it 2023-08-11 00:03:37 +03:00
M. Mert Yildiran
cc78b291af 🐳 Bring in module-loader Dockerfile 2023-08-10 23:50:53 +03:00
Luiz Oliveira
7c8adee7a8 🔨 Add _helpers.tpl and NOTES.txt to Helm chart and refactor labels (#1406)
* include kubernetes default labels

Using _helpers.tpl to define those labels

Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>

* include Notes with tips after the installs

Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>

* create a standard service account name

Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>

* Update helm-chart/templates/NOTES.txt

Co-authored-by: M. Mert Yildiran <me@mertyildiran.com>

* Update helm-chart/templates/NOTES.txt

Co-authored-by: M. Mert Yildiran <me@mertyildiran.com>

* fixes ingress and nginx labels

Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>

* fixes new label mapping from values

Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>

* update makefile to use the correct default namespace and release name to generate manifests

Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>

---------

Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>
Co-authored-by: M. Mert Yildiran <me@mertyildiran.com>
2023-08-10 22:39:17 +03:00
M. Mert Yildiran
461ad1921e Add 13-module-loader.yaml Helm template which should load pf_ring.ko kernel module using KMM 2023-08-10 15:51:37 +03:00
M. Mert Yildiran
5ca90d70ff Have consistent case style in values.yaml 2023-08-09 20:16:49 +03:00
M. Mert Yildiran
65bda4e844 Add the IPv6 field to TapConfig struct 2023-08-09 01:24:08 +03:00
M. Mert Yildiran
c533bcd38c Add AUTH_ENABLED and AUTH_APPROVED_EMAILS environment variables to Hub's template 2023-08-09 01:22:10 +03:00
M. Mert Yildiran
1d17f83931 ⬆️ Bump the Helm chart version 2023-08-07 20:03:11 +03:00
M. Mert Yildiran
b9c3704bae Remove apiVersion field 2023-08-07 20:01:59 +03:00
M. Mert Yildiran
08602c75e0 Run make generate-manifests 2023-08-07 20:00:06 +03:00
M. Mert Yildiran
46799f6665 Revert " Let the user system:anonymous access the services/proxy resource"
This reverts commit acaa29f8eb.
2023-08-07 19:59:16 +03:00
Adrian Wyssmann
250a878407 Allow to disable IPv6 for nginx ingress (#1392)
Co-authored-by: M. Mert Yildiran <me@mertyildiran.com>
2023-08-05 18:43:13 +03:00
M. Mert Yildiran
b32f5f9e12 🔥 Remove the unused constants in kubernetes package 2023-08-04 20:49:21 +03:00
M. Mert Yildiran
5325f94f2b 🐛 Fix the flag redefined: release-namespace error 2023-08-01 23:00:36 +03:00
M. Mert Yildiran
fc3bf69348 Add -s flag to set release namespace into console, proxy and scripts 2023-07-31 23:09:04 +03:00
M. Mert Yildiran
7f41c348e6 ⬆️ Bump the Helm chart version 2023-07-30 23:39:59 +03:00
M. Mert Yildiran
eb69ebf008 Run make generate-manifests 2023-07-30 23:36:20 +03:00
M. Mert Yildiran
9f889a7a36 🔧 Add Makefile rules to do Helm install using canary and dev tags without the debug mode enabled 2023-07-30 05:46:53 +03:00
M. Mert Yildiran
909cc8de15 Change default PROFILING_INTERVAL_SECONDS to 60 2023-07-30 04:57:26 +03:00
M. Mert Yildiran
a0313e9e5a 🔧 Fix the recently added Makefile rules 2023-07-30 04:29:05 +03:00
M. Mert Yildiran
3aed354ab8 🔧 Add Makefile rules to do Helm install/uninstall 2023-07-30 04:27:34 +03:00
M. Mert Yildiran
7fe9ecbca4 🔧 Add Makefile rules to exec into pods 2023-07-30 04:24:27 +03:00
M. Mert Yildiran
9e6af8c0bc Enable profiling in the worker when debug is enabled 2023-07-30 04:23:53 +03:00
M. Mert Yildiran
2c8f2e903f 🔧 Add Makefile rules to see the pod logs 2023-07-30 03:13:11 +03:00
dependabot[bot]
ca451e08f6 ⬆️ Bump github.com/docker/distribution (#1399)
Bumps [github.com/docker/distribution](https://github.com/docker/distribution) from 2.8.1+incompatible to 2.8.2+incompatible.
- [Release notes](https://github.com/docker/distribution/releases)
- [Commits](https://github.com/docker/distribution/compare/v2.8.1...v2.8.2)

---
updated-dependencies:
- dependency-name: github.com/docker/distribution
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-07-30 02:57:43 +03:00
M. Mert Yildiran
45bfebc956 Add sidecar.istio.io/inject: "false" label to all pods 2023-07-30 02:50:30 +03:00
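The label that opts a pod out of Istio sidecar injection goes on the pod (template) metadata:

```yaml
metadata:
  labels:
    sidecar.istio.io/inject: "false"
```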
M. Mert Yildiran
acaa29f8eb Let the user system:anonymous access the services/proxy resource 2023-07-30 02:48:32 +03:00
M. Mert Yildiran
470ab3d7ed Run make generate-manifests 2023-07-17 20:15:55 +03:00
M. Mert Yildiran
a259361a96 🔥 Remove -packet-capture flag 2023-07-17 20:14:53 +03:00
Alon Girmonsky
0350bcdd61 Update README.md (#1387)
Announcing latest features.
2023-07-16 08:12:40 +03:00
M. Mert Yildiran
cddc7d25fd ⬆️ Upgrade github.com/gin-gonic/gin to v1.9.1 2023-07-14 21:42:33 +03:00
M. Mert Yildiran
635a9d3256 ⬆️ Bump the Helm chart version 2023-07-10 20:44:14 +03:00
M. Mert Yildiran
2224d0e9f4 🐛 Fix the -debug flag of the worker in the Helm template 2023-07-10 20:43:07 +03:00
M. Mert Yildiran
db01c4e9e3 Revert the tap.proxy.hub.srvport usage in the Hub template 2023-07-09 23:11:02 +03:00
M. Mert Yildiran
0659d0fead ⬆️ Bump the Helm chart version 2023-07-09 23:01:19 +03:00
M. Mert Yildiran
988bb16260 Use the tap.proxy.hub.port and tap.proxy.hub.srvport in the Helm templates 2023-07-09 22:58:02 +03:00
M. Mert Yildiran
b4e8573634 Add license command 2023-07-06 21:57:21 +03:00
M. Mert Yildiran
cfa12ea45e 🐛 Fix the websocket: bad handshake error in console command in case Ingress is enabled 2023-07-06 21:48:03 +03:00
M. Mert Yildiran
9a7c23f070 🔥 Remove nodeSelectorTerms from hub and front pods 2023-07-06 21:28:32 +03:00
M. Mert Yildiran
0f1f832ddd 🐛 Add the missing json struct tags to ResourcesConfig 2023-07-03 23:26:18 +03:00
M. Mert Yildiran
dfe5605032 Update complete.yaml 2023-07-03 18:50:36 +03:00
M. Mert Yildiran
4c2884c40f Add KUBESHARK_HELM_CHART_PATH environment variable to set a local path for the Helm chart 2023-07-03 17:15:47 +03:00
M. Mert Yildiran
4fb179f623 ⬆️ Bump the Helm chart version 2023-07-03 17:05:20 +03:00
M. Mert Yildiran
796fc1453c Fix the hub and worker commands 2023-07-03 16:47:00 +03:00
M. Mert Yildiran
0ef3e2d018 Fix the issues related to release namespace 2023-07-03 16:33:50 +03:00
M. Mert Yildiran
77a14410f4 Revert " Rename releasenamespace field to selfnamespace"
This reverts commit d8ee89225c.
2023-07-03 15:11:21 +03:00
M. Mert Yildiran
f269a61842 Revert "Revert "🐛 Fix the commands in case of -debug flag enabled""
This reverts commit 64b22daa2a.
2023-07-03 15:11:04 +03:00
M. Mert Yildiran
51eddd3ae4 Fix the -r flag behavior in config command 2023-07-03 13:14:30 +03:00
M. Mert Yildiran
64b22daa2a Revert "🐛 Fix the commands in case of -debug flag enabled"
This reverts commit 3a2d34647e.
2023-07-03 12:32:15 +03:00
M. Mert Yildiran
3a2d34647e 🐛 Fix the commands in case of -debug flag enabled 2023-07-03 12:08:57 +03:00
M. Mert Yildiran
d8ee89225c Rename releasenamespace field to selfnamespace 2023-07-03 11:54:06 +03:00
M. Mert Yildiran
f7ce141d0d Remove an unnecessary check 2023-07-03 11:45:00 +03:00
M. Mert Yildiran
3c25cec633 Regenerate the complete.yaml 2023-06-30 16:57:44 +03:00
M. Mert Yildiran
7b86d32174 Remove the hostPort field from the manifests 2023-06-30 16:57:26 +03:00
M. Mert Yildiran
aeda619104 Download files in parallel 2023-06-29 16:45:59 +03:00
M. Mert Yildiran
98738cb5a6 Use Prefix field of ListObjectsV2Input instead of strings.HasPrefix check 2023-06-29 16:06:52 +03:00
M. Mert Yildiran
bf3285cb8b 🐛 Fix the collision of the -d flag in tap with the root level debug flag 2023-06-29 02:49:01 +03:00
M. Mert Yildiran
5f9084e497 Make the config command print the current config instead of the default config 2023-06-29 02:39:35 +03:00
M. Mert Yildiran
f2a384c8db Change the S3 URL to S3 URI in the flag description 2023-06-29 02:29:34 +03:00
M. Mert Yildiran
207d89fa17 🐛 Fix the cleanUpOldContainers method by adding All: true to ContainerListOptions 2023-06-29 02:22:57 +03:00
M. Mert Yildiran
3b758d15a0 Change the pattern of temporary file downloaded from S3 2023-06-29 02:21:32 +03:00
M. Mert Yildiran
261e850a59 Support folder URLs 2023-06-29 02:13:47 +03:00
M. Mert Yildiran
242a276c5f Download all the objects in the bucket and TAR them in case the key is empty in the S3 URL 2023-06-29 01:42:51 +03:00
M. Mert Yildiran
b9f9e860b6 Change the default namespace from kubeshark to default and use .Release.Namespace in Helm templates 2023-06-27 21:06:44 +03:00
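Using the release namespace instead of a hard-coded one follows the usual Helm pattern (the resource name here is just an example):

```yaml
metadata:
  name: kubeshark-hub                  # example resource
  namespace: {{ .Release.Namespace }}
```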
M. Mert Yildiran
1404c68a22 Fix the annotations in Ingress 2023-06-27 20:50:02 +03:00
M. Mert Yildiran
400c681369 Fix the issues in Ingress Helm template 2023-06-27 20:36:46 +03:00
M. Mert Yildiran
a4761e3262 Handle the colon character in the REACT_APP_HUB_PORT environment variable 2023-06-27 14:43:53 +03:00
M. Mert Yildiran
313d26670b Regenerate the manifests 2023-06-27 03:47:27 +03:00
M. Mert Yildiran
16f1e116c0 Template the annotations in all resources 2023-06-27 03:45:47 +03:00
M. Mert Yildiran
2d625eccaa Rename resourcelabels to labels 2023-06-27 03:33:46 +03:00
M. Mert Yildiran
19443501da Have consistent key style in values.yaml 2023-06-27 03:32:03 +03:00
M. Mert Yildiran
4ef91a2701 Template the controller field in IngressClass resource 2023-06-27 03:27:40 +03:00
M. Mert Yildiran
bc031be0fe 🔧 Add generate-helm-values Makefile rule 2023-06-27 03:26:20 +03:00
M. Mert Yildiran
f32a7d97ec Template the ingressClassName field in Ingress resource 2023-06-27 03:25:58 +03:00
M. Mert Yildiran
aeda024986 Remove the unnecessary single quotes from the Helm templates 2023-06-27 03:19:45 +03:00
M. Mert Yildiran
98198b9733 Remove the unused labels from the resources 2023-06-27 02:48:46 +03:00
M. Mert Yildiran
0bf7c83b86 Use toYaml and nindent instead of range in the Helm templates 2023-06-27 02:45:55 +03:00
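Rendering a values map with toYaml/nindent replaces an explicit range loop; the values path below is illustrative:

```yaml
metadata:
  labels:
    {{- toYaml .Values.tap.labels | nindent 4 }}
```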
M. Mert Yildiran
a8df589076 Bring back the functionality of nodeselectorterms field into the Helm chart 2023-06-27 01:32:16 +03:00
M. Mert Yildiran
c07f1851b3 🔥 Delete the manifests and add complete.yaml instead 2023-06-27 01:22:30 +03:00
M. Mert Yildiran
5c4c913a27 Bring back the functionality of resourcelabels field into the Helm chart 2023-06-27 01:12:04 +03:00
M. Mert Yildiran
71111248bd Add icon field to Chart.yaml 2023-06-27 00:30:53 +03:00
M. Mert Yildiran
5efb48f0c5 Bring back the functionality of ignoretainted field into the Helm chart 2023-06-27 00:15:04 +03:00
M. Mert Yildiran
cc980dbaf8 Print a warning and fall back to the default value if the storage limit is modified while persistent storage is disabled 2023-06-23 02:08:42 +03:00
M. Mert Yildiran
1afe27e969 Add S3 URL support to --pcap flag 2023-06-22 20:59:14 +03:00
M. Mert Yildiran
8df5e015c5 Call os.Exit if the Helm install fails 2023-06-21 17:11:03 +03:00
M. Mert Yildiran
6b898077f1 ⬆️ Bump the Helm chart version 2023-06-21 17:04:25 +03:00
Victor Login
e93cd978e8 Update TLS for ingress (#1367) 2023-06-21 17:02:44 +03:00
M. Mert Yildiran
bada6dae68 🐛 Fix <len .Values.tap.namespaces>: error calling len: len of nil pointer Helm install error 2023-06-20 22:14:06 +03:00
M. Mert Yildiran
8814e08871 ⬆️ Bump the Helm chart version 2023-06-19 23:59:38 +03:00
M. Mert Yildiran
6b7a94a850 Revert values.yaml 2023-06-19 02:40:07 +03:00
M. Mert Yildiran
7b004e7a1f Change GetLocalhostOnPort method to GetProxyOnPort 2023-06-19 02:19:52 +03:00
M. Mert Yildiran
836b87d517 Template the SCRIPTING_ENV env in Hub pod (Helm) 2023-06-19 01:46:51 +03:00
M. Mert Yildiran
646da4810d Allow license key holders to bypass the auth 2023-06-19 01:44:01 +03:00
Alon Girmonsky
a6d349a8fa Update README.md
Changed the announcement part
2023-06-13 10:11:44 -07:00
Alon Girmonsky
9d58c662a8 Update README.md
Announcing Self-hosted Kubeshark
2023-06-13 10:01:28 -07:00
M. Mert Yildiran
e4a09be4e2 Change the PRO_URL constant 2023-06-07 01:09:30 +03:00
Alon Girmonsky
7208ed85d3 Update README.md
Adding a way to get the license where relevant.
2023-06-06 17:43:35 +03:00
M. Mert Yildiran
7a5bf83336 Use the Helm chart in tap command to install Kubeshark (#1362)
*  Use the Helm chart in `tap` command to install Kubeshark

* ⬆️ Set Go version to `1.19` in `go.mod` file

*  Add `Helm` struct`, `NewHelm` and `NewHelmDefault` methods

*  Better logging and error return

*  Pass the config as `values.yaml` to Helm install

* 🔥 Remove `helm-chart`, `manifests` and `check` commands

*  Run `go mod tidy`

* 🎨 Move `helm` package into `kubernetes` package

* 🔥 Remove `# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!` notice from the manifests and Helm templates

* 🔥 Remove the unused `GenerateApplyConfiguration` and `buildWithDefaultLabels` methods
2023-06-06 12:16:03 +03:00
Alon Girmonskys
87b8a067c9 changed com to co 2023-05-28 20:54:57 -07:00
M. Mert Yildiran
3fe765e072 ⬆️ Bump the Helm chart version 2023-05-26 00:07:46 +03:00
M. Mert Yildiran
a163f9cc0e Change the new release warning 2023-05-25 20:42:57 +03:00
M. Mert Yildiran
2edb987c07 Template REACT_APP_HUB_PORT in the Helm chart 2023-05-25 20:24:29 +03:00
M. Mert Yildiran
c0d7d0fe80 Update Helm README.md 2023-05-25 05:46:10 +03:00
M. Mert Yildiran
be5bd6a372 Template the AUTH_APPROVED_DOMAINS and certmanager.k8s.io/cluster-issuer
Also add `networking.k8s.io` to `apiGroups` in `ClusterRole`
2023-05-25 05:07:42 +03:00
M. Mert Yildiran
42df7aa42f Update the Certificate resource name 2023-05-24 06:32:48 +03:00
M. Mert Yildiran
9a9052198f ⬆️ Bump the Helm chart version 2023-05-24 05:44:18 +03:00
M. Mert Yildiran
2fb83c3642 Fix the Bash script 2023-05-24 04:18:27 +03:00
M. Mert Yildiran
d44674fe86 Update the secret name of certificate 2023-05-24 04:10:46 +03:00
M. Mert Yildiran
c57ed1efd3 Run kubeshark manifests --dump && kubeshark helm-chart 2023-05-24 04:04:34 +03:00
M. Mert Yildiran
c19cd00c77 Add CertManager field to IngressConfig and add an Ingress TLS example 2023-05-24 04:01:45 +03:00
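An Ingress TLS section paired with a cert-manager annotation generally looks like the following; the host, secret name, and issuer are placeholders:

```yaml
metadata:
  annotations:
    certmanager.k8s.io/cluster-issuer: letsencrypt-prod   # placeholder issuer
spec:
  tls:
    - hosts:
        - kubeshark.example.com       # placeholder host
      secretName: kubeshark-tls       # placeholder secret name
```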
M. Mert Yildiran
39f8d40b76 Revert " Add Refresh-Token to the list of Access-Control-Allow-Headers"
This reverts commit bf731073c8.
2023-05-24 02:10:48 +03:00
M. Mert Yildiran
bf731073c8 Add Refresh-Token to the list of Access-Control-Allow-Headers 2023-05-24 02:04:56 +03:00
M. Mert Yildiran
4bb68afaaf Add AuthConfig struct and pass domains in AUTH_APPROVED_DOMAINS environment variable 2023-05-24 01:50:59 +03:00
M. Mert Yildiran
2126fc83a7 🐛 Remove the cancel() call 2023-05-22 19:20:39 +03:00
M. Mert Yildiran
d0c1dbcd5e Print and open a different URL in case Ingress is enabled 2023-05-17 03:57:27 +03:00
M. Mert Yildiran
ad9dfbce40 Add Ingress (#1357)
*  Add `Ingress`

*  Rewrite the target in `Ingress`

*  Fix the path of front pod in `Ingress`

*  Add `IngressConfig` struct

*  Generate the correct Helm chart based on `tap.ingress` field of `values.yaml`
2023-05-16 19:46:47 +03:00
M. Mert Yildiran
139336d4ee Template hostPort(s) in the Helm chart 2023-05-10 14:38:38 +03:00
M. Mert Yildiran
f68fed0de8 🐛 Fix the effect of proxy config port changes 2023-05-10 01:28:43 +03:00
M. Mert Yildiran
1d7d242e6c Generate the missing new line in 08-persistent-volume-claim.yaml 2023-05-09 00:00:07 +03:00
M. Mert Yildiran
aa904e23c7 Add --persistentstorage option to tap command 2023-05-08 23:57:22 +03:00
M. Mert Yildiran
baf0e65337 Template the Helm chart based on persistentstorage value 2023-05-08 23:52:14 +03:00
M. Mert Yildiran
a33a3467fc Add persistentstorage option 2023-05-08 00:50:56 +03:00
M. Mert Yildiran
a9b598bc41 ⬆️ Bump the Helm chart version 2023-05-04 21:45:55 +03:00
M. Mert Yildiran
0aee367ad5 Omit the license string in helm-chart and manifests commands 2023-05-04 21:37:55 +03:00
M. Mert Yildiran
8c7d9ea8fd Fix the updateLicense method 2023-05-04 21:33:38 +03:00
M. Mert Yildiran
fab0f713ed 🐛 Pass the license string 2023-05-04 21:18:34 +03:00
M. Mert Yildiran
2563cc1922 🐛 Fix the imagePullPolicy to imagepullpolicy in helm-chart command 2023-04-24 02:03:58 +03:00
Jay R. Wren
26c9f42eba 📚 Remove kubeshark tap -A example from README.md (#1339) 2023-04-21 18:35:24 -07:00
84 changed files with 2295 additions and 3717 deletions

View File

@@ -68,3 +68,84 @@ kubectl-view-all-resources: ## This command outputs all Kubernetes resources usi
kubectl-view-kubeshark-resources: ## This command outputs all Kubernetes resources in "kubeshark" namespace using YAML format and pipes it to VS Code
./kubectl.sh view-kubeshark-resources
generate-helm-values: ## Generate the Helm values from config.yaml
./bin/kubeshark__ config > ./helm-chart/values.yaml
generate-manifests: ## Generate the manifests from the Helm chart using default configuration
helm template kubeshark -n default ./helm-chart > ./manifests/complete.yaml
logs-worker:
export LOGS_POD_PREFIX=kubeshark-worker-
export LOGS_FOLLOW=
${MAKE} logs
logs-worker-follow:
export LOGS_POD_PREFIX=kubeshark-worker-
export LOGS_FOLLOW=--follow
${MAKE} logs
logs-hub:
export LOGS_POD_PREFIX=kubeshark-hub
export LOGS_FOLLOW=
${MAKE} logs
logs-hub-follow:
export LOGS_POD_PREFIX=kubeshark-hub
export LOGS_FOLLOW=--follow
${MAKE} logs
logs-front:
export LOGS_POD_PREFIX=kubeshark-front
export LOGS_FOLLOW=
${MAKE} logs
logs-front-follow:
export LOGS_POD_PREFIX=kubeshark-front
export LOGS_FOLLOW=--follow
${MAKE} logs
logs:
kubectl logs $$(kubectl get pods | awk '$$1 ~ /^$(LOGS_POD_PREFIX)/' | awk 'END {print $$1}') $(LOGS_FOLLOW)
ssh-node:
kubectl ssh node $$(kubectl get nodes | awk 'END {print $$1}')
exec-worker:
export EXEC_POD_PREFIX=kubeshark-worker-
${MAKE} exec
exec-hub:
export EXEC_POD_PREFIX=kubeshark-hub
${MAKE} exec
exec-front:
export EXEC_POD_PREFIX=kubeshark-front
${MAKE} exec
exec:
kubectl exec --stdin --tty $$(kubectl get pods | awk '$$1 ~ /^$(EXEC_POD_PREFIX)/' | awk 'END {print $$1}') -- /bin/sh
helm-install:
cd helm-chart && helm install kubeshark . && cd ..
helm-install-canary:
cd helm-chart && helm install kubeshark . --set tap.docker.tag=canary && cd ..
helm-install-dev:
cd helm-chart && helm install kubeshark . --set tap.docker.tag=dev && cd ..
helm-install-debug:
cd helm-chart && helm install kubeshark . --set tap.debug=true && cd ..
helm-install-debug-canary:
cd helm-chart && helm install kubeshark . --set tap.debug=true --set tap.docker.tag=canary && cd ..
helm-install-debug-dev:
cd helm-chart && helm install kubeshark . --set tap.debug=true --set tap.docker.tag=dev && cd ..
helm-uninstall:
helm uninstall kubeshark
proxy:
kubeshark proxy

View File

@@ -3,9 +3,6 @@
</p>
<p align="center">
<a href="https://github.com/kubeshark/kubeshark/blob/main/LICENSE">
<img alt="GitHub License" src="https://img.shields.io/github/license/kubeshark/kubeshark?logo=GitHub&style=flat-square">
</a>
<a href="https://github.com/kubeshark/kubeshark/releases/latest">
<img alt="GitHub Latest Release" src="https://img.shields.io/github/v/release/kubeshark/kubeshark?logo=GitHub&style=flat-square">
</a>
@@ -25,9 +22,9 @@
<p align="center">
<b>
<span>NEW: </span><a href="https://github.com/kubeshark/kubeshark/releases/tag/39.4">Version 39.4</a> is out, introducing
<a href="https://docs.kubeshark.co/en/automation_scripting">Scripting</a>,
<a href="https://docs.kubeshark.co/en/automation_hooks">L4/L7 hooks</a>, and so much more...
<span>NEW: </span>
<a href="https://kubeshark.co/traffic-recording">Traffic Recording and Offline Investigation</a>, and
<a href="https://kubeshark.co/self-hosting">Self-hosting with Ingress and Authentication</a>.
</b>
</p>
@@ -45,10 +42,6 @@ Download **Kubeshark**'s binary distribution [latest release](https://github.com
kubeshark tap
```
```shell
kubeshark tap -A
```
```shell
kubeshark tap -n sock-shop "(catalo*|front-end*)"
```

View File

@@ -1,21 +0,0 @@
package cmd
import (
"fmt"
"github.com/kubeshark/kubeshark/misc"
"github.com/spf13/cobra"
)
var checkCmd = &cobra.Command{
Use: "check",
Short: fmt.Sprintf("Check the %s resources for potential problems", misc.Software),
RunE: func(cmd *cobra.Command, args []string) error {
runCheck()
return nil
},
}
func init() {
rootCmd.AddCommand(checkCmd)
}

View File

@@ -1,28 +0,0 @@
package check
import (
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/kubernetes"
"github.com/kubeshark/kubeshark/semver"
"github.com/rs/zerolog/log"
)
func KubernetesApi() (*kubernetes.Provider, *semver.SemVersion, bool) {
log.Info().Str("procedure", "kubernetes-api").Msg("Checking:")
kubernetesProvider, err := kubernetes.NewProvider(config.Config.KubeConfigPath(), config.Config.Kube.Context)
if err != nil {
log.Error().Err(err).Msg("Can't initialize the client!")
return nil, nil, false
}
log.Info().Msg("Initialization of the client is passed.")
kubernetesVersion, err := kubernetesProvider.GetKubernetesVersion()
if err != nil {
log.Error().Err(err).Msg("Can't query the Kubernetes API!")
return nil, nil, false
}
log.Info().Msg("Querying the Kubernetes API is passed.")
return kubernetesProvider, kubernetesVersion, true
}

View File

@@ -1,59 +0,0 @@
package check
import (
"context"
"fmt"
"github.com/kubeshark/kubeshark/kubernetes"
"github.com/rs/zerolog/log"
rbac "k8s.io/api/rbac/v1"
)
func KubernetesPermissions(ctx context.Context, kubernetesProvider *kubernetes.Provider) bool {
log.Info().Str("procedure", "kubernetes-permissions").Msg("Checking:")
return checkRulesPermissions(ctx, kubernetesProvider, kubernetesProvider.BuildClusterRole().Rules, "")
}
func checkRulesPermissions(ctx context.Context, kubernetesProvider *kubernetes.Provider, rules []rbac.PolicyRule, namespace string) bool {
permissionsExist := true
for _, rule := range rules {
for _, group := range rule.APIGroups {
for _, resource := range rule.Resources {
for _, verb := range rule.Verbs {
exist, err := kubernetesProvider.CanI(ctx, namespace, resource, verb, group)
permissionsExist = checkPermissionExist(group, resource, verb, namespace, exist, err) && permissionsExist
}
}
}
}
return permissionsExist
}
func checkPermissionExist(group string, resource string, verb string, namespace string, exist bool, err error) bool {
var groupAndNamespace string
if group != "" && namespace != "" {
groupAndNamespace = fmt.Sprintf("in api group '%v' and namespace '%v'", group, namespace)
} else if group != "" {
groupAndNamespace = fmt.Sprintf("in api group '%v'", group)
} else if namespace != "" {
groupAndNamespace = fmt.Sprintf("in namespace '%v'", namespace)
}
if err != nil {
log.Error().
Str("verb", verb).
Str("resource", resource).
Str("group-and-namespace", groupAndNamespace).
Err(err).
Msg("While checking Kubernetes permissions!")
return false
} else if !exist {
log.Error().Msg(fmt.Sprintf("Can't %v %v %v", verb, resource, groupAndNamespace))
return false
}
log.Info().Msg(fmt.Sprintf("Can %v %v %v", verb, resource, groupAndNamespace))
return true
}

View File

@@ -1,118 +0,0 @@
package check
import (
"context"
"fmt"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/kubernetes"
"github.com/rs/zerolog/log"
)
func KubernetesResources(ctx context.Context, kubernetesProvider *kubernetes.Provider) bool {
log.Info().Str("procedure", "k8s-components").Msg("Checking:")
exist, err := kubernetesProvider.DoesNamespaceExist(ctx, config.Config.Tap.SelfNamespace)
allResourcesExist := checkResourceExist(config.Config.Tap.SelfNamespace, "namespace", exist, err)
exist, err = kubernetesProvider.DoesServiceAccountExist(ctx, config.Config.Tap.SelfNamespace, kubernetes.ServiceAccountName)
allResourcesExist = checkResourceExist(kubernetes.ServiceAccountName, "service account", exist, err) && allResourcesExist
if config.Config.IsNsRestrictedMode() {
exist, err = kubernetesProvider.DoesRoleExist(ctx, config.Config.Tap.SelfNamespace, kubernetes.RoleName)
allResourcesExist = checkResourceExist(kubernetes.RoleName, "role", exist, err) && allResourcesExist
exist, err = kubernetesProvider.DoesRoleBindingExist(ctx, config.Config.Tap.SelfNamespace, kubernetes.RoleBindingName)
allResourcesExist = checkResourceExist(kubernetes.RoleBindingName, "role binding", exist, err) && allResourcesExist
} else {
exist, err = kubernetesProvider.DoesClusterRoleExist(ctx, kubernetes.ClusterRoleName)
allResourcesExist = checkResourceExist(kubernetes.ClusterRoleName, "cluster role", exist, err) && allResourcesExist
exist, err = kubernetesProvider.DoesClusterRoleBindingExist(ctx, kubernetes.ClusterRoleBindingName)
allResourcesExist = checkResourceExist(kubernetes.ClusterRoleBindingName, "cluster role binding", exist, err) && allResourcesExist
}
exist, err = kubernetesProvider.DoesServiceExist(ctx, config.Config.Tap.SelfNamespace, kubernetes.HubServiceName)
allResourcesExist = checkResourceExist(kubernetes.HubServiceName, "service", exist, err) && allResourcesExist
allResourcesExist = checkPodResourcesExist(ctx, kubernetesProvider) && allResourcesExist
return allResourcesExist
}
func checkPodResourcesExist(ctx context.Context, kubernetesProvider *kubernetes.Provider) bool {
if pods, err := kubernetesProvider.ListPodsByAppLabel(ctx, config.Config.Tap.SelfNamespace, kubernetes.HubPodName); err != nil {
log.Error().
Str("name", kubernetes.HubPodName).
Err(err).
Msg("While checking if pod is running!")
return false
} else if len(pods) == 0 {
log.Error().
Str("name", kubernetes.HubPodName).
Msg("Pod doesn't exist!")
return false
} else if !kubernetes.IsPodRunning(&pods[0]) {
log.Error().
Str("name", kubernetes.HubPodName).
Msg("Pod is not running!")
return false
}
log.Info().
Str("name", kubernetes.HubPodName).
Msg("Pod is running.")
if pods, err := kubernetesProvider.ListPodsByAppLabel(ctx, config.Config.Tap.SelfNamespace, kubernetes.WorkerPodName); err != nil {
log.Error().
Str("name", kubernetes.WorkerPodName).
Err(err).
Msg("While checking if pods are running!")
return false
} else {
workers := 0
notRunningWorkers := 0
for _, pod := range pods {
workers += 1
if !kubernetes.IsPodRunning(&pod) {
notRunningWorkers += 1
}
}
if notRunningWorkers > 0 {
log.Error().
Str("name", kubernetes.WorkerPodName).
Msg(fmt.Sprintf("%d/%d pods are not running!", notRunningWorkers, workers))
return false
}
log.Info().
Str("name", kubernetes.WorkerPodName).
Msg(fmt.Sprintf("All %d pods are running.", workers))
return true
}
}
func checkResourceExist(resourceName string, resourceType string, exist bool, err error) bool {
if err != nil {
log.Error().
Str("name", resourceName).
Str("type", resourceType).
Err(err).
Msg("Checking if resource exists!")
return false
} else if !exist {
log.Error().
Str("name", resourceName).
Str("type", resourceType).
Msg("Resource doesn't exist!")
return false
}
log.Info().
Str("name", resourceName).
Str("type", resourceType).
Msg("Resource exist.")
return true
}

View File

@@ -1,22 +0,0 @@
package check
import (
"fmt"
"github.com/kubeshark/kubeshark/kubernetes"
"github.com/kubeshark/kubeshark/semver"
"github.com/kubeshark/kubeshark/utils"
"github.com/rs/zerolog/log"
)
func KubernetesVersion(kubernetesVersion *semver.SemVersion) bool {
log.Info().Str("procedure", "kubernetes-version").Msg("Checking:")
if err := kubernetes.ValidateKubernetesVersion(kubernetesVersion); err != nil {
log.Error().Str("k8s-version", string(*kubernetesVersion)).Err(err).Msg(fmt.Sprintf(utils.Red, "The cluster does not have the minimum required Kubernetes API version!"))
return false
}
log.Info().Str("k8s-version", string(*kubernetesVersion)).Msg("Minimum required Kubernetes API version is passed.")
return true
}

View File

@@ -1,40 +0,0 @@
package check
import (
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/internal/connect"
"github.com/kubeshark/kubeshark/kubernetes"
"github.com/rs/zerolog/log"
)
func ServerConnection(kubernetesProvider *kubernetes.Provider) bool {
log.Info().Str("procedure", "server-connectivity").Msg("Checking:")
var connectedToHub, connectedToFront bool
if err := checkProxy(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort), "/echo", kubernetesProvider); err != nil {
log.Error().Err(err).Msg("Couldn't connect to Hub using proxy!")
} else {
connectedToHub = true
log.Info().Msg("Connected successfully to Hub using proxy.")
}
if err := checkProxy(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Front.SrcPort), "", kubernetesProvider); err != nil {
log.Error().Err(err).Msg("Couldn't connect to Front using proxy!")
} else {
connectedToFront = true
log.Info().Msg("Connected successfully to Front using proxy.")
}
return connectedToHub && connectedToFront
}
func checkProxy(serverUrl string, path string, kubernetesProvider *kubernetes.Provider) error {
log.Info().Str("url", serverUrl).Msg("Connecting:")
connector := connect.NewConnector(serverUrl, connect.DefaultRetries, connect.DefaultTimeout)
if err := connector.TestConnection(path); err != nil {
return err
}
return nil
}

View File

@@ -1,47 +0,0 @@
package cmd
import (
"context"
"fmt"
"os"
"github.com/kubeshark/kubeshark/cmd/check"
"github.com/kubeshark/kubeshark/misc"
"github.com/kubeshark/kubeshark/utils"
"github.com/rs/zerolog/log"
)
func runCheck() {
log.Info().Msg(fmt.Sprintf("Checking the %s resources...", misc.Software))
ctx, cancel := context.WithCancel(context.Background())
defer cancel() // cancel will be called when this function exits
kubernetesProvider, kubernetesVersion, checkPassed := check.KubernetesApi()
if checkPassed {
checkPassed = check.KubernetesVersion(kubernetesVersion)
}
if checkPassed {
checkPassed = check.KubernetesPermissions(ctx, kubernetesProvider)
}
if checkPassed {
checkPassed = check.KubernetesResources(ctx, kubernetesProvider)
}
if checkPassed {
checkPassed = check.ServerConnection(kubernetesProvider)
}
if checkPassed {
log.Info().Msg(fmt.Sprintf(utils.Green, "All checks are passed."))
} else {
log.Error().
Str("command1", fmt.Sprintf("%s %s", misc.Program, cleanCmd.Use)).
Str("command2", fmt.Sprintf("%s %s", misc.Program, tapCmd.Use)).
Msg(fmt.Sprintf(utils.Red, fmt.Sprintf("There are issues in your %s resources! Run these commands:", misc.Software)))
os.Exit(1)
}
}

View File

@@ -4,7 +4,9 @@ import (
"fmt"
"github.com/creasty/defaults"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/config/configStructs"
"github.com/kubeshark/kubeshark/kubernetes/helm"
"github.com/kubeshark/kubeshark/misc"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
@@ -14,7 +16,16 @@ var cleanCmd = &cobra.Command{
Use: "clean",
Short: fmt.Sprintf("Removes all %s resources", misc.Software),
RunE: func(cmd *cobra.Command, args []string) error {
performCleanCommand()
resp, err := helm.NewHelm(
config.Config.Tap.Release.Repo,
config.Config.Tap.Release.Name,
config.Config.Tap.Release.Namespace,
).Uninstall()
if err != nil {
log.Error().Err(err).Send()
} else {
log.Info().Msgf("Uninstalled the Helm release: %s", resp.Release.Name)
}
return nil
},
}
@@ -27,5 +38,5 @@ func init() {
log.Debug().Err(err).Send()
}
cleanCmd.Flags().StringP(configStructs.SelfNamespaceLabel, "s", defaultTapConfig.SelfNamespace, "Self-namespace of Kubeshark")
cleanCmd.Flags().StringP(configStructs.ReleaseNamespaceLabel, "s", defaultTapConfig.Release.Namespace, "Release namespace of Kubeshark")
}

View File

@@ -1,14 +0,0 @@
package cmd
import (
"github.com/kubeshark/kubeshark/config"
)
func performCleanCommand() {
kubernetesProvider, err := getKubernetesProviderForCli(false, false)
if err != nil {
return
}
finishSelfExecution(kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.Tap.SelfNamespace, false)
}

View File

@@ -14,12 +14,11 @@ import (
"github.com/kubeshark/kubeshark/kubernetes"
"github.com/kubeshark/kubeshark/misc"
"github.com/kubeshark/kubeshark/misc/fsUtils"
"github.com/kubeshark/kubeshark/resources"
"github.com/rs/zerolog/log"
)
func startProxyReportErrorIfAny(kubernetesProvider *kubernetes.Provider, ctx context.Context, serviceName string, podName string, proxyPortLabel string, srcPort uint16, dstPort uint16, healthCheck string) {
httpServer, err := kubernetes.StartProxy(kubernetesProvider, config.Config.Tap.Proxy.Host, srcPort, config.Config.Tap.SelfNamespace, serviceName)
httpServer, err := kubernetes.StartProxy(kubernetesProvider, config.Config.Tap.Proxy.Host, srcPort, config.Config.Tap.Release.Namespace, serviceName)
if err != nil {
log.Error().
Err(errormessage.FormatError(err)).
@@ -27,7 +26,7 @@ func startProxyReportErrorIfAny(kubernetesProvider *kubernetes.Provider, ctx con
return
}
connector := connect.NewConnector(kubernetes.GetLocalhostOnPort(srcPort), connect.DefaultRetries, connect.DefaultTimeout)
connector := connect.NewConnector(kubernetes.GetProxyOnPort(srcPort), connect.DefaultRetries, connect.DefaultTimeout)
if err := connector.TestConnection(healthCheck); err != nil {
log.Warn().
Str("service", serviceName).
@@ -39,7 +38,7 @@ func startProxyReportErrorIfAny(kubernetesProvider *kubernetes.Provider, ctx con
}
podRegex, _ := regexp.Compile(podName)
if _, err := kubernetes.NewPortForward(kubernetesProvider, config.Config.Tap.SelfNamespace, podRegex, srcPort, dstPort, ctx); err != nil {
if _, err := kubernetes.NewPortForward(kubernetesProvider, config.Config.Tap.Release.Namespace, podRegex, srcPort, dstPort, ctx); err != nil {
log.Error().
Str("pod-regex", podRegex.String()).
Err(errormessage.FormatError(err)).
@@ -47,7 +46,7 @@ func startProxyReportErrorIfAny(kubernetesProvider *kubernetes.Provider, ctx con
return
}
connector = connect.NewConnector(kubernetes.GetLocalhostOnPort(srcPort), connect.DefaultRetries, connect.DefaultTimeout)
connector = connect.NewConnector(kubernetes.GetProxyOnPort(srcPort), connect.DefaultRetries, connect.DefaultTimeout)
if err := connector.TestConnection(healthCheck); err != nil {
log.Error().
Str("service", serviceName).
@@ -100,13 +99,10 @@ func handleKubernetesProviderError(err error) {
}
}
func finishSelfExecution(kubernetesProvider *kubernetes.Provider, isNsRestrictedMode bool, selfNamespace string, withoutCleanup bool) {
func finishSelfExecution(kubernetesProvider *kubernetes.Provider) {
removalCtx, cancel := context.WithTimeout(context.Background(), cleanupTimeout)
defer cancel()
dumpLogsIfNeeded(removalCtx, kubernetesProvider)
if !withoutCleanup {
resources.CleanUpSelfResources(removalCtx, cancel, kubernetesProvider, isNsRestrictedMode, selfNamespace)
}
}
func dumpLogsIfNeeded(ctx context.Context, kubernetesProvider *kubernetes.Provider) {

View File

@@ -17,21 +17,15 @@ var configCmd = &cobra.Command{
Use: "config",
Short: fmt.Sprintf("Generate %s config with default values", misc.Software),
RunE: func(cmd *cobra.Command, args []string) error {
configWithDefaults, err := config.GetConfigWithDefaults()
if err != nil {
log.Error().Err(err).Msg("Failed generating config with defaults.")
return nil
}
if config.Config.Config.Regenerate {
if err := config.WriteConfig(configWithDefaults); err != nil {
if err := config.WriteConfig(&config.Config); err != nil {
log.Error().Err(err).Msg("Failed generating config with defaults.")
return nil
}
log.Info().Str("config-path", config.ConfigFilePath).Msg("Template file written to config path.")
} else {
template, err := utils.PrettyYaml(configWithDefaults)
template, err := utils.PrettyYaml(config.Config)
if err != nil {
log.Error().Err(err).Msg("Failed converting config with defaults to YAML.")
return nil

View File

@@ -36,12 +36,13 @@ func init() {
log.Debug().Err(err).Send()
}
consoleCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.SrcPort, "Provide a custom port for the Hub")
consoleCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.Port, "Provide a custom port for the Hub")
consoleCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the Hub")
consoleCmd.Flags().StringP(configStructs.ReleaseNamespaceLabel, "s", defaultTapConfig.Release.Namespace, "Release namespace of Kubeshark")
}
func runConsole() {
hubUrl := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort)
hubUrl := kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Hub.Port)
response, err := http.Get(fmt.Sprintf("%s/echo", hubUrl))
if err != nil || response.StatusCode != 200 {
log.Info().Msg(fmt.Sprintf(utils.Yellow, "Couldn't connect to Hub. Establishing proxy..."))
@@ -51,14 +52,16 @@ func runConsole() {
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
log.Info().Str("host", config.Config.Tap.Proxy.Host).Uint16("port", config.Config.Tap.Proxy.Hub.SrcPort).Msg("Connecting to:")
log.Info().Str("host", config.Config.Tap.Proxy.Host).Uint16("port", config.Config.Tap.Proxy.Hub.Port).Msg("Connecting to:")
u := url.URL{
Scheme: "ws",
Host: fmt.Sprintf("%s:%d", config.Config.Tap.Proxy.Host, config.Config.Tap.Proxy.Hub.SrcPort),
Host: fmt.Sprintf("%s:%d", config.Config.Tap.Proxy.Host, config.Config.Tap.Proxy.Hub.Port),
Path: "/scripts/logs",
}
headers := http.Header{}
headers.Set("License-Key", config.Config.License)
c, _, err := websocket.DefaultDialer.Dial(u.String(), nil)
c, _, err := websocket.DefaultDialer.Dial(u.String(), headers)
if err != nil {
log.Error().Err(err).Send()
return

View File

@@ -1,363 +0,0 @@
package cmd
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"sort"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/kubernetes"
"github.com/kubeshark/kubeshark/misc"
"github.com/kubeshark/kubeshark/misc/fsUtils"
"github.com/kubeshark/kubeshark/utils"
"github.com/ohler55/ojg/jp"
"github.com/ohler55/ojg/oj"
"github.com/otiai10/copy"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
)
var helmChartCmd = &cobra.Command{
Use: "helm-chart",
Short: "Generate Helm chart of Kubeshark",
RunE: func(cmd *cobra.Command, args []string) error {
runHelmChart()
return nil
},
}
// Maintainer describes a Chart maintainer.
type Maintainer struct {
// Name is a user name or organization name
Name string `json:"name,omitempty"`
// Email is an optional email address to contact the named maintainer
Email string `json:"email,omitempty"`
// URL is an optional URL to an address for the named maintainer
URL string `json:"url,omitempty"`
}
// Metadata for a Chart file. This models the structure of a Chart.yaml file.
type Metadata struct {
// The name of the chart. Required.
Name string `json:"name,omitempty"`
// The URL to a relevant project page, git repo, or contact person
Home string `json:"home,omitempty"`
// Source is the URL to the source code of this chart
Sources []string `json:"sources,omitempty"`
// A SemVer 2 conformant version string of the chart. Required.
Version string `json:"version,omitempty"`
// A one-sentence description of the chart
Description string `json:"description,omitempty"`
// A list of string keywords
Keywords []string `json:"keywords,omitempty"`
// A list of name and URL/email address combinations for the maintainer(s)
Maintainers []*Maintainer `json:"maintainers,omitempty"`
// The URL to an icon file.
Icon string `json:"icon,omitempty"`
// The API Version of this chart. Required.
APIVersion string `json:"apiVersion,omitempty"`
// The condition to check to enable chart
Condition string `json:"condition,omitempty"`
// The tags to check to enable chart
Tags string `json:"tags,omitempty"`
// The version of the application enclosed inside of this chart.
AppVersion string `json:"appVersion,omitempty"`
// Whether or not this chart is deprecated
Deprecated bool `json:"deprecated,omitempty"`
// Annotations are additional mappings uninterpreted by Helm,
// made available for inspection by other applications.
Annotations map[string]string `json:"annotations,omitempty"`
// KubeVersion is a SemVer constraint specifying the version of Kubernetes required.
KubeVersion string `json:"kubeVersion,omitempty"`
// Dependencies are a list of dependencies for a chart.
Dependencies []*Dependency `json:"dependencies,omitempty"`
// Specifies the chart type: application or library
Type string `json:"type,omitempty"`
}
// Dependency describes a chart upon which another chart depends.
//
// Dependencies can be used to express developer intent, or to capture the state
// of a chart.
type Dependency struct {
// Name is the name of the dependency.
//
// This must match the name in the dependency's Chart.yaml.
Name string `json:"name"`
// Version is the version (range) of this chart.
//
// A lock file will always produce a single version, while a dependency
// may contain a semantic version range.
Version string `json:"version,omitempty"`
// The URL to the repository.
//
// Appending `index.yaml` to this string should result in a URL that can be
// used to fetch the repository index.
Repository string `json:"repository"`
// A yaml path that resolves to a boolean, used for enabling/disabling charts (e.g. subchart1.enabled )
Condition string `json:"condition,omitempty"`
// Tags can be used to group charts for enabling/disabling together
Tags []string `json:"tags,omitempty"`
// Enabled bool determines if chart should be loaded
Enabled bool `json:"enabled,omitempty"`
// ImportValues holds the mapping of source values to parent key to be imported. Each item can be a
// string or pair of child/parent sublist items.
ImportValues []interface{} `json:"import-values,omitempty"`
// Alias usable alias to be used for the chart
Alias string `json:"alias,omitempty"`
}
var namespaceMappings = map[string]interface{}{
"metadata.name": "{{ .Values.tap.selfnamespace }}",
}
var serviceAccountMappings = map[string]interface{}{
"metadata.namespace": "{{ .Values.tap.selfnamespace }}",
}
var clusterRoleMappings = serviceAccountMappings
var clusterRoleBindingMappings = map[string]interface{}{
"metadata.namespace": "{{ .Values.tap.selfnamespace }}",
"subjects[0].namespace": "{{ .Values.tap.selfnamespace }}",
}
var hubPodMappings = map[string]interface{}{
"metadata.namespace": "{{ .Values.tap.selfnamespace }}",
"spec.containers[0].env": []map[string]interface{}{
{
"name": "POD_REGEX",
"value": "{{ .Values.tap.regex }}",
},
{
"name": "NAMESPACES",
"value": "{{ gt (len .Values.tap.namespaces) 0 | ternary (join \",\" .Values.tap.namespaces) \"\" }}",
},
{
"name": "LICENSE",
"value": "{{ .Values.license }}",
},
{
"name": "SCRIPTING_ENV",
"value": "{}",
},
{
"name": "SCRIPTING_SCRIPTS",
"value": "[]",
},
},
"spec.containers[0].image": "{{ .Values.tap.docker.registry }}/hub:{{ .Values.tap.docker.tag }}",
"spec.containers[0].imagePullPolicy": "{{ .Values.tap.docker.imagePullPolicy }}",
"spec.containers[0].resources.limits.cpu": "{{ .Values.tap.resources.hub.limits.cpu }}",
"spec.containers[0].resources.limits.memory": "{{ .Values.tap.resources.hub.limits.memory }}",
"spec.containers[0].resources.requests.cpu": "{{ .Values.tap.resources.hub.requests.cpu }}",
"spec.containers[0].resources.requests.memory": "{{ .Values.tap.resources.hub.requests.memory }}",
"spec.containers[0].command[0]": "{{ .Values.tap.debug | ternary \"./hub -debug\" \"./hub\" }}",
}
var hubServiceMappings = serviceAccountMappings
var frontPodMappings = map[string]interface{}{
"metadata.namespace": "{{ .Values.tap.selfnamespace }}",
"spec.containers[0].image": "{{ .Values.tap.docker.registry }}/front:{{ .Values.tap.docker.tag }}",
"spec.containers[0].imagePullPolicy": "{{ .Values.tap.docker.imagePullPolicy }}",
}
var frontServiceMappings = serviceAccountMappings
var persistentVolumeMappings = map[string]interface{}{
"metadata.namespace": "{{ .Values.tap.selfnamespace }}",
"spec.resources.requests.storage": "{{ .Values.tap.storagelimit }}",
"spec.storageClassName": "{{ .Values.tap.storageclass }}",
}
var workerDaemonSetMappings = map[string]interface{}{
"metadata.namespace": "{{ .Values.tap.selfnamespace }}",
"spec.template.spec.containers[0].image": "{{ .Values.tap.docker.registry }}/worker:{{ .Values.tap.docker.tag }}",
"spec.template.spec.containers[0].imagePullPolicy": "{{ .Values.tap.docker.imagePullPolicy }}",
"spec.template.spec.containers[0].resources.limits.cpu": "{{ .Values.tap.resources.worker.limits.cpu }}",
"spec.template.spec.containers[0].resources.limits.memory": "{{ .Values.tap.resources.worker.limits.memory }}",
"spec.template.spec.containers[0].resources.requests.cpu": "{{ .Values.tap.resources.worker.requests.cpu }}",
"spec.template.spec.containers[0].resources.requests.memory": "{{ .Values.tap.resources.worker.requests.memory }}",
"spec.template.spec.containers[0].command[0]": "{{ .Values.tap.debug | ternary \"./worker -debug\" \"./worker\" }}",
"spec.template.spec.containers[0].command[6]": "{{ .Values.tap.packetcapture }}",
}
func init() {
rootCmd.AddCommand(helmChartCmd)
}
func runHelmChart() {
namespace,
serviceAccount,
clusterRole,
clusterRoleBinding,
hubPod,
hubService,
frontPod,
frontService,
persistentVolume,
workerDaemonSet,
err := generateManifests()
if err != nil {
log.Error().Err(err).Send()
return
}
err = dumpHelmChart(map[string]interface{}{
"00-namespace.yaml": template(namespace, namespaceMappings),
"01-service-account.yaml": template(serviceAccount, serviceAccountMappings),
"02-cluster-role.yaml": template(clusterRole, clusterRoleMappings),
"03-cluster-role-binding.yaml": template(clusterRoleBinding, clusterRoleBindingMappings),
"04-hub-pod.yaml": template(hubPod, hubPodMappings),
"05-hub-service.yaml": template(hubService, hubServiceMappings),
"06-front-pod.yaml": template(frontPod, frontPodMappings),
"07-front-service.yaml": template(frontService, frontServiceMappings),
"08-persistent-volume-claim.yaml": template(persistentVolume, persistentVolumeMappings),
"09-worker-daemon-set.yaml": template(workerDaemonSet, workerDaemonSetMappings),
})
if err != nil {
log.Error().Err(err).Send()
return
}
}
func template(object interface{}, mappings map[string]interface{}) (template interface{}) {
var err error
var data []byte
data, err = json.Marshal(object)
if err != nil {
log.Error().Err(err).Send()
return
}
var obj interface{}
obj, err = oj.Parse(data)
if err != nil {
log.Error().Err(err).Send()
return
}
for path, value := range mappings {
var x jp.Expr
x, err = jp.ParseString(path)
if err != nil {
log.Error().Err(err).Send()
return
}
err = x.Set(obj, value)
if err != nil {
log.Error().Err(err).Send()
return
}
}
newJson := oj.JSON(obj)
err = json.Unmarshal([]byte(newJson), &template)
if err != nil {
log.Error().Err(err).Send()
return
}
return
}
func dumpHelmChart(objects map[string]interface{}) error {
folder := filepath.Join(".", "helm-chart")
templatesFolder := filepath.Join(folder, "templates")
err := fsUtils.RemoveFilesByExtension(templatesFolder, "yaml")
if err != nil {
return err
}
err = os.MkdirAll(templatesFolder, os.ModePerm)
if err != nil {
return err
}
// Sort by filenames
filenames := make([]string, 0)
for filename := range objects {
filenames = append(filenames, filename)
}
sort.Strings(filenames)
// Generate templates
for _, filename := range filenames {
manifest, err := utils.PrettyYamlOmitEmpty(objects[filename])
if err != nil {
return err
}
path := filepath.Join(templatesFolder, filename)
err = os.WriteFile(path, []byte(manifestHeader+manifest), 0644)
if err != nil {
return err
}
log.Info().Msgf("Helm chart template generated: %s", path)
}
// Copy LICENSE
licenseSrcPath := filepath.Join(".", "LICENSE")
licenseDstPath := filepath.Join(folder, "LICENSE")
err = copy.Copy(licenseSrcPath, licenseDstPath)
if err != nil {
log.Warn().Err(err).Str("path", licenseSrcPath).Msg("Couldn't find the license:")
} else {
log.Info().Msgf("Helm chart license copied: %s", licenseDstPath)
}
// Generate Chart.yaml
chartMetadata := Metadata{
APIVersion: "v2",
Name: misc.Program,
Description: misc.Description,
Home: misc.Website,
Sources: []string{"https://github.com/kubeshark/kubeshark/tree/master/helm-chart"},
Keywords: []string{
"kubeshark",
"packet capture",
"traffic capture",
"traffic analyzer",
"network sniffer",
"observability",
"devops",
"microservice",
"forensics",
"api",
},
Maintainers: []*Maintainer{
{
Name: misc.Software,
Email: misc.Email,
URL: misc.Website,
},
},
Version: misc.Ver,
AppVersion: misc.Ver,
KubeVersion: fmt.Sprintf(">= %s-0", kubernetes.MinKubernetesServerVersion),
Type: "application",
}
chart, err := utils.PrettyYamlOmitEmpty(chartMetadata)
if err != nil {
return err
}
path := filepath.Join(folder, "Chart.yaml")
err = os.WriteFile(path, []byte(chart), 0644)
if err != nil {
return err
}
log.Info().Msgf("Helm chart Chart.yaml generated: %s", path)
// Generate values.yaml
values, err := utils.PrettyYaml(config.Config)
if err != nil {
return err
}
path = filepath.Join(folder, "values.yaml")
err = os.WriteFile(path, []byte(values), 0644)
if err != nil {
return err
}
log.Info().Msgf("Helm chart values.yaml generated: %s", path)
return nil
}

cmd/license.go (new file, 21 lines)
View File

@@ -0,0 +1,21 @@
package cmd

import (
    "fmt"

    "github.com/kubeshark/kubeshark/config"
    "github.com/spf13/cobra"
)

var licenseCmd = &cobra.Command{
    Use:   "license",
    Short: "Print the license loaded string",
    RunE: func(cmd *cobra.Command, args []string) error {
        fmt.Println(config.Config.License)
        return nil
    },
}

func init() {
    rootCmd.AddCommand(licenseCmd)
}

View File

@@ -1,223 +0,0 @@
package cmd
import (
"fmt"
"os"
"path/filepath"
"sort"
"github.com/creasty/defaults"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/docker"
"github.com/kubeshark/kubeshark/kubernetes"
"github.com/kubeshark/kubeshark/misc/fsUtils"
"github.com/kubeshark/kubeshark/utils"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
v1 "k8s.io/api/core/v1"
rbac "k8s.io/api/rbac/v1"
)
const manifestSeperator = "---"
const manifestHeader = "# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!\n" + manifestSeperator + "\n"
var manifestsCmd = &cobra.Command{
Use: "manifests",
Short: "Generate Kubernetes manifests of Kubeshark",
RunE: func(cmd *cobra.Command, args []string) error {
runManifests()
return nil
},
}
func init() {
rootCmd.AddCommand(manifestsCmd)
defaultManifestsConfig := config.ManifestsConfig{}
if err := defaults.Set(&defaultManifestsConfig); err != nil {
log.Debug().Err(err).Send()
}
manifestsCmd.Flags().Bool("dump", defaultManifestsConfig.Dump, "Enable the debug mode")
}
func runManifests() {
namespace,
serviceAccount,
clusterRole,
clusterRoleBinding,
hubPod,
hubService,
frontPod,
frontService,
persistentVolume,
workerDaemonSet,
err := generateManifests()
if err != nil {
log.Error().Err(err).Send()
return
}
if config.Config.Manifests.Dump {
err = dumpManifests(map[string]interface{}{
"00-namespace.yaml": namespace,
"01-service-account.yaml": serviceAccount,
"02-cluster-role.yaml": clusterRole,
"03-cluster-role-binding.yaml": clusterRoleBinding,
"04-hub-pod.yaml": hubPod,
"05-hub-service.yaml": hubService,
"06-front-pod.yaml": frontPod,
"07-front-service.yaml": frontService,
"08-persistent-volume-claim.yaml": persistentVolume,
"09-worker-daemon-set.yaml": workerDaemonSet,
})
} else {
err = printManifests([]interface{}{
namespace,
serviceAccount,
clusterRole,
clusterRoleBinding,
hubPod,
hubService,
frontPod,
frontService,
workerDaemonSet,
})
}
if err != nil {
log.Error().Err(err).Send()
return
}
}
func generateManifests() (
namespace *v1.Namespace,
serviceAccount *v1.ServiceAccount,
clusterRole *rbac.ClusterRole,
clusterRoleBinding *rbac.ClusterRoleBinding,
hubPod *v1.Pod,
hubService *v1.Service,
frontPod *v1.Pod,
frontService *v1.Service,
persistentVolumeClaim *v1.PersistentVolumeClaim,
workerDaemonSet *kubernetes.DaemonSet,
err error,
) {
var kubernetesProvider *kubernetes.Provider
kubernetesProvider, err = getKubernetesProviderForCli(true, true)
if err != nil {
return
}
namespace = kubernetesProvider.BuildNamespace(config.Config.Tap.SelfNamespace)
serviceAccount = kubernetesProvider.BuildServiceAccount()
clusterRole = kubernetesProvider.BuildClusterRole()
clusterRoleBinding = kubernetesProvider.BuildClusterRoleBinding()
hubPod, err = kubernetesProvider.BuildHubPod(&kubernetes.PodOptions{
Namespace: config.Config.Tap.SelfNamespace,
PodName: kubernetes.HubPodName,
PodImage: docker.GetHubImage(),
ServiceAccountName: kubernetes.ServiceAccountName,
Resources: config.Config.Tap.Resources.Hub,
ImagePullPolicy: config.Config.ImagePullPolicy(),
ImagePullSecrets: config.Config.ImagePullSecrets(),
Debug: config.Config.Tap.Debug,
})
if err != nil {
return
}
hubService = kubernetesProvider.BuildHubService(config.Config.Tap.SelfNamespace)
frontPod, err = kubernetesProvider.BuildFrontPod(&kubernetes.PodOptions{
Namespace: config.Config.Tap.SelfNamespace,
PodName: kubernetes.FrontPodName,
PodImage: docker.GetHubImage(),
ServiceAccountName: kubernetes.ServiceAccountName,
Resources: config.Config.Tap.Resources.Hub,
ImagePullPolicy: config.Config.ImagePullPolicy(),
ImagePullSecrets: config.Config.ImagePullSecrets(),
Debug: config.Config.Tap.Debug,
}, config.Config.Tap.Proxy.Host, fmt.Sprintf("%d", config.Config.Tap.Proxy.Hub.SrcPort))
if err != nil {
return
}
frontService = kubernetesProvider.BuildFrontService(config.Config.Tap.SelfNamespace)
persistentVolumeClaim, err = kubernetesProvider.BuildPersistentVolumeClaim()
if err != nil {
return
}
workerDaemonSet, err = kubernetesProvider.BuildWorkerDaemonSet(
docker.GetWorkerImage(),
kubernetes.WorkerDaemonSetName,
kubernetes.ServiceAccountName,
config.Config.Tap.Resources.Worker,
config.Config.ImagePullPolicy(),
config.Config.ImagePullSecrets(),
config.Config.Tap.ServiceMesh,
config.Config.Tap.Tls,
config.Config.Tap.Debug,
)
if err != nil {
return
}
return
}
func dumpManifests(objects map[string]interface{}) error {
folder := filepath.Join(".", "manifests")
err := fsUtils.RemoveFilesByExtension(folder, "yaml")
if err != nil {
return err
}
err = os.MkdirAll(folder, os.ModePerm)
if err != nil {
return err
}
// Sort by filenames
filenames := make([]string, 0)
for filename := range objects {
filenames = append(filenames, filename)
}
sort.Strings(filenames)
for _, filename := range filenames {
manifest, err := utils.PrettyYamlOmitEmpty(objects[filename])
if err != nil {
return err
}
path := filepath.Join(folder, filename)
err = os.WriteFile(path, []byte(manifestHeader+manifest), 0644)
if err != nil {
return err
}
log.Info().Msgf("Manifest generated: %s", path)
}
return nil
}
func printManifests(objects []interface{}) error {
for _, object := range objects {
manifest, err := utils.PrettyYamlOmitEmpty(object)
if err != nil {
return err
}
fmt.Println(manifestSeperator)
fmt.Println(manifest)
}
return nil
}

View File

@@ -2,7 +2,7 @@ package cmd
import (
"fmt"
"io/ioutil"
"io"
"net/http"
"os"
"time"
@@ -28,7 +28,7 @@ var proCmd = &cobra.Command{
}
const (
PRO_URL = "https://console.kubeshark.co"
PRO_URL = "https://console.kubeshark.co/cli"
PRO_PORT = 5252
)
@@ -40,19 +40,19 @@ func init() {
log.Debug().Err(err).Send()
}
proCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.SrcPort, "Provide a custom port for the Hub")
proCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.Port, "Provide a custom port for the Hub")
proCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the Hub")
}
func acquireLicense() {
hubUrl := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort)
hubUrl := kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Hub.Port)
response, err := http.Get(fmt.Sprintf("%s/echo", hubUrl))
if err != nil || response.StatusCode != 200 {
log.Info().Msg(fmt.Sprintf(utils.Yellow, "Couldn't connect to Hub. Establishing proxy..."))
runProxy(false, true)
}
connector = connect.NewConnector(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort), connect.DefaultRetries, connect.DefaultTimeout)
connector = connect.NewConnector(kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Hub.Port), connect.DefaultRetries, connect.DefaultTimeout)
log.Info().Str("url", PRO_URL).Msg("Opening in the browser:")
utils.OpenBrowser(PRO_URL)
@@ -61,17 +61,19 @@ func acquireLicense() {
}
func updateLicense(licenseKey string) {
log.Info().Str("key", licenseKey).Msg("Received license:")
config.Config.License = licenseKey
err := config.WriteConfig(&config.Config)
if err != nil {
log.Error().Err(err).Send()
}
connector.PostLicenseSingle(config.Config.License)
log.Info().Msg("Updated the license. Exiting.")
go func() {
connector.PostLicense(config.Config.License)
log.Info().Msg("Updated the license. Exiting.")
time.Sleep(2 * time.Second)
os.Exit(0)
}()
@@ -96,7 +98,7 @@ func runLicenseRecieverServer() {
})
ginApp.POST("/", func(c *gin.Context) {
data, err := ioutil.ReadAll(c.Request.Body)
data, err := io.ReadAll(c.Request.Body)
if err != nil {
log.Error().Err(err).Send()
c.AbortWithStatus(http.StatusBadRequest)
@@ -105,8 +107,6 @@ func runLicenseRecieverServer() {
licenseKey := string(data)
log.Info().Str("key", licenseKey).Msg("Received license:")
updateLicense(licenseKey)
})

View File

@@ -24,7 +24,8 @@ func init() {
log.Debug().Err(err).Send()
}
proxyCmd.Flags().Uint16(configStructs.ProxyFrontPortLabel, defaultTapConfig.Proxy.Front.SrcPort, "Provide a custom port for the front-end proxy/port-forward")
proxyCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.SrcPort, "Provide a custom port for the Hub proxy/port-forward")
proxyCmd.Flags().Uint16(configStructs.ProxyFrontPortLabel, defaultTapConfig.Proxy.Front.Port, "Provide a custom port for the front-end proxy/port-forward")
proxyCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.Port, "Provide a custom port for the Hub proxy/port-forward")
proxyCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the proxy/port-forward")
proxyCmd.Flags().StringP(configStructs.ReleaseNamespaceLabel, "s", defaultTapConfig.Release.Namespace, "Release namespace of Kubeshark")
}

View File

@@ -23,7 +23,7 @@ func runProxy(block bool, noBrowser bool) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
exists, err := kubernetesProvider.DoesServiceExist(ctx, config.Config.Tap.SelfNamespace, kubernetes.FrontServiceName)
exists, err := kubernetesProvider.DoesServiceExist(ctx, config.Config.Tap.Release.Namespace, kubernetes.FrontServiceName)
if err != nil {
log.Error().
Str("service", kubernetes.FrontServiceName).
@@ -42,7 +42,7 @@ func runProxy(block bool, noBrowser bool) {
return
}
exists, err = kubernetesProvider.DoesServiceExist(ctx, config.Config.Tap.SelfNamespace, kubernetes.HubServiceName)
exists, err = kubernetesProvider.DoesServiceExist(ctx, config.Config.Tap.Release.Namespace, kubernetes.HubServiceName)
if err != nil {
log.Error().
Str("service", kubernetes.HubServiceName).
@@ -63,12 +63,12 @@ func runProxy(block bool, noBrowser bool) {
var establishedProxy bool
hubUrl := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort)
hubUrl := kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Hub.Port)
response, err := http.Get(fmt.Sprintf("%s/echo", hubUrl))
if err == nil && response.StatusCode == 200 {
log.Info().
Str("service", kubernetes.HubServiceName).
Int("port", int(config.Config.Tap.Proxy.Hub.SrcPort)).
Int("port", int(config.Config.Tap.Proxy.Hub.Port)).
Msg("Found a running service.")
okToOpen("Hub", hubUrl, true)
@@ -79,8 +79,8 @@ func runProxy(block bool, noBrowser bool) {
kubernetes.HubServiceName,
kubernetes.HubPodName,
configStructs.ProxyHubPortLabel,
config.Config.Tap.Proxy.Hub.SrcPort,
config.Config.Tap.Proxy.Hub.DstPort,
config.Config.Tap.Proxy.Hub.Port,
configStructs.ContainerPort,
"/echo",
)
connector := connect.NewConnector(hubUrl, connect.DefaultRetries, connect.DefaultTimeout)
@@ -93,12 +93,12 @@ func runProxy(block bool, noBrowser bool) {
okToOpen("Hub", hubUrl, true)
}
frontUrl := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Front.SrcPort)
frontUrl := kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Front.Port)
response, err = http.Get(fmt.Sprintf("%s/", frontUrl))
if err == nil && response.StatusCode == 200 {
log.Info().
Str("service", kubernetes.FrontServiceName).
Int("port", int(config.Config.Tap.Proxy.Front.SrcPort)).
Int("port", int(config.Config.Tap.Proxy.Front.Port)).
Msg("Found a running service.")
okToOpen("Kubeshark", frontUrl, noBrowser)
@@ -109,8 +109,8 @@ func runProxy(block bool, noBrowser bool) {
kubernetes.FrontServiceName,
kubernetes.FrontPodName,
configStructs.ProxyFrontPortLabel,
config.Config.Tap.Proxy.Front.SrcPort,
config.Config.Tap.Proxy.Front.DstPort,
config.Config.Tap.Proxy.Front.Port,
configStructs.ContainerPort,
"",
)
connector := connect.NewConnector(frontUrl, connect.DefaultRetries, connect.DefaultTimeout)

View File

@@ -34,8 +34,9 @@ func init() {
log.Debug().Err(err).Send()
}
scriptsCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.SrcPort, "Provide a custom port for the Hub")
scriptsCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.Port, "Provide a custom port for the Hub")
scriptsCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the Hub")
scriptsCmd.Flags().StringP(configStructs.ReleaseNamespaceLabel, "s", defaultTapConfig.Release.Namespace, "Release namespace of Kubeshark")
}
func runScripts() {
@@ -44,14 +45,14 @@ func runScripts() {
return
}
hubUrl := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort)
hubUrl := kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Hub.Port)
response, err := http.Get(fmt.Sprintf("%s/echo", hubUrl))
if err != nil || response.StatusCode != 200 {
log.Info().Msg(fmt.Sprintf(utils.Yellow, "Couldn't connect to Hub. Establishing proxy..."))
runProxy(false, true)
}
connector = connect.NewConnector(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort), connect.DefaultRetries, connect.DefaultTimeout)
connector = connect.NewConnector(kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Hub.Port), connect.DefaultRetries, connect.DefaultTimeout)
watchScripts(true)
}

View File

@@ -47,17 +47,18 @@ func init() {
tapCmd.Flags().StringP(configStructs.DockerTagLabel, "t", defaultTapConfig.Docker.Tag, "The tag of the Docker images that are going to be pulled")
tapCmd.Flags().String(configStructs.DockerImagePullPolicy, defaultTapConfig.Docker.ImagePullPolicy, "ImagePullPolicy for the Docker images")
tapCmd.Flags().StringSlice(configStructs.DockerImagePullSecrets, defaultTapConfig.Docker.ImagePullSecrets, "ImagePullSecrets for the Docker images")
tapCmd.Flags().Uint16(configStructs.ProxyFrontPortLabel, defaultTapConfig.Proxy.Front.SrcPort, "Provide a custom port for the front-end proxy/port-forward")
tapCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.SrcPort, "Provide a custom port for the Hub proxy/port-forward")
tapCmd.Flags().Uint16(configStructs.ProxyFrontPortLabel, defaultTapConfig.Proxy.Front.Port, "Provide a custom port for the front-end proxy/port-forward")
tapCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.Port, "Provide a custom port for the Hub proxy/port-forward")
tapCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the proxy/port-forward")
tapCmd.Flags().StringSliceP(configStructs.NamespacesLabel, "n", defaultTapConfig.Namespaces, "Namespaces selector")
tapCmd.Flags().StringP(configStructs.SelfNamespaceLabel, "s", defaultTapConfig.SelfNamespace, "Self-namespace of Kubeshark")
tapCmd.Flags().StringP(configStructs.ReleaseNamespaceLabel, "s", defaultTapConfig.Release.Namespace, "Release namespace of Kubeshark")
tapCmd.Flags().Bool(configStructs.PersistentStorageLabel, defaultTapConfig.PersistentStorage, "Enable persistent storage (PersistentVolumeClaim)")
tapCmd.Flags().String(configStructs.StorageLimitLabel, defaultTapConfig.StorageLimit, "Override the default storage limit (per node)")
tapCmd.Flags().String(configStructs.StorageClassLabel, defaultTapConfig.StorageClass, "Override the default storage class of the PersistentVolumeClaim (per node)")
tapCmd.Flags().Bool(configStructs.DryRunLabel, defaultTapConfig.DryRun, "Preview of all pods matching the regex, without tapping them")
tapCmd.Flags().StringP(configStructs.PcapLabel, "p", defaultTapConfig.Pcap, fmt.Sprintf("Capture from a PCAP snapshot of %s (.tar.gz) using your Docker Daemon instead of Kubernetes", misc.Software))
tapCmd.Flags().StringP(configStructs.PcapLabel, "p", defaultTapConfig.Pcap, fmt.Sprintf("Capture from a PCAP snapshot of %s (.tar.gz) using your Docker Daemon instead of Kubernetes. TAR path from the file system or an S3 URI (object, folder or the bucket)", misc.Software))
tapCmd.Flags().Bool(configStructs.ServiceMeshLabel, defaultTapConfig.ServiceMesh, "Capture the encrypted traffic if the cluster is configured with a service mesh and with mTLS")
tapCmd.Flags().Bool(configStructs.TlsLabel, defaultTapConfig.Tls, "Capture the traffic that's encrypted with OpenSSL or Go crypto/tls libraries")
tapCmd.Flags().Bool(configStructs.IgnoreTaintedLabel, defaultTapConfig.IgnoreTainted, "Ignore tainted pods while running Worker DaemonSet")
tapCmd.Flags().Bool(configStructs.DebugLabel, defaultTapConfig.Debug, "Enable the debug mode")
tapCmd.Flags().Bool(configStructs.IngressEnabledLabel, defaultTapConfig.Ingress.Enabled, "Enable Ingress")
}
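The reworded `--pcap` help text above notes that the flag now also accepts an S3 URI (object, folder or the whole bucket). A minimal config-file sketch of the same setting, assuming the `tap.pcap` key from the yaml tags shown later in this diff; the bucket and path are placeholders:

```yaml
# hypothetical config fragment; the key name comes from the `pcap` yaml tag on
# TapConfig, the URI form from the flag help text above
tap:
  pcap: s3://example-bucket/captures/   # a single object, a folder, or a whole bucket
```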

View File

@@ -1,18 +1,30 @@
package cmd
import (
"archive/tar"
"bufio"
"compress/gzip"
"context"
"encoding/json"
"fmt"
"io"
"net/url"
"os"
"path/filepath"
"strings"
"sync"
"github.com/aws/aws-sdk-go-v2/aws"
awsConfig "github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/feature/s3/manager"
"github.com/aws/aws-sdk-go-v2/service/s3"
s3Types "github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/client"
"github.com/docker/go-connections/nat"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/config/configStructs"
"github.com/kubeshark/kubeshark/docker"
"github.com/kubeshark/kubeshark/internal/connect"
"github.com/kubeshark/kubeshark/kubernetes"
@@ -61,6 +73,7 @@ func logPullingImage(image string, reader io.ReadCloser) {
}
func pullImages(ctx context.Context, cli *client.Client, imageFront string, imageHub string, imageWorker string) error {
log.Info().Msg("Pulling images...")
readerFront, err := cli.ImagePull(ctx, imageFront, types.ImagePullOptions{})
if err != nil {
return err
@@ -92,7 +105,7 @@ func cleanUpOldContainers(
nameHub string,
nameWorker string,
) error {
containers, err := cli.ContainerList(ctx, types.ContainerListOptions{})
containers, err := cli.ContainerList(ctx, types.ContainerListOptions{All: true})
if err != nil {
return err
}
@@ -141,10 +154,10 @@ func createAndStartContainers(
hostConfigFront := &container.HostConfig{
PortBindings: nat.PortMap{
nat.Port(fmt.Sprintf("%d/tcp", config.Config.Tap.Proxy.Front.DstPort)): []nat.PortBinding{
nat.Port(fmt.Sprintf("%d/tcp", configStructs.ContainerPort)): []nat.PortBinding{
{
HostIP: hostIP,
HostPort: fmt.Sprintf("%d", config.Config.Tap.Proxy.Front.SrcPort),
HostPort: fmt.Sprintf("%d", config.Config.Tap.Proxy.Front.Port),
},
},
},
@@ -156,7 +169,7 @@ func createAndStartContainers(
Env: []string{
"REACT_APP_DEFAULT_FILTER= ",
"REACT_APP_HUB_HOST= ",
fmt.Sprintf("REACT_APP_HUB_PORT=%d", config.Config.Tap.Proxy.Hub.SrcPort),
fmt.Sprintf("REACT_APP_HUB_PORT=:%d", config.Config.Tap.Proxy.Hub.Port),
},
}, hostConfigFront, nil, nil, nameFront)
if err != nil {
@@ -165,16 +178,16 @@ func createAndStartContainers(
hostConfigHub := &container.HostConfig{
PortBindings: nat.PortMap{
nat.Port(fmt.Sprintf("%d/tcp", config.Config.Tap.Proxy.Hub.DstPort)): []nat.PortBinding{
nat.Port(fmt.Sprintf("%d/tcp", config.Config.Tap.Proxy.Hub.SrvPort)): []nat.PortBinding{
{
HostIP: hostIP,
HostPort: fmt.Sprintf("%d", config.Config.Tap.Proxy.Hub.SrcPort),
HostPort: fmt.Sprintf("%d", config.Config.Tap.Proxy.Hub.Port),
},
},
},
}
cmdHub := []string{"-port", fmt.Sprintf("%d", config.Config.Tap.Proxy.Hub.DstPort)}
cmdHub := []string{"-port", fmt.Sprintf("%d", config.Config.Tap.Proxy.Hub.SrvPort)}
if config.DebugMode {
cmdHub = append(cmdHub, fmt.Sprintf("-%s", config.DebugFlag))
}
@@ -183,13 +196,13 @@ func createAndStartContainers(
Image: imageHub,
Cmd: cmdHub,
Tty: false,
ExposedPorts: nat.PortSet{nat.Port(fmt.Sprintf("%d/tcp", config.Config.Tap.Proxy.Hub.DstPort)): {}},
ExposedPorts: nat.PortSet{nat.Port(fmt.Sprintf("%d/tcp", config.Config.Tap.Proxy.Hub.SrvPort)): {}},
}, hostConfigHub, nil, nil, nameHub)
if err != nil {
return
}
cmdWorker := []string{"-f", "./import", "-port", fmt.Sprintf("%d", config.Config.Tap.Proxy.Worker.DstPort)}
cmdWorker := []string{"-f", "./import", "-port", fmt.Sprintf("%d", config.Config.Tap.Proxy.Worker.SrvPort)}
if config.DebugMode {
cmdWorker = append(cmdWorker, fmt.Sprintf("-%s", config.DebugFlag))
}
@@ -270,7 +283,172 @@ func stopAndRemoveContainers(
return
}
func pcap(tarPath string) {
func downloadTarFromS3(s3Url string) (tarPath string, err error) {
u, err := url.Parse(s3Url)
if err != nil {
return
}
bucket := u.Host
key := u.Path[1:]
var cfg aws.Config
cfg, err = awsConfig.LoadDefaultConfig(context.TODO())
if err != nil {
return
}
client := s3.NewFromConfig(cfg)
var listObjectsOutput *s3.ListObjectsV2Output
listObjectsOutput, err = client.ListObjectsV2(context.TODO(), &s3.ListObjectsV2Input{
Bucket: aws.String(bucket),
Prefix: aws.String(key),
})
if err != nil {
return
}
var file *os.File
file, err = os.CreateTemp(os.TempDir(), fmt.Sprintf("%s_*.%s", strings.TrimSuffix(filepath.Base(key), filepath.Ext(key)), filepath.Ext(key)))
if err != nil {
return
}
defer file.Close()
log.Info().Str("bucket", bucket).Str("key", key).Msg("Downloading from S3")
downloader := manager.NewDownloader(client)
_, err = downloader.Download(context.TODO(), file, &s3.GetObjectInput{
Bucket: aws.String(bucket),
Key: aws.String(key),
})
if err != nil {
log.Info().Err(err).Msg("S3 object is not found. Assuming URL is not a single object. Listing the objects in given folder or the bucket to download...")
var tempDirPath string
tempDirPath, err = os.MkdirTemp(os.TempDir(), "kubeshark_*")
if err != nil {
return
}
var wg sync.WaitGroup
for _, object := range listObjectsOutput.Contents {
wg.Add(1)
go func(object s3Types.Object) {
defer wg.Done()
objectKey := *object.Key
fullPath := filepath.Join(tempDirPath, objectKey)
err = os.MkdirAll(filepath.Dir(fullPath), os.ModePerm)
if err != nil {
return
}
var objectFile *os.File
objectFile, err = os.Create(fullPath)
if err != nil {
return
}
defer objectFile.Close()
log.Info().Str("bucket", bucket).Str("key", objectKey).Msg("Downloading from S3")
downloader := manager.NewDownloader(client)
_, err = downloader.Download(context.TODO(), objectFile, &s3.GetObjectInput{
Bucket: aws.String(bucket),
Key: aws.String(objectKey),
})
if err != nil {
return
}
}(object)
}
wg.Wait()
tarPath, err = tarDirectory(tempDirPath)
return
}
tarPath = file.Name()
return
}
func tarDirectory(dirPath string) (string, error) {
tarPath := fmt.Sprintf("%s.tar.gz", dirPath)
var file *os.File
file, err := os.Create(tarPath)
if err != nil {
return "", err
}
defer file.Close()
gzipWriter := gzip.NewWriter(file)
defer gzipWriter.Close()
tarWriter := tar.NewWriter(gzipWriter)
defer tarWriter.Close()
walker := func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() {
return nil
}
file, err := os.Open(path)
if err != nil {
return err
}
defer file.Close()
stat, err := file.Stat()
if err != nil {
return err
}
header := &tar.Header{
Name: path[len(dirPath)+1:],
Size: stat.Size(),
Mode: int64(stat.Mode()),
ModTime: stat.ModTime(),
}
err = tarWriter.WriteHeader(header)
if err != nil {
return err
}
_, err = io.Copy(tarWriter, file)
if err != nil {
return err
}
return nil
}
err = filepath.Walk(dirPath, walker)
if err != nil {
return "", err
}
return tarPath, nil
}
func pcap(tarPath string) error {
if strings.HasPrefix(tarPath, "s3://") {
var err error
tarPath, err = downloadTarFromS3(tarPath)
if err != nil {
log.Error().Err(err).Msg("Failed downloading from S3")
return err
}
}
log.Info().Str("tar-path", tarPath).Msg("Openning")
docker.SetRegistry(config.Config.Tap.Docker.Registry)
docker.SetTag(config.Config.Tap.Docker.Tag)
@@ -278,7 +456,7 @@ func pcap(tarPath string) {
cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
if err != nil {
log.Error().Err(err).Send()
return
return err
}
defer cli.Close()
@@ -289,13 +467,13 @@ func pcap(tarPath string) {
err = pullImages(ctx, cli, imageFront, imageHub, imageWorker)
if err != nil {
log.Error().Err(err).Send()
return
return err
}
tarFile, err := os.Open(tarPath)
if err != nil {
log.Error().Err(err).Send()
return
return err
}
defer tarFile.Close()
tarReader := bufio.NewReader(tarFile)
@@ -310,7 +488,7 @@ func pcap(tarPath string) {
)
if err != nil {
log.Error().Err(err).Send()
return
return err
}
workerPod := &v1.Pod{
@@ -328,7 +506,7 @@ func pcap(tarPath string) {
},
}
connector = connect.NewConnector(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort), connect.DefaultRetries, connect.DefaultTimeout)
connector = connect.NewConnector(kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Hub.Port), connect.DefaultRetries, connect.DefaultTimeout)
connector.PostWorkerPodToHub(workerPod)
// License
@@ -337,10 +515,10 @@ func pcap(tarPath string) {
}
log.Info().
Str("url", kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort)).
Str("url", kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Hub.Port)).
Msg(fmt.Sprintf(utils.Green, "Hub is available at:"))
url := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Front.SrcPort)
url := kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Front.Port)
log.Info().Str("url", url).Msg(fmt.Sprintf(utils.Green, fmt.Sprintf("%s is available at:", misc.Software)))
if !config.Config.HeadlessMode {
@@ -354,5 +532,8 @@ func pcap(tarPath string) {
err = stopAndRemoveContainers(ctx, cli, respFront, respHub, respWorker)
if err != nil {
log.Error().Err(err).Send()
return err
}
return nil
}

View File

@@ -2,21 +2,19 @@ package cmd
import (
"context"
"errors"
"fmt"
"os"
"regexp"
"sync"
"time"
"github.com/kubeshark/kubeshark/docker"
"github.com/kubeshark/kubeshark/internal/connect"
"github.com/kubeshark/kubeshark/kubernetes/helm"
"github.com/kubeshark/kubeshark/misc"
"github.com/kubeshark/kubeshark/resources"
"github.com/kubeshark/kubeshark/utils"
core "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/config/configStructs"
@@ -28,9 +26,8 @@ import (
const cleanupTimeout = time.Minute
type tapState struct {
startTime time.Time
targetNamespaces []string
selfServiceAccountExists bool
startTime time.Time
targetNamespaces []string
}
var state tapState
@@ -52,15 +49,23 @@ func tap() {
docker.SetTag(config.Config.Tap.Docker.Tag)
log.Info().Str("registry", docker.GetRegistry()).Str("tag", docker.GetTag()).Msg("Using Docker:")
if config.Config.Tap.Pcap != "" {
pcap(config.Config.Tap.Pcap)
err := pcap(config.Config.Tap.Pcap)
if err != nil {
os.Exit(1)
}
return
}
if !config.Config.Tap.PersistentStorage {
config.Config.Tap.StorageLimit = "200Mi"
log.Warn().Msg("Storage limit cannot be modified while persistentstorage is set to false!")
}
log.Info().
Str("limit", config.Config.Tap.StorageLimit).
Msg(fmt.Sprintf("%s will store the traffic up to a limit (per node). Oldest TCP/UDP streams will be removed once the limit is reached.", misc.Software))
connector = connect.NewConnector(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort), connect.DefaultRetries, connect.DefaultTimeout)
connector = connect.NewConnector(kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Hub.Port), connect.DefaultRetries, connect.DefaultTimeout)
kubernetesProvider, err := getKubernetesProviderForCli(false, false)
if err != nil {
@@ -72,13 +77,6 @@ func tap() {
state.targetNamespaces = kubernetesProvider.GetNamespaces()
if config.Config.IsNsRestrictedMode() {
if len(state.targetNamespaces) != 1 || !utils.Contains(state.targetNamespaces, config.Config.Tap.SelfNamespace) {
log.Error().Msg(fmt.Sprintf("%s can't resolve IPs in other namespaces when running in namespace restricted mode. You can use the same namespace for --%s and --%s", misc.Software, configStructs.NamespacesLabel, configStructs.SelfNamespaceLabel))
return
}
}
log.Info().Strs("namespaces", state.targetNamespaces).Msg("Targeting pods in:")
if err := printTargetedPodsPreview(ctx, kubernetesProvider, state.targetNamespaces); err != nil {
@@ -90,19 +88,17 @@ func tap() {
}
log.Info().Msg(fmt.Sprintf("Waiting for the creation of %s resources...", misc.Software))
if state.selfServiceAccountExists, err = resources.CreateHubResources(ctx, kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.Tap.SelfNamespace, config.Config.Tap.Resources.Hub, config.Config.ImagePullPolicy(), config.Config.ImagePullSecrets(), config.Config.Tap.Debug); err != nil {
var statusError *k8serrors.StatusError
if errors.As(err, &statusError) && (statusError.ErrStatus.Reason == metav1.StatusReasonAlreadyExists) {
log.Info().Msg(fmt.Sprintf("%s is already running in this namespace, change the `selfnamespace` configuration or run `%s clean` to remove the currently running %s instance.", misc.Software, misc.Program, misc.Software))
postHubStarted(ctx, kubernetesProvider, cancel, true)
log.Info().Msg("Updated Hub about the changes in the config. Exiting.")
printProxyCommandSuggestion()
} else {
defer resources.CleanUpSelfResources(ctx, cancel, kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.Tap.SelfNamespace)
log.Error().Err(errormessage.FormatError(err)).Msg("Error creating resources!")
}
return
rel, err := helm.NewHelm(
config.Config.Tap.Release.Repo,
config.Config.Tap.Release.Name,
config.Config.Tap.Release.Namespace,
).Install()
if err != nil {
log.Error().Err(err).Send()
os.Exit(1)
} else {
log.Info().Msgf("Installed the Helm release: %s", rel.Name)
}
defer finishTapExecution(kubernetesProvider)
@@ -113,7 +109,10 @@ func tap() {
// block until exit signal or error
utils.WaitForTermination(ctx, cancel)
printProxyCommandSuggestion()
if !config.Config.Tap.Ingress.Enabled {
printProxyCommandSuggestion()
}
}
func printProxyCommandSuggestion() {
@@ -123,7 +122,7 @@ func printProxyCommandSuggestion() {
}
func finishTapExecution(kubernetesProvider *kubernetes.Provider) {
finishSelfExecution(kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.Tap.SelfNamespace, true)
finishSelfExecution(kubernetesProvider)
}
/*
@@ -154,9 +153,9 @@ func printNoPodsFoundSuggestion(targetNamespaces []string) {
}
func watchHubPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc) {
podExactRegex := regexp.MustCompile(fmt.Sprintf("^%s$", kubernetes.HubPodName))
podExactRegex := regexp.MustCompile(fmt.Sprintf("^%s", kubernetes.HubPodName))
podWatchHelper := kubernetes.NewPodWatchHelper(kubernetesProvider, podExactRegex)
eventChan, errorChan := kubernetes.FilteredWatch(ctx, podWatchHelper, []string{config.Config.Tap.SelfNamespace}, podWatchHelper)
eventChan, errorChan := kubernetes.FilteredWatch(ctx, podWatchHelper, []string{config.Config.Tap.Release.Namespace}, podWatchHelper)
isPodReady := false
timeAfter := time.After(120 * time.Second)
@@ -223,7 +222,7 @@ func watchHubPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, c
log.Error().
Str("pod", kubernetes.HubPodName).
Str("namespace", config.Config.Tap.SelfNamespace).
Str("namespace", config.Config.Tap.Release.Namespace).
Err(err).
Msg("Failed creating pod.")
cancel()
@@ -245,9 +244,9 @@ func watchHubPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, c
}
func watchFrontPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc) {
podExactRegex := regexp.MustCompile(fmt.Sprintf("^%s$", kubernetes.FrontPodName))
podExactRegex := regexp.MustCompile(fmt.Sprintf("^%s", kubernetes.FrontPodName))
podWatchHelper := kubernetes.NewPodWatchHelper(kubernetesProvider, podExactRegex)
eventChan, errorChan := kubernetes.FilteredWatch(ctx, podWatchHelper, []string{config.Config.Tap.SelfNamespace}, podWatchHelper)
eventChan, errorChan := kubernetes.FilteredWatch(ctx, podWatchHelper, []string{config.Config.Tap.Release.Namespace}, podWatchHelper)
isPodReady := false
timeAfter := time.After(120 * time.Second)
@@ -312,10 +311,9 @@ func watchFrontPod(ctx context.Context, kubernetesProvider *kubernetes.Provider,
log.Error().
Str("pod", kubernetes.FrontPodName).
Str("namespace", config.Config.Tap.SelfNamespace).
Str("namespace", config.Config.Tap.Release.Namespace).
Err(err).
Msg("Failed creating pod.")
cancel()
case <-timeAfter:
if !isPodReady {
@@ -336,7 +334,7 @@ func watchFrontPod(ctx context.Context, kubernetesProvider *kubernetes.Provider,
func watchHubEvents(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc) {
podExactRegex := regexp.MustCompile(fmt.Sprintf("^%s", kubernetes.HubPodName))
eventWatchHelper := kubernetes.NewEventWatchHelper(kubernetesProvider, podExactRegex, "pod")
eventChan, errorChan := kubernetes.FilteredWatch(ctx, eventWatchHelper, []string{config.Config.Tap.SelfNamespace}, eventWatchHelper)
eventChan, errorChan := kubernetes.FilteredWatch(ctx, eventWatchHelper, []string{config.Config.Tap.Release.Namespace}, eventWatchHelper)
for {
select {
case wEvent, ok := <-eventChan:
@@ -409,29 +407,12 @@ func postHubStarted(ctx context.Context, kubernetesProvider *kubernetes.Provider
kubernetes.HubServiceName,
kubernetes.HubPodName,
configStructs.ProxyHubPortLabel,
config.Config.Tap.Proxy.Hub.SrcPort,
config.Config.Tap.Proxy.Hub.DstPort,
config.Config.Tap.Proxy.Hub.Port,
configStructs.ContainerPort,
"/echo",
)
if !update {
// Create workers
err := kubernetes.CreateWorkers(
kubernetesProvider,
state.selfServiceAccountExists,
ctx,
config.Config.Tap.SelfNamespace,
config.Config.Tap.Resources.Worker,
config.Config.ImagePullPolicy(),
config.Config.ImagePullSecrets(),
config.Config.Tap.ServiceMesh,
config.Config.Tap.Tls,
config.Config.Tap.Debug,
)
if err != nil {
log.Error().Err(err).Send()
}
} else {
if update {
// Pod regex
connector.PostRegexToHub(config.Config.Tap.PodRegexStr, state.targetNamespaces)
@@ -458,9 +439,9 @@ func postHubStarted(ctx context.Context, kubernetesProvider *kubernetes.Provider
connector.PostScriptDone()
}
if !update {
if !update && !config.Config.Tap.Ingress.Enabled {
// Hub proxy URL
url := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort)
url := kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Hub.Port)
log.Info().Str("url", url).Msg(fmt.Sprintf(utils.Green, "Hub is available at:"))
}
@@ -476,12 +457,17 @@ func postFrontStarted(ctx context.Context, kubernetesProvider *kubernetes.Provid
kubernetes.FrontServiceName,
kubernetes.FrontPodName,
configStructs.ProxyFrontPortLabel,
config.Config.Tap.Proxy.Front.SrcPort,
config.Config.Tap.Proxy.Front.DstPort,
config.Config.Tap.Proxy.Front.Port,
configStructs.ContainerPort,
"",
)
url := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Front.SrcPort)
var url string
if config.Config.Tap.Ingress.Enabled {
url = fmt.Sprintf("http://%s", config.Config.Tap.Ingress.Host)
} else {
url = kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Front.Port)
}
log.Info().Str("url", url).Msg(fmt.Sprintf(utils.Green, fmt.Sprintf("%s is available at:", misc.Software)))
if !config.Config.HeadlessMode {

View File

@@ -56,11 +56,13 @@ func InitConfig(cmd *cobra.Command) error {
"console",
"pro",
"manifests",
"license",
}, cmd.Use) {
go version.CheckNewerVersion()
}
Config = CreateDefaultConfig()
Config.Tap.Debug = DebugMode
cmdName = cmd.Name()
if utils.Contains([]string{
"clean",
@@ -79,6 +81,7 @@ func InitConfig(cmd *cobra.Command) error {
ConfigFilePath = path.Join(misc.GetDotFolderPath(), "config.yaml")
if err := loadConfigFile(&Config, utils.Contains([]string{
"manifests",
"license",
}, cmd.Use)); err != nil {
if !os.IsNotExist(err) {
return fmt.Errorf("invalid config, %w\n"+

View File

@@ -5,7 +5,6 @@ import (
"path/filepath"
"github.com/kubeshark/kubeshark/config/configStructs"
"github.com/kubeshark/kubeshark/misc"
v1 "k8s.io/api/core/v1"
"k8s.io/client-go/util/homedir"
)
@@ -15,28 +14,42 @@ const (
)
func CreateDefaultConfig() ConfigStruct {
return ConfigStruct{}
return ConfigStruct{
Tap: configStructs.TapConfig{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: "kubernetes.io/os",
Operator: v1.NodeSelectorOpIn,
Values: []string{"linux"},
},
},
},
},
},
}
}
type KubeConfig struct {
ConfigPathStr string `yaml:"configpath"`
Context string `yaml:"context"`
ConfigPathStr string `yaml:"configpath" json:"configpath"`
Context string `yaml:"context" json:"context"`
}
type ManifestsConfig struct {
Dump bool `yaml:"dump"`
Dump bool `yaml:"dump" json:"dump"`
}
type ConfigStruct struct {
Tap configStructs.TapConfig `yaml:"tap"`
Logs configStructs.LogsConfig `yaml:"logs"`
Config configStructs.ConfigConfig `yaml:"config,omitempty"`
Kube KubeConfig `yaml:"kube"`
DumpLogs bool `yaml:"dumplogs" default:"false"`
HeadlessMode bool `yaml:"headless" default:"false"`
License string `yaml:"license" default:""`
Scripting configStructs.ScriptingConfig `yaml:"scripting"`
Manifests ManifestsConfig `yaml:"manifests,omitempty"`
Tap configStructs.TapConfig `yaml:"tap" json:"tap"`
Logs configStructs.LogsConfig `yaml:"logs" json:"logs"`
Config configStructs.ConfigConfig `yaml:"config,omitempty" json:"config,omitempty"`
Kube KubeConfig `yaml:"kube" json:"kube"`
DumpLogs bool `yaml:"dumplogs" json:"dumplogs" default:"false"`
HeadlessMode bool `yaml:"headless" json:"headless" default:"false"`
License string `yaml:"license" json:"license" default:""`
Scripting configStructs.ScriptingConfig `yaml:"scripting" json:"scripting"`
Manifests ManifestsConfig `yaml:"manifests,omitempty" json:"manifests,omitempty"`
}
func (config *ConfigStruct) ImagePullPolicy() v1.PullPolicy {
@@ -52,10 +65,6 @@ func (config *ConfigStruct) ImagePullSecrets() []v1.LocalObjectReference {
return ref
}
func (config *ConfigStruct) IsNsRestrictedMode() bool {
return config.Tap.SelfNamespace != misc.Program // Notice "kubeshark" string must match the default SelfNamespace
}
func (config *ConfigStruct) KubeConfigPath() string {
if config.Kube.ConfigPathStr != "" {
return config.Kube.ConfigPathStr

View File

@@ -5,5 +5,5 @@ const (
)
type ConfigConfig struct {
Regenerate bool `yaml:"regenerate,omitempty" default:"false" readonly:""`
Regenerate bool `yaml:"regenerate,omitempty" json:"regenerate,omitempty" default:"false" readonly:""`
}

View File

@@ -13,7 +13,7 @@ const (
)
type LogsConfig struct {
FileStr string `yaml:"file"`
FileStr string `yaml:"file" json:"file"`
}
func (config *LogsConfig) Validate() error {

View File

@@ -2,7 +2,7 @@ package configStructs
import (
"io/fs"
"io/ioutil"
"os"
"path/filepath"
"github.com/kubeshark/kubeshark/misc"
@@ -10,9 +10,9 @@ import (
)
type ScriptingConfig struct {
Env map[string]interface{} `yaml:"env"`
Source string `yaml:"source" default:""`
WatchScripts bool `yaml:"watchScripts" default:"true"`
Env map[string]interface{} `yaml:"env" json:"env"`
Source string `yaml:"source" json:"source" default:""`
WatchScripts bool `yaml:"watchscripts" json:"watchscripts" default:"true"`
}
func (config *ScriptingConfig) GetScripts() (scripts []*misc.Script, err error) {
@@ -20,8 +20,8 @@ func (config *ScriptingConfig) GetScripts() (scripts []*misc.Script, err error)
return
}
var files []fs.FileInfo
files, err = ioutil.ReadDir(config.Source)
var files []fs.DirEntry
files, err = os.ReadDir(config.Source)
if err != nil {
return
}
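Since the yaml tag for `WatchScripts` changes from `watchScripts` to `watchscripts` here, existing config files need the lower-case key. A small sketch of the scripting block with placeholder values; only the key names are taken from the struct tags above:

```yaml
scripting:
  env:
    EXAMPLE_TOKEN: some-value   # hypothetical entry; env is an arbitrary key/value map
  source: /path/to/scripts      # hypothetical directory, read via os.ReadDir above
  watchscripts: true            # note the renamed tag (was watchScripts)
```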

View File

@@ -5,6 +5,7 @@ import (
"regexp"
v1 "k8s.io/api/core/v1"
networking "k8s.io/api/networking/v1"
)
const (
@@ -16,84 +17,112 @@ const (
ProxyHubPortLabel = "proxy-hub-port"
ProxyHostLabel = "proxy-host"
NamespacesLabel = "namespaces"
SelfNamespaceLabel = "selfnamespace"
ReleaseNamespaceLabel = "release-namespace"
PersistentStorageLabel = "persistentstorage"
StorageLimitLabel = "storagelimit"
StorageClassLabel = "storageclass"
DryRunLabel = "dryrun"
PcapLabel = "pcap"
ServiceMeshLabel = "servicemesh"
TlsLabel = "tls"
IgnoreTaintedLabel = "ignoreTainted"
IgnoreTaintedLabel = "ignoretainted"
IngressEnabledLabel = "ingress-enabled"
DebugLabel = "debug"
ContainerPort = 80
ContainerPortStr = "80"
)
type ResourceLimits struct {
CPU string `yaml:"cpu" default:"750m"`
Memory string `yaml:"memory" default:"1Gi"`
CPU string `yaml:"cpu" json:"cpu" default:"750m"`
Memory string `yaml:"memory" json:"memory" default:"1Gi"`
}
type ResourceRequests struct {
CPU string `yaml:"cpu" default:"50m"`
Memory string `yaml:"memory" default:"50Mi"`
CPU string `yaml:"cpu" json:"cpu" default:"50m"`
Memory string `yaml:"memory" json:"memory" default:"50Mi"`
}
type ResourceRequirements struct {
Limits ResourceLimits `json:"limits"`
Requests ResourceRequests `json:"requests"`
Limits ResourceLimits `yaml:"limits" json:"limits"`
Requests ResourceRequests `yaml:"requests" json:"requests"`
}
type WorkerConfig struct {
SrcPort uint16 `yaml:"port" default:"8897"`
DstPort uint16 `yaml:"srvport" default:"8897"`
SrvPort uint16 `yaml:"srvport" json:"srvport" default:"8897"`
}
type HubConfig struct {
SrcPort uint16 `yaml:"port" default:"8898"`
DstPort uint16 `yaml:"srvport" default:"80"`
Port uint16 `yaml:"port" json:"port" default:"8898"`
SrvPort uint16 `yaml:"srvport" json:"srvport" default:"8898"`
}
type FrontConfig struct {
SrcPort uint16 `yaml:"port" default:"8899"`
DstPort uint16 `yaml:"srvport" default:"80"`
Port uint16 `yaml:"port" json:"port" default:"8899"`
}
type ProxyConfig struct {
Worker WorkerConfig `yaml:"worker"`
Hub HubConfig `yaml:"hub"`
Front FrontConfig `yaml:"front"`
Host string `yaml:"host" default:"127.0.0.1"`
Worker WorkerConfig `yaml:"worker" json:"worker"`
Hub HubConfig `yaml:"hub" json:"hub"`
Front FrontConfig `yaml:"front" json:"front"`
Host string `yaml:"host" json:"host" default:"127.0.0.1"`
}
type DockerConfig struct {
Registry string `yaml:"registry" default:"docker.io/kubeshark"`
Tag string `yaml:"tag" default:"latest"`
ImagePullPolicy string `yaml:"imagepullpolicy" default:"Always"`
ImagePullSecrets []string `yaml:"imagepullsecrets"`
Registry string `yaml:"registry" json:"registry" default:"docker.io/kubeshark"`
Tag string `yaml:"tag" json:"tag" default:"latest"`
ImagePullPolicy string `yaml:"imagepullpolicy" json:"imagepullpolicy" default:"Always"`
ImagePullSecrets []string `yaml:"imagepullsecrets" json:"imagepullsecrets"`
}
type ResourcesConfig struct {
Worker ResourceRequirements `yaml:"worker"`
Hub ResourceRequirements `yaml:"hub"`
Worker ResourceRequirements `yaml:"worker" json:"worker"`
Hub ResourceRequirements `yaml:"hub" json:"hub"`
}
type AuthConfig struct {
Enabled bool `yaml:"enabled" json:"enabled" default:"false"`
ApprovedEmails []string `yaml:"approvedemails" json:"approvedemails" default:"[]"`
ApprovedDomains []string `yaml:"approveddomains" json:"approveddomains" default:"[]"`
}
type IngressConfig struct {
Enabled bool `yaml:"enabled" json:"enabled" default:"false"`
ClassName string `yaml:"classname" json:"classname" default:"kubeshark-ingress-class"`
Controller string `yaml:"controller" json:"controller" default:"k8s.io/ingress-nginx"`
Host string `yaml:"host" json:"host" default:"ks.svc.cluster.local"`
TLS []networking.IngressTLS `yaml:"tls" json:"tls"`
CertManager string `yaml:"certmanager" json:"certmanager" default:"letsencrypt-prod"`
}
type ReleaseConfig struct {
Repo string `yaml:"repo" json:"repo" default:"https://helm.kubeshark.co"`
Name string `yaml:"name" json:"name" default:"kubeshark"`
Namespace string `yaml:"namespace" json:"namespace" default:"default"`
}
type TapConfig struct {
Docker DockerConfig `yaml:"docker"`
Proxy ProxyConfig `yaml:"proxy"`
PodRegexStr string `yaml:"regex" default:".*"`
Namespaces []string `yaml:"namespaces"`
SelfNamespace string `yaml:"selfnamespace" default:"kubeshark"`
StorageLimit string `yaml:"storagelimit" default:"200Mi"`
StorageClass string `yaml:"storageclass" default:"standard"`
DryRun bool `yaml:"dryrun" default:"false"`
Pcap string `yaml:"pcap" default:""`
Resources ResourcesConfig `yaml:"resources"`
ServiceMesh bool `yaml:"servicemesh" default:"true"`
Tls bool `yaml:"tls" default:"true"`
PacketCapture string `yaml:"packetcapture" default:"libpcap"`
IgnoreTainted bool `yaml:"ignoreTainted" default:"false"`
ResourceLabels map[string]string `yaml:"resourceLabels" default:"{}"`
NodeSelectorTerms []v1.NodeSelectorTerm `yaml:"nodeSelectorTerms" default:"[]"`
Debug bool `yaml:"debug" default:"false"`
Docker DockerConfig `yaml:"docker" json:"docker"`
Proxy ProxyConfig `yaml:"proxy" json:"proxy"`
PodRegexStr string `yaml:"regex" json:"regex" default:".*"`
Namespaces []string `yaml:"namespaces" json:"namespaces" default:"[]"`
Release ReleaseConfig `yaml:"release" json:"release"`
PersistentStorage bool `yaml:"persistentstorage" json:"persistentstorage" default:"false"`
StorageLimit string `yaml:"storagelimit" json:"storagelimit" default:"200Mi"`
StorageClass string `yaml:"storageclass" json:"storageclass" default:"standard"`
DryRun bool `yaml:"dryrun" json:"dryrun" default:"false"`
Pcap string `yaml:"pcap" json:"pcap" default:""`
Resources ResourcesConfig `yaml:"resources" json:"resources"`
ServiceMesh bool `yaml:"servicemesh" json:"servicemesh" default:"true"`
Tls bool `yaml:"tls" json:"tls" default:"true"`
PacketCapture string `yaml:"packetcapture" json:"packetcapture" default:"libpcap"`
IgnoreTainted bool `yaml:"ignoretainted" json:"ignoretainted" default:"false"`
Labels map[string]string `yaml:"labels" json:"labels" default:"{}"`
Annotations map[string]string `yaml:"annotations" json:"annotations" default:"{}"`
NodeSelectorTerms []v1.NodeSelectorTerm `yaml:"nodeselectorterms" json:"nodeselectorterms" default:"[]"`
Auth AuthConfig `yaml:"auth" json:"auth"`
Ingress IngressConfig `yaml:"ingress" json:"ingress"`
IPv6 bool `yaml:"ipv6" json:"ipv6" default:"true"`
Debug bool `yaml:"debug" json:"debug" default:"false"`
}
func (config *TapConfig) PodRegex() *regexp.Regexp {
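Taken together, these struct changes rename the proxy port keys (`SrcPort`/`DstPort` become `Port`/`SrvPort`) and add `release`, `persistentstorage`, `ingress`, `auth` and `ipv6`. A sketch of the resulting keys with the defaults read off the tags above (not exhaustive; everything shown follows directly from the `yaml` and `default` tags in this hunk):

```yaml
tap:
  proxy:
    hub:
      port: 8898      # was SrcPort (yaml tag unchanged: "port")
      srvport: 8898   # was DstPort; default changes from 80 to 8898
    front:
      port: 8899      # was SrcPort; the srvport key is dropped in favor of ContainerPort (80)
    host: 127.0.0.1
  release:
    repo: https://helm.kubeshark.co
    name: kubeshark
    namespace: default
  persistentstorage: false
  ingress:
    enabled: false
    host: ks.svc.cluster.local
    certmanager: letsencrypt-prod
  ipv6: true
```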

View File

@@ -22,9 +22,9 @@ func FormatError(err error) error {
"in the config file or setting the targeted namespace with --%s %s=<NAMEPSACE>",
err,
misc.Software,
configStructs.SelfNamespaceLabel,
configStructs.ReleaseNamespaceLabel,
config.SetCommandName,
configStructs.SelfNamespaceLabel)
configStructs.ReleaseNamespaceLabel)
} else if syntaxError, isSyntaxError := asRegexSyntaxError(err); isSyntaxError {
errorNew = fmt.Errorf("regex %s is invalid: %w", syntaxError.Expr, err)
} else {

195
go.mod
View File

@@ -1,118 +1,185 @@
module github.com/kubeshark/kubeshark
go 1.17
go 1.19
require (
github.com/aws/aws-sdk-go-v2 v1.18.1
github.com/aws/aws-sdk-go-v2/config v1.18.27
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.70
github.com/aws/aws-sdk-go-v2/service/s3 v1.35.0
github.com/creasty/defaults v1.5.2
github.com/docker/docker v20.10.24+incompatible
github.com/docker/go-connections v0.4.0
github.com/fsnotify/fsnotify v1.5.1
github.com/gin-gonic/gin v1.7.7
github.com/fsnotify/fsnotify v1.6.0
github.com/gin-gonic/gin v1.9.1
github.com/google/go-github/v37 v37.0.0
github.com/gorilla/websocket v1.4.2
github.com/ohler55/ojg v1.14.5
github.com/otiai10/copy v1.10.0
github.com/pkg/errors v0.9.1
github.com/robertkrimen/otto v0.2.1
github.com/rs/zerolog v1.28.0
github.com/spf13/cobra v1.3.0
github.com/spf13/cobra v1.6.1
github.com/spf13/pflag v1.0.5
gopkg.in/yaml.v3 v3.0.1
k8s.io/api v0.23.3
k8s.io/apimachinery v0.23.3
k8s.io/client-go v0.23.3
k8s.io/kubectl v0.23.3
helm.sh/helm/v3 v3.12.0
k8s.io/api v0.27.1
k8s.io/apimachinery v0.27.1
k8s.io/client-go v0.27.1
k8s.io/kubectl v0.27.1
)
require (
cloud.google.com/go/compute v1.2.0 // indirect
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1 // indirect
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
github.com/Azure/go-autorest/autorest v0.11.24 // indirect
github.com/Azure/go-autorest/autorest/adal v0.9.18 // indirect
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
github.com/Azure/go-autorest/logger v0.2.1 // indirect
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
github.com/BurntSushi/toml v1.2.1 // indirect
github.com/MakeNowJust/heredoc v1.0.0 // indirect
github.com/Masterminds/goutils v1.1.1 // indirect
github.com/Masterminds/semver/v3 v3.2.0 // indirect
github.com/Masterminds/sprig/v3 v3.2.3 // indirect
github.com/Masterminds/squirrel v1.5.3 // indirect
github.com/Microsoft/go-winio v0.6.0 // indirect
github.com/PuerkitoBio/purell v1.1.1 // indirect
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5 // indirect
github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.13.26 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.4 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.34 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.28 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.35 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.26 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.29 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.28 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.3 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.12.12 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.12 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.19.2 // indirect
github.com/aws/smithy-go v1.13.5 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bytedance/sonic v1.9.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/chai2010/gettext-go v1.0.2 // indirect
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect
github.com/containerd/containerd v1.7.0 // indirect
github.com/cyphar/filepath-securejoin v0.2.3 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/docker/distribution v2.8.0+incompatible // indirect
github.com/docker/go-units v0.4.0 // indirect
github.com/docker/cli v20.10.21+incompatible // indirect
github.com/docker/distribution v2.8.2+incompatible // indirect
github.com/docker/docker-credential-helpers v0.7.0 // indirect
github.com/docker/go-metrics v0.0.1 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/emicklei/go-restful/v3 v3.10.1 // indirect
github.com/evanphx/json-patch v5.6.0+incompatible // indirect
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
github.com/fvbommel/sortorder v1.0.2 // indirect
github.com/fatih/color v1.13.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.2 // indirect
github.com/gin-contrib/sse v0.1.0 // indirect
github.com/go-errors/errors v1.4.2 // indirect
github.com/go-gorp/gorp/v3 v3.0.5 // indirect
github.com/go-logr/logr v1.2.3 // indirect
github.com/go-openapi/jsonpointer v0.19.5 // indirect
github.com/go-openapi/jsonreference v0.19.6 // indirect
github.com/go-openapi/swag v0.21.1 // indirect
github.com/go-playground/locales v0.13.0 // indirect
github.com/go-playground/universal-translator v0.17.0 // indirect
github.com/go-playground/validator/v10 v10.4.1 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-openapi/jsonpointer v0.19.6 // indirect
github.com/go-openapi/jsonreference v0.20.1 // indirect
github.com/go-openapi/swag v0.22.3 // indirect
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/go-playground/validator/v10 v10.14.0 // indirect
github.com/gobwas/glob v0.2.3 // indirect
github.com/goccy/go-json v0.10.2 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v4 v4.2.0 // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/google/btree v1.0.1 // indirect
github.com/google/go-cmp v0.5.7 // indirect
github.com/google/gnostic v0.5.7-v3refs // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/go-querystring v1.1.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/googleapis/gnostic v0.5.5 // indirect
github.com/gorilla/mux v1.8.0 // indirect
github.com/gosuri/uitable v0.0.4 // indirect
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
github.com/imdario/mergo v0.3.12 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/huandu/xstrings v1.4.0 // indirect
github.com/imdario/mergo v0.3.13 // indirect
github.com/inconshreveable/mousetrap v1.0.1 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/jmoiron/sqlx v1.3.5 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/leodido/go-urn v1.2.0 // indirect
github.com/klauspost/compress v1.16.0 // indirect
github.com/klauspost/cpuid/v2 v2.2.4 // indirect
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
github.com/leodido/go-urn v1.2.4 // indirect
github.com/lib/pq v1.10.7 // indirect
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.16 // indirect
github.com/mattn/go-isatty v0.0.19 // indirect
github.com/mattn/go-runewidth v0.0.9 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/go-wordwrap v1.0.1 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/moby/locker v1.0.1 // indirect
github.com/moby/spdystream v0.2.0 // indirect
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
github.com/moby/term v0.0.0-20221205130635-1aeaba878587 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.0.2 // indirect
github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b // indirect
github.com/pelletier/go-toml/v2 v2.0.8 // indirect
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/russross/blackfriday v1.6.0 // indirect
github.com/sirupsen/logrus v1.7.0 // indirect
github.com/stretchr/testify v1.8.1 // indirect
github.com/ugorji/go/codec v1.1.7 // indirect
github.com/prometheus/client_golang v1.14.0 // indirect
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/common v0.37.0 // indirect
github.com/prometheus/procfs v0.8.0 // indirect
github.com/rubenv/sql-migrate v1.3.1 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/shopspring/decimal v1.3.1 // indirect
github.com/sirupsen/logrus v1.9.0 // indirect
github.com/spf13/cast v1.5.0 // indirect
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
github.com/ugorji/go/codec v1.2.11 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
github.com/xlab/treeprint v1.1.0 // indirect
go.opentelemetry.io/otel v1.14.0 // indirect
go.opentelemetry.io/otel/trace v1.14.0 // indirect
go.starlark.net v0.0.0-20220203230714-bb14e151c28f // indirect
golang.org/x/crypto v0.1.0 // indirect
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
golang.org/x/net v0.7.0 // indirect
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect
golang.org/x/sys v0.5.0 // indirect
golang.org/x/term v0.5.0 // indirect
golang.org/x/text v0.7.0 // indirect
golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11 // indirect
golang.org/x/tools v0.1.12 // indirect
golang.org/x/arch v0.3.0 // indirect
golang.org/x/crypto v0.9.0 // indirect
golang.org/x/mod v0.9.0 // indirect
golang.org/x/net v0.10.0 // indirect
golang.org/x/oauth2 v0.4.0 // indirect
golang.org/x/sync v0.1.0 // indirect
golang.org/x/sys v0.8.0 // indirect
golang.org/x/term v0.8.0 // indirect
golang.org/x/text v0.9.0 // indirect
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
golang.org/x/tools v0.7.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/protobuf v1.27.1 // indirect
google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect
google.golang.org/grpc v1.53.0 // indirect
google.golang.org/protobuf v1.30.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/sourcemap.v1 v1.0.5 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
k8s.io/cli-runtime v0.23.3 // indirect
k8s.io/component-base v0.23.3 // indirect
k8s.io/klog/v2 v2.40.1 // indirect
k8s.io/kube-openapi v0.0.0-20220124234850-424119656bbf // indirect
k8s.io/utils v0.0.0-20220127004650-9b3446523e65 // indirect
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect
sigs.k8s.io/kustomize/api v0.11.1 // indirect
sigs.k8s.io/kustomize/kyaml v0.13.3 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
k8s.io/apiextensions-apiserver v0.27.1 // indirect
k8s.io/apiserver v0.27.1 // indirect
k8s.io/cli-runtime v0.27.1 // indirect
k8s.io/component-base v0.27.1 // indirect
k8s.io/klog/v2 v2.90.1 // indirect
k8s.io/kube-openapi v0.0.0-20230308215209-15aac26d736a // indirect
k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5 // indirect
oras.land/oras-go v1.2.2 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/kustomize/api v0.13.2 // indirect
sigs.k8s.io/kustomize/kyaml v0.14.1 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
sigs.k8s.io/yaml v1.3.0 // indirect
)

816
go.sum

File diff suppressed because it is too large

6
helm-chart/Chart.lock Normal file
View File

@@ -0,0 +1,6 @@
dependencies:
- name: cert-manager
repository: https://charts.jetstack.io
version: v1.11.0
digest: sha256:a1643d3bb03dc0d5043d123d72f2d3bde692493784666b00d08fbea3ec5cb2c3
generated: "2023-08-12T02:52:53.562234195+03:00"

View File

@@ -1,5 +1,5 @@
apiVersion: v2
appVersion: "40.0"
appVersion: "50.0"
description: The API Traffic Analyzer for Kubernetes
home: https://kubeshark.co
keywords:
@@ -22,4 +22,5 @@ name: kubeshark
sources:
- https://github.com/kubeshark/kubeshark/tree/master/helm-chart
type: application
version: "40.0"
version: "50.0"
icon: https://raw.githubusercontent.com/kubeshark/assets/master/logo/vector/logo.svg

View File

@@ -51,3 +51,34 @@ kubectl port-forward -n kubeshark service/kubeshark-front 8899:80
```
Visit [localhost:8899](http://localhost:8899)
## Installing with Ingress Enabled
```shell
helm install kubeshark kubeshark/kubeshark \
--set tap.ingress.enabled=true \
--set tap.ingress.host=ks.svc.cluster.local \
--set "tap.ingress.approveddomains={gmail.com}" \
--set license=LICENSE_GOES_HERE
```
You can get your license [here](https://console.kubeshark.co/).
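Equivalently, the `--set` flags above can live in a values file passed with `helm install -f`; a sketch mirroring the main options (the license value is a placeholder):

```yaml
license: LICENSE_GOES_HERE
tap:
  ingress:
    enabled: true
    host: ks.svc.cluster.local
```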
## Installing with Persistent Storage Enabled
```shell
helm install kubeshark kubeshark/kubeshark \
--set tap.persistentstorage=true \
--set license=LICENSE_GOES_HERE
```
You can get your license [here](https://console.kubeshark.co/).
## Disabling IPv6
Not all clusters have IPv6 enabled, so it can be disabled as follows:
```shell
helm install kubeshark kubeshark/kubeshark \
--set tap.ipv6=false
```

View File

@@ -1,12 +0,0 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: v1
kind: Namespace
metadata:
creationTimestamp: null
labels:
kubeshark-created-by: kubeshark
kubeshark-managed-by: kubeshark
name: '{{ .Values.tap.selfnamespace }}'
spec: {}
status: {}

View File

@@ -1,12 +1,12 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
kubeshark-cli-version: v1
kubeshark-created-by: kubeshark
kubeshark-managed-by: kubeshark
name: kubeshark-service-account
namespace: '{{ .Values.tap.selfnamespace }}'
{{- include "kubeshark.labels" . | nindent 4 }}
annotations:
{{- if .Values.tap.annotations }}
{{- toYaml .Values.tap.annotations | nindent 4 }}
{{- end }}
name: {{ include "kubeshark.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}

View File

@@ -1,15 +1,15 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
kubeshark-cli-version: v1
kubeshark-created-by: kubeshark
kubeshark-managed-by: kubeshark
{{- include "kubeshark.labels" . | nindent 4 }}
annotations:
{{- if .Values.tap.annotations }}
{{- toYaml .Values.tap.annotations | nindent 4 }}
{{- end }}
name: kubeshark-cluster-role
namespace: '{{ .Values.tap.selfnamespace }}'
namespace: {{ .Release.Namespace }}
rules:
- apiGroups:
- ""

View File

@@ -1,20 +1,20 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
kubeshark-cli-version: v1
kubeshark-created-by: kubeshark
kubeshark-managed-by: kubeshark
{{- include "kubeshark.labels" . | nindent 4 }}
annotations:
{{- if .Values.tap.annotations }}
{{- toYaml .Values.tap.annotations | nindent 4 }}
{{- end }}
name: kubeshark-cluster-role-binding
namespace: '{{ .Values.tap.selfnamespace }}'
namespace: {{ .Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubeshark-cluster-role
subjects:
- kind: ServiceAccount
name: kubeshark-service-account
namespace: '{{ .Values.tap.selfnamespace }}'
name: {{ include "kubeshark.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}

View File

@@ -0,0 +1,54 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "kubeshark.fullname" . }}-hub
namespace: {{ .Release.Namespace }}
labels:
app.kubeshark.co/app: hub
{{- include "kubeshark.labels" . | nindent 4 }}
annotations:
{{- if .Values.tap.annotations }}
{{- toYaml .Values.tap.annotations | nindent 4 }}
{{- end }}
spec:
replicas: 1 # Set the desired number of replicas
selector:
matchLabels:
app.kubeshark.co/app: hub
template:
metadata:
labels:
app.kubeshark.co/app: hub
sidecar.istio.io/inject: "false"
spec:
dnsPolicy: ClusterFirstWithHostNet
serviceAccountName: {{ include "kubeshark.serviceAccountName" . }}
containers:
- name: kubeshark-hub
command:
- ./hub
{{ .Values.tap.debug | ternary "- -debug" "" }}
envFrom:
- configMapRef:
name: kubeshark-config-map
- secretRef:
name: kubeshark-secret
image: '{{ .Values.tap.docker.registry }}/hub:{{ .Values.tap.docker.tag }}'
imagePullPolicy: {{ .Values.tap.docker.imagepullpolicy }}
readinessProbe:
periodSeconds: 1
initialDelaySeconds: 3
tcpSocket:
port: 80
livenessProbe:
periodSeconds: 1
initialDelaySeconds: 3
tcpSocket:
port: 80
resources:
limits:
cpu: {{ .Values.tap.resources.hub.limits.cpu }}
memory: {{ .Values.tap.resources.hub.limits.memory }}
requests:
cpu: {{ .Values.tap.resources.hub.requests.cpu }}
memory: {{ .Values.tap.resources.hub.requests.memory }}

View File

@@ -1,46 +0,0 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: null
labels:
app: kubeshark-hub
kubeshark-created-by: kubeshark
kubeshark-managed-by: kubeshark
name: kubeshark-hub
namespace: '{{ .Values.tap.selfnamespace }}'
spec:
containers:
- command:
- '{{ .Values.tap.debug | ternary "./hub -debug" "./hub" }}'
env:
- name: POD_REGEX
value: '{{ .Values.tap.regex }}'
- name: NAMESPACES
value: '{{ gt (len .Values.tap.namespaces) 0 | ternary (join "," .Values.tap.namespaces) "" }}'
- name: LICENSE
value: '{{ .Values.license }}'
- name: SCRIPTING_ENV
value: '{}'
- name: SCRIPTING_SCRIPTS
value: '[]'
image: '{{ .Values.tap.docker.registry }}/hub:{{ .Values.tap.docker.tag }}'
imagePullPolicy: '{{ .Values.tap.docker.imagePullPolicy }}'
name: kubeshark-hub
resources:
limits:
cpu: '{{ .Values.tap.resources.hub.limits.cpu }}'
memory: '{{ .Values.tap.resources.hub.limits.memory }}'
requests:
cpu: '{{ .Values.tap.resources.hub.requests.cpu }}'
memory: '{{ .Values.tap.resources.hub.requests.memory }}'
dnsPolicy: ClusterFirstWithHostNet
serviceAccountName: kubeshark-service-account
terminationGracePeriodSeconds: 0
tolerations:
- effect: NoExecute
operator: Exists
- effect: NoSchedule
operator: Exists
status: {}

View File

@@ -1,21 +1,23 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: v1
kind: Service
metadata:
creationTimestamp: null
labels:
kubeshark-created-by: kubeshark
kubeshark-managed-by: kubeshark
app.kubeshark.co/app: hub
{{- include "kubeshark.labels" . | nindent 4 }}
annotations:
{{- if .Values.tap.annotations }}
{{- toYaml .Values.tap.annotations | nindent 4 }}
{{- end }}
name: kubeshark-hub
namespace: '{{ .Values.tap.selfnamespace }}'
namespace: {{ .Release.Namespace }}
spec:
ports:
- name: kubeshark-hub
port: 80
targetPort: 80
selector:
app: kubeshark-hub
app.kubeshark.co/app: hub
type: ClusterIP
status:
loadBalancer: {}

View File

@@ -0,0 +1,64 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "kubeshark.fullname" . }}-front
namespace: {{ .Release.Namespace }}
labels:
app.kubeshark.co/app: front
{{- include "kubeshark.labels" . | nindent 4 }}
annotations:
{{- if .Values.tap.annotations }}
{{- toYaml .Values.tap.annotations | nindent 4 }}
{{- end }}
spec:
replicas: 1 # Set the desired number of replicas
selector:
matchLabels:
app.kubeshark.co/app: front
template:
metadata:
labels:
app.kubeshark.co/app: front
spec:
containers:
- env:
- name: REACT_APP_DEFAULT_FILTER
value: ' '
- name: REACT_APP_HUB_HOST
value: ' '
- name: REACT_APP_HUB_PORT
value: '{{ .Values.tap.ingress.enabled | ternary "/api" (print ":" .Values.tap.proxy.hub.port) }}'
image: '{{ .Values.tap.docker.registry }}/front:{{ .Values.tap.docker.tag }}'
imagePullPolicy: {{ .Values.tap.docker.imagepullpolicy }}
name: kubeshark-front
livenessProbe:
failureThreshold: 3
periodSeconds: 1
successThreshold: 1
tcpSocket:
port: 80
readinessProbe:
failureThreshold: 3
periodSeconds: 1
successThreshold: 1
tcpSocket:
port: 80
timeoutSeconds: 1
resources:
limits:
cpu: 750m
memory: 1Gi
requests:
cpu: 50m
memory: 50Mi
volumeMounts:
- name: nginx-config
mountPath: /etc/nginx/conf.d/default.conf
subPath: default.conf
readOnly: true
volumes:
- name: nginx-config
configMap:
name: kubeshark-nginx-config-map
dnsPolicy: ClusterFirstWithHostNet
serviceAccountName: {{ include "kubeshark.serviceAccountName" . }}

View File

@@ -1,47 +0,0 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: null
labels:
app: kubeshark-front
kubeshark-created-by: kubeshark
kubeshark-managed-by: kubeshark
name: kubeshark-front
namespace: '{{ .Values.tap.selfnamespace }}'
spec:
containers:
- env:
- name: REACT_APP_DEFAULT_FILTER
value: ' '
- name: REACT_APP_HUB_HOST
value: ' '
- name: REACT_APP_HUB_PORT
value: "8898"
image: '{{ .Values.tap.docker.registry }}/front:{{ .Values.tap.docker.tag }}'
imagePullPolicy: '{{ .Values.tap.docker.imagePullPolicy }}'
name: kubeshark-front
readinessProbe:
failureThreshold: 3
periodSeconds: 1
successThreshold: 1
tcpSocket:
port: 80
timeoutSeconds: 1
resources:
limits:
cpu: 750m
memory: 1Gi
requests:
cpu: 50m
memory: 50Mi
dnsPolicy: ClusterFirstWithHostNet
serviceAccountName: kubeshark-service-account
terminationGracePeriodSeconds: 0
tolerations:
- effect: NoExecute
operator: Exists
- effect: NoSchedule
operator: Exists
status: {}

View File

@@ -1,21 +1,22 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: v1
kind: Service
metadata:
creationTimestamp: null
labels:
kubeshark-created-by: kubeshark
kubeshark-managed-by: kubeshark
{{- include "kubeshark.labels" . | nindent 4 }}
annotations:
{{- if .Values.tap.annotations }}
{{- toYaml .Values.tap.annotations | nindent 4 }}
{{- end }}
name: kubeshark-front
namespace: '{{ .Values.tap.selfnamespace }}'
namespace: {{ .Release.Namespace }}
spec:
ports:
- name: kubeshark-front
port: 80
targetPort: 80
selector:
app: kubeshark-front
app.kubeshark.co/app: front
type: ClusterIP
status:
loadBalancer: {}

View File

@@ -1,20 +1,22 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
{{- if .Values.tap.persistentstorage }}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
creationTimestamp: null
labels:
kubeshark-cli-version: v1
kubeshark-created-by: kubeshark
kubeshark-managed-by: kubeshark
{{- include "kubeshark.labels" . | nindent 4 }}
annotations:
{{- if .Values.tap.annotations }}
{{- toYaml .Values.tap.annotations | nindent 4 }}
{{- end }}
name: kubeshark-persistent-volume-claim
namespace: '{{ .Values.tap.selfnamespace }}'
namespace: {{ .Release.Namespace }}
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: '{{ .Values.tap.storagelimit }}'
storageClassName: '{{ .Values.tap.storageclass }}'
storage: {{ .Values.tap.storagelimit }}
storageClassName: {{ .Values.tap.storageclass }}
status: {}
{{- end }}

View File

@@ -1,54 +1,64 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
creationTimestamp: null
labels:
app: kubeshark-worker-daemon-set
kubeshark-created-by: kubeshark
kubeshark-managed-by: kubeshark
app.kubeshark.co/app: worker
sidecar.istio.io/inject: "false"
{{- include "kubeshark.labels" . | nindent 4 }}
annotations:
{{- if .Values.tap.annotations }}
{{- toYaml .Values.tap.annotations | nindent 4 }}
{{- end }}
name: kubeshark-worker-daemon-set
namespace: '{{ .Values.tap.selfnamespace }}'
namespace: {{ .Release.Namespace }}
spec:
selector:
matchLabels:
app: kubeshark-worker-daemon-set
kubeshark-created-by: kubeshark
kubeshark-managed-by: kubeshark
app.kubeshark.co/app: worker
{{- include "kubeshark.labels" . | nindent 6 }}
template:
metadata:
creationTimestamp: null
labels:
app: kubeshark-worker-daemon-set
kubeshark-created-by: kubeshark
kubeshark-managed-by: kubeshark
app.kubeshark.co/app: worker
{{- include "kubeshark.labels" . | nindent 8 }}
name: kubeshark-worker-daemon-set
namespace: kubeshark
spec:
containers:
- command:
- '{{ .Values.tap.debug | ternary "./worker -debug" "./worker" }}'
- ./worker
- -i
- any
- -port
- "8897"
- -packet-capture
- '{{ .Values.tap.packetcapture }}'
- '{{ .Values.tap.proxy.worker.srvport }}'
- -servicemesh
- -tls
- -procfs
- /hostproc
{{ .Values.tap.debug | ternary "- -debug" "" }}
image: '{{ .Values.tap.docker.registry }}/worker:{{ .Values.tap.docker.tag }}'
imagePullPolicy: '{{ .Values.tap.docker.imagePullPolicy }}'
imagePullPolicy: {{ .Values.tap.docker.imagepullpolicy }}
name: kubeshark-worker-daemon-set
envFrom:
- secretRef:
name: kubeshark-secret
{{- if .Values.tap.debug }}
env:
- name: PROFILING_ENABLED
value: "true"
- name: PROFILING_DUMP_PATH
value: "pprof"
- name: PROFILING_INTERVAL_SECONDS
value: "60"
{{- end }}
resources:
limits:
cpu: '{{ .Values.tap.resources.worker.limits.cpu }}'
memory: '{{ .Values.tap.resources.worker.limits.memory }}'
cpu: {{ .Values.tap.resources.worker.limits.cpu }}
memory: {{ .Values.tap.resources.worker.limits.memory }}
requests:
cpu: '{{ .Values.tap.resources.worker.requests.cpu }}'
memory: '{{ .Values.tap.resources.worker.requests.memory }}'
cpu: {{ .Values.tap.resources.worker.requests.cpu }}
memory: {{ .Values.tap.resources.worker.requests.memory }}
securityContext:
capabilities:
add:
@@ -58,8 +68,19 @@ spec:
- SYS_PTRACE
- DAC_OVERRIDE
- SYS_RESOURCE
- SYS_MODULE
drop:
- ALL
readinessProbe:
periodSeconds: 1
initialDelaySeconds: 1
tcpSocket:
port: {{ .Values.tap.proxy.worker.srvport }}
livenessProbe:
periodSeconds: 1
initialDelaySeconds: 1
tcpSocket:
port: {{ .Values.tap.proxy.worker.srvport }}
volumeMounts:
- mountPath: /hostproc
name: proc
@@ -67,17 +88,28 @@ spec:
- mountPath: /sys
name: sys
readOnly: true
{{- if .Values.tap.persistentstorage }}
- mountPath: /app/data
name: kubeshark-persistent-volume
{{- end }}
dnsPolicy: ClusterFirstWithHostNet
hostNetwork: true
serviceAccountName: kubeshark-service-account
serviceAccountName: {{ include "kubeshark.serviceAccountName" . }}
terminationGracePeriodSeconds: 0
tolerations:
- effect: NoExecute
operator: Exists
{{- if not .Values.tap.ignoretainted }}
- effect: NoSchedule
operator: Exists
{{- end }}
{{- if gt (len .Values.tap.nodeselectorterms) 0}}
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
{{- toYaml .Values.tap.nodeselectorterms | nindent 12 }}
{{- end }}
volumes:
- hostPath:
path: /proc
@@ -85,6 +117,8 @@ spec:
- hostPath:
path: /sys
name: sys
{{- if .Values.tap.persistentstorage }}
- name: kubeshark-persistent-volume
persistentVolumeClaim:
claimName: kubeshark-persistent-volume-claim
{{- end }}
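The nodeAffinity block above is only rendered when `tap.nodeselectorterms` is non-empty. A sketch of narrowing worker scheduling further via a values file; the extra `kubernetes.io/arch` expression is illustrative and not a chart default:
```shell
# Write an override file and pass it to helm install
cat > node-terms.yaml <<'EOF'
tap:
  nodeselectorterms:
    - matchExpressions:
        - key: kubernetes.io/os
          operator: In
          values:
            - linux
        - key: kubernetes.io/arch
          operator: In
          values:
            - amd64
EOF
helm install kubeshark kubeshark/kubeshark -f node-terms.yaml
```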

View File

@@ -0,0 +1,16 @@
---
{{- if .Values.tap.ingress.enabled }}
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
labels:
{{- include "kubeshark.labels" . | nindent 4 }}
annotations:
{{- if .Values.tap.annotations }}
{{- toYaml .Values.tap.annotations | nindent 4 }}
{{- end }}
name: kubeshark-ingress-class
namespace: {{ .Release.Namespace }}
spec:
controller: {{ .Values.tap.ingress.controller }}
{{- end }}

View File

@@ -0,0 +1,42 @@
---
{{- if .Values.tap.ingress.enabled }}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
annotations:
certmanager.k8s.io/cluster-issuer: {{ .Values.tap.ingress.certmanager }}
nginx.ingress.kubernetes.io/rewrite-target: /$2
{{- if .Values.tap.annotations }}
{{- toYaml .Values.tap.annotations | nindent 4 }}
{{- end }}
labels:
{{- include "kubeshark.labels" . | nindent 4 }}
name: kubeshark-ingress
namespace: {{ .Release.Namespace }}
spec:
ingressClassName: {{ .Values.tap.ingress.classname }}
rules:
- host: {{ .Values.tap.ingress.host }}
http:
paths:
- backend:
service:
name: kubeshark-hub
port:
number: 80
path: /api(/|$)(.*)
pathType: Prefix
- backend:
service:
name: kubeshark-front
port:
number: 80
path: /()(.*)
pathType: Prefix
tls:
{{- if gt (len .Values.tap.ingress.tls) 0}}
{{- toYaml .Values.tap.ingress.tls | nindent 2 }}
{{- end }}
status:
loadBalancer: {}
{{- end }}
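The `tls` section is only rendered when `tap.ingress.tls` is non-empty. A sketch of supplying a single TLS entry on the command line, assuming a certificate secret such as the `cert-kubeshark` one created by the `manifests/tls` example later in this diff:
```shell
helm install kubeshark kubeshark/kubeshark \
  --set tap.ingress.enabled=true \
  --set tap.ingress.host=ks.svc.cluster.local \
  --set "tap.ingress.tls[0].hosts[0]=ks.svc.cluster.local" \
  --set "tap.ingress.tls[0].secretName=cert-kubeshark" \
  --set license=LICENSE_GOES_HERE
```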

View File

@@ -0,0 +1,28 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: kubeshark-nginx-config-map
namespace: {{ .Release.Namespace }}
labels:
{{- include "kubeshark.labels" . | nindent 4 }}
data:
default.conf: |
server {
listen 80;
{{- if .Values.tap.ipv6 }}
listen [::]:80;
{{- end }}
add_header Cache-Control no-cache;
location / {
root /usr/share/nginx/html;
index index.html index.htm;
try_files $uri $uri/ /index.html;
expires -1;
}
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root /usr/share/nginx/html;
}
}

View File

@@ -0,0 +1,16 @@
kind: ConfigMap
apiVersion: v1
metadata:
name: kubeshark-config-map
namespace: {{ .Release.Namespace }}
labels:
app.kubeshark.co/app: hub
{{- include "kubeshark.labels" . | nindent 4 }}
data:
POD_REGEX: '{{ .Values.tap.regex }}'
NAMESPACES: '{{ gt (len .Values.tap.namespaces) 0 | ternary (join "," .Values.tap.namespaces) "" }}'
SCRIPTING_ENV: '{{ .Values.scripting.env | toJson }}'
SCRIPTING_SCRIPTS: '[]'
AUTH_ENABLED: '{{ .Values.tap.auth.enabled | ternary "true" "" }}'
AUTH_APPROVED_EMAILS: '{{ gt (len .Values.tap.auth.approvedemails) 0 | ternary (join "," .Values.tap.auth.approvedemails) "" }}'
AUTH_APPROVED_DOMAINS: '{{ gt (len .Values.tap.auth.approveddomains) 0 | ternary (join "," .Values.tap.auth.approveddomains) "" }}'
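The `AUTH_*` keys are populated from `tap.auth` in `values.yaml`. A sketch of enabling authentication at install time; `example.com` is a placeholder domain:
```shell
helm install kubeshark kubeshark/kubeshark \
  --set tap.auth.enabled=true \
  --set "tap.auth.approveddomains={example.com}" \
  --set license=LICENSE_GOES_HERE
```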

View File

@@ -0,0 +1,10 @@
kind: Secret
apiVersion: v1
metadata:
name: kubeshark-secret
namespace: {{ .Release.Namespace }}
labels:
app.kubeshark.co/app: hub
{{- include "kubeshark.labels" . | nindent 4 }}
stringData:
LICENSE: '{{ .Values.license }}'

View File

@@ -0,0 +1,27 @@
Thank you for installing {{ title .Chart.Name }}.
Your deployment has been successful. The release is named {{ .Release.Name }} and it has been deployed in the {{ .Release.Namespace }} namespace.
{{- if .Values.tap.ingress.enabled }}
{{ if not .Values.license -}}
warning:
> The ingress option is enabled but no license is set. The application may not work as expected.
> Get a license at https://console.kubeshark.co/
{{- else }}
You can now access the application through the following URL:
http{{ if .Values.tap.ingress.tls }}s{{ end }}://{{ .Values.tap.ingress.host }}
{{- end -}}
{{- else }}
To access the application, follow these steps:
1. Perform port forwarding with the following commands:
kubectl port-forward -n {{ .Release.Namespace }} service/kubeshark-hub 8898:80 & \
kubectl port-forward -n {{ .Release.Namespace }} service/kubeshark-front 8899:80
2. Once port forwarding is done, you can access the application by visiting the following URL in your web browser:
http://0.0.0.0:8899
{{ end }}

View File

@@ -0,0 +1,68 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "kubeshark.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "kubeshark.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "kubeshark.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "kubeshark.labels" -}}
helm.sh/chart: {{ include "kubeshark.chart" . }}
{{ include "kubeshark.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- with .Values.additionalLabels }}
{{ toYaml . }}
{{- end }}
{{- if .Values.tap.labels }}
{{ toYaml .Values.tap.labels }}
{{- end }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "kubeshark.selectorLabels" -}}
app.kubernetes.io/name: {{ include "kubeshark.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "kubeshark.serviceAccountName" -}}
{{- if and .Values.serviceAccount .Values.serviceAccount.create }}
{{- default (include "kubeshark.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- printf "%s-service-account" .Release.Name }}
{{- end }}
{{- end }}
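To see what these helpers resolve to for a given release name, the chart can be rendered locally. A sketch, assuming the chart is checked out under `./helm-chart`:
```shell
# Render the templates without installing and inspect the generated names/labels
helm template kubeshark ./helm-chart --namespace kubeshark \
  | grep -E 'serviceAccountName|helm.sh/chart|app.kubernetes.io/'
```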

View File

@@ -1,36 +1,74 @@
config: {}
dumplogs: false
headless: false
kube:
configpath: ""
context: ""
license: ""
logs:
file: ""
manifests:
dump: false
scripting:
env: null
source: ""
watchscripts: true
tap:
annotations: {}
auth:
approveddomains: []
approvedemails: []
enabled: false
debug: false
docker:
imagepullpolicy: Always
imagepullsecrets: null
registry: docker.io/kubeshark
tag: latest
imagepullpolicy: Always
imagepullsecrets: []
dryrun: false
ignoretainted: false
ingress:
certmanager: letsencrypt-prod
classname: kubeshark-ingress-class
controller: k8s.io/ingress-nginx
enabled: false
host: ks.svc.cluster.local
tls: null
ipv6: true
labels: {}
namespaces: []
nodeselectorterms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- linux
packetcapture: libpcap
pcap: ""
persistentstorage: false
proxy:
worker:
port: 8897
srvport: 8897
hub:
port: 8898
srvport: 80
front:
port: 8899
srvport: 80
host: 127.0.0.1
regex: .*
namespaces: []
selfnamespace: kubeshark
storagelimit: 200Mi
storageclass: standard
dryrun: false
pcap: ""
resources:
hub:
port: 8898
srvport: 8898
worker:
srvport: 8897
regex: .*
release:
name: kubeshark
namespace: default
repo: https://helm.kubeshark.co
resources:
hub:
limits:
cpu: 750m
memory: 1Gi
requests:
cpu: 50m
memory: 50Mi
hub:
worker:
limits:
cpu: 750m
memory: 1Gi
@@ -38,21 +76,6 @@ tap:
cpu: 50m
memory: 50Mi
servicemesh: true
storageclass: standard
storagelimit: 200Mi
tls: true
packetcapture: libpcap
ignoreTainted: false
resourceLabels: {}
nodeSelectorTerms: []
debug: false
logs:
file: ""
kube:
configpath: ""
context: ""
dumplogs: false
headless: false
license: ""
scripting:
env: {}
source: ""
watchScripts: true
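All of the keys above are plain chart values, so they can be overridden at install time. A sketch of adjusting the new nested `tap.proxy` settings, which the front template and the CLI proxy read:
```shell
helm install kubeshark kubeshark/kubeshark \
  --set tap.proxy.host=0.0.0.0 \
  --set tap.proxy.hub.port=8898 \
  --set tap.proxy.front.port=8899
```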

View File

@@ -9,6 +9,7 @@ import (
"net/url"
"time"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/misc"
"github.com/kubeshark/kubeshark/utils"
@@ -73,7 +74,7 @@ func (connector *Connector) PostWorkerPodToHub(pod *v1.Pod) {
ok := false
for !ok {
var resp *http.Response
if resp, err = utils.Post(postWorkerUrl, "application/json", bytes.NewBuffer(podMarshalled), connector.client); err != nil || resp.StatusCode != http.StatusOK {
if resp, err = utils.Post(postWorkerUrl, "application/json", bytes.NewBuffer(podMarshalled), connector.client, config.Config.License); err != nil || resp.StatusCode != http.StatusOK {
if _, ok := err.(*url.Error); ok {
break
}
@@ -106,7 +107,7 @@ func (connector *Connector) PostRegexToHub(regex string, namespaces []string) {
ok := false
for !ok {
var resp *http.Response
if resp, err = utils.Post(postRegexUrl, "application/json", bytes.NewBuffer(payloadMarshalled), connector.client); err != nil || resp.StatusCode != http.StatusOK {
if resp, err = utils.Post(postRegexUrl, "application/json", bytes.NewBuffer(payloadMarshalled), connector.client, config.Config.License); err != nil || resp.StatusCode != http.StatusOK {
if _, ok := err.(*url.Error); ok {
break
}
@@ -137,7 +138,7 @@ func (connector *Connector) PostLicense(license string) {
ok := false
for !ok {
var resp *http.Response
if resp, err = utils.Post(postLicenseUrl, "application/json", bytes.NewBuffer(payloadMarshalled), connector.client); err != nil || resp.StatusCode != http.StatusOK {
if resp, err = utils.Post(postLicenseUrl, "application/json", bytes.NewBuffer(payloadMarshalled), connector.client, config.Config.License); err != nil || resp.StatusCode != http.StatusOK {
if _, ok := err.(*url.Error); ok {
break
}
@@ -151,6 +152,26 @@ func (connector *Connector) PostLicense(license string) {
}
}
func (connector *Connector) PostLicenseSingle(license string) {
postLicenseUrl := fmt.Sprintf("%s/license", connector.url)
payload := postLicenseRequest{
License: license,
}
if payloadMarshalled, err := json.Marshal(payload); err != nil {
log.Error().Err(err).Msg("Failed to marshal the payload:")
} else {
var resp *http.Response
if resp, err = utils.Post(postLicenseUrl, "application/json", bytes.NewBuffer(payloadMarshalled), connector.client, config.Config.License); err != nil || resp.StatusCode != http.StatusOK {
log.Warn().Err(err).Msg("Failed sending the license to Hub.")
} else {
log.Debug().Str("license", license).Msg("Reported license to Hub:")
return
}
}
}
func (connector *Connector) PostEnv(env map[string]interface{}) {
if len(env) == 0 {
return
@@ -164,7 +185,7 @@ func (connector *Connector) PostEnv(env map[string]interface{}) {
ok := false
for !ok {
var resp *http.Response
if resp, err = utils.Post(postEnvUrl, "application/json", bytes.NewBuffer(envMarshalled), connector.client); err != nil || resp.StatusCode != http.StatusOK {
if resp, err = utils.Post(postEnvUrl, "application/json", bytes.NewBuffer(envMarshalled), connector.client, config.Config.License); err != nil || resp.StatusCode != http.StatusOK {
if _, ok := err.(*url.Error); ok {
break
}
@@ -188,7 +209,7 @@ func (connector *Connector) PostScript(script *misc.Script) (index int64, err er
ok := false
for !ok {
var resp *http.Response
if resp, err = utils.Post(postScriptUrl, "application/json", bytes.NewBuffer(scriptMarshalled), connector.client); err != nil || resp.StatusCode != http.StatusOK {
if resp, err = utils.Post(postScriptUrl, "application/json", bytes.NewBuffer(scriptMarshalled), connector.client, config.Config.License); err != nil || resp.StatusCode != http.StatusOK {
if _, ok := err.(*url.Error); ok {
break
}
@@ -237,6 +258,7 @@ func (connector *Connector) PutScript(script *misc.Script, index int64) (err err
return
}
req.Header.Set("Content-Type", "application/json")
req.Header.Set("License-Key", config.Config.License)
var resp *http.Response
resp, err = client.Do(req)
@@ -275,6 +297,7 @@ func (connector *Connector) DeleteScript(index int64) (err error) {
return
}
req.Header.Set("Content-Type", "application/json")
req.Header.Set("License-Key", config.Config.License)
var resp *http.Response
resp, err = client.Do(req)
@@ -305,7 +328,7 @@ func (connector *Connector) PostScriptDone() {
var err error
for !ok {
var resp *http.Response
if resp, err = utils.Post(postScripDonetUrl, "application/json", nil, connector.client); err != nil || resp.StatusCode != http.StatusOK {
if resp, err = utils.Post(postScripDonetUrl, "application/json", nil, connector.client, config.Config.License); err != nil || resp.StatusCode != http.StatusOK {
if _, ok := err.(*url.Error); ok {
break
}
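A sketch of exercising the hub's `/license` endpoint by hand, assuming the port-forward shown in `NOTES.txt` (hub on `localhost:8898`), that `utils.Post` sends the license in the same `License-Key` header used by the PUT/DELETE requests above, and that the request body field is named `license` (the JSON tag is not shown in this diff):
```shell
# Hypothetical manual call against the hub's license endpoint
curl -X POST http://localhost:8898/license \
  -H 'Content-Type: application/json' \
  -H "License-Key: ${LICENSE}" \
  -d "{\"license\": \"${LICENSE}\"}"
```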

View File

@@ -6,21 +6,6 @@ const (
FrontServiceName = FrontPodName
HubPodName = SelfResourcesPrefix + "hub"
HubServiceName = HubPodName
ClusterRoleBindingName = SelfResourcesPrefix + "cluster-role-binding"
ClusterRoleName = SelfResourcesPrefix + "cluster-role"
K8sAllNamespaces = ""
RoleBindingName = SelfResourcesPrefix + "role-binding"
RoleName = SelfResourcesPrefix + "role"
ServiceAccountName = SelfResourcesPrefix + "service-account"
WorkerDaemonSetName = SelfResourcesPrefix + "worker-daemon-set"
WorkerPodName = SelfResourcesPrefix + "worker"
PersistentVolumeName = SelfResourcesPrefix + "persistent-volume"
PersistentVolumeClaimName = SelfResourcesPrefix + "persistent-volume-claim"
PersistentVolumeHostPath = "/app/data"
MinKubernetesServerVersion = "1.16.0"
)
const (
LabelManagedBy = SelfResourcesPrefix + "managed-by"
LabelCreatedBy = SelfResourcesPrefix + "created-by"
)

186
kubernetes/helm/helm.go Normal file
View File

@@ -0,0 +1,186 @@
package helm
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"regexp"
"strings"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/misc"
"github.com/pkg/errors"
"github.com/rs/zerolog/log"
"helm.sh/helm/v3/pkg/action"
"helm.sh/helm/v3/pkg/chart"
"helm.sh/helm/v3/pkg/chart/loader"
"helm.sh/helm/v3/pkg/cli"
"helm.sh/helm/v3/pkg/downloader"
"helm.sh/helm/v3/pkg/getter"
"helm.sh/helm/v3/pkg/kube"
"helm.sh/helm/v3/pkg/registry"
"helm.sh/helm/v3/pkg/release"
"helm.sh/helm/v3/pkg/repo"
)
const ENV_HELM_DRIVER = "HELM_DRIVER"
var settings = cli.New()
type Helm struct {
repo string
releaseName string
releaseNamespace string
}
func NewHelm(repo string, releaseName string, releaseNamespace string) *Helm {
return &Helm{
repo: repo,
releaseName: releaseName,
releaseNamespace: releaseNamespace,
}
}
func parseOCIRef(chartRef string) (string, string, error) {
refTagRegexp := regexp.MustCompile(`^(oci://[^:]+(:[0-9]{1,5})?[^:]+):(.*)$`)
caps := refTagRegexp.FindStringSubmatch(chartRef)
if len(caps) != 4 {
return "", "", errors.Errorf("improperly formatted oci chart reference: %s", chartRef)
}
chartRef = caps[1]
tag := caps[3]
return chartRef, tag, nil
}
func (h *Helm) Install() (rel *release.Release, err error) {
kubeConfigPath := config.Config.KubeConfigPath()
actionConfig := new(action.Configuration)
if err = actionConfig.Init(kube.GetConfig(kubeConfigPath, "", h.releaseNamespace), h.releaseNamespace, os.Getenv(ENV_HELM_DRIVER), func(format string, v ...interface{}) {
log.Info().Msgf(format, v...)
}); err != nil {
return
}
client := action.NewInstall(actionConfig)
client.Namespace = h.releaseNamespace
client.ReleaseName = h.releaseName
chartPath := os.Getenv(fmt.Sprintf("%s_HELM_CHART_PATH", strings.ToUpper(misc.Program)))
if chartPath == "" {
var chartURL string
chartURL, err = repo.FindChartInRepoURL(h.repo, h.releaseName, "", "", "", "", getter.All(&cli.EnvSettings{}))
if err != nil {
return
}
var cp string
cp, err = client.ChartPathOptions.LocateChart(chartURL, settings)
if err != nil {
return
}
m := &downloader.Manager{
Out: os.Stdout,
ChartPath: cp,
Keyring: client.ChartPathOptions.Keyring,
SkipUpdate: false,
Getters: getter.All(settings),
RepositoryConfig: settings.RepositoryConfig,
RepositoryCache: settings.RepositoryCache,
Debug: settings.Debug,
}
dl := downloader.ChartDownloader{
Out: m.Out,
Verify: m.Verify,
Keyring: m.Keyring,
RepositoryConfig: m.RepositoryConfig,
RepositoryCache: m.RepositoryCache,
RegistryClient: m.RegistryClient,
Getters: m.Getters,
Options: []getter.Option{
getter.WithInsecureSkipVerifyTLS(false),
},
}
repoPath := filepath.Dir(m.ChartPath)
err = os.MkdirAll(repoPath, os.ModePerm)
if err != nil {
return
}
version := ""
if registry.IsOCI(chartURL) {
chartURL, version, err = parseOCIRef(chartURL)
if err != nil {
return
}
dl.Options = append(dl.Options,
getter.WithRegistryClient(m.RegistryClient),
getter.WithTagName(version))
}
log.Info().
Str("url", chartURL).
Str("repo-path", repoPath).
Msg("Downloading Helm chart:")
if _, _, err = dl.DownloadTo(chartURL, version, repoPath); err != nil {
return
}
chartPath = m.ChartPath
}
var chart *chart.Chart
chart, err = loader.Load(chartPath)
if err != nil {
return
}
log.Info().
Str("release", chart.Metadata.Name).
Str("version", chart.Metadata.Version).
Strs("source", chart.Metadata.Sources).
Str("kube-version", chart.Metadata.KubeVersion).
Msg("Installing using Helm:")
var configMarshalled []byte
configMarshalled, err = json.Marshal(config.Config)
if err != nil {
return
}
var configUnmarshalled map[string]interface{}
err = json.Unmarshal(configMarshalled, &configUnmarshalled)
if err != nil {
return
}
rel, err = client.Run(chart, configUnmarshalled)
if err != nil {
return
}
return
}
func (h *Helm) Uninstall() (resp *release.UninstallReleaseResponse, err error) {
kubeConfigPath := config.Config.KubeConfigPath()
actionConfig := new(action.Configuration)
if err = actionConfig.Init(kube.GetConfig(kubeConfigPath, "", h.releaseNamespace), h.releaseNamespace, os.Getenv(ENV_HELM_DRIVER), func(format string, v ...interface{}) {
log.Info().Msgf(format, v...)
}); err != nil {
return
}
client := action.NewUninstall(actionConfig)
resp, err = client.Run(h.releaseName)
if err != nil {
return
}
return
}
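Install() and Uninstall() take part of their behavior from two environment variables. A sketch, assuming `misc.Program` resolves to `kubeshark`:
```shell
# Load the chart from a local path instead of downloading it from the Helm repo
export KUBESHARK_HELM_CHART_PATH=./helm-chart
# Standard Helm storage driver override, read via the ENV_HELM_DRIVER constant
export HELM_DRIVER=secret
```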

File diff suppressed because it is too large

View File

@@ -11,6 +11,7 @@ import (
"strings"
"time"
"github.com/kubeshark/kubeshark/config"
"github.com/rs/zerolog/log"
"k8s.io/apimachinery/pkg/util/httpstream"
"k8s.io/client-go/tools/portforward"
@@ -23,6 +24,7 @@ const selfServicePort = 80
func StartProxy(kubernetesProvider *Provider, proxyHost string, srcPort uint16, selfNamespace string, selfServiceName string) (*http.Server, error) {
log.Info().
Str("proxy-host", proxyHost).
Str("namespace", selfNamespace).
Str("service", selfServiceName).
Int("src-port", int(srcPort)).
@@ -66,8 +68,8 @@ func getSelfHubProxiedHostAndPath(selfNamespace string, selfServiceName string)
return fmt.Sprintf("/api/v1/namespaces/%s/services/%s:%d/proxy", selfNamespace, selfServiceName, selfServicePort)
}
func GetLocalhostOnPort(port uint16) string {
return fmt.Sprintf("http://localhost:%d", port)
func GetProxyOnPort(port uint16) string {
return fmt.Sprintf("http://%s:%d", config.Config.Tap.Proxy.Host, port)
}
func getRerouteHttpHandlerSelfAPI(proxyHandler http.Handler, selfNamespace string, selfServiceName string) http.Handler {
@@ -100,7 +102,7 @@ func getRerouteHttpHandlerSelfStatic(proxyHandler http.Handler, selfNamespace st
}
func NewPortForward(kubernetesProvider *Provider, namespace string, podRegex *regexp.Regexp, srcPort uint16, dstPort uint16, ctx context.Context) (*portforward.PortForwarder, error) {
pods, err := kubernetesProvider.ListAllRunningPodsMatchingRegex(ctx, podRegex, []string{namespace})
pods, err := kubernetesProvider.ListPodsByAppLabel(ctx, namespace, map[string]string{"app.kubeshark.co/app": "hub"})
if err != nil {
return nil, err
} else if len(pods) == 0 {

View File

@@ -1,147 +0,0 @@
package kubernetes
import (
core "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
applyconfapp "k8s.io/client-go/applyconfigurations/apps/v1"
applyconfcore "k8s.io/client-go/applyconfigurations/core/v1"
v1 "k8s.io/client-go/applyconfigurations/core/v1"
applyconfmeta "k8s.io/client-go/applyconfigurations/meta/v1"
)
type DaemonSetPod struct {
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
Spec core.PodSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
}
type DaemonSetSpec struct {
Selector metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,1,opt,name=selector"`
Template DaemonSetPod `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"`
}
type DaemonSet struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
Spec DaemonSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
}
func (d *DaemonSet) GenerateApplyConfiguration(name string, namespace string, podName string, provider *Provider) *applyconfapp.DaemonSetApplyConfiguration {
// Pod
p := d.Spec.Template.Spec
podSpec := applyconfcore.PodSpec()
podSpec.WithHostNetwork(p.HostNetwork)
podSpec.WithDNSPolicy(p.DNSPolicy)
podSpec.WithTerminationGracePeriodSeconds(*p.TerminationGracePeriodSeconds)
podSpec.WithServiceAccountName(p.ServiceAccountName)
// Containers
for _, c := range d.Spec.Template.Spec.Containers {
// Common
container := applyconfcore.Container()
container.WithName(c.Name)
container.WithImage(c.Image)
container.WithImagePullPolicy(c.ImagePullPolicy)
container.WithCommand(c.Command...)
// Linux capabilities
caps := applyconfcore.Capabilities().WithAdd(c.SecurityContext.Capabilities.Add...).WithDrop(c.SecurityContext.Capabilities.Drop...)
container.WithSecurityContext(applyconfcore.SecurityContext().WithCapabilities(caps))
// Environment variables
var envvars []*v1.EnvVarApplyConfiguration
for _, e := range c.Env {
envvars = append(envvars, applyconfcore.EnvVar().WithName(e.Name).WithValue(e.Value))
}
container.WithEnv(envvars...)
// Resource limits
resources := applyconfcore.ResourceRequirements().WithRequests(c.Resources.Requests).WithLimits(c.Resources.Limits)
container.WithResources(resources)
// Volume mounts
for _, m := range c.VolumeMounts {
volumeMount := applyconfcore.VolumeMount().WithName(m.Name).WithMountPath(m.MountPath).WithReadOnly(m.ReadOnly)
container.WithVolumeMounts(volumeMount)
}
podSpec.WithContainers(container)
}
// Node affinity (RequiredDuringSchedulingIgnoredDuringExecution only)
if p.Affinity != nil {
nodeSelector := applyconfcore.NodeSelector()
for _, term := range p.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms {
nodeSelectorTerm := applyconfcore.NodeSelectorTerm()
for _, selector := range term.MatchExpressions {
nodeSelectorRequirement := applyconfcore.NodeSelectorRequirement()
nodeSelectorRequirement.WithKey(selector.Key)
nodeSelectorRequirement.WithOperator(selector.Operator)
nodeSelectorRequirement.WithValues(selector.Values...)
nodeSelectorTerm.WithMatchExpressions(nodeSelectorRequirement)
}
for _, selector := range term.MatchFields {
nodeSelectorRequirement := applyconfcore.NodeSelectorRequirement()
nodeSelectorRequirement.WithKey(selector.Key)
nodeSelectorRequirement.WithOperator(selector.Operator)
nodeSelectorRequirement.WithValues(selector.Values...)
nodeSelectorTerm.WithMatchFields(nodeSelectorRequirement)
}
nodeSelector.WithNodeSelectorTerms(nodeSelectorTerm)
}
nodeAffinity := applyconfcore.NodeAffinity()
nodeAffinity.WithRequiredDuringSchedulingIgnoredDuringExecution(nodeSelector)
affinity := applyconfcore.Affinity()
affinity.WithNodeAffinity(nodeAffinity)
podSpec.WithAffinity(affinity)
}
// Tolerations
for _, t := range p.Tolerations {
toleration := applyconfcore.Toleration()
toleration.WithKey(t.Key)
toleration.WithOperator(t.Operator)
toleration.WithValue(t.Value)
toleration.WithEffect(t.Effect)
if t.TolerationSeconds != nil {
toleration.WithTolerationSeconds(*t.TolerationSeconds)
}
podSpec.WithTolerations(toleration)
}
// Volumes
for _, v := range p.Volumes {
volume := applyconfcore.Volume()
if v.HostPath != nil {
volume.WithName(v.Name).WithHostPath(applyconfcore.HostPathVolumeSource().WithPath(v.HostPath.Path))
}
if v.PersistentVolumeClaim != nil {
volume.WithName(v.Name).WithPersistentVolumeClaim(applyconfcore.PersistentVolumeClaimVolumeSource().WithClaimName(v.PersistentVolumeClaim.ClaimName))
}
podSpec.WithVolumes(volume)
}
// Image pull secrets
if len(p.ImagePullSecrets) > 0 {
localObjectReference := applyconfcore.LocalObjectReference()
for _, o := range p.ImagePullSecrets {
localObjectReference.WithName(o.Name)
}
podSpec.WithImagePullSecrets(localObjectReference)
}
podTemplate := applyconfcore.PodTemplateSpec()
podTemplate.WithLabels(buildWithDefaultLabels(map[string]string{
"app": podName,
}, provider))
podTemplate.WithSpec(podSpec)
labelSelector := applyconfmeta.LabelSelector()
labelSelector.WithMatchLabels(map[string]string{"app": podName})
daemonSet := applyconfapp.DaemonSet(name, namespace)
daemonSet.
WithLabels(buildWithDefaultLabels(map[string]string{}, provider)).
WithSpec(applyconfapp.DaemonSetSpec().WithSelector(labelSelector).WithTemplate(podTemplate))
return daemonSet
}

View File

@@ -1,16 +0,0 @@
package kubernetes
import (
"github.com/kubeshark/kubeshark/config"
)
func buildWithDefaultLabels(labels map[string]string, provider *Provider) map[string]string {
labels[LabelManagedBy] = provider.managedBy
labels[LabelCreatedBy] = provider.createdBy
for k, v := range config.Config.Tap.ResourceLabels {
labels[k] = v
}
return labels
}

View File

@@ -1,68 +0,0 @@
package kubernetes
import (
"context"
"github.com/kubeshark/kubeshark/config/configStructs"
"github.com/kubeshark/kubeshark/docker"
"github.com/rs/zerolog/log"
core "k8s.io/api/core/v1"
)
func CreateWorkers(
kubernetesProvider *Provider,
selfServiceAccountExists bool,
ctx context.Context,
namespace string,
resources configStructs.ResourceRequirements,
imagePullPolicy core.PullPolicy,
imagePullSecrets []core.LocalObjectReference,
serviceMesh bool,
tls bool,
debug bool,
) error {
persistentVolumeClaim, err := kubernetesProvider.BuildPersistentVolumeClaim()
if err != nil {
return err
}
if _, err = kubernetesProvider.CreatePersistentVolumeClaim(
ctx,
namespace,
persistentVolumeClaim,
); err != nil {
return err
}
image := docker.GetWorkerImage()
var serviceAccountName string
if selfServiceAccountExists {
serviceAccountName = ServiceAccountName
} else {
serviceAccountName = ""
}
log.Info().Msg("Creating the worker DaemonSet...")
if err := kubernetesProvider.ApplyWorkerDaemonSet(
ctx,
namespace,
WorkerDaemonSetName,
image,
WorkerPodName,
serviceAccountName,
resources,
imagePullPolicy,
imagePullSecrets,
serviceMesh,
tls,
debug,
); err != nil {
return err
}
log.Info().Msg("Successfully created the worker DaemonSet.")
return nil
}

View File

@@ -1,12 +0,0 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: v1
kind: Namespace
metadata:
creationTimestamp: null
labels:
kubeshark-created-by: kubeshark
kubeshark-managed-by: kubeshark
name: kubeshark
spec: {}
status: {}

View File

@@ -1,12 +0,0 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
kubeshark-cli-version: v1
kubeshark-created-by: kubeshark
kubeshark-managed-by: kubeshark
name: kubeshark-service-account
namespace: kubeshark

View File

@@ -1,26 +0,0 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
kubeshark-cli-version: v1
kubeshark-created-by: kubeshark
kubeshark-managed-by: kubeshark
name: kubeshark-cluster-role
namespace: kubeshark
rules:
- apiGroups:
- ""
- extensions
- apps
resources:
- pods
- services
- endpoints
- persistentvolumeclaims
verbs:
- list
- get
- watch

View File

@@ -1,20 +0,0 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
kubeshark-cli-version: v1
kubeshark-created-by: kubeshark
kubeshark-managed-by: kubeshark
name: kubeshark-cluster-role-binding
namespace: kubeshark
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubeshark-cluster-role
subjects:
- kind: ServiceAccount
name: kubeshark-service-account
namespace: kubeshark

View File

@@ -1,44 +0,0 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: null
labels:
app: kubeshark-hub
kubeshark-created-by: kubeshark
kubeshark-managed-by: kubeshark
name: kubeshark-hub
namespace: kubeshark
spec:
containers:
- command:
- ./hub
env:
- name: POD_REGEX
value: .*
- name: NAMESPACES
- name: LICENSE
- name: SCRIPTING_ENV
value: '{}'
- name: SCRIPTING_SCRIPTS
value: '[]'
image: docker.io/kubeshark/hub:latest
imagePullPolicy: Always
name: kubeshark-hub
resources:
limits:
cpu: 750m
memory: 1Gi
requests:
cpu: 50m
memory: 50Mi
dnsPolicy: ClusterFirstWithHostNet
serviceAccountName: kubeshark-service-account
terminationGracePeriodSeconds: 0
tolerations:
- effect: NoExecute
operator: Exists
- effect: NoSchedule
operator: Exists
status: {}

View File

@@ -1,21 +0,0 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: v1
kind: Service
metadata:
creationTimestamp: null
labels:
kubeshark-created-by: kubeshark
kubeshark-managed-by: kubeshark
name: kubeshark-hub
namespace: kubeshark
spec:
ports:
- name: kubeshark-hub
port: 80
targetPort: 80
selector:
app: kubeshark-hub
type: ClusterIP
status:
loadBalancer: {}

View File

@@ -1,47 +0,0 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: null
labels:
app: kubeshark-front
kubeshark-created-by: kubeshark
kubeshark-managed-by: kubeshark
name: kubeshark-front
namespace: kubeshark
spec:
containers:
- env:
- name: REACT_APP_DEFAULT_FILTER
value: ' '
- name: REACT_APP_HUB_HOST
value: ' '
- name: REACT_APP_HUB_PORT
value: "8898"
image: docker.io/kubeshark/front:latest
imagePullPolicy: Always
name: kubeshark-front
readinessProbe:
failureThreshold: 3
periodSeconds: 1
successThreshold: 1
tcpSocket:
port: 80
timeoutSeconds: 1
resources:
limits:
cpu: 750m
memory: 1Gi
requests:
cpu: 50m
memory: 50Mi
dnsPolicy: ClusterFirstWithHostNet
serviceAccountName: kubeshark-service-account
terminationGracePeriodSeconds: 0
tolerations:
- effect: NoExecute
operator: Exists
- effect: NoSchedule
operator: Exists
status: {}

View File

@@ -1,21 +0,0 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: v1
kind: Service
metadata:
creationTimestamp: null
labels:
kubeshark-created-by: kubeshark
kubeshark-managed-by: kubeshark
name: kubeshark-front
namespace: kubeshark
spec:
ports:
- name: kubeshark-front
port: 80
targetPort: 80
selector:
app: kubeshark-front
type: ClusterIP
status:
loadBalancer: {}

View File

@@ -1,20 +0,0 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
creationTimestamp: null
labels:
kubeshark-cli-version: v1
kubeshark-created-by: kubeshark
kubeshark-managed-by: kubeshark
name: kubeshark-persistent-volume-claim
namespace: kubeshark
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 200Mi
storageClassName: standard
status: {}

View File

@@ -1,90 +0,0 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
creationTimestamp: null
labels:
app: kubeshark-worker-daemon-set
kubeshark-created-by: kubeshark
kubeshark-managed-by: kubeshark
name: kubeshark-worker-daemon-set
namespace: kubeshark
spec:
selector:
matchLabels:
app: kubeshark-worker-daemon-set
kubeshark-created-by: kubeshark
kubeshark-managed-by: kubeshark
template:
metadata:
creationTimestamp: null
labels:
app: kubeshark-worker-daemon-set
kubeshark-created-by: kubeshark
kubeshark-managed-by: kubeshark
name: kubeshark-worker-daemon-set
namespace: kubeshark
spec:
containers:
- command:
- ./worker
- -i
- any
- -port
- "8897"
- -packet-capture
- libpcap
- -servicemesh
- -tls
- -procfs
- /hostproc
image: docker.io/kubeshark/worker:latest
imagePullPolicy: Always
name: kubeshark-worker-daemon-set
resources:
limits:
cpu: 750m
memory: 1Gi
requests:
cpu: 50m
memory: 50Mi
securityContext:
capabilities:
add:
- NET_RAW
- NET_ADMIN
- SYS_ADMIN
- SYS_PTRACE
- DAC_OVERRIDE
- SYS_RESOURCE
drop:
- ALL
volumeMounts:
- mountPath: /hostproc
name: proc
readOnly: true
- mountPath: /sys
name: sys
readOnly: true
- mountPath: /app/data
name: kubeshark-persistent-volume
dnsPolicy: ClusterFirstWithHostNet
hostNetwork: true
serviceAccountName: kubeshark-service-account
terminationGracePeriodSeconds: 0
tolerations:
- effect: NoExecute
operator: Exists
- effect: NoSchedule
operator: Exists
volumes:
- hostPath:
path: /proc
name: proc
- hostPath:
path: /sys
name: sys
- name: kubeshark-persistent-volume
persistentVolumeClaim:
claimName: kubeshark-persistent-volume-claim

421
manifests/complete.yaml Normal file
View File

@@ -0,0 +1,421 @@
---
# Source: kubeshark/templates/01-service-account.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
helm.sh/chart: kubeshark-50.0
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "50.0"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-service-account
namespace: default
---
# Source: kubeshark/templates/14-secret.yaml
kind: Secret
apiVersion: v1
metadata:
name: kubeshark-secret
namespace: default
labels:
app.kubeshark.co/app: hub
helm.sh/chart: kubeshark-50.0
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "50.0"
app.kubernetes.io/managed-by: Helm
stringData:
LICENSE: ''
---
# Source: kubeshark/templates/12-nginx-config-map.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: kubeshark-nginx-config-map
namespace: default
labels:
helm.sh/chart: kubeshark-50.0
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "50.0"
app.kubernetes.io/managed-by: Helm
data:
default.conf: |
server {
listen 80;
listen [::]:80;
add_header Cache-Control no-cache;
location / {
root /usr/share/nginx/html;
index index.html index.htm;
try_files $uri $uri/ /index.html;
expires -1;
}
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root /usr/share/nginx/html;
}
}
---
# Source: kubeshark/templates/13-config-map.yaml
kind: ConfigMap
apiVersion: v1
metadata:
name: kubeshark-config-map
namespace: default
labels:
app.kubeshark.co/app: hub
helm.sh/chart: kubeshark-50.0
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "50.0"
app.kubernetes.io/managed-by: Helm
data:
POD_REGEX: '.*'
NAMESPACES: ''
SCRIPTING_ENV: 'null'
SCRIPTING_SCRIPTS: '[]'
AUTH_ENABLED: ''
AUTH_APPROVED_EMAILS: ''
AUTH_APPROVED_DOMAINS: ''
---
# Source: kubeshark/templates/02-cluster-role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
helm.sh/chart: kubeshark-50.0
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "50.0"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-cluster-role
namespace: default
rules:
- apiGroups:
- ""
- extensions
- apps
resources:
- pods
- services
- endpoints
- persistentvolumeclaims
verbs:
- list
- get
- watch
---
# Source: kubeshark/templates/03-cluster-role-binding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
helm.sh/chart: kubeshark-50.0
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "50.0"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-cluster-role-binding
namespace: default
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubeshark-cluster-role
subjects:
- kind: ServiceAccount
name: kubeshark-service-account
namespace: default
---
# Source: kubeshark/templates/05-hub-service.yaml
apiVersion: v1
kind: Service
metadata:
labels:
app.kubeshark.co/app: hub
helm.sh/chart: kubeshark-50.0
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "50.0"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-hub
namespace: default
spec:
ports:
- name: kubeshark-hub
port: 80
targetPort: 80
selector:
app.kubeshark.co/app: hub
type: ClusterIP
status:
loadBalancer: {}
---
# Source: kubeshark/templates/07-front-service.yaml
apiVersion: v1
kind: Service
metadata:
labels:
helm.sh/chart: kubeshark-50.0
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "50.0"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-front
namespace: default
spec:
ports:
- name: kubeshark-front
port: 80
targetPort: 80
selector:
app.kubeshark.co/app: front
type: ClusterIP
status:
loadBalancer: {}
---
# Source: kubeshark/templates/09-worker-daemon-set.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
labels:
app.kubeshark.co/app: worker
sidecar.istio.io/inject: "false"
helm.sh/chart: kubeshark-50.0
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "50.0"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-worker-daemon-set
namespace: default
spec:
selector:
matchLabels:
app.kubeshark.co/app: worker
helm.sh/chart: kubeshark-50.0
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "50.0"
app.kubernetes.io/managed-by: Helm
template:
metadata:
labels:
app.kubeshark.co/app: worker
helm.sh/chart: kubeshark-50.0
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "50.0"
app.kubernetes.io/managed-by: Helm
name: kubeshark-worker-daemon-set
namespace: kubeshark
spec:
containers:
- command:
- ./worker
- -i
- any
- -port
- '8897'
- -servicemesh
- -tls
- -procfs
- /hostproc
image: 'docker.io/kubeshark/worker:latest'
imagePullPolicy: Always
name: kubeshark-worker-daemon-set
envFrom:
- secretRef:
name: kubeshark-secret
resources:
limits:
cpu: 750m
memory: 1Gi
requests:
cpu: 50m
memory: 50Mi
securityContext:
capabilities:
add:
- NET_RAW
- NET_ADMIN
- SYS_ADMIN
- SYS_PTRACE
- DAC_OVERRIDE
- SYS_RESOURCE
- SYS_MODULE
drop:
- ALL
readinessProbe:
periodSeconds: 1
initialDelaySeconds: 1
tcpSocket:
port: 8897
livenessProbe:
periodSeconds: 1
initialDelaySeconds: 1
tcpSocket:
port: 8897
volumeMounts:
- mountPath: /hostproc
name: proc
readOnly: true
- mountPath: /sys
name: sys
readOnly: true
dnsPolicy: ClusterFirstWithHostNet
hostNetwork: true
serviceAccountName: kubeshark-service-account
terminationGracePeriodSeconds: 0
tolerations:
- effect: NoExecute
operator: Exists
- effect: NoSchedule
operator: Exists
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- linux
volumes:
- hostPath:
path: /proc
name: proc
- hostPath:
path: /sys
name: sys
---
# Source: kubeshark/templates/04-hub-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: kubeshark-hub
namespace: default
labels:
app.kubeshark.co/app: hub
helm.sh/chart: kubeshark-50.0
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "50.0"
app.kubernetes.io/managed-by: Helm
annotations:
spec:
replicas: 1 # Set the desired number of replicas
selector:
matchLabels:
app.kubeshark.co/app: hub
template:
metadata:
labels:
app.kubeshark.co/app: hub
sidecar.istio.io/inject: "false"
spec:
dnsPolicy: ClusterFirstWithHostNet
serviceAccountName: kubeshark-service-account
containers:
- name: kubeshark-hub
command:
- ./hub
envFrom:
- configMapRef:
name: kubeshark-config-map
- secretRef:
name: kubeshark-secret
image: 'docker.io/kubeshark/hub:latest'
imagePullPolicy: Always
readinessProbe:
periodSeconds: 1
initialDelaySeconds: 3
tcpSocket:
port: 80
livenessProbe:
periodSeconds: 1
initialDelaySeconds: 3
tcpSocket:
port: 80
resources:
limits:
cpu: 750m
memory: 1Gi
requests:
cpu: 50m
memory: 50Mi
---
# Source: kubeshark/templates/06-front-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: kubeshark-front
namespace: default
labels:
app.kubeshark.co/app: front
helm.sh/chart: kubeshark-50.0
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "50.0"
app.kubernetes.io/managed-by: Helm
annotations:
spec:
replicas: 1 # Set the desired number of replicas
selector:
matchLabels:
app.kubeshark.co/app: front
template:
metadata:
labels:
app.kubeshark.co/app: front
spec:
containers:
- env:
- name: REACT_APP_DEFAULT_FILTER
value: ' '
- name: REACT_APP_HUB_HOST
value: ' '
- name: REACT_APP_HUB_PORT
value: ':8898'
image: 'docker.io/kubeshark/front:latest'
imagePullPolicy: Always
name: kubeshark-front
livenessProbe:
failureThreshold: 3
periodSeconds: 1
successThreshold: 1
tcpSocket:
port: 80
readinessProbe:
failureThreshold: 3
periodSeconds: 1
successThreshold: 1
tcpSocket:
port: 80
timeoutSeconds: 1
resources:
limits:
cpu: 750m
memory: 1Gi
requests:
cpu: 50m
memory: 50Mi
volumeMounts:
- name: nginx-config
mountPath: /etc/nginx/conf.d/default.conf
subPath: default.conf
readOnly: true
volumes:
- name: nginx-config
configMap:
name: kubeshark-nginx-config-map
dnsPolicy: ClusterFirstWithHostNet
serviceAccountName: kubeshark-service-account

View File

@@ -0,0 +1,12 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: kubeshark-tls
namespace: default
spec:
issuerRef:
name: letsencrypt-prod
kind: ClusterIssuer
secretName: cert-kubeshark
dnsNames:
- ks.svc.cluster.local

View File

@@ -0,0 +1,14 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-prod
spec:
acme:
server: https://acme-v02.api.letsencrypt.org/directory
email: info@kubeshark.co
privateKeySecretRef:
name: letsencrypt-prod-key
solvers:
- http01:
ingress:
class: kubeshark-ingress-class

15
manifests/tls/run.sh Executable file
View File

@@ -0,0 +1,15 @@
#!/bin/bash
__dir="$(cd -P -- "$(dirname -- "$0")" && pwd -P)"
helm repo add jetstack https://charts.jetstack.io
helm repo update
kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.9.1/cert-manager.crds.yaml
helm install \
cert-manager jetstack/cert-manager \
--namespace cert-manager \
--create-namespace \
--version v1.9.1
kubectl apply -f ${__dir}/cluster-issuer.yaml
kubectl apply -f ${__dir}/certificate.yaml

View File

@@ -15,13 +15,13 @@ import (
func DumpLogs(ctx context.Context, provider *kubernetes.Provider, filePath string) error {
podExactRegex := regexp.MustCompile("^" + kubernetes.SelfResourcesPrefix)
pods, err := provider.ListAllPodsMatchingRegex(ctx, podExactRegex, []string{config.Config.Tap.SelfNamespace})
pods, err := provider.ListAllPodsMatchingRegex(ctx, podExactRegex, []string{config.Config.Tap.Release.Namespace})
if err != nil {
return err
}
if len(pods) == 0 {
return fmt.Errorf("No %s pods found in namespace %s", misc.Software, config.Config.Tap.SelfNamespace)
return fmt.Errorf("No %s pods found in namespace %s", misc.Software, config.Config.Tap.Release.Namespace)
}
newZipFile, err := os.Create(filePath)
@@ -60,17 +60,17 @@ func DumpLogs(ctx context.Context, provider *kubernetes.Provider, filePath strin
}
}
events, err := provider.GetNamespaceEvents(ctx, config.Config.Tap.SelfNamespace)
events, err := provider.GetNamespaceEvents(ctx, config.Config.Tap.Release.Namespace)
if err != nil {
log.Error().Err(err).Msg("Failed to get k8b events!")
} else {
log.Debug().Str("namespace", config.Config.Tap.SelfNamespace).Msg("Successfully read events.")
log.Debug().Str("namespace", config.Config.Tap.Release.Namespace).Msg("Successfully read events.")
}
if err := AddStrToZip(zipWriter, events, fmt.Sprintf("%s_events.log", config.Config.Tap.SelfNamespace)); err != nil {
if err := AddStrToZip(zipWriter, events, fmt.Sprintf("%s_events.log", config.Config.Tap.Release.Namespace)); err != nil {
log.Error().Err(err).Msg("Failed write logs!")
} else {
log.Debug().Str("namespace", config.Config.Tap.SelfNamespace).Msg("Successfully added events.")
log.Debug().Str("namespace", config.Config.Tap.Release.Namespace).Msg("Successfully added events.")
}
if err := AddFileToZip(zipWriter, config.ConfigFilePath); err != nil {

View File

@@ -16,7 +16,7 @@ import (
)
func CheckNewerVersion() {
if os.Getenv("KUBESHARK_DISABLE_VERSION_CHECK") != "" {
if os.Getenv(fmt.Sprintf("%s_DISABLE_VERSION_CHECK", strings.ToUpper(misc.Program))) != "" {
return
}
@@ -44,7 +44,7 @@ func CheckNewerVersion() {
} else {
downloadCommand = fmt.Sprintf("sh <(curl -Ls %s/install)", misc.Website)
}
msg := fmt.Sprintf("There is a new release! %v -> %v run:", misc.Ver, latestVersion)
msg := fmt.Sprintf("There is a new release! %v -> %v Please upgrade to the latest release, as new releases are not always backward compatible. Run:", misc.Ver, latestVersion)
log.Warn().Str("command", downloadCommand).Msg(fmt.Sprintf(utils.Yellow, msg))
}
}

View File

@@ -1,173 +0,0 @@
package resources

import (
    "context"
    "fmt"

    "github.com/kubeshark/kubeshark/errormessage"
    "github.com/kubeshark/kubeshark/kubernetes"
    "github.com/kubeshark/kubeshark/misc"
    "github.com/kubeshark/kubeshark/utils"
    "github.com/rs/zerolog/log"
    "k8s.io/apimachinery/pkg/util/wait"
)

func CleanUpSelfResources(ctx context.Context, cancel context.CancelFunc, kubernetesProvider *kubernetes.Provider, isNsRestrictedMode bool, selfResourcesNamespace string) {
    log.Warn().Msg(fmt.Sprintf("Removing %s resources...", misc.Software))

    var leftoverResources []string

    if isNsRestrictedMode {
        leftoverResources = cleanUpRestrictedMode(ctx, kubernetesProvider, selfResourcesNamespace)
    } else {
        leftoverResources = cleanUpNonRestrictedMode(ctx, cancel, kubernetesProvider, selfResourcesNamespace)
    }

    if len(leftoverResources) > 0 {
        errMsg := "Failed to remove the following resources."
        for _, resource := range leftoverResources {
            errMsg += "\n- " + resource
        }
        log.Error().Msg(fmt.Sprintf(utils.Red, errMsg))
    }
}

func cleanUpNonRestrictedMode(ctx context.Context, cancel context.CancelFunc, kubernetesProvider *kubernetes.Provider, selfResourcesNamespace string) []string {
    leftoverResources := make([]string, 0)

    if err := kubernetesProvider.RemoveNamespace(ctx, selfResourcesNamespace); err != nil {
        resourceDesc := fmt.Sprintf("Namespace %s", selfResourcesNamespace)
        handleDeletionError(err, resourceDesc, &leftoverResources)
    } else {
        defer waitUntilNamespaceDeleted(ctx, cancel, kubernetesProvider, selfResourcesNamespace)
    }

    if resources, err := kubernetesProvider.ListManagedClusterRoles(ctx); err != nil {
        resourceDesc := "ClusterRoles"
        handleDeletionError(err, resourceDesc, &leftoverResources)
    } else {
        for _, resource := range resources.Items {
            if err := kubernetesProvider.RemoveClusterRole(ctx, resource.Name); err != nil {
                resourceDesc := fmt.Sprintf("ClusterRole %s", resource.Name)
                handleDeletionError(err, resourceDesc, &leftoverResources)
            }
        }
    }

    if resources, err := kubernetesProvider.ListManagedClusterRoleBindings(ctx); err != nil {
        resourceDesc := "ClusterRoleBindings"
        handleDeletionError(err, resourceDesc, &leftoverResources)
    } else {
        for _, resource := range resources.Items {
            if err := kubernetesProvider.RemoveClusterRoleBinding(ctx, resource.Name); err != nil {
                resourceDesc := fmt.Sprintf("ClusterRoleBinding %s", resource.Name)
                handleDeletionError(err, resourceDesc, &leftoverResources)
            }
        }
    }

    return leftoverResources
}

func waitUntilNamespaceDeleted(ctx context.Context, cancel context.CancelFunc, kubernetesProvider *kubernetes.Provider, selfResourcesNamespace string) {
    // Call cancel if a terminating signal was received. Allows user to skip the wait.
    go func() {
        utils.WaitForTermination(ctx, cancel)
    }()

    if err := kubernetesProvider.WaitUtilNamespaceDeleted(ctx, selfResourcesNamespace); err != nil {
        switch {
        case ctx.Err() == context.Canceled:
            log.Printf("Do nothing. User interrupted the wait")
            log.Warn().
                Str("namespace", selfResourcesNamespace).
                Msg("Did nothing. User interrupted the wait.")
        case err == wait.ErrWaitTimeout:
            log.Warn().
                Str("namespace", selfResourcesNamespace).
                Msg("Timed out while deleting the namespace.")
        default:
            log.Warn().
                Err(errormessage.FormatError(err)).
                Str("namespace", selfResourcesNamespace).
                Msg("Unknown error while deleting the namespace.")
        }
    }
}

func cleanUpRestrictedMode(ctx context.Context, kubernetesProvider *kubernetes.Provider, selfResourcesNamespace string) []string {
    leftoverResources := make([]string, 0)

    if err := kubernetesProvider.RemoveService(ctx, selfResourcesNamespace, kubernetes.FrontServiceName); err != nil {
        resourceDesc := fmt.Sprintf("Service %s in namespace %s", kubernetes.FrontServiceName, selfResourcesNamespace)
        handleDeletionError(err, resourceDesc, &leftoverResources)
    }

    if err := kubernetesProvider.RemoveService(ctx, selfResourcesNamespace, kubernetes.HubServiceName); err != nil {
        resourceDesc := fmt.Sprintf("Service %s in namespace %s", kubernetes.HubServiceName, selfResourcesNamespace)
        handleDeletionError(err, resourceDesc, &leftoverResources)
    }

    if err := kubernetesProvider.RemovePersistentVolumeClaim(ctx, selfResourcesNamespace, kubernetes.PersistentVolumeClaimName); err != nil {
        resourceDesc := fmt.Sprintf("Persistent Volume %s in namespace %s", kubernetes.PersistentVolumeClaimName, selfResourcesNamespace)
        handleDeletionError(err, resourceDesc, &leftoverResources)
    }

    if err := kubernetesProvider.RemoveDaemonSet(ctx, selfResourcesNamespace, kubernetes.WorkerDaemonSetName); err != nil {
        resourceDesc := fmt.Sprintf("DaemonSet %s in namespace %s", kubernetes.WorkerDaemonSetName, selfResourcesNamespace)
        handleDeletionError(err, resourceDesc, &leftoverResources)
    }

    if resources, err := kubernetesProvider.ListManagedServiceAccounts(ctx, selfResourcesNamespace); err != nil {
        resourceDesc := fmt.Sprintf("ServiceAccounts in namespace %s", selfResourcesNamespace)
        handleDeletionError(err, resourceDesc, &leftoverResources)
    } else {
        for _, resource := range resources.Items {
            if err := kubernetesProvider.RemoveServiceAccount(ctx, selfResourcesNamespace, resource.Name); err != nil {
                resourceDesc := fmt.Sprintf("ServiceAccount %s in namespace %s", resource.Name, selfResourcesNamespace)
                handleDeletionError(err, resourceDesc, &leftoverResources)
            }
        }
    }

    if resources, err := kubernetesProvider.ListManagedRoles(ctx, selfResourcesNamespace); err != nil {
        resourceDesc := fmt.Sprintf("Roles in namespace %s", selfResourcesNamespace)
        handleDeletionError(err, resourceDesc, &leftoverResources)
    } else {
        for _, resource := range resources.Items {
            if err := kubernetesProvider.RemoveRole(ctx, selfResourcesNamespace, resource.Name); err != nil {
                resourceDesc := fmt.Sprintf("Role %s in namespace %s", resource.Name, selfResourcesNamespace)
                handleDeletionError(err, resourceDesc, &leftoverResources)
            }
        }
    }

    if resources, err := kubernetesProvider.ListManagedRoleBindings(ctx, selfResourcesNamespace); err != nil {
        resourceDesc := fmt.Sprintf("RoleBindings in namespace %s", selfResourcesNamespace)
        handleDeletionError(err, resourceDesc, &leftoverResources)
    } else {
        for _, resource := range resources.Items {
            if err := kubernetesProvider.RemoveRoleBinding(ctx, selfResourcesNamespace, resource.Name); err != nil {
                resourceDesc := fmt.Sprintf("RoleBinding %s in namespace %s", resource.Name, selfResourcesNamespace)
                handleDeletionError(err, resourceDesc, &leftoverResources)
            }
        }
    }

    if err := kubernetesProvider.RemovePod(ctx, selfResourcesNamespace, kubernetes.HubPodName); err != nil {
        resourceDesc := fmt.Sprintf("Pod %s in namespace %s", kubernetes.HubPodName, selfResourcesNamespace)
        handleDeletionError(err, resourceDesc, &leftoverResources)
    }

    if err := kubernetesProvider.RemovePod(ctx, selfResourcesNamespace, kubernetes.FrontPodName); err != nil {
        resourceDesc := fmt.Sprintf("Pod %s in namespace %s", kubernetes.FrontPodName, selfResourcesNamespace)
        handleDeletionError(err, resourceDesc, &leftoverResources)
    }

    return leftoverResources
}

func handleDeletionError(err error, resourceDesc string, leftoverResources *[]string) {
    log.Warn().Err(errormessage.FormatError(err)).Msg(fmt.Sprintf("Error while removing %s", resourceDesc))
    *leftoverResources = append(*leftoverResources, resourceDesc)
}

View File

@@ -1,106 +0,0 @@
package resources

import (
    "context"
    "fmt"

    "github.com/kubeshark/kubeshark/config"
    "github.com/kubeshark/kubeshark/config/configStructs"
    "github.com/kubeshark/kubeshark/docker"
    "github.com/kubeshark/kubeshark/errormessage"
    "github.com/kubeshark/kubeshark/kubernetes"
    "github.com/kubeshark/kubeshark/misc"
    "github.com/rs/zerolog/log"
    core "k8s.io/api/core/v1"
)

func CreateHubResources(ctx context.Context, kubernetesProvider *kubernetes.Provider, isNsRestrictedMode bool, selfNamespace string, hubResources configStructs.ResourceRequirements, imagePullPolicy core.PullPolicy, imagePullSecrets []core.LocalObjectReference, debug bool) (bool, error) {
    if !isNsRestrictedMode {
        if err := createSelfNamespace(ctx, kubernetesProvider, selfNamespace); err != nil {
            log.Debug().Err(err).Send()
        }
    }

    err := kubernetesProvider.CreateSelfRBAC(ctx, selfNamespace)
    var selfServiceAccountExists bool
    if err != nil {
        selfServiceAccountExists = true
        log.Warn().Err(errormessage.FormatError(err)).Msg(fmt.Sprintf("Failed to ensure the resources required for IP resolving. %s will not resolve target IPs to names.", misc.Software))
    }

    hubOpts := &kubernetes.PodOptions{
        Namespace: selfNamespace,
        PodName: kubernetes.HubPodName,
        PodImage: docker.GetHubImage(),
        ServiceAccountName: kubernetes.ServiceAccountName,
        Resources: hubResources,
        ImagePullPolicy: imagePullPolicy,
        ImagePullSecrets: imagePullSecrets,
        Debug: debug,
    }

    frontOpts := &kubernetes.PodOptions{
        Namespace: selfNamespace,
        PodName: kubernetes.FrontPodName,
        PodImage: docker.GetWorkerImage(),
        ServiceAccountName: kubernetes.ServiceAccountName,
        Resources: hubResources,
        ImagePullPolicy: imagePullPolicy,
        ImagePullSecrets: imagePullSecrets,
        Debug: debug,
    }

    if err := createSelfHubPod(ctx, kubernetesProvider, hubOpts); err != nil {
        return selfServiceAccountExists, err
    }

    if err := createFrontPod(ctx, kubernetesProvider, frontOpts); err != nil {
        return selfServiceAccountExists, err
    }

    // TODO: Why the port values need to be 80?
    _, err = kubernetesProvider.CreateService(ctx, selfNamespace, kubernetesProvider.BuildHubService(selfNamespace))
    if err != nil {
        return selfServiceAccountExists, err
    }
    log.Info().Str("service", kubernetes.HubServiceName).Msg("Successfully created a service.")

    _, err = kubernetesProvider.CreateService(ctx, selfNamespace, kubernetesProvider.BuildFrontService(selfNamespace))
    if err != nil {
        return selfServiceAccountExists, err
    }
    log.Info().Str("service", kubernetes.FrontServiceName).Msg("Successfully created a service.")

    return selfServiceAccountExists, nil
}

func createSelfNamespace(ctx context.Context, kubernetesProvider *kubernetes.Provider, selfNamespace string) error {
    _, err := kubernetesProvider.CreateNamespace(ctx, kubernetesProvider.BuildNamespace(selfNamespace))
    return err
}

func createSelfHubPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, opts *kubernetes.PodOptions) error {
    pod, err := kubernetesProvider.BuildHubPod(opts)
    if err != nil {
        return err
    }
    if _, err = kubernetesProvider.CreatePod(ctx, opts.Namespace, pod); err != nil {
        return err
    }
    log.Info().Str("pod", pod.Name).Msg("Successfully created a pod.")
    return nil
}

func createFrontPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, opts *kubernetes.PodOptions) error {
    pod, err := kubernetesProvider.BuildFrontPod(opts, config.Config.Tap.Proxy.Host, fmt.Sprintf("%d", config.Config.Tap.Proxy.Hub.SrcPort))
    if err != nil {
        return err
    }
    if _, err = kubernetesProvider.CreatePod(ctx, opts.Namespace, pod); err != nil {
        return err
    }
    log.Info().Str("pod", pod.Name).Msg("Successfully created a pod.")
    return nil
}

View File

@@ -16,8 +16,15 @@ func Get(url string, client *http.Client) (*http.Response, error) {
 // Post - When err is nil, resp always contains a non-nil resp.Body.
 // Caller should close resp.Body when done reading from it.
-func Post(url, contentType string, body io.Reader, client *http.Client) (*http.Response, error) {
-    return checkError(client.Post(url, contentType, body))
+func Post(url, contentType string, body io.Reader, client *http.Client, licenseKey string) (*http.Response, error) {
+    req, err := http.NewRequest(http.MethodPost, url, body)
+    if err != nil {
+        return nil, err
+    }
+    req.Header.Set("Content-Type", "application/json")
+    req.Header.Set("License-Key", licenseKey)
+    return checkError(client.Do(req))
 }
 // Do - When err is nil, resp always contains a non-nil resp.Body.
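The updated helper builds the request explicitly so a License-Key header can be attached before the call is made. A self-contained sketch of the same approach, with a hypothetical endpoint and key used purely for illustration:

package main

import (
    "bytes"
    "fmt"
    "net/http"
    "time"
)

// post mirrors the updated helper: the request is constructed by hand so
// custom headers can be set before the client sends it.
func post(url string, body []byte, client *http.Client, licenseKey string) (*http.Response, error) {
    req, err := http.NewRequest(http.MethodPost, url, bytes.NewReader(body))
    if err != nil {
        return nil, err
    }
    req.Header.Set("Content-Type", "application/json")
    req.Header.Set("License-Key", licenseKey)
    return client.Do(req)
}

func main() {
    client := &http.Client{Timeout: 10 * time.Second}
    // Hypothetical endpoint and key, for illustration only.
    resp, err := post("https://example.com/api", []byte(`{}`), client, "my-license-key")
    if err != nil {
        fmt.Println("request failed:", err)
        return
    }
    defer resp.Body.Close()
    fmt.Println("status:", resp.Status)
}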

View File

@@ -7,46 +7,27 @@ import (
     "gopkg.in/yaml.v3"
 )
-const (
-    empty = ""
-    tab = "\t"
-)
-func PrettyJson(data interface{}) (string, error) {
-    buffer := new(bytes.Buffer)
-    encoder := json.NewEncoder(buffer)
-    encoder.SetIndent(empty, tab)
-    err := encoder.Encode(data)
+func PrettyYaml(data interface{}) (result string, err error) {
+    var marshalled []byte
+    marshalled, err = json.Marshal(data)
     if err != nil {
-        return empty, err
+        return
     }
+    var unmarshalled interface{}
+    err = json.Unmarshal(marshalled, &unmarshalled)
+    if err != nil {
+        return
+    }
-    return buffer.String(), nil
-}
-func PrettyYaml(data interface{}) (string, error) {
     buffer := new(bytes.Buffer)
     encoder := yaml.NewEncoder(buffer)
     encoder.SetIndent(2)
-    err := encoder.Encode(data)
+    err = encoder.Encode(unmarshalled)
     if err != nil {
-        return empty, err
+        return
     }
-    return buffer.String(), nil
-}
-func PrettyYamlOmitEmpty(data interface{}) (string, error) {
-    d, err := json.Marshal(data)
-    if err != nil {
-        return empty, err
-    }
-    var cleanData map[string]interface{}
-    err = json.Unmarshal(d, &cleanData)
-    if err != nil {
-        return empty, err
-    }
-    return PrettyYaml(cleanData)
+    result = buffer.String()
+    return
 }
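The rewritten PrettyYaml round-trips the value through encoding/json before YAML-encoding it, so json struct tags (including omitempty) decide what appears in the output, folding in what the removed PrettyYamlOmitEmpty used to do separately. A self-contained sketch of that technique, using the same gopkg.in/yaml.v3 dependency (the tapConfig struct is purely illustrative):

package main

import (
    "bytes"
    "encoding/json"
    "fmt"

    "gopkg.in/yaml.v3"
)

// Illustrative struct: Debug carries omitempty, so it is dropped when false.
type tapConfig struct {
    Namespace string `json:"namespace,omitempty"`
    Debug     bool   `json:"debug,omitempty"`
}

// prettyYaml marshals the value to JSON first so json tags and omitempty are
// honored, then unmarshals into a generic value and YAML-encodes that.
func prettyYaml(data interface{}) (string, error) {
    marshalled, err := json.Marshal(data)
    if err != nil {
        return "", err
    }
    var unmarshalled interface{}
    if err := json.Unmarshal(marshalled, &unmarshalled); err != nil {
        return "", err
    }
    buffer := new(bytes.Buffer)
    encoder := yaml.NewEncoder(buffer)
    encoder.SetIndent(2)
    if err := encoder.Encode(unmarshalled); err != nil {
        return "", err
    }
    return buffer.String(), nil
}

func main() {
    // Debug is false and omitted; only "namespace: default" is printed.
    out, _ := prettyYaml(tapConfig{Namespace: "default"})
    fmt.Print(out)
}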