Compare commits


211 Commits
38.4...41.2

Author SHA1 Message Date
M. Mert Yildiran
4c2884c40f Add KUBESHARK_HELM_CHART_PATH environment variable to set a local path for the Helm chart 2023-07-03 17:15:47 +03:00
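A minimal sketch of what honoring such a variable can look like in Go (the variable name comes from the commit; the loader calls, default path, and fallback logic are illustrative, not the repository's actual code):

```go
package main

import (
	"fmt"
	"log"
	"os"

	"helm.sh/helm/v3/pkg/chart"
	"helm.sh/helm/v3/pkg/chart/loader"
)

// loadChart prefers a local chart checkout pointed to by
// KUBESHARK_HELM_CHART_PATH, falling back to defaultPath otherwise.
func loadChart(defaultPath string) (*chart.Chart, error) {
	path := defaultPath
	if local := os.Getenv("KUBESHARK_HELM_CHART_PATH"); local != "" {
		path = local // e.g. a ./helm-chart working copy
	}
	return loader.Load(path)
}

func main() {
	c, err := loadChart("./helm-chart") // default path is an assumption
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("loaded chart:", c.Name())
}
```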
M. Mert Yildiran
4fb179f623 ⬆️ Bump the Helm chart version 2023-07-03 17:05:20 +03:00
M. Mert Yildiran
796fc1453c Fix the hub and worker commands 2023-07-03 16:47:00 +03:00
M. Mert Yildiran
0ef3e2d018 Fix the issues related to release namespace 2023-07-03 16:33:50 +03:00
M. Mert Yildiran
77a14410f4 Revert " Rename releasenamespace field to selfnamespace"
This reverts commit d8ee89225c.
2023-07-03 15:11:21 +03:00
M. Mert Yildiran
f269a61842 Revert "Revert "🐛 Fix the commands in case of -debug flag enabled""
This reverts commit 64b22daa2a.
2023-07-03 15:11:04 +03:00
M. Mert Yildiran
51eddd3ae4 Fix the -r flag behavior in config command 2023-07-03 13:14:30 +03:00
M. Mert Yildiran
64b22daa2a Revert "🐛 Fix the commands in case of -debug flag enabled"
This reverts commit 3a2d34647e.
2023-07-03 12:32:15 +03:00
M. Mert Yildiran
3a2d34647e 🐛 Fix the commands in case of -debug flag enabled 2023-07-03 12:08:57 +03:00
M. Mert Yildiran
d8ee89225c Rename releasenamespace field to selfnamespace 2023-07-03 11:54:06 +03:00
M. Mert Yildiran
f7ce141d0d Remove an unnecessary check 2023-07-03 11:45:00 +03:00
M. Mert Yildiran
3c25cec633 Regenerate the complete.yaml 2023-06-30 16:57:44 +03:00
M. Mert Yildiran
7b86d32174 Remove the hostPort field from the manifests 2023-06-30 16:57:26 +03:00
M. Mert Yildiran
aeda619104 Download files in parallel 2023-06-29 16:45:59 +03:00
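The gist of such a change, sketched with a sync.WaitGroup (the downloadOne helper is a hypothetical stand-in for the real per-file download):

```go
package main

import (
	"log"
	"sync"
)

// downloadOne is a hypothetical stand-in for fetching a single object.
func downloadOne(key string) error {
	// ...fetch the object named by key and write it to disk...
	return nil
}

// downloadAll starts one goroutine per file instead of downloading serially.
func downloadAll(keys []string) {
	var wg sync.WaitGroup
	for _, key := range keys {
		wg.Add(1)
		go func(k string) {
			defer wg.Done()
			if err := downloadOne(k); err != nil {
				log.Printf("download %s failed: %v", k, err)
			}
		}(key)
	}
	wg.Wait() // block until every download finishes
}

func main() {
	downloadAll([]string{"a.pcap", "b.pcap", "c.pcap"})
}
```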
M. Mert Yildiran
98738cb5a6 Use Prefix field of ListObjectsV2Input instead of strings.HasPrefix check 2023-06-29 16:06:52 +03:00
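Server-side prefix filtering in a nutshell, shown here with aws-sdk-go-v2 (the SDK major version, bucket name, and prefix are assumptions; the point is that Prefix pushes the filter to S3 instead of listing everything and checking strings.HasPrefix locally):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	awsconfig "github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()
	cfg, err := awsconfig.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Only keys under the prefix are returned by the service itself.
	out, err := client.ListObjectsV2(ctx, &s3.ListObjectsV2Input{
		Bucket: aws.String("my-bucket"), // assumption: example bucket
		Prefix: aws.String("captures/"), // assumption: example prefix
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, obj := range out.Contents {
		fmt.Println(aws.ToString(obj.Key))
	}
}
```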
M. Mert Yildiran
bf3285cb8b 🐛 Fix the collision of the -d flag in tap with the root level debug flag 2023-06-29 02:49:01 +03:00
M. Mert Yildiran
5f9084e497 Make the config command print the current config instead of the default config 2023-06-29 02:39:35 +03:00
M. Mert Yildiran
f2a384c8db Change the S3 URL to S3 URI in the flag description 2023-06-29 02:29:34 +03:00
M. Mert Yildiran
207d89fa17 🐛 Fix the cleanUpOldContainers method by adding All: true to ContainerListOptions 2023-06-29 02:22:57 +03:00
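For context: Docker's ContainerList returns only running containers by default, so a cleanup routine would miss exited ones; All: true is the documented option that includes them. A standalone sketch (client setup and printed fields are illustrative):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// All: true includes stopped/exited containers, which a cleanup
	// routine must see; the default lists only running containers.
	containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{All: true})
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range containers {
		fmt.Println(c.ID[:12], c.State)
	}
}
```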
M. Mert Yildiran
3b758d15a0 Change the pattern of temporary file downloaded from S3 2023-06-29 02:21:32 +03:00
M. Mert Yildiran
261e850a59 Support folder URLs 2023-06-29 02:13:47 +03:00
M. Mert Yildiran
242a276c5f Download all the objects in bucket and TAR them in case of key is empty in the S3 URL 2023-06-29 01:42:51 +03:00
M. Mert Yildiran
b9f9e860b6 Change the default namespace from kubeshark to default and use .Release.Namespace in Helm templates 2023-06-27 21:06:44 +03:00
M. Mert Yildiran
1404c68a22 Fix the annotations in Ingress 2023-06-27 20:50:02 +03:00
M. Mert Yildiran
400c681369 Fix the issues in Ingress Helm template 2023-06-27 20:36:46 +03:00
M. Mert Yildiran
a4761e3262 Handle the column character in REACT_APP_HUB_PORT environment variable 2023-06-27 14:43:53 +03:00
M. Mert Yildiran
313d26670b Regenerate the manifests 2023-06-27 03:47:27 +03:00
M. Mert Yildiran
16f1e116c0 Template the annotations in all resources 2023-06-27 03:45:47 +03:00
M. Mert Yildiran
2d625eccaa Rename resourcelabels to labels 2023-06-27 03:33:46 +03:00
M. Mert Yildiran
19443501da Have consistent key style in values.yaml 2023-06-27 03:32:03 +03:00
M. Mert Yildiran
4ef91a2701 Template the controller field in IngressClass resource 2023-06-27 03:27:40 +03:00
M. Mert Yildiran
bc031be0fe 🔧 Add generate-helm-values Makefile rule 2023-06-27 03:26:20 +03:00
M. Mert Yildiran
f32a7d97ec Template the ingressClassName field in Ingress resource 2023-06-27 03:25:58 +03:00
M. Mert Yildiran
aeda024986 Remove the unnecessary single quotes from the Helm templates 2023-06-27 03:19:45 +03:00
M. Mert Yildiran
98198b9733 Remove the unused labels from the resources 2023-06-27 02:48:46 +03:00
M. Mert Yildiran
0bf7c83b86 Use toYaml and nindent instead of range in the Helm templates 2023-06-27 02:45:55 +03:00
M. Mert Yildiran
a8df589076 Bring back the functionality of nodeselectorterms field into the Helm chart 2023-06-27 01:32:16 +03:00
M. Mert Yildiran
c07f1851b3 🔥 Delete the manifests and add complete.yaml instead 2023-06-27 01:22:30 +03:00
M. Mert Yildiran
5c4c913a27 Bring back the functionality of resourcelabels field into the Helm chart 2023-06-27 01:12:04 +03:00
M. Mert Yildiran
71111248bd Add icon field to Chart.yaml 2023-06-27 00:30:53 +03:00
M. Mert Yildiran
5efb48f0c5 Bring back the functionality of ignoretainted field into the Helm chart 2023-06-27 00:15:04 +03:00
M. Mert Yildiran
cc980dbaf8 Print a warning if the storage limit modified while persistent storage is disabled and default its value 2023-06-23 02:08:42 +03:00
M. Mert Yildiran
1afe27e969 Add S3 URL support to --pcap flag 2023-06-22 20:59:14 +03:00
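An s3:// URI carries the bucket in the host position and the key in the path, so a small parser is enough to split it; this helper is illustrative, not the repository's actual code:

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

// parseS3URI splits an s3://bucket/key URI into bucket and key; an empty
// key can be taken to mean "the whole bucket".
func parseS3URI(uri string) (bucket, key string, err error) {
	u, err := url.Parse(uri)
	if err != nil {
		return "", "", err
	}
	if u.Scheme != "s3" {
		return "", "", fmt.Errorf("not an S3 URI: %s", uri)
	}
	return u.Host, strings.TrimPrefix(u.Path, "/"), nil
}

func main() {
	bucket, key, _ := parseS3URI("s3://my-bucket/captures/dump.pcap")
	fmt.Println(bucket, key) // my-bucket captures/dump.pcap
}
```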
M. Mert Yildiran
8df5e015c5 Call os.Exit if the Helm install fails 2023-06-21 17:11:03 +03:00
M. Mert Yildiran
6b898077f1 ⬆️ Bump the Helm chart version 2023-06-21 17:04:25 +03:00
Victor Login
e93cd978e8 Update TLS for ingress (#1367) 2023-06-21 17:02:44 +03:00
M. Mert Yildiran
bada6dae68 🐛 Fix <len .Values.tap.namespaces>: error calling len: len of nil pointer Helm install error 2023-06-20 22:14:06 +03:00
M. Mert Yildiran
8814e08871 ⬆️ Bump the Helm chart version 2023-06-19 23:59:38 +03:00
M. Mert Yildiran
6b7a94a850 Revert values.yaml 2023-06-19 02:40:07 +03:00
M. Mert Yildiran
7b004e7a1f Change GetLocalhostOnPort method to GetProxyOnPort 2023-06-19 02:19:52 +03:00
M. Mert Yildiran
836b87d517 Template the SCRIPTING_ENV env in Hub pod (Helm) 2023-06-19 01:46:51 +03:00
M. Mert Yildiran
646da4810d Allow license key holders to bypass the auth 2023-06-19 01:44:01 +03:00
Alon Girmonsky
a6d349a8fa Update README.md
Changed the announcement part
2023-06-13 10:11:44 -07:00
Alon Girmonsky
9d58c662a8 Update README.md
Announcing Self-hosted Kubeshark
2023-06-13 10:01:28 -07:00
M. Mert Yildiran
e4a09be4e2 Change the PRO_URL constant 2023-06-07 01:09:30 +03:00
Alon Girmonsky
7208ed85d3 Update README.md
Adding a way to get the license where relevant.
2023-06-06 17:43:35 +03:00
M. Mert Yildiran
7a5bf83336 Use the Helm chart in tap command to install Kubeshark (#1362)
*  Use the Helm chart in `tap` command to install Kubeshark

* ⬆️ Set Go version to `1.19` in `go.mod` file

*  Add `Helm` struct`, `NewHelm` and `NewHelmDefault` methods

*  Better logging and error return

*  Pass the config as `values.yaml` to Helm install

* 🔥 Remove `helm-chart`, `manifests` and `check` commands

*  Run `go mod tidy`

* 🎨 Move `helm` package into `kubernetes` package

* 🔥 Remove `# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!` notice from the manifests and Helm templates

* 🔥 Remove the unused `GenerateApplyConfiguration` and `buildWithDefaultLabels` methods
2023-06-06 12:16:03 +03:00
Alon Girmonsky
87b8a067c9 changed com to co 2023-05-28 20:54:57 -07:00
M. Mert Yildiran
3fe765e072 ⬆️ Bump the Helm chart version 2023-05-26 00:07:46 +03:00
M. Mert Yildiran
a163f9cc0e Change the new release warning 2023-05-25 20:42:57 +03:00
M. Mert Yildiran
2edb987c07 Template REACT_APP_HUB_PORT in the Helm chart 2023-05-25 20:24:29 +03:00
M. Mert Yildiran
c0d7d0fe80 Update Helm README.md 2023-05-25 05:46:10 +03:00
M. Mert Yildiran
be5bd6a372 Template the AUTH_APPROVED_DOMAINS and certmanager.k8s.io/cluster-issuer
Also add `networking.k8s.io` to `apiGroups` in `ClusterRole`
2023-05-25 05:07:42 +03:00
M. Mert Yildiran
42df7aa42f Update the Certificate resource name 2023-05-24 06:32:48 +03:00
M. Mert Yildiran
9a9052198f ⬆️ Bump the Helm chart version 2023-05-24 05:44:18 +03:00
M. Mert Yildiran
2fb83c3642 Fix the Bash script 2023-05-24 04:18:27 +03:00
M. Mert Yildiran
d44674fe86 Update the secret name of certificate 2023-05-24 04:10:46 +03:00
M. Mert Yildiran
c57ed1efd3 Run kubeshark manifests --dump && kubeshark helm-chart 2023-05-24 04:04:34 +03:00
M. Mert Yildiran
c19cd00c77 Add CertManager field to IngressConfig and add an Ingress TLS example 2023-05-24 04:01:45 +03:00
M. Mert Yildiran
39f8d40b76 Revert " Add Refresh-Token to the list of Access-Control-Allow-Headers"
This reverts commit bf731073c8.
2023-05-24 02:10:48 +03:00
M. Mert Yildiran
bf731073c8 Add Refresh-Token to the list of Access-Control-Allow-Headers 2023-05-24 02:04:56 +03:00
M. Mert Yildiran
4bb68afaaf Add AuthConfig struct and pass domains in AUTH_APPROVED_DOMAINS environment variable 2023-05-24 01:50:59 +03:00
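Presumably the Hub reads that variable back into a slice; a minimal sketch, assuming a comma-separated encoding (the encoding is not confirmed by the commit):

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

func main() {
	// e.g. AUTH_APPROVED_DOMAINS="example.com,corp.example.org"
	// (the comma-separated encoding is an assumption)
	raw := os.Getenv("AUTH_APPROVED_DOMAINS")
	var domains []string
	if raw != "" {
		domains = strings.Split(raw, ",")
	}
	fmt.Println("approved domains:", domains)
}
```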
M. Mert Yildiran
2126fc83a7 🐛 Remove the cancel() call 2023-05-22 19:20:39 +03:00
M. Mert Yildiran
d0c1dbcd5e Print and open a different URL in case of Ingress is enabled 2023-05-17 03:57:27 +03:00
M. Mert Yildiran
ad9dfbce40 Add Ingress (#1357)
*  Add `Ingress`

*  Rewrite the target in `Ingress`

*  Fix the path of front pod in `Ingress`

*  Add `IngressConfig` struct

*  Generate the correct Helm chart based on `tap.ingress` field of `values.yaml`
2023-05-16 19:46:47 +03:00
M. Mert Yildiran
139336d4ee Template hostPort(s) in the Helm chart 2023-05-10 14:38:38 +03:00
M. Mert Yildiran
f68fed0de8 🐛 Fix the effect of proxy config port changes 2023-05-10 01:28:43 +03:00
M. Mert Yildiran
1d7d242e6c Generate the missing new line in 08-persistent-volume-claim.yaml 2023-05-09 00:00:07 +03:00
M. Mert Yildiran
aa904e23c7 Add --persistentstorage option to tap command 2023-05-08 23:57:22 +03:00
M. Mert Yildiran
baf0e65337 Template the Helm chart based on persistentstorage value 2023-05-08 23:52:14 +03:00
M. Mert Yildiran
a33a3467fc Add persistentstorage option 2023-05-08 00:50:56 +03:00
M. Mert Yildiran
a9b598bc41 ⬆️ Bump the Helm chart version 2023-05-04 21:45:55 +03:00
M. Mert Yildiran
0aee367ad5 Omit the license string in helm-chart and manifests commands 2023-05-04 21:37:55 +03:00
M. Mert Yildiran
8c7d9ea8fd Fix the updateLicense method 2023-05-04 21:33:38 +03:00
M. Mert Yildiran
fab0f713ed 🐛 Pass the license string 2023-05-04 21:18:34 +03:00
M. Mert Yildiran
2563cc1922 🐛 Fix the imagePullPolicy to imagepullpolicy in helm-chart command 2023-04-24 02:03:58 +03:00
Jay R. Wren
26c9f42eba 📚 Remove kubeshark tap -A example from README.md (#1339) 2023-04-21 18:35:24 -07:00
M. Mert Yildiran
00dd3a93df Update the Helm chart version 2023-04-20 21:20:50 +03:00
M. Mert Yildiran
d02293ab55 Add a workflow for publishing Helm chart 2023-04-20 21:20:07 +03:00
M. Mert Yildiran
60cfa92efb Apply the same Kubernetes tolerations to all pods 2023-04-20 20:27:22 +03:00
M. Mert Yildiran
01b187aaa3 Update a log message 2023-04-20 20:22:33 +03:00
M. Mert Yildiran
38d121556c Add storageclass option to config.yaml 2023-04-20 20:20:24 +03:00
M. Mert Yildiran
2d73b46b44 Change the Docker Hub repository in the badges to kubeshark/worker 2023-04-20 20:10:52 +03:00
M. Mert Yildiran
466b9099bd Ignore the Kubernetes version check in certain commands while creating the Kubernetes provider 2023-04-20 20:09:19 +03:00
M. Mert Yildiran
bbe3338c3c Rename 08-persistent-volume.yaml to 08-persistent-volume-claim.yaml 2023-04-20 20:04:47 +03:00
M. Mert Yildiran
2780791068 Clean YAML files before generating the new ones in manifests and helm-chart command 2023-04-20 04:00:37 +03:00
M. Mert Yildiran
e65656c1df 🔥 Delete permissionFiles folder 2023-04-20 03:52:15 +03:00
M. Mert Yildiran
df7d1ac10c Give the permission of listing or watching the persistentvolumeclaims to the ClusterRole 2023-04-20 03:01:25 +03:00
M. Mert Yildiran
c342885cae Set the default storage limit to 200Mi 2023-04-20 02:48:18 +03:00
M. Mert Yildiran
44adb397c1 🔥 Remove the old DaemonSet manifests 2023-04-20 00:26:01 +03:00
M. Mert Yildiran
657ea8570c Add PersistentVolumeClaim and mount it to worker DaemonSet 2023-04-20 00:09:22 +03:00
M. Mert Yildiran
686dd5fba1 🔥 Remove the -A flag and allnamespaces field from config.yaml 2023-04-19 20:52:28 +03:00
M. Mert Yildiran
90e6e99386 Run the manifests --dump and helm-chart commands 2023-04-19 20:30:11 +03:00
M. Mert Yildiran
aa9109df12 Update helm-chart command 2023-04-19 20:29:17 +03:00
M. Mert Yildiran
9a37781355 🔥 Remove the grace period 2023-04-19 05:18:59 +03:00
M. Mert Yildiran
5ce10b626f Pass every config through environment variables and don't make HTTP calls in first tap command 2023-04-18 03:21:23 +03:00
M. Mert Yildiran
26d75da588 Set the Helm chart version to 39.6 2023-04-13 01:57:26 +03:00
M. Mert Yildiran
95edac9f8f 📚 Remove the WIP notice in the Helm chart README.md 2023-04-13 01:53:04 +03:00
M. Mert Yildiran
f6c4d43eb1 Update a README.md(s) 2023-04-12 03:12:32 +03:00
M. Mert Yildiran
47b9cd0c8d Update the README.md(s) 2023-04-12 03:10:36 +03:00
M. Mert Yildiran
fb06545887 Add a header comment to generated manifests and Helm chart templates 2023-04-12 03:10:23 +03:00
M. Mert Yildiran
ea594ea70a Update helm-chart/README.md 2023-04-12 02:51:34 +03:00
M. Mert Yildiran
3cc543827a Fix all of the remaining issues in the Helm chart 2023-04-12 02:50:12 +03:00
M. Mert Yildiran
18addbb980 Fix the issues in Helm chart such that helm template succeeds 2023-04-12 02:12:12 +03:00
M. Mert Yildiran
d2b9bddf78 Do more Helm templating 2023-04-12 01:33:41 +03:00
M. Mert Yildiran
3ebf816a68 Generate Helm chart templates 2023-04-12 01:06:39 +03:00
M. Mert Yildiran
504ecc4f83 Generate values.yaml out of config.yaml 2023-04-11 23:01:29 +03:00
dependabot[bot]
562dff0d6c ⬆️ Bump github.com/docker/docker (#1333)
Bumps [github.com/docker/docker](https://github.com/docker/docker) from 20.10.22+incompatible to 20.10.24+incompatible.
- [Release notes](https://github.com/docker/docker/releases)
- [Commits](https://github.com/docker/docker/compare/v20.10.22...v20.10.24)

---
updated-dependencies:
- dependency-name: github.com/docker/docker
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: M. Mert Yildiran <me@mertyildiran.com>
2023-04-11 23:00:11 +03:00
M. Mert Yildiran
02990912b7 Move ResourceLabels and NodeSelectorTerms fields into TapConfig 2023-04-11 22:37:29 +03:00
M. Mert Yildiran
0aedc023aa Add README.md to helm-chart directory 2023-04-11 22:31:48 +03:00
M. Mert Yildiran
0801ea8c74 Generate Chart.yaml 2023-04-11 22:17:37 +03:00
M. Mert Yildiran
83be3558ed Copy the license into Helm chart 2023-04-11 19:29:19 +03:00
M. Mert Yildiran
100b397cdf Run kubeshark helm-chart 2023-04-11 19:21:04 +03:00
M. Mert Yildiran
c2cad11e0a Add helm-chart command 2023-04-11 19:20:04 +03:00
M. Mert Yildiran
c42481deb8 Add POD_REGEX, NAMESPACES, STORAGE_LIMIT and LICENSE environment variables to Hub 2023-04-11 18:40:34 +03:00
M. Mert Yildiran
39d1b77045 Fix the issues in worker DaemonSet 2023-04-11 02:33:17 +03:00
M. Mert Yildiran
f19db77228 Fix more issues in manifests command 2023-04-11 02:18:23 +03:00
M. Mert Yildiran
077fc6c126 Set the apiVersion in the manifests 2023-04-11 02:09:03 +03:00
M. Mert Yildiran
aeeb9e6c9f Run kubeshark manifests --dump and add the manifest files 2023-04-11 02:02:29 +03:00
M. Mert Yildiran
384ed4e16b Set YAML indent to 2 2023-04-11 01:57:04 +03:00
M. Mert Yildiran
5dafc015bb Add manifests command to generate Kubernetes manifests 2023-04-11 01:54:06 +03:00
M. Mert Yildiran
d1b17d4534 Build worker DaemonSet separately then apply it by converting it to ApplyConfiguration 2023-04-10 22:24:54 +03:00
M. Mert Yildiran
b9333e4d67 🔧 Add some useful kubectl commands to Makefile 2023-04-10 01:09:34 +03:00
M. Mert Yildiran
c962864d0b 🐛 Fix the clean command cause leftover ClusterRole and ClusterRoleBinding 2023-04-10 00:48:22 +03:00
M. Mert Yildiran
07b080e97a 🔥 Remove the unused methods in the kubernetes package 2023-04-01 21:36:48 +03:00
M. Mert Yildiran
e4684a10af Add --ignoreTainted flag to tap command 2023-03-27 16:26:09 +03:00
dependabot[bot]
8b5e55d53a ⬆️ Bump golang.org/x/crypto (#1326)
Bumps [golang.org/x/crypto](https://github.com/golang/crypto) from 0.0.0-20220208050332-20e1d8d225ab to 0.1.0.
- [Release notes](https://github.com/golang/crypto/releases)
- [Commits](https://github.com/golang/crypto/commits/v0.1.0)

---
updated-dependencies:
- dependency-name: golang.org/x/crypto
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-03-26 23:36:04 +03:00
dependabot[bot]
ee968adec5 ⬆️ Bump golang.org/x/net from 0.2.0 to 0.7.0 (#1325)
Bumps [golang.org/x/net](https://github.com/golang/net) from 0.2.0 to 0.7.0.
- [Release notes](https://github.com/golang/net/releases)
- [Commits](https://github.com/golang/net/compare/v0.2.0...v0.7.0)

---
updated-dependencies:
- dependency-name: golang.org/x/net
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-03-26 23:34:34 +03:00
dependabot[bot]
2e7fad77a9 ⬆️ Bump github.com/docker/distribution (#1324)
Bumps [github.com/docker/distribution](https://github.com/docker/distribution) from 2.7.1+incompatible to 2.8.0+incompatible.
- [Release notes](https://github.com/docker/distribution/releases)
- [Commits](https://github.com/docker/distribution/compare/v2.7.1...v2.8.0)

---
updated-dependencies:
- dependency-name: github.com/docker/distribution
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-03-26 23:32:40 +03:00
M. Mert Yildiran
7f6f710b3f 🐛 Fix selfnamespace issue by changing its location in the config and adding --selfnamespace flag to tap and clean commands 2023-03-26 23:26:35 +03:00
M. Mert Yildiran
261df8261f 🐛 Use tap config for console, pro, proxy and scripts commands 2023-03-26 21:40:49 +03:00
M. Mert Yildiran
e14bc8064b 🐛 Don't stop watching in case of an error occurs in scripts command 2023-03-24 02:58:49 +03:00
M. Mert Yildiran
6f3b2c6755 Update README.md 2023-03-19 21:41:26 +03:00
M. Mert Yildiran
bdaa626f30 🐛 Fix release.yml GitHub workflow 2023-03-19 21:20:01 +03:00
M. Mert Yildiran
e1ada1768d 🐛 Remove configpath field from the config and call os.MkdirAll before regenerating config 2023-03-19 20:58:20 +03:00
Christopher Redwine
b05dbed71e Upgrade github actions versions to latest and update deprecated options (#1319)
Co-authored-by: M. Mert Yildiran <me@mertyildiran.com>
2023-03-19 20:20:47 +03:00
M. Mert Yildiran
9b66600602 Update README.md 2023-03-19 20:09:49 +03:00
M. Mert Yildiran
d826b95687 Update README.md 2023-03-19 20:08:37 +03:00
M. Mert Yildiran
0dac046f57 🐛 Fix the issues in pro, console, proxy, config and scripts commands 2023-03-19 18:39:22 +03:00
Alon Girmonsky
abc506af1a top banner 2023-03-19 00:40:44 -07:00
M. Mert Yildiran
08f42a7b96 🐛 Replace another panic with a error log in pro command 2023-03-19 06:08:55 +03:00
M. Mert Yildiran
b90edfe1fb 🐛 Replace panic with a warning log in pro command 2023-03-19 06:06:44 +03:00
M. Mert Yildiran
c32a13094e 🐛 Remove a check that's forgotten to be removed 2023-03-18 16:52:50 +03:00
Alon Girmonsky
3f01b2de60 Removed the old 38.2 top message 2023-03-17 23:09:18 -07:00
M. Mert Yildiran
bdf81380d8 Remove the dots at the end of command flag descriptions 2023-03-18 00:47:44 +03:00
M. Mert Yildiran
889730e9e5 🔨 Viewer to Analyzer 2023-03-17 20:09:09 +03:00
Alon Girmonsky
3b3927ce3a Preparing for the release 2023-03-17 00:23:16 -07:00
Alon Girmonsky
8e07226060 Preparing for a new release 2023-03-17 00:19:01 -07:00
M. Mert Yildiran
b7f0dd7f3c 🔨 Rename the newly added customLabels field to resourceLabels in the config 2023-03-16 02:34:41 +03:00
M. Mert Yildiran
d9ec538aff Add customLabels field to config.yaml 2023-03-14 23:45:41 +03:00
M. Mert Yildiran
362b17dec4 🔥 Remove a log message 2023-03-13 22:54:10 +03:00
M. Mert Yildiran
4216f07ec7 🐛 os.Exit after returning from the HTTP handler 2023-03-13 22:51:48 +03:00
M. Mert Yildiran
a4d35599df Change some logs 2023-03-13 22:45:57 +03:00
M. Mert Yildiran
bcc1a36d71 👕 Fix the linter error 2023-03-13 22:42:56 +03:00
M. Mert Yildiran
b4a3a0451e Increase DefaultSleep value 2023-03-13 22:42:17 +03:00
M. Mert Yildiran
1a2892d46e Increase DefaultSleep value 2023-03-13 22:35:03 +03:00
M. Mert Yildiran
db8c9ec163 Ask for license key input 2023-03-10 18:41:38 +03:00
M. Mert Yildiran
b11619fbfa Panic if WriteConfig fails in pro command 2023-03-09 19:50:17 +03:00
M. Mert Yildiran
49ce15f9da Update some command descriptions 2023-03-09 19:48:42 +03:00
M. Mert Yildiran
65c5df3211 Add pro command to acquire a Pro license 2023-03-09 19:48:16 +03:00
M. Mert Yildiran
de7f598001 🐛 Fix an issue in the script title retrieval from the first comment once more 2023-03-08 01:04:20 +03:00
Christopher Redwine
28fa6e494f Upgrade deprecated set-output GitHub Actions syntax to avoid warnings (#1314)
Co-authored-by: M. Mert Yildiran <me@mertyildiran.com>
2023-03-08 00:29:32 +03:00
M. Mert Yildiran
0c68c0f99f 🐛 Fix an issue in the script title retrieval from the first comment 2023-03-08 00:14:35 +03:00
M. Mert Yildiran
cedb7bc8bc Watch scripts inside tap command 2023-03-07 20:21:28 +03:00
M. Mert Yildiran
ca35177b44 Change the log printing strategy in console command 2023-03-05 21:08:44 +03:00
M. Mert Yildiran
08b96c2663 👷 Fix brew tap reports 0.0 as version number 2023-03-05 21:00:35 +03:00
M. Mert Yildiran
606fddc776 🐛 Fix the log messages related to POST /scripts/env request 2023-03-04 14:12:03 +03:00
M. Mert Yildiran
9a95fa364c Change consts config field to env 2023-03-03 17:32:19 +03:00
M. Mert Yildiran
72a18871b1 Update Hub about the changes in the config in case of kubeshark tap re-run 2023-02-16 20:06:39 +03:00
M. Mert Yildiran
0a00dfbaf5 Add grace period 2023-02-16 00:58:01 +03:00
M. Mert Yildiran
b9a7cfb4fa Reduce DefaultSleep value 2023-02-16 00:49:45 +03:00
M. Mert Yildiran
f0b68cb44b Revert "Create Worker(s) before Hub" 2023-02-15 22:33:59 +03:00
M. Mert Yildiran
dcd42798c7 Establish a proxy in console command if it's not present 2023-02-15 00:56:33 +03:00
M. Mert Yildiran
384f62f94f Create Worker(s) before Hub and reduce the grace periods 2023-02-15 00:43:30 +03:00
M. Mert Yildiran
7312addb65 Change the key into index 2023-02-14 23:10:38 +03:00
M. Mert Yildiran
41ba509428 Add scripts command 2023-02-14 20:23:25 +03:00
M. Mert Yildiran
85da7f71ac Color the error messages in console command 2023-02-09 15:19:05 +03:00
M. Mert Yildiran
6ca727373c Fix the command description style 2023-02-09 15:11:08 +03:00
M. Mert Yildiran
f4c6613e6e Add console command 2023-02-09 15:09:52 +03:00
M. Mert Yildiran
37f615c9bb Add a grace period of a second before calling Hub endpoints 2023-02-07 12:44:32 +03:00
M. Mert Yildiran
ac9298d681 Log the POST to Hub calls even if debug is false 2023-02-07 12:43:31 +03:00
M. Mert Yildiran
2a20dc173c Call POST /scripts/done after the posting the scripts to mark the end 2023-02-07 12:25:41 +03:00
M. Mert Yildiran
a6dd98d241 Log the found scripts 2023-02-06 15:27:07 +03:00
M. Mert Yildiran
5d15667a03 POST storage limit to Hub and better error handling 2023-02-06 07:58:43 +03:00
M. Mert Yildiran
43e9e90f7d ⬆️ Upgrade to github.com/kubeshark/base@v0.6.3 2023-02-05 23:48:59 +03:00
M. Mert Yildiran
776f9ce9af Change consts type to map[string]interface{} 2023-02-05 22:55:02 +03:00
M. Mert Yildiran
e62ebea8d8 Try to load the config YAML from CWD first 2023-02-05 22:36:40 +03:00
M. Mert Yildiran
eee641082e Add ScriptingConfig and read JavaScript files from the source directory using github.com/robertkrimen/otto 2023-02-05 22:00:01 +03:00
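A rough sketch of loading scripts that way with otto (the directory name and the validate-only Compile step are assumptions, not the repository's actual code):

```go
package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"

	"github.com/robertkrimen/otto"
)

func main() {
	vm := otto.New()
	entries, err := os.ReadDir("scripts") // assumption: example source directory
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range entries {
		if filepath.Ext(e.Name()) != ".js" {
			continue
		}
		src, err := os.ReadFile(filepath.Join("scripts", e.Name()))
		if err != nil {
			log.Fatal(err)
		}
		// Compile validates the JavaScript without running it.
		if _, err := vm.Compile(e.Name(), string(src)); err != nil {
			log.Printf("invalid script %s: %v", e.Name(), err)
			continue
		}
		fmt.Println("loaded", e.Name())
	}
}
```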
M. Mert Yildiran
a1096cf2b9 Comment out GODEBUG=netdns=go environment variable 2023-02-05 09:02:13 +03:00
M. Mert Yildiran
1c31f5df01 🐛 Fix a data race in pod watching 2023-02-05 08:41:53 +03:00
M. Mert Yildiran
6e0f2aa10a 🔧 Add build-race Makefile rule 2023-02-05 08:39:09 +03:00
M. Mert Yildiran
22e8490b93 Add GODEBUG=netdns=go environment variable to worker 2023-02-04 03:27:12 +03:00
M. Mert Yildiran
17e038e703 Do POST /license after Hub is started 2023-01-31 22:31:44 +03:00
M. Mert Yildiran
4cb4ba568b Change a debug log message 2023-01-31 00:02:41 +03:00
M. Mert Yildiran
79cc2e70b6 Revert " Add community, edition and upgrade commands"
This reverts commit 4b2c678fa3.
2023-01-31 00:01:55 +03:00
M. Mert Yildiran
fe2423e9d9 Revert " Pull the images from public ECR repositories in case of the edition is not community"
This reverts commit 964d30bf80.
2023-01-31 00:01:43 +03:00
M. Mert Yildiran
964d30bf80 Pull the images from public ECR repositories in case of the edition is not community 2023-01-30 05:11:42 +03:00
M. Mert Yildiran
4b2c678fa3 Add community, edition and upgrade commands 2023-01-29 23:52:49 +03:00
M. Mert Yildiran
a9fdde4110 🐛 Fix the missing / in image URL 2023-01-29 03:30:23 +03:00
M. Mert Yildiran
4eeab41e6d Don't fail if self namespace does not exist 2023-01-29 01:57:05 +03:00
M. Mert Yildiran
dce7de6cc9 Treat Docker registry as an absolute prefix 2023-01-29 01:56:18 +03:00
84 changed files with 3341 additions and 2605 deletions

.github/workflows/helm.yml (vendored, new file, +35 lines)

@@ -0,0 +1,35 @@
on:
  push:
    tags:
      - '*'

name: Release Helm Charts

jobs:
  release:
    # depending on default permission settings for your org (contents being read-only or read-write for workloads), you will have to add permissions
    # see: https://docs.github.com/en/actions/security-guides/automatic-token-authentication#modifying-the-permissions-for-the-github_token
    permissions:
      contents: write
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: Configure Git
        run: |
          git config user.name "$GITHUB_ACTOR"
          git config user.email "$GITHUB_ACTOR@users.noreply.github.com"

      - name: Install Helm
        uses: azure/setup-helm@v3

      - name: Run chart-releaser
        uses: helm/chart-releaser-action@v1.5.0
        with:
          charts_dir: .
          charts_repo_url: https://kubeshark.github.io/kubeshark
        env:
          CR_TOKEN: "${{ secrets.HELM_TOKEN }}"


@@ -16,16 +16,18 @@ jobs:
     name: Golint
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
+      - name: Checkout
+        uses: actions/checkout@v3
         with:
           fetch-depth: 2
-      - uses: actions/setup-go@v2
+      - name: Set up Go
+        uses: actions/setup-go@v4
         with:
-          go-version: '^1.17'
+          go-version-file: 'go.mod'
       - name: Go lint
-        uses: golangci/golangci-lint-action@v2
+        uses: golangci/golangci-lint-action@v3
         with:
           version: latest
           args: --timeout=10m


@@ -14,21 +14,23 @@ jobs:
     name: Build and publish a new release
     runs-on: ubuntu-latest
     steps:
-      - name: Set up Go 1.17
-        uses: actions/setup-go@v2
-        with:
-          go-version: '1.17'
       - name: Check out the repo
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
+      - name: Set up Go
+        uses: actions/setup-go@v4
+        with:
+          go-version-file: 'go.mod'
       - name: Version
         id: version
         shell: bash
         run: |
-          echo "##[set-output name=tag;]$(echo ${GITHUB_REF#refs/*/})"
-          echo "##[set-output name=build_timestamp;]$(echo $(date +%s))"
-          echo "##[set-output name=branch;]$(echo ${GITHUB_REF#refs/heads/})"
+          {
+            echo "tag=${GITHUB_REF#refs/*/}"
+            echo "build_timestamp=$(date +%s)"
+            echo "branch=${GITHUB_REF#refs/heads/}"
+          } >> "$GITHUB_OUTPUT"
       - name: Build
         run: make build-all VER='${{ steps.version.outputs.tag }}' BUILD_TIMESTAMP='${{ steps.version.outputs.build_timestamp }}'
@@ -53,23 +55,35 @@ jobs:
     needs: [release]
     steps:
       - name: Checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
         with:
           fetch-depth: 0
+      - name: Version
+        id: version
+        shell: bash
+        run: |
+          {
+            echo "tag=${GITHUB_REF#refs/*/}"
+            echo "build_timestamp=$(date +%s)"
+            echo "branch=${GITHUB_REF#refs/heads/}"
+          } >> "$GITHUB_OUTPUT"
       - name: Fetch all tags
         run: git fetch --force --tags
       - name: Set up Go
-        uses: actions/setup-go@v2
+        uses: actions/setup-go@v4
         with:
-          go-version: 1.17
+          go-version-file: 'go.mod'
       - name: Run GoReleaser
-        uses: goreleaser/goreleaser-action@v2
+        uses: goreleaser/goreleaser-action@v4
         with:
           distribution: goreleaser
           version: ${{ env.GITHUB_REF_NAME }}
-          args: release --rm-dist
+          args: release --clean
         env:
           GITHUB_TOKEN: ${{ secrets.HOMEBREW_TOKEN }}
           VER: ${{ steps.version.outputs.tag }}
           BUILD_TIMESTAMP: ${{ steps.version.outputs.build_timestamp }}


@@ -15,17 +15,17 @@ jobs:
     timeout-minutes: 20
     steps:
       - name: Check out code into the Go module directory
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
         with:
           fetch-depth: 2
-      - name: Set up Go 1.17
-        uses: actions/setup-go@v2
+      - name: Set up Go
+        uses: actions/setup-go@v4
         with:
-          go-version: '^1.17'
+          go-version-file: 'go.mod'
       - name: Test
         run: make test
       - name: Upload coverage to Codecov
-        uses: codecov/codecov-action@v2
+        uses: codecov/codecov-action@v3

.gitignore (vendored, +7 lines)

@@ -56,4 +56,11 @@ cypress.env.json
 # Object files
 *.o
+
 # Binaries
 bin
+
+# Scripts
+scripts/
+
+# CWD config YAML
+kubeshark.yaml


@@ -1,4 +1,4 @@
-![Kubeshark: The API Traffic Viewer for Kubernetes](https://raw.githubusercontent.com/kubeshark/assets/master/svg/kubeshark-logo.svg)
+![Kubeshark: The API Traffic Analyzer for Kubernetes](https://raw.githubusercontent.com/kubeshark/assets/master/svg/kubeshark-logo.svg)
 
 # Contributing to Kubeshark


@@ -15,15 +15,23 @@ help: ## Print this help message.
 	@awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)
 
 build-debug: ## Build for debuging.
+	export CGO_ENABLED=1
 	export GCLFAGS='-gcflags="all=-N -l"'
 	${MAKE} build-base
 
 build: ## Build.
+	export CGO_ENABLED=0
 	export LDFLAGS_EXT='-extldflags=-static -s -w'
 	${MAKE} build-base
 
+build-race: ## Build with -race flag.
+	export CGO_ENABLED=1
+	export GCLFAGS='-race'
+	export LDFLAGS_EXT='-extldflags=-static -s -w'
+	${MAKE} build-base
+
 build-base: ## Build binary (select the platform via GOOS / GOARCH env variables).
-	CGO_ENABLED=0 go build ${GCLFAGS} -ldflags="${LDFLAGS_EXT} \
+	go build ${GCLFAGS} -ldflags="${LDFLAGS_EXT} \
 	-X 'github.com/kubeshark/kubeshark/misc.GitCommitHash=$(COMMIT_HASH)' \
 	-X 'github.com/kubeshark/kubeshark/misc.Branch=$(GIT_BRANCH)' \
 	-X 'github.com/kubeshark/kubeshark/misc.BuildTimestamp=$(BUILD_TIMESTAMP)' \
@@ -33,6 +41,7 @@ build-base: ## Build binary (select the platform via GOOS / GOARCH env variables
 	cd bin && shasum -a 256 kubeshark_${SUFFIX} > kubeshark_${SUFFIX}.sha256
 
 build-all: ## Build for all supported platforms.
+	export CGO_ENABLED=0
 	echo "Compiling for every OS and Platform" && \
 	mkdir -p bin && sed s/_VER_/$(VER)/g RELEASE.md.TEMPLATE > bin/README.md && \
 	$(MAKE) build GOOS=linux GOARCH=amd64 && \
@@ -53,3 +62,15 @@ test: ## Run cli tests.
 lint: ## Lint the source code.
 	golangci-lint run
+
+kubectl-view-all-resources: ## This command outputs all Kubernetes resources using YAML format and pipes it to VS Code
+	./kubectl.sh view-all-resources
+
+kubectl-view-kubeshark-resources: ## This command outputs all Kubernetes resources in "kubeshark" namespace using YAML format and pipes it to VS Code
+	./kubectl.sh view-kubeshark-resources
+
+generate-helm-values: ## Generate the Helm values from config.yaml
+	./bin/kubeshark__ config > ./helm-chart/values.yaml
+
+generate-manifests: ## Generate the manifests from the Helm chart using default configuration
+	helm template ./helm-chart > ./manifests/complete.yaml


@@ -1,18 +1,15 @@
 <p align="center">
-  <img src="https://raw.githubusercontent.com/kubeshark/assets/master/svg/kubeshark-logo.svg" alt="Kubeshark: Traffic viewer for Kubernetes." height="128px"/>
+  <img src="https://raw.githubusercontent.com/kubeshark/assets/master/svg/kubeshark-logo.svg" alt="Kubeshark: Traffic analyzer for Kubernetes." height="128px"/>
 </p>
 
 <p align="center">
   <a href="https://github.com/kubeshark/kubeshark/blob/main/LICENSE">
     <img alt="GitHub License" src="https://img.shields.io/github/license/kubeshark/kubeshark?logo=GitHub&style=flat-square">
   </a>
   <a href="https://github.com/kubeshark/kubeshark/releases/latest">
     <img alt="GitHub Latest Release" src="https://img.shields.io/github/v/release/kubeshark/kubeshark?logo=GitHub&style=flat-square">
   </a>
-  <a href="https://hub.docker.com/r/kubeshark/kubeshark">
+  <a href="https://hub.docker.com/r/kubeshark/worker">
     <img alt="Docker pulls" src="https://img.shields.io/docker/pulls/kubeshark/kubeshark?color=%23099cec&logo=Docker&style=flat-square">
   </a>
-  <a href="https://hub.docker.com/r/kubeshark/kubeshark">
+  <a href="https://hub.docker.com/r/kubeshark/worker">
     <img alt="Image size" src="https://img.shields.io/docker/image-size/kubeshark/kubeshark/latest?logo=Docker&style=flat-square">
   </a>
   <a href="https://discord.gg/WkvRGMUcx7">
@@ -25,11 +22,15 @@
 <p align="center">
   <b>
-    <a href="https://github.com/kubeshark/kubeshark/releases/latest">V38.2</a> is out with <a href="https://docs.kubeshark.co/en/pcap_export_import">PCAP</a> export, <a href="https://docs.kubeshark.co/en/dns">DNS</a>, <a href="https://docs.kubeshark.co/en/service_map">Identity-aware Service Map</a> and so much more. Read about it <a href="https://kubeshark.co/pcap-or-it-didnt-happen">here</a>.
+    <span>NEW: Introducing </span>
+    <a href="https://docs.kubeshark.co/en/self_hosted">self-hosted Kubeshark</a>, with
+    <a href="https://docs.kubeshark.co/en/install#helm">Helm</a>,
+    <a href="https://docs.kubeshark.co/en/self_hosted">Ingress & Authentication</a>. Read more about it
+    <a href="https://kubeshark.co/self-hosting">here</a>.
   </b>
 </p>
 
-**Kubeshark** is an API Traffic Viewer for [**Kubernetes**](https://kubernetes.io/) providing real-time, protocol-aware visibility into Kubernetes internal network, capturing, dissecting and monitoring all traffic and payloads going in, out and across containers, pods, nodes and clusters.
+**Kubeshark** is an API Traffic Analyzer for [**Kubernetes**](https://kubernetes.io/) providing real-time, protocol-level visibility into Kubernetes internal network, capturing and monitoring all traffic and payloads going in, out and across containers, pods, nodes and clusters.
 
 ![Simple UI](https://github.com/kubeshark/assets/raw/master/png/kubeshark-ui.png)
@@ -43,10 +44,6 @@ Download **Kubeshark**'s binary distribution [latest release](https://github.com
 kubeshark tap
 ```
 
-```shell
-kubeshark tap -A
-```
-
 ```shell
 kubeshark tap -n sock-shop "(catalo*|front-end*)"
 ```


@@ -1,21 +0,0 @@
package cmd

import (
	"fmt"

	"github.com/kubeshark/kubeshark/misc"
	"github.com/spf13/cobra"
)

var checkCmd = &cobra.Command{
	Use:   "check",
	Short: fmt.Sprintf("Check the %s resources for potential problems", misc.Software),
	RunE: func(cmd *cobra.Command, args []string) error {
		runCheck()
		return nil
	},
}

func init() {
	rootCmd.AddCommand(checkCmd)
}


@@ -1,28 +0,0 @@
package check

import (
	"github.com/kubeshark/kubeshark/config"
	"github.com/kubeshark/kubeshark/kubernetes"
	"github.com/kubeshark/kubeshark/semver"
	"github.com/rs/zerolog/log"
)

func KubernetesApi() (*kubernetes.Provider, *semver.SemVersion, bool) {
	log.Info().Str("procedure", "kubernetes-api").Msg("Checking:")

	kubernetesProvider, err := kubernetes.NewProvider(config.Config.KubeConfigPath(), config.Config.Kube.Context)
	if err != nil {
		log.Error().Err(err).Msg("Can't initialize the client!")
		return nil, nil, false
	}
	log.Info().Msg("Initialization of the client is passed.")

	kubernetesVersion, err := kubernetesProvider.GetKubernetesVersion()
	if err != nil {
		log.Error().Err(err).Msg("Can't query the Kubernetes API!")
		return nil, nil, false
	}
	log.Info().Msg("Querying the Kubernetes API is passed.")

	return kubernetesProvider, kubernetesVersion, true
}


@@ -1,91 +0,0 @@
package check

import (
	"context"
	"embed"
	"fmt"

	"github.com/kubeshark/kubeshark/config"
	"github.com/kubeshark/kubeshark/kubernetes"
	"github.com/rs/zerolog/log"
	rbac "k8s.io/api/rbac/v1"
	"k8s.io/client-go/kubernetes/scheme"
)

func KubernetesPermissions(ctx context.Context, embedFS embed.FS, kubernetesProvider *kubernetes.Provider) bool {
	log.Info().Str("procedure", "kubernetes-permissions").Msg("Checking:")

	var filePath string
	if config.Config.IsNsRestrictedMode() {
		filePath = "permissionFiles/permissions-ns-tap.yaml"
	} else {
		filePath = "permissionFiles/permissions-all-namespaces-tap.yaml"
	}

	data, err := embedFS.ReadFile(filePath)
	if err != nil {
		log.Error().Err(err).Msg("While checking Kubernetes permissions!")
		return false
	}

	decode := scheme.Codecs.UniversalDeserializer().Decode
	obj, _, err := decode(data, nil, nil)
	if err != nil {
		log.Error().Err(err).Msg("While checking Kubernetes permissions!")
		return false
	}

	switch resource := obj.(type) {
	case *rbac.Role:
		return checkRulesPermissions(ctx, kubernetesProvider, resource.Rules, config.Config.SelfNamespace)
	case *rbac.ClusterRole:
		return checkRulesPermissions(ctx, kubernetesProvider, resource.Rules, "")
	}

	log.Error().Msg("While checking Kubernetes permissions! Resource of types 'Role' or 'ClusterRole' are not found in permission files.")
	return false
}

func checkRulesPermissions(ctx context.Context, kubernetesProvider *kubernetes.Provider, rules []rbac.PolicyRule, namespace string) bool {
	permissionsExist := true

	for _, rule := range rules {
		for _, group := range rule.APIGroups {
			for _, resource := range rule.Resources {
				for _, verb := range rule.Verbs {
					exist, err := kubernetesProvider.CanI(ctx, namespace, resource, verb, group)
					permissionsExist = checkPermissionExist(group, resource, verb, namespace, exist, err) && permissionsExist
				}
			}
		}
	}

	return permissionsExist
}

func checkPermissionExist(group string, resource string, verb string, namespace string, exist bool, err error) bool {
	var groupAndNamespace string
	if group != "" && namespace != "" {
		groupAndNamespace = fmt.Sprintf("in api group '%v' and namespace '%v'", group, namespace)
	} else if group != "" {
		groupAndNamespace = fmt.Sprintf("in api group '%v'", group)
	} else if namespace != "" {
		groupAndNamespace = fmt.Sprintf("in namespace '%v'", namespace)
	}

	if err != nil {
		log.Error().
			Str("verb", verb).
			Str("resource", resource).
			Str("group-and-namespace", groupAndNamespace).
			Err(err).
			Msg("While checking Kubernetes permissions!")
		return false
	} else if !exist {
		log.Error().Msg(fmt.Sprintf("Can't %v %v %v", verb, resource, groupAndNamespace))
		return false
	}

	log.Info().Msg(fmt.Sprintf("Can %v %v %v", verb, resource, groupAndNamespace))
	return true
}


@@ -1,118 +0,0 @@
package check

import (
	"context"
	"fmt"

	"github.com/kubeshark/kubeshark/config"
	"github.com/kubeshark/kubeshark/kubernetes"
	"github.com/rs/zerolog/log"
)

func KubernetesResources(ctx context.Context, kubernetesProvider *kubernetes.Provider) bool {
	log.Info().Str("procedure", "k8s-components").Msg("Checking:")

	exist, err := kubernetesProvider.DoesNamespaceExist(ctx, config.Config.SelfNamespace)
	allResourcesExist := checkResourceExist(config.Config.SelfNamespace, "namespace", exist, err)

	exist, err = kubernetesProvider.DoesServiceAccountExist(ctx, config.Config.SelfNamespace, kubernetes.ServiceAccountName)
	allResourcesExist = checkResourceExist(kubernetes.ServiceAccountName, "service account", exist, err) && allResourcesExist

	if config.Config.IsNsRestrictedMode() {
		exist, err = kubernetesProvider.DoesRoleExist(ctx, config.Config.SelfNamespace, kubernetes.RoleName)
		allResourcesExist = checkResourceExist(kubernetes.RoleName, "role", exist, err) && allResourcesExist

		exist, err = kubernetesProvider.DoesRoleBindingExist(ctx, config.Config.SelfNamespace, kubernetes.RoleBindingName)
		allResourcesExist = checkResourceExist(kubernetes.RoleBindingName, "role binding", exist, err) && allResourcesExist
	} else {
		exist, err = kubernetesProvider.DoesClusterRoleExist(ctx, kubernetes.ClusterRoleName)
		allResourcesExist = checkResourceExist(kubernetes.ClusterRoleName, "cluster role", exist, err) && allResourcesExist

		exist, err = kubernetesProvider.DoesClusterRoleBindingExist(ctx, kubernetes.ClusterRoleBindingName)
		allResourcesExist = checkResourceExist(kubernetes.ClusterRoleBindingName, "cluster role binding", exist, err) && allResourcesExist
	}

	exist, err = kubernetesProvider.DoesServiceExist(ctx, config.Config.SelfNamespace, kubernetes.HubServiceName)
	allResourcesExist = checkResourceExist(kubernetes.HubServiceName, "service", exist, err) && allResourcesExist

	allResourcesExist = checkPodResourcesExist(ctx, kubernetesProvider) && allResourcesExist

	return allResourcesExist
}

func checkPodResourcesExist(ctx context.Context, kubernetesProvider *kubernetes.Provider) bool {
	if pods, err := kubernetesProvider.ListPodsByAppLabel(ctx, config.Config.SelfNamespace, kubernetes.HubPodName); err != nil {
		log.Error().
			Str("name", kubernetes.HubPodName).
			Err(err).
			Msg("While checking if pod is running!")
		return false
	} else if len(pods) == 0 {
		log.Error().
			Str("name", kubernetes.HubPodName).
			Msg("Pod doesn't exist!")
		return false
	} else if !kubernetes.IsPodRunning(&pods[0]) {
		log.Error().
			Str("name", kubernetes.HubPodName).
			Msg("Pod is not running!")
		return false
	}

	log.Info().
		Str("name", kubernetes.HubPodName).
		Msg("Pod is running.")

	if pods, err := kubernetesProvider.ListPodsByAppLabel(ctx, config.Config.SelfNamespace, kubernetes.WorkerPodName); err != nil {
		log.Error().
			Str("name", kubernetes.WorkerPodName).
			Err(err).
			Msg("While checking if pods are running!")
		return false
	} else {
		workers := 0
		notRunningWorkers := 0
		for _, pod := range pods {
			workers += 1
			if !kubernetes.IsPodRunning(&pod) {
				notRunningWorkers += 1
			}
		}

		if notRunningWorkers > 0 {
			log.Error().
				Str("name", kubernetes.WorkerPodName).
				Msg(fmt.Sprintf("%d/%d pods are not running!", notRunningWorkers, workers))
			return false
		}

		log.Info().
			Str("name", kubernetes.WorkerPodName).
			Msg(fmt.Sprintf("All %d pods are running.", workers))
		return true
	}
}

func checkResourceExist(resourceName string, resourceType string, exist bool, err error) bool {
	if err != nil {
		log.Error().
			Str("name", resourceName).
			Str("type", resourceType).
			Err(err).
			Msg("Checking if resource exists!")
		return false
	} else if !exist {
		log.Error().
			Str("name", resourceName).
			Str("type", resourceType).
			Msg("Resource doesn't exist!")
		return false
	}

	log.Info().
		Str("name", resourceName).
		Str("type", resourceType).
		Msg("Resource exist.")
	return true
}


@@ -1,22 +0,0 @@
package check

import (
	"fmt"

	"github.com/kubeshark/kubeshark/kubernetes"
	"github.com/kubeshark/kubeshark/semver"
	"github.com/kubeshark/kubeshark/utils"
	"github.com/rs/zerolog/log"
)

func KubernetesVersion(kubernetesVersion *semver.SemVersion) bool {
	log.Info().Str("procedure", "kubernetes-version").Msg("Checking:")

	if err := kubernetes.ValidateKubernetesVersion(kubernetesVersion); err != nil {
		log.Error().Str("k8s-version", string(*kubernetesVersion)).Err(err).Msg(fmt.Sprintf(utils.Red, "The cluster does not have the minimum required Kubernetes API version!"))
		return false
	}

	log.Info().Str("k8s-version", string(*kubernetesVersion)).Msg("Minimum required Kubernetes API version is passed.")
	return true
}


@@ -1,40 +0,0 @@
package check

import (
	"github.com/kubeshark/kubeshark/config"
	"github.com/kubeshark/kubeshark/internal/connect"
	"github.com/kubeshark/kubeshark/kubernetes"
	"github.com/rs/zerolog/log"
)

func ServerConnection(kubernetesProvider *kubernetes.Provider) bool {
	log.Info().Str("procedure", "server-connectivity").Msg("Checking:")

	var connectedToHub, connectedToFront bool

	if err := checkProxy(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort), "/echo", kubernetesProvider); err != nil {
		log.Error().Err(err).Msg("Couldn't connect to Hub using proxy!")
	} else {
		connectedToHub = true
		log.Info().Msg("Connected successfully to Hub using proxy.")
	}

	if err := checkProxy(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Front.SrcPort), "", kubernetesProvider); err != nil {
		log.Error().Err(err).Msg("Couldn't connect to Front using proxy!")
	} else {
		connectedToFront = true
		log.Info().Msg("Connected successfully to Front using proxy.")
	}

	return connectedToHub && connectedToFront
}

func checkProxy(serverUrl string, path string, kubernetesProvider *kubernetes.Provider) error {
	log.Info().Str("url", serverUrl).Msg("Connecting:")

	connector := connect.NewConnector(serverUrl, connect.DefaultRetries, connect.DefaultTimeout)
	if err := connector.TestConnection(path); err != nil {
		return err
	}

	return nil
}


@@ -1,53 +0,0 @@
package cmd

import (
	"context"
	"embed"
	"fmt"
	"os"

	"github.com/kubeshark/kubeshark/cmd/check"
	"github.com/kubeshark/kubeshark/misc"
	"github.com/kubeshark/kubeshark/utils"
	"github.com/rs/zerolog/log"
)

var (
	//go:embed permissionFiles
	embedFS embed.FS
)

func runCheck() {
	log.Info().Msg(fmt.Sprintf("Checking the %s resources...", misc.Software))

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel() // cancel will be called when this function exits

	kubernetesProvider, kubernetesVersion, checkPassed := check.KubernetesApi()

	if checkPassed {
		checkPassed = check.KubernetesVersion(kubernetesVersion)
	}

	if checkPassed {
		checkPassed = check.KubernetesPermissions(ctx, embedFS, kubernetesProvider)
	}

	if checkPassed {
		checkPassed = check.KubernetesResources(ctx, kubernetesProvider)
	}

	if checkPassed {
		checkPassed = check.ServerConnection(kubernetesProvider)
	}

	if checkPassed {
		log.Info().Msg(fmt.Sprintf(utils.Green, "All checks are passed."))
	} else {
		log.Error().
			Str("command1", fmt.Sprintf("%s %s", misc.Program, cleanCmd.Use)).
			Str("command2", fmt.Sprintf("%s %s", misc.Program, tapCmd.Use)).
			Msg(fmt.Sprintf(utils.Red, fmt.Sprintf("There are issues in your %s resources! Run these commands:", misc.Software)))
		os.Exit(1)
	}
}


@@ -3,7 +3,12 @@ package cmd
 import (
 	"fmt"
 
+	"github.com/creasty/defaults"
+	"github.com/kubeshark/kubeshark/config"
+	"github.com/kubeshark/kubeshark/config/configStructs"
+	"github.com/kubeshark/kubeshark/kubernetes/helm"
 	"github.com/kubeshark/kubeshark/misc"
+	"github.com/rs/zerolog/log"
 	"github.com/spf13/cobra"
 )
@@ -11,11 +16,27 @@ var cleanCmd = &cobra.Command{
 	Use:   "clean",
 	Short: fmt.Sprintf("Removes all %s resources", misc.Software),
 	RunE: func(cmd *cobra.Command, args []string) error {
-		performCleanCommand()
+		resp, err := helm.NewHelm(
+			config.Config.Tap.Release.Repo,
+			config.Config.Tap.Release.Name,
+			config.Config.Tap.Release.Namespace,
+		).Uninstall()
+		if err != nil {
+			log.Error().Err(err).Send()
+		} else {
+			log.Info().Msgf("Uninstalled the Helm release: %s", resp.Release.Name)
+		}
 		return nil
 	},
 }
 
 func init() {
 	rootCmd.AddCommand(cleanCmd)
+
+	defaultTapConfig := configStructs.TapConfig{}
+	if err := defaults.Set(&defaultTapConfig); err != nil {
+		log.Debug().Err(err).Send()
+	}
+
+	cleanCmd.Flags().StringP(configStructs.ReleaseNamespaceLabel, "s", defaultTapConfig.Release.Namespace, "Release namespace of Kubeshark")
 }


@@ -1,14 +0,0 @@
package cmd

import (
	"github.com/kubeshark/kubeshark/config"
)

func performCleanCommand() {
	kubernetesProvider, err := getKubernetesProviderForCli()
	if err != nil {
		return
	}

	finishSelfExecution(kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.SelfNamespace, false)
}


@@ -14,12 +14,11 @@ import (
 	"github.com/kubeshark/kubeshark/kubernetes"
 	"github.com/kubeshark/kubeshark/misc"
 	"github.com/kubeshark/kubeshark/misc/fsUtils"
-	"github.com/kubeshark/kubeshark/resources"
 	"github.com/rs/zerolog/log"
 )
 
 func startProxyReportErrorIfAny(kubernetesProvider *kubernetes.Provider, ctx context.Context, serviceName string, podName string, proxyPortLabel string, srcPort uint16, dstPort uint16, healthCheck string) {
-	httpServer, err := kubernetes.StartProxy(kubernetesProvider, config.Config.Tap.Proxy.Host, srcPort, config.Config.SelfNamespace, serviceName)
+	httpServer, err := kubernetes.StartProxy(kubernetesProvider, config.Config.Tap.Proxy.Host, srcPort, config.Config.Tap.Release.Namespace, serviceName)
 	if err != nil {
 		log.Error().
 			Err(errormessage.FormatError(err)).
@@ -27,7 +26,7 @@ func startProxyReportErrorIfAny(kubernetesProvider *kubernetes.Provider, ctx con
 		return
 	}
 
-	connector := connect.NewConnector(kubernetes.GetLocalhostOnPort(srcPort), connect.DefaultRetries, connect.DefaultTimeout)
+	connector := connect.NewConnector(kubernetes.GetProxyOnPort(srcPort), connect.DefaultRetries, connect.DefaultTimeout)
 	if err := connector.TestConnection(healthCheck); err != nil {
 		log.Warn().
 			Str("service", serviceName).
@@ -39,7 +38,7 @@ func startProxyReportErrorIfAny(kubernetesProvider *kubernetes.Provider, ctx con
 	}
 
 	podRegex, _ := regexp.Compile(podName)
-	if _, err := kubernetes.NewPortForward(kubernetesProvider, config.Config.SelfNamespace, podRegex, srcPort, dstPort, ctx); err != nil {
+	if _, err := kubernetes.NewPortForward(kubernetesProvider, config.Config.Tap.Release.Namespace, podRegex, srcPort, dstPort, ctx); err != nil {
 		log.Error().
 			Str("pod-regex", podRegex.String()).
 			Err(errormessage.FormatError(err)).
@@ -47,7 +46,7 @@ func startProxyReportErrorIfAny(kubernetesProvider *kubernetes.Provider, ctx con
 		return
 	}
 
-	connector = connect.NewConnector(kubernetes.GetLocalhostOnPort(srcPort), connect.DefaultRetries, connect.DefaultTimeout)
+	connector = connect.NewConnector(kubernetes.GetProxyOnPort(srcPort), connect.DefaultRetries, connect.DefaultTimeout)
 	if err := connector.TestConnection(healthCheck); err != nil {
 		log.Error().
 			Str("service", serviceName).
@@ -58,7 +57,7 @@ func startProxyReportErrorIfAny(kubernetesProvider *kubernetes.Provider, ctx con
 	}
 }
 
-func getKubernetesProviderForCli() (*kubernetes.Provider, error) {
+func getKubernetesProviderForCli(silent bool, dontCheckVersion bool) (*kubernetes.Provider, error) {
 	kubeConfigPath := config.Config.KubeConfigPath()
 	kubernetesProvider, err := kubernetes.NewProvider(kubeConfigPath, config.Config.Kube.Context)
 	if err != nil {
@@ -66,22 +65,26 @@ func getKubernetesProviderForCli() (*kubernetes.Provider, error) {
 		return nil, err
 	}
 
-	log.Info().Str("path", kubeConfigPath).Msg("Using kubeconfig:")
+	if !silent {
+		log.Info().Str("path", kubeConfigPath).Msg("Using kubeconfig:")
+	}
 
 	if err := kubernetesProvider.ValidateNotProxy(); err != nil {
 		handleKubernetesProviderError(err)
 		return nil, err
 	}
 
-	kubernetesVersion, err := kubernetesProvider.GetKubernetesVersion()
-	if err != nil {
-		handleKubernetesProviderError(err)
-		return nil, err
-	}
+	if !dontCheckVersion {
+		kubernetesVersion, err := kubernetesProvider.GetKubernetesVersion()
+		if err != nil {
+			handleKubernetesProviderError(err)
+			return nil, err
+		}
 
-	if err := kubernetes.ValidateKubernetesVersion(kubernetesVersion); err != nil {
-		handleKubernetesProviderError(err)
-		return nil, err
+		if err := kubernetes.ValidateKubernetesVersion(kubernetesVersion); err != nil {
+			handleKubernetesProviderError(err)
+			return nil, err
+		}
 	}
 
 	return kubernetesProvider, nil
@@ -96,13 +99,10 @@ func handleKubernetesProviderError(err error) {
 	}
 }
 
-func finishSelfExecution(kubernetesProvider *kubernetes.Provider, isNsRestrictedMode bool, selfNamespace string, withoutCleanup bool) {
+func finishSelfExecution(kubernetesProvider *kubernetes.Provider) {
 	removalCtx, cancel := context.WithTimeout(context.Background(), cleanupTimeout)
 	defer cancel()
 	dumpLogsIfNeeded(removalCtx, kubernetesProvider)
-
-	if !withoutCleanup {
-		resources.CleanUpSelfResources(removalCtx, cancel, kubernetesProvider, isNsRestrictedMode, selfNamespace)
-	}
 }
 
 func dumpLogsIfNeeded(ctx context.Context, kubernetesProvider *kubernetes.Provider) {


@@ -2,6 +2,7 @@ package cmd
 import (
 	"fmt"
+	"path"
 
 	"github.com/creasty/defaults"
 	"github.com/kubeshark/kubeshark/config"
@@ -16,27 +17,21 @@ var configCmd = &cobra.Command{
 	Use:   "config",
 	Short: fmt.Sprintf("Generate %s config with default values", misc.Software),
 	RunE: func(cmd *cobra.Command, args []string) error {
-		configWithDefaults, err := config.GetConfigWithDefaults()
-		if err != nil {
-			log.Error().Err(err).Msg("Failed generating config with defaults.")
-			return nil
-		}
-
 		if config.Config.Config.Regenerate {
-			if err := config.WriteConfig(configWithDefaults); err != nil {
+			if err := config.WriteConfig(&config.Config); err != nil {
 				log.Error().Err(err).Msg("Failed generating config with defaults.")
 				return nil
 			}
-			log.Info().Str("config-path", config.Config.ConfigFilePath).Msg("Template file written to config path.")
+			log.Info().Str("config-path", config.ConfigFilePath).Msg("Template file written to config path.")
 		} else {
-			template, err := utils.PrettyYaml(configWithDefaults)
+			template, err := utils.PrettyYaml(config.Config)
 			if err != nil {
 				log.Error().Err(err).Msg("Failed converting config with defaults to YAML.")
 				return nil
 			}
-			log.Debug().Str("template", template).Msg("Writing template config...")
+			log.Debug().Str("template", template).Msg("Printing template config...")
 			fmt.Printf("%v", template)
 		}
@@ -52,5 +47,5 @@ func init() {
 		log.Debug().Err(err).Send()
 	}
 
-	configCmd.Flags().BoolP(configStructs.RegenerateConfigName, "r", defaultConfig.Config.Regenerate, fmt.Sprintf("Regenerate the config file with default values to path %s or to chosen path using --%s", defaultConfig.ConfigFilePath, config.ConfigFilePathCommandName))
+	configCmd.Flags().BoolP(configStructs.RegenerateConfigName, "r", defaultConfig.Config.Regenerate, fmt.Sprintf("Regenerate the config file with default values to path %s", path.Join(misc.GetDotFolderPath(), "config.yaml")))
 }

cmd/console.go (new file, +112 lines)

@@ -0,0 +1,112 @@
package cmd

import (
	"fmt"
	"net/http"
	"net/url"
	"os"
	"os/signal"
	"strings"
	"time"

	"github.com/creasty/defaults"
	"github.com/gorilla/websocket"
	"github.com/kubeshark/kubeshark/config"
	"github.com/kubeshark/kubeshark/config/configStructs"
	"github.com/kubeshark/kubeshark/kubernetes"
	"github.com/kubeshark/kubeshark/utils"
	"github.com/rs/zerolog/log"
	"github.com/spf13/cobra"
)

var consoleCmd = &cobra.Command{
	Use:   "console",
	Short: "Stream the scripting console logs into shell",
	RunE: func(cmd *cobra.Command, args []string) error {
		runConsole()
		return nil
	},
}

func init() {
	rootCmd.AddCommand(consoleCmd)

	defaultTapConfig := configStructs.TapConfig{}
	if err := defaults.Set(&defaultTapConfig); err != nil {
		log.Debug().Err(err).Send()
	}

	consoleCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.Port, "Provide a custom port for the Hub")
	consoleCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the Hub")
}

func runConsole() {
	hubUrl := kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Hub.Port)
	response, err := http.Get(fmt.Sprintf("%s/echo", hubUrl))
	if err != nil || response.StatusCode != 200 {
		log.Info().Msg(fmt.Sprintf(utils.Yellow, "Couldn't connect to Hub. Establishing proxy..."))
		runProxy(false, true)
	}

	interrupt := make(chan os.Signal, 1)
	signal.Notify(interrupt, os.Interrupt)

	log.Info().Str("host", config.Config.Tap.Proxy.Host).Uint16("port", config.Config.Tap.Proxy.Hub.Port).Msg("Connecting to:")
	u := url.URL{
		Scheme: "ws",
		Host:   fmt.Sprintf("%s:%d", config.Config.Tap.Proxy.Host, config.Config.Tap.Proxy.Hub.Port),
		Path:   "/scripts/logs",
	}

	c, _, err := websocket.DefaultDialer.Dial(u.String(), nil)
	if err != nil {
		log.Error().Err(err).Send()
		return
	}
	defer c.Close()

	done := make(chan struct{})

	go func() {
		defer close(done)
		for {
			_, message, err := c.ReadMessage()
			if err != nil {
				log.Error().Err(err).Send()
				return
			}
			msg := string(message)
			if strings.Contains(msg, ":ERROR]") {
				msg = fmt.Sprintf(utils.Red, msg)
				fmt.Fprintln(os.Stderr, msg)
			} else {
				fmt.Fprintln(os.Stdout, msg)
			}
		}
	}()

	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()

	for {
		select {
		case <-done:
			return
		case <-interrupt:
			log.Warn().Msg(fmt.Sprintf(utils.Yellow, "Received interrupt, exiting..."))

			err := c.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, ""))
			if err != nil {
				log.Error().Err(err).Send()
				return
			}
			select {
			case <-done:
			case <-time.After(time.Second):
			}
			return
		}
	}
}


@@ -18,7 +18,7 @@ var logsCmd = &cobra.Command{
 	Use:   "logs",
 	Short: "Create a ZIP file with logs for GitHub issues or troubleshooting",
 	RunE: func(cmd *cobra.Command, args []string) error {
-		kubernetesProvider, err := getKubernetesProviderForCli()
+		kubernetesProvider, err := getKubernetesProviderForCli(false, false)
 		if err != nil {
 			return nil
 		}


@@ -1,25 +0,0 @@
# This example shows permissions that enrich the logs with additional info
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubeshark-runner-debug-clusterrole
rules:
  - apiGroups: ["events.k8s.io"]
    resources: ["events"]
    verbs: ["watch"]
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["get"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubeshark-runner-debug-clusterrolebindings
subjects:
  - kind: User
    name: user-with-clusterwide-access
    apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: kubeshark-runner-debug-clusterrole
  apiGroup: rbac.authorization.k8s.io


@@ -1,37 +0,0 @@
# This example shows permissions that are required for Kubeshark to resolve IPs to service names
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubeshark-resolver-clusterrole
rules:
  - apiGroups: [""]
    resources: ["serviceaccounts"]
    verbs: ["get", "create"]
  - apiGroups: ["rbac.authorization.k8s.io"]
    resources: ["clusterroles"]
    verbs: ["get", "list", "create", "delete"]
  - apiGroups: ["rbac.authorization.k8s.io"]
    resources: ["clusterrolebindings"]
    verbs: ["get", "list", "create", "delete"]
  - apiGroups: ["", "apps", "extensions"]
    resources: ["pods"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["", "apps", "extensions"]
    resources: ["services"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["", "apps", "extensions"]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubeshark-resolver-clusterrolebindings
subjects:
  - kind: User
    name: user-with-clusterwide-access
    apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: kubeshark-resolver-clusterrole
  apiGroup: rbac.authorization.k8s.io

View File

@@ -1,40 +0,0 @@
# This example shows the permissions that are required in order to run the `kubeshark tap` command
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubeshark-runner-clusterrole
rules:
- apiGroups: [""]
resources: ["pods"]
verbs: ["list", "watch", "create"]
- apiGroups: [""]
resources: ["services"]
verbs: ["get", "create"]
- apiGroups: ["apps"]
resources: ["daemonsets"]
verbs: ["create", "patch"]
- apiGroups: [""]
resources: ["namespaces"]
verbs: ["list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["services/proxy"]
verbs: ["get", "create"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["create"]
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubeshark-runner-clusterrolebindings
subjects:
- kind: User
name: user-with-clusterwide-access
apiGroup: rbac.authorization.k8s.io
roleRef:
kind: ClusterRole
name: kubeshark-runner-clusterrole
apiGroup: rbac.authorization.k8s.io

View File

@@ -1,25 +0,0 @@
# This example shows permissions that enrich the logs with additional info in namespace-restricted mode
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubeshark-runner-debug-role
rules:
- apiGroups: ["events.k8s.io"]
resources: ["events"]
verbs: ["watch"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["get"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubeshark-runner-debug-rolebindings
subjects:
- kind: User
name: user-with-restricted-access
apiGroup: rbac.authorization.k8s.io
roleRef:
kind: Role
name: kubeshark-runner-debug-role
apiGroup: rbac.authorization.k8s.io

View File

@@ -1,37 +0,0 @@
# This example shows permissions that are required for Kubeshark to resolve IPs to service names in namespace-restricted mode
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubeshark-resolver-role
rules:
- apiGroups: [""]
resources: ["serviceaccounts"]
verbs: ["get", "list", "create", "delete"]
- apiGroups: ["rbac.authorization.k8s.io"]
resources: ["roles"]
verbs: ["get", "list", "create", "delete"]
- apiGroups: ["rbac.authorization.k8s.io"]
resources: ["rolebindings"]
verbs: ["get", "list", "create", "delete"]
- apiGroups: ["", "apps", "extensions"]
resources: ["pods"]
verbs: ["get", "list", "watch"]
- apiGroups: ["", "apps", "extensions"]
resources: ["services"]
verbs: ["get", "list", "watch"]
- apiGroups: ["", "apps", "extensions"]
resources: ["endpoints"]
verbs: ["get", "list", "watch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubeshark-resolver-rolebindings
subjects:
- kind: User
name: user-with-restricted-access
apiGroup: rbac.authorization.k8s.io
roleRef:
kind: Role
name: kubeshark-resolver-role
apiGroup: rbac.authorization.k8s.io

View File

@@ -1,37 +0,0 @@
# This example shows the permissions that are required in order to run the `kubeshark tap` command in namespace-restricted mode
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubeshark-runner-role
rules:
- apiGroups: [""]
resources: ["pods"]
verbs: ["list", "watch", "create"]
- apiGroups: [""]
resources: ["services"]
verbs: ["get", "create", "delete"]
- apiGroups: ["apps"]
resources: ["daemonsets"]
verbs: ["create", "patch", "delete"]
- apiGroups: [""]
resources: ["services/proxy"]
verbs: ["get", "create", "delete"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["create", "delete"]
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubeshark-runner-rolebindings
subjects:
- kind: User
name: user-with-restricted-access
apiGroup: rbac.authorization.k8s.io
roleRef:
kind: Role
name: kubeshark-runner-role
apiGroup: rbac.authorization.k8s.io

125
cmd/pro.go Normal file
View File

@@ -0,0 +1,125 @@
package cmd
import (
"fmt"
"io"
"net/http"
"os"
"time"
"github.com/creasty/defaults"
"github.com/gin-gonic/gin"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/config/configStructs"
"github.com/kubeshark/kubeshark/internal/connect"
"github.com/kubeshark/kubeshark/kubernetes"
"github.com/kubeshark/kubeshark/utils"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
)
var proCmd = &cobra.Command{
Use: "pro",
Short: "Acquire a Pro license",
RunE: func(cmd *cobra.Command, args []string) error {
acquireLicense()
return nil
},
}
const (
PRO_URL = "https://console.kubeshark.co/cli"
PRO_PORT = 5252
)
func init() {
rootCmd.AddCommand(proCmd)
defaultTapConfig := configStructs.TapConfig{}
if err := defaults.Set(&defaultTapConfig); err != nil {
log.Debug().Err(err).Send()
}
proCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.Port, "Provide a custom port for the Hub")
proCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the Hub")
}
func acquireLicense() {
hubUrl := kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Hub.Port)
response, err := http.Get(fmt.Sprintf("%s/echo", hubUrl))
if err != nil || response.StatusCode != 200 {
log.Info().Msg(fmt.Sprintf(utils.Yellow, "Couldn't connect to Hub. Establishing proxy..."))
runProxy(false, true)
}
connector = connect.NewConnector(kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Hub.Port), connect.DefaultRetries, connect.DefaultTimeout)
log.Info().Str("url", PRO_URL).Msg("Opening in the browser:")
utils.OpenBrowser(PRO_URL)
runLicenseReceiverServer()
}
func updateLicense(licenseKey string) {
log.Info().Str("key", licenseKey).Msg("Received license:")
config.Config.License = licenseKey
err := config.WriteConfig(&config.Config)
if err != nil {
log.Error().Err(err).Send()
}
connector.PostLicenseSingle(config.Config.License)
log.Info().Msg("Updated the license. Exiting.")
go func() {
time.Sleep(2 * time.Second)
os.Exit(0)
}()
}
func runLicenseReceiverServer() {
gin.SetMode(gin.ReleaseMode)
ginApp := gin.New()
ginApp.Use(func(c *gin.Context) {
c.Writer.Header().Set("Access-Control-Allow-Origin", "*")
c.Writer.Header().Set("Access-Control-Allow-Credentials", "true")
c.Writer.Header().Set("Access-Control-Allow-Headers", "Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization, accept, origin, Cache-Control, X-Requested-With, x-session-token")
c.Writer.Header().Set("Access-Control-Allow-Methods", "POST, OPTIONS, GET, PUT, DELETE")
c.Writer.Header().Set("Access-Control-Expose-Headers", "Content-Disposition")
if c.Request.Method == "OPTIONS" {
c.AbortWithStatus(http.StatusNoContent)
return
}
c.Next()
})
ginApp.POST("/", func(c *gin.Context) {
data, err := io.ReadAll(c.Request.Body)
if err != nil {
log.Error().Err(err).Send()
c.AbortWithStatus(http.StatusBadRequest)
return
}
licenseKey := string(data)
updateLicense(licenseKey)
})
go func() {
if err := ginApp.Run(fmt.Sprintf(":%d", PRO_PORT)); err != nil {
panic(err)
}
}()
log.Info().Msg("Alternatively enter your license key:")
var licenseKey string
fmt.Scanf("%s", &licenseKey)
updateLicense(licenseKey)
}
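The receiver above treats the raw POST body as the license key, so the browser hand-off can be simulated locally. A minimal sketch, assuming the default PRO_PORT and a placeholder key (not a real key format):

package main

import (
	"net/http"
	"strings"
)

func main() {
	// POST a plain-text body to the local license receiver on :5252;
	// the server reads the body verbatim and passes it to updateLicense.
	// "LICENSE-KEY" is a hypothetical placeholder value.
	resp, err := http.Post("http://127.0.0.1:5252/", "text/plain", strings.NewReader("LICENSE-KEY"))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
}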

View File

@@ -9,9 +9,9 @@ import (
var proxyCmd = &cobra.Command{
Use: "proxy",
Short: "Open the web UI (front-end) in the browser via proxy/port-forward.",
Short: "Open the web UI (front-end) in the browser via proxy/port-forward",
RunE: func(cmd *cobra.Command, args []string) error {
runProxy()
runProxy(true, false)
return nil
},
}
@@ -24,7 +24,7 @@ func init() {
log.Debug().Err(err).Send()
}
proxyCmd.Flags().Uint16(configStructs.ProxyFrontPortLabel, defaultTapConfig.Proxy.Front.SrcPort, "Provide a custom port for the front-end proxy/port-forward.")
proxyCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.SrcPort, "Provide a custom port for the Hub proxy/port-forward.")
proxyCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the proxy/port-forward.")
proxyCmd.Flags().Uint16(configStructs.ProxyFrontPortLabel, defaultTapConfig.Proxy.Front.Port, "Provide a custom port for the front-end proxy/port-forward")
proxyCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.Port, "Provide a custom port for the Hub proxy/port-forward")
proxyCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the proxy/port-forward")
}
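A usage sketch with the renamed flags (values are the TapConfig defaults):

$ kubeshark proxy --proxy-host 127.0.0.1 --proxy-hub-port 8898

Invoked this way the command blocks (block=true) and opens the browser (noBrowser=false), unlike the internal runProxy(false, true) calls made by console, pro, and scripts.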

View File

@@ -14,8 +14,8 @@ import (
"github.com/rs/zerolog/log"
)
func runProxy() {
kubernetesProvider, err := getKubernetesProviderForCli()
func runProxy(block bool, noBrowser bool) {
kubernetesProvider, err := getKubernetesProviderForCli(false, false)
if err != nil {
return
}
@@ -23,7 +23,7 @@ func runProxy() {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
exists, err := kubernetesProvider.DoesServiceExist(ctx, config.Config.SelfNamespace, kubernetes.FrontServiceName)
exists, err := kubernetesProvider.DoesServiceExist(ctx, config.Config.Tap.Release.Namespace, kubernetes.FrontServiceName)
if err != nil {
log.Error().
Str("service", kubernetes.FrontServiceName).
@@ -42,7 +42,7 @@ func runProxy() {
return
}
exists, err = kubernetesProvider.DoesServiceExist(ctx, config.Config.SelfNamespace, kubernetes.HubServiceName)
exists, err = kubernetesProvider.DoesServiceExist(ctx, config.Config.Tap.Release.Namespace, kubernetes.HubServiceName)
if err != nil {
log.Error().
Str("service", kubernetes.HubServiceName).
@@ -63,12 +63,12 @@ func runProxy() {
var establishedProxy bool
hubUrl := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort)
response, err := http.Get(fmt.Sprintf("%s/", hubUrl))
hubUrl := kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Hub.Port)
response, err := http.Get(fmt.Sprintf("%s/echo", hubUrl))
if err == nil && response.StatusCode == 200 {
log.Info().
Str("service", kubernetes.HubServiceName).
Int("port", int(config.Config.Tap.Proxy.Hub.SrcPort)).
Int("port", int(config.Config.Tap.Proxy.Hub.Port)).
Msg("Found a running service.")
okToOpen("Hub", hubUrl, true)
@@ -79,8 +79,8 @@ func runProxy() {
kubernetes.HubServiceName,
kubernetes.HubPodName,
configStructs.ProxyHubPortLabel,
config.Config.Tap.Proxy.Hub.SrcPort,
config.Config.Tap.Proxy.Hub.DstPort,
config.Config.Tap.Proxy.Hub.Port,
configStructs.ContainerPort,
"/echo",
)
connector := connect.NewConnector(hubUrl, connect.DefaultRetries, connect.DefaultTimeout)
@@ -93,15 +93,15 @@ func runProxy() {
okToOpen("Hub", hubUrl, true)
}
frontUrl := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Front.SrcPort)
frontUrl := kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Front.Port)
response, err = http.Get(fmt.Sprintf("%s/", frontUrl))
if err == nil && response.StatusCode == 200 {
log.Info().
Str("service", kubernetes.FrontServiceName).
Int("port", int(config.Config.Tap.Proxy.Front.SrcPort)).
Int("port", int(config.Config.Tap.Proxy.Front.Port)).
Msg("Found a running service.")
okToOpen("Kubeshark", frontUrl, false)
okToOpen("Kubeshark", frontUrl, noBrowser)
} else {
startProxyReportErrorIfAny(
kubernetesProvider,
@@ -109,8 +109,8 @@ func runProxy() {
kubernetes.FrontServiceName,
kubernetes.FrontPodName,
configStructs.ProxyFrontPortLabel,
config.Config.Tap.Proxy.Front.SrcPort,
config.Config.Tap.Proxy.Front.DstPort,
config.Config.Tap.Proxy.Front.Port,
configStructs.ContainerPort,
"",
)
connector := connect.NewConnector(frontUrl, connect.DefaultRetries, connect.DefaultTimeout)
@@ -120,10 +120,10 @@ func runProxy() {
}
establishedProxy = true
okToOpen("Kubeshark", frontUrl, false)
okToOpen("Kubeshark", frontUrl, noBrowser)
}
if establishedProxy {
if establishedProxy && block {
utils.WaitForTermination(ctx, cancel)
}
}

View File

@@ -12,10 +12,10 @@ import (
var rootCmd = &cobra.Command{
Use: "kubeshark",
Short: fmt.Sprintf("%s: The API Traffic Viewer for Kubernetes", misc.Software),
Long: fmt.Sprintf(`%s: The API Traffic Viewer for Kubernetes
Short: fmt.Sprintf("%s: %s", misc.Software, misc.Description),
Long: fmt.Sprintf(`%s: %s
An extensible Kubernetes-aware network sniffer and kernel tracer.
For more info: %s`, misc.Software, misc.Website),
For more info: %s`, misc.Software, misc.Description, misc.Website),
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
if err := config.InitConfig(cmd); err != nil {
log.Fatal().Err(err).Send()
@@ -32,8 +32,7 @@ func init() {
}
rootCmd.PersistentFlags().StringSlice(config.SetCommandName, []string{}, fmt.Sprintf("Override values using --%s", config.SetCommandName))
rootCmd.PersistentFlags().String(config.ConfigFilePathCommandName, defaultConfig.ConfigFilePath, fmt.Sprintf("Override config file path using --%s", config.ConfigFilePathCommandName))
rootCmd.PersistentFlags().BoolP(config.DebugFlag, "d", false, "Enable debug mode.")
rootCmd.PersistentFlags().BoolP(config.DebugFlag, "d", false, "Enable debug mode")
}
// Execute adds all child commands to the root command and sets flags appropriately.

152
cmd/scripts.go Normal file
View File

@@ -0,0 +1,152 @@
package cmd
import (
"context"
"fmt"
"net/http"
"github.com/creasty/defaults"
"github.com/fsnotify/fsnotify"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/config/configStructs"
"github.com/kubeshark/kubeshark/internal/connect"
"github.com/kubeshark/kubeshark/kubernetes"
"github.com/kubeshark/kubeshark/misc"
"github.com/kubeshark/kubeshark/utils"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
)
var scriptsCmd = &cobra.Command{
Use: "scripts",
Short: "Watch the `scripting.source` directory for changes and update the scripts",
RunE: func(cmd *cobra.Command, args []string) error {
runScripts()
return nil
},
}
func init() {
rootCmd.AddCommand(scriptsCmd)
defaultTapConfig := configStructs.TapConfig{}
if err := defaults.Set(&defaultTapConfig); err != nil {
log.Debug().Err(err).Send()
}
scriptsCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.Port, "Provide a custom port for the Hub")
scriptsCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the Hub")
}
func runScripts() {
if config.Config.Scripting.Source == "" {
log.Error().Msg("`scripting.source` field is empty.")
return
}
hubUrl := kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Hub.Port)
response, err := http.Get(fmt.Sprintf("%s/echo", hubUrl))
if err != nil || response.StatusCode != 200 {
log.Info().Msg(fmt.Sprintf(utils.Yellow, "Couldn't connect to Hub. Establishing proxy..."))
runProxy(false, true)
}
connector = connect.NewConnector(kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Hub.Port), connect.DefaultRetries, connect.DefaultTimeout)
watchScripts(true)
}
func watchScripts(block bool) {
files := make(map[string]int64)
scripts, err := config.Config.Scripting.GetScripts()
if err != nil {
log.Error().Err(err).Send()
return
}
for _, script := range scripts {
index, err := connector.PostScript(script)
if err != nil {
log.Error().Err(err).Send()
return
}
files[script.Path] = index
}
watcher, err := fsnotify.NewWatcher()
if err != nil {
log.Error().Err(err).Send()
return
}
if block {
defer watcher.Close()
}
go func() {
for {
select {
// watch for events
case event := <-watcher.Events:
switch event.Op {
case fsnotify.Create:
script, err := misc.ReadScriptFile(event.Name)
if err != nil {
log.Error().Err(err).Send()
continue
}
index, err := connector.PostScript(script)
if err != nil {
log.Error().Err(err).Send()
continue
}
files[script.Path] = index
case fsnotify.Write:
index := files[event.Name]
script, err := misc.ReadScriptFile(event.Name)
if err != nil {
log.Error().Err(err).Send()
continue
}
err = connector.PutScript(script, index)
if err != nil {
log.Error().Err(err).Send()
continue
}
case fsnotify.Rename:
index := files[event.Name]
err := connector.DeleteScript(index)
if err != nil {
log.Error().Err(err).Send()
continue
}
default:
// pass
}
// watch for errors
case err := <-watcher.Errors:
log.Error().Err(err).Send()
}
}
}()
if err := watcher.Add(config.Config.Scripting.Source); err != nil {
log.Error().Err(err).Send()
}
log.Info().Str("directory", config.Config.Scripting.Source).Msg("Watching scripts against changes:")
if block {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
utils.WaitForTermination(ctx, cancel)
}
}
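For context, the config block this command consumes looks like the following (field names match ScriptingConfig further down; the source path is a placeholder):

scripting:
  env: {}
  source: /path/to/scripts
  watchScripts: true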

View File

@@ -15,8 +15,7 @@ import (
var tapCmd = &cobra.Command{
Use: "tap [POD REGEX]",
Short: "Capture the network traffic in your Kubernetes cluster.",
Long: "Capture the network traffic in your Kubernetes cluster.",
Short: "Capture the network traffic in your Kubernetes cluster",
RunE: func(cmd *cobra.Command, args []string) error {
tap()
return nil
@@ -44,19 +43,22 @@ func init() {
log.Debug().Err(err).Send()
}
tapCmd.Flags().StringP(configStructs.DockerRegistryLabel, "r", defaultTapConfig.Docker.Registry, "The Docker registry that's hosting the images.")
tapCmd.Flags().StringP(configStructs.DockerTagLabel, "t", defaultTapConfig.Docker.Tag, "The tag of the Docker images that are going to be pulled.")
tapCmd.Flags().String(configStructs.DockerImagePullPolicy, defaultTapConfig.Docker.ImagePullPolicy, "ImagePullPolicy for the Docker images.")
tapCmd.Flags().StringSlice(configStructs.DockerImagePullSecrets, defaultTapConfig.Docker.ImagePullSecrets, "ImagePullSecrets for the Docker images.")
tapCmd.Flags().Uint16(configStructs.ProxyFrontPortLabel, defaultTapConfig.Proxy.Front.SrcPort, "Provide a custom port for the front-end proxy/port-forward.")
tapCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.SrcPort, "Provide a custom port for the Hub proxy/port-forward.")
tapCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the proxy/port-forward.")
tapCmd.Flags().StringSliceP(configStructs.NamespacesLabel, "n", defaultTapConfig.Namespaces, "Namespaces selector.")
tapCmd.Flags().BoolP(configStructs.AllNamespacesLabel, "A", defaultTapConfig.AllNamespaces, "Tap all namespaces.")
tapCmd.Flags().String(configStructs.StorageLimitLabel, defaultTapConfig.StorageLimit, "Override the default storage limit. (per node)")
tapCmd.Flags().Bool(configStructs.DryRunLabel, defaultTapConfig.DryRun, "Preview of all pods matching the regex, without tapping them.")
tapCmd.Flags().StringP(configStructs.PcapLabel, "p", defaultTapConfig.Pcap, fmt.Sprintf("Capture from a PCAP snapshot of %s (.tar.gz) using your Docker Daemon instead of Kubernetes.", misc.Software))
tapCmd.Flags().Bool(configStructs.ServiceMeshLabel, defaultTapConfig.ServiceMesh, "Capture the encrypted traffic if the cluster is configured with a service mesh and with mTLS.")
tapCmd.Flags().Bool(configStructs.TlsLabel, defaultTapConfig.Tls, "Capture the traffic that's encrypted with OpenSSL or Go crypto/tls libraries.")
tapCmd.Flags().Bool(configStructs.DebugLabel, defaultTapConfig.Debug, "Enable the debug mode.")
tapCmd.Flags().StringP(configStructs.DockerRegistryLabel, "r", defaultTapConfig.Docker.Registry, "The Docker registry that's hosting the images")
tapCmd.Flags().StringP(configStructs.DockerTagLabel, "t", defaultTapConfig.Docker.Tag, "The tag of the Docker images that are going to be pulled")
tapCmd.Flags().String(configStructs.DockerImagePullPolicy, defaultTapConfig.Docker.ImagePullPolicy, "ImagePullPolicy for the Docker images")
tapCmd.Flags().StringSlice(configStructs.DockerImagePullSecrets, defaultTapConfig.Docker.ImagePullSecrets, "ImagePullSecrets for the Docker images")
tapCmd.Flags().Uint16(configStructs.ProxyFrontPortLabel, defaultTapConfig.Proxy.Front.Port, "Provide a custom port for the front-end proxy/port-forward")
tapCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.Port, "Provide a custom port for the Hub proxy/port-forward")
tapCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the proxy/port-forward")
tapCmd.Flags().StringSliceP(configStructs.NamespacesLabel, "n", defaultTapConfig.Namespaces, "Namespaces selector")
tapCmd.Flags().StringP(configStructs.ReleaseNamespaceLabel, "s", defaultTapConfig.Release.Namespace, "Release namespace of Kubeshark")
tapCmd.Flags().Bool(configStructs.PersistentStorageLabel, defaultTapConfig.PersistentStorage, "Enable persistent storage (PersistentVolumeClaim)")
tapCmd.Flags().String(configStructs.StorageLimitLabel, defaultTapConfig.StorageLimit, "Override the default storage limit (per node)")
tapCmd.Flags().String(configStructs.StorageClassLabel, defaultTapConfig.StorageClass, "Override the default storage class of the PersistentVolumeClaim (per node)")
tapCmd.Flags().Bool(configStructs.DryRunLabel, defaultTapConfig.DryRun, "Preview of all pods matching the regex, without tapping them")
tapCmd.Flags().StringP(configStructs.PcapLabel, "p", defaultTapConfig.Pcap, fmt.Sprintf("Capture from a PCAP snapshot of %s (.tar.gz) using your Docker Daemon instead of Kubernetes. TAR path from the file system or an S3 URI (object, folder or the bucket)", misc.Software))
tapCmd.Flags().Bool(configStructs.ServiceMeshLabel, defaultTapConfig.ServiceMesh, "Capture the encrypted traffic if the cluster is configured with a service mesh and with mTLS")
tapCmd.Flags().Bool(configStructs.TlsLabel, defaultTapConfig.Tls, "Capture the traffic that's encrypted with OpenSSL or Go crypto/tls libraries")
tapCmd.Flags().Bool(configStructs.IgnoreTaintedLabel, defaultTapConfig.IgnoreTainted, "Ignore tainted pods while running Worker DaemonSet")
tapCmd.Flags().Bool(configStructs.IngressEnabledLabel, defaultTapConfig.Ingress.Enabled, "Enable Ingress")
}
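A usage sketch combining the new flags (namespace names, sizes, and the pod regex are placeholder values; -s is the shorthand registered for release-namespace):

$ kubeshark tap -n sock-shop -s kubeshark --persistentstorage --storagelimit 500Mi "catalo.*"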

View File

@@ -1,18 +1,30 @@
package cmd
import (
"archive/tar"
"bufio"
"compress/gzip"
"context"
"encoding/json"
"fmt"
"io"
"net/url"
"os"
"path/filepath"
"strings"
"sync"
"github.com/aws/aws-sdk-go-v2/aws"
awsConfig "github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/feature/s3/manager"
"github.com/aws/aws-sdk-go-v2/service/s3"
s3Types "github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/client"
"github.com/docker/go-connections/nat"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/config/configStructs"
"github.com/kubeshark/kubeshark/docker"
"github.com/kubeshark/kubeshark/internal/connect"
"github.com/kubeshark/kubeshark/kubernetes"
@@ -61,6 +73,7 @@ func logPullingImage(image string, reader io.ReadCloser) {
}
func pullImages(ctx context.Context, cli *client.Client, imageFront string, imageHub string, imageWorker string) error {
log.Info().Msg("Pulling images...")
readerFront, err := cli.ImagePull(ctx, imageFront, types.ImagePullOptions{})
if err != nil {
return err
@@ -92,7 +105,7 @@ func cleanUpOldContainers(
nameHub string,
nameWorker string,
) error {
containers, err := cli.ContainerList(ctx, types.ContainerListOptions{})
containers, err := cli.ContainerList(ctx, types.ContainerListOptions{All: true})
if err != nil {
return err
}
@@ -141,10 +154,10 @@ func createAndStartContainers(
hostConfigFront := &container.HostConfig{
PortBindings: nat.PortMap{
nat.Port(fmt.Sprintf("%d/tcp", config.Config.Tap.Proxy.Front.DstPort)): []nat.PortBinding{
nat.Port(fmt.Sprintf("%d/tcp", configStructs.ContainerPort)): []nat.PortBinding{
{
HostIP: hostIP,
HostPort: fmt.Sprintf("%d", config.Config.Tap.Proxy.Front.SrcPort),
HostPort: fmt.Sprintf("%d", config.Config.Tap.Proxy.Front.Port),
},
},
},
@@ -156,7 +169,7 @@ func createAndStartContainers(
Env: []string{
"REACT_APP_DEFAULT_FILTER= ",
"REACT_APP_HUB_HOST= ",
fmt.Sprintf("REACT_APP_HUB_PORT=%d", config.Config.Tap.Proxy.Hub.SrcPort),
fmt.Sprintf("REACT_APP_HUB_PORT=:%d", config.Config.Tap.Proxy.Hub.Port),
},
}, hostConfigFront, nil, nil, nameFront)
if err != nil {
@@ -165,16 +178,16 @@ func createAndStartContainers(
hostConfigHub := &container.HostConfig{
PortBindings: nat.PortMap{
nat.Port(fmt.Sprintf("%d/tcp", config.Config.Tap.Proxy.Hub.DstPort)): []nat.PortBinding{
nat.Port(fmt.Sprintf("%d/tcp", config.Config.Tap.Proxy.Hub.SrvPort)): []nat.PortBinding{
{
HostIP: hostIP,
HostPort: fmt.Sprintf("%d", config.Config.Tap.Proxy.Hub.SrcPort),
HostPort: fmt.Sprintf("%d", config.Config.Tap.Proxy.Hub.Port),
},
},
},
}
cmdHub := []string{"-port", fmt.Sprintf("%d", config.Config.Tap.Proxy.Hub.DstPort)}
cmdHub := []string{"-port", fmt.Sprintf("%d", config.Config.Tap.Proxy.Hub.SrvPort)}
if config.DebugMode {
cmdHub = append(cmdHub, fmt.Sprintf("-%s", config.DebugFlag))
}
@@ -183,13 +196,13 @@ func createAndStartContainers(
Image: imageHub,
Cmd: cmdHub,
Tty: false,
ExposedPorts: nat.PortSet{nat.Port(fmt.Sprintf("%d/tcp", config.Config.Tap.Proxy.Hub.DstPort)): {}},
ExposedPorts: nat.PortSet{nat.Port(fmt.Sprintf("%d/tcp", config.Config.Tap.Proxy.Hub.SrvPort)): {}},
}, hostConfigHub, nil, nil, nameHub)
if err != nil {
return
}
cmdWorker := []string{"-f", "./import", "-port", fmt.Sprintf("%d", config.Config.Tap.Proxy.Worker.DstPort)}
cmdWorker := []string{"-f", "./import", "-port", fmt.Sprintf("%d", config.Config.Tap.Proxy.Worker.SrvPort)}
if config.DebugMode {
cmdWorker = append(cmdWorker, fmt.Sprintf("-%s", config.DebugFlag))
}
@@ -270,7 +283,172 @@ func stopAndRemoveContainers(
return
}
func pcap(tarPath string) {
func downloadTarFromS3(s3Url string) (tarPath string, err error) {
u, err := url.Parse(s3Url)
if err != nil {
return
}
bucket := u.Host
key := u.Path[1:]
var cfg aws.Config
cfg, err = awsConfig.LoadDefaultConfig(context.TODO())
if err != nil {
return
}
client := s3.NewFromConfig(cfg)
var listObjectsOutput *s3.ListObjectsV2Output
listObjectsOutput, err = client.ListObjectsV2(context.TODO(), &s3.ListObjectsV2Input{
Bucket: aws.String(bucket),
Prefix: aws.String(key),
})
if err != nil {
return
}
var file *os.File
file, err = os.CreateTemp(os.TempDir(), fmt.Sprintf("%s_*%s", strings.TrimSuffix(filepath.Base(key), filepath.Ext(key)), filepath.Ext(key)))
if err != nil {
return
}
defer file.Close()
log.Info().Str("bucket", bucket).Str("key", key).Msg("Downloading from S3")
downloader := manager.NewDownloader(client)
_, err = downloader.Download(context.TODO(), file, &s3.GetObjectInput{
Bucket: aws.String(bucket),
Key: aws.String(key),
})
if err != nil {
log.Info().Err(err).Msg("S3 object is not found. Assuming URL is not a single object. Listing the objects in given folder or the bucket to download...")
var tempDirPath string
tempDirPath, err = os.MkdirTemp(os.TempDir(), "kubeshark_*")
if err != nil {
return
}
var wg sync.WaitGroup
for _, object := range listObjectsOutput.Contents {
wg.Add(1)
go func(object s3Types.Object) {
defer wg.Done()
objectKey := *object.Key
fullPath := filepath.Join(tempDirPath, objectKey)
err = os.MkdirAll(filepath.Dir(fullPath), os.ModePerm)
if err != nil {
return
}
var objectFile *os.File
objectFile, err = os.Create(fullPath)
if err != nil {
return
}
defer objectFile.Close()
log.Info().Str("bucket", bucket).Str("key", objectKey).Msg("Downloading from S3")
downloader := manager.NewDownloader(client)
_, err = downloader.Download(context.TODO(), objectFile, &s3.GetObjectInput{
Bucket: aws.String(bucket),
Key: aws.String(objectKey),
})
if err != nil {
return
}
}(object)
}
wg.Wait()
tarPath, err = tarDirectory(tempDirPath)
return
}
tarPath = file.Name()
return
}
func tarDirectory(dirPath string) (string, error) {
tarPath := fmt.Sprintf("%s.tar.gz", dirPath)
var file *os.File
file, err := os.Create(tarPath)
if err != nil {
return "", err
}
defer file.Close()
gzipWriter := gzip.NewWriter(file)
defer gzipWriter.Close()
tarWriter := tar.NewWriter(gzipWriter)
defer tarWriter.Close()
walker := func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() {
return nil
}
file, err := os.Open(path)
if err != nil {
return err
}
defer file.Close()
stat, err := file.Stat()
if err != nil {
return err
}
header := &tar.Header{
Name: path[len(dirPath)+1:],
Size: stat.Size(),
Mode: int64(stat.Mode()),
ModTime: stat.ModTime(),
}
err = tarWriter.WriteHeader(header)
if err != nil {
return err
}
_, err = io.Copy(tarWriter, file)
if err != nil {
return err
}
return nil
}
err = filepath.Walk(dirPath, walker)
if err != nil {
return "", err
}
return tarPath, nil
}
func pcap(tarPath string) error {
if strings.HasPrefix(tarPath, "s3://") {
var err error
tarPath, err = downloadTarFromS3(tarPath)
if err != nil {
log.Error().Err(err).Msg("Failed downloading from S3")
return err
}
}
log.Info().Str("tar-path", tarPath).Msg("Openning")
docker.SetRegistry(config.Config.Tap.Docker.Registry)
docker.SetTag(config.Config.Tap.Docker.Tag)
@@ -278,7 +456,7 @@ func pcap(tarPath string) {
cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
if err != nil {
log.Error().Err(err).Send()
return
return err
}
defer cli.Close()
@@ -289,13 +467,13 @@ func pcap(tarPath string) {
err = pullImages(ctx, cli, imageFront, imageHub, imageWorker)
if err != nil {
log.Error().Err(err).Send()
return
return err
}
tarFile, err := os.Open(tarPath)
if err != nil {
log.Error().Err(err).Send()
return
return err
}
defer tarFile.Close()
tarReader := bufio.NewReader(tarFile)
@@ -310,7 +488,7 @@ func pcap(tarPath string) {
)
if err != nil {
log.Error().Err(err).Send()
return
return err
}
workerPod := &v1.Pod{
@@ -328,14 +506,19 @@ func pcap(tarPath string) {
},
}
connector = connect.NewConnector(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort), connect.DefaultRetries, connect.DefaultTimeout)
connector = connect.NewConnector(kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Hub.Port), connect.DefaultRetries, connect.DefaultTimeout)
connector.PostWorkerPodToHub(workerPod)
// License
if config.Config.License != "" {
connector.PostLicense(config.Config.License)
}
log.Info().
Str("url", kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort)).
Str("url", kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Hub.Port)).
Msg(fmt.Sprintf(utils.Green, "Hub is available at:"))
url := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Front.SrcPort)
url := kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Front.Port)
log.Info().Str("url", url).Msg(fmt.Sprintf(utils.Green, fmt.Sprintf("%s is available at:", misc.Software)))
if !config.Config.HeadlessMode {
@@ -349,5 +532,8 @@ func pcap(tarPath string) {
err = stopAndRemoveContainers(ctx, cli, respFront, respHub, respWorker)
if err != nil {
log.Error().Err(err).Send()
return err
}
return nil
}
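Usage sketches for the S3 handling above (bucket and key names are placeholders): a single object is downloaded as-is, while a folder or bare bucket URI takes the list-download-TAR path via tarDirectory:

$ kubeshark tap --pcap s3://example-bucket/snapshot.tar.gz
$ kubeshark tap --pcap s3://example-bucket/captures/
$ kubeshark tap --pcap s3://example-bucket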

View File

@@ -2,20 +2,19 @@ package cmd
import (
"context"
"errors"
"fmt"
"os"
"regexp"
"sync"
"time"
"github.com/kubeshark/kubeshark/docker"
"github.com/kubeshark/kubeshark/internal/connect"
"github.com/kubeshark/kubeshark/kubernetes/helm"
"github.com/kubeshark/kubeshark/misc"
"github.com/kubeshark/kubeshark/resources"
"github.com/kubeshark/kubeshark/utils"
core "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/config/configStructs"
@@ -27,34 +26,48 @@ import (
const cleanupTimeout = time.Minute
type tapState struct {
startTime time.Time
targetNamespaces []string
selfServiceAccountExists bool
startTime time.Time
targetNamespaces []string
}
var state tapState
var connector *connect.Connector
var hubPodReady bool
var frontPodReady bool
var proxyDone bool
type Readiness struct {
Hub bool
Front bool
Proxy bool
sync.Mutex
}
var ready *Readiness
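The readiness flags are shared between the Hub and Front pod watchers, hence the embedded mutex. Note that the watcher code below reads the flags under one lock section and sets Proxy under a second; an editorial sketch of the same gate done as a single atomic check-and-set (an assumption about intent, not the code in this diff):

func (r *Readiness) tryStartProxy() bool {
	r.Lock()
	defer r.Unlock()
	// Only the first watcher to observe both pods ready flips the flag.
	if r.Proxy || !r.Hub || !r.Front {
		return false
	}
	r.Proxy = true
	return true
}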
func tap() {
ready = &Readiness{}
state.startTime = time.Now()
docker.SetRegistry(config.Config.Tap.Docker.Registry)
docker.SetTag(config.Config.Tap.Docker.Tag)
log.Info().Str("registry", docker.GetRegistry()).Str("tag", docker.GetTag()).Msg("Using Docker:")
if config.Config.Tap.Pcap != "" {
pcap(config.Config.Tap.Pcap)
err := pcap(config.Config.Tap.Pcap)
if err != nil {
os.Exit(1)
}
return
}
if !config.Config.Tap.PersistentStorage {
config.Config.Tap.StorageLimit = "200Mi"
log.Warn().Msg("Storage limit cannot be modified while persistentstorage is set to false!")
}
log.Info().
Str("limit", config.Config.Tap.StorageLimit).
Msg(fmt.Sprintf("%s will store the traffic up to a limit (per node). Oldest TCP streams will be removed once the limit is reached.", misc.Software))
Msg(fmt.Sprintf("%s will store the traffic up to a limit (per node). Oldest TCP/UDP streams will be removed once the limit is reached.", misc.Software))
connector = connect.NewConnector(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort), connect.DefaultRetries, connect.DefaultTimeout)
connector = connect.NewConnector(kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Hub.Port), connect.DefaultRetries, connect.DefaultTimeout)
kubernetesProvider, err := getKubernetesProviderForCli()
kubernetesProvider, err := getKubernetesProviderForCli(false, false)
if err != nil {
return
}
@@ -62,14 +75,7 @@ func tap() {
ctx, cancel := context.WithCancel(context.Background())
defer cancel() // cancel will be called when this function exits
state.targetNamespaces = getNamespaces(kubernetesProvider)
if config.Config.IsNsRestrictedMode() {
if len(state.targetNamespaces) != 1 || !utils.Contains(state.targetNamespaces, config.Config.SelfNamespace) {
log.Error().Msg(fmt.Sprintf("%s can't resolve IPs in other namespaces when running in namespace restricted mode. You can use the same namespace for --%s and --%s", misc.Software, configStructs.NamespacesLabel, config.SelfNamespaceConfigName))
return
}
}
state.targetNamespaces = kubernetesProvider.GetNamespaces()
log.Info().Strs("namespaces", state.targetNamespaces).Msg("Targeting pods in:")
@@ -82,18 +88,17 @@ func tap() {
}
log.Info().Msg(fmt.Sprintf("Waiting for the creation of %s resources...", misc.Software))
if state.selfServiceAccountExists, err = resources.CreateHubResources(ctx, kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.SelfNamespace, config.Config.Tap.Resources.Hub, config.Config.ImagePullPolicy(), config.Config.ImagePullSecrets(), config.Config.Tap.Debug); err != nil {
var statusError *k8serrors.StatusError
if errors.As(err, &statusError) && (statusError.ErrStatus.Reason == metav1.StatusReasonAlreadyExists) {
log.Info().Msg(fmt.Sprintf("%s is already running in this namespace, change the `selfnamespace` configuration or run `%s clean` to remove the currently running %s instance.", misc.Software, misc.Program, misc.Software))
connector.PostRegexToHub(config.Config.Tap.PodRegexStr, state.targetNamespaces)
log.Info().Msg("Updated the targeted pods. Exiting.")
} else {
defer resources.CleanUpSelfResources(ctx, cancel, kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.SelfNamespace)
log.Error().Err(errormessage.FormatError(err)).Msg("Error creating resources!")
}
return
rel, err := helm.NewHelm(
config.Config.Tap.Release.Repo,
config.Config.Tap.Release.Name,
config.Config.Tap.Release.Namespace,
).Install()
if err != nil {
log.Error().Err(err).Send()
os.Exit(1)
} else {
log.Info().Msgf("Installed the Helm release: %s", rel.Name)
}
defer finishTapExecution(kubernetesProvider)
@@ -104,13 +109,20 @@ func tap() {
// block until exit signal or error
utils.WaitForTermination(ctx, cancel)
if !config.Config.Tap.Ingress.Enabled {
printProxyCommandSuggestion()
}
}
func printProxyCommandSuggestion() {
log.Warn().
Str("command", fmt.Sprintf("%s proxy", misc.Program)).
Msg(fmt.Sprintf(utils.Yellow, "To re-establish a proxy/port-forward, run:"))
}
func finishTapExecution(kubernetesProvider *kubernetes.Provider) {
finishSelfExecution(kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.SelfNamespace, true)
finishSelfExecution(kubernetesProvider)
}
/*
@@ -143,7 +155,7 @@ func printNoPodsFoundSuggestion(targetNamespaces []string) {
func watchHubPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc) {
podExactRegex := regexp.MustCompile(fmt.Sprintf("^%s$", kubernetes.HubPodName))
podWatchHelper := kubernetes.NewPodWatchHelper(kubernetesProvider, podExactRegex)
eventChan, errorChan := kubernetes.FilteredWatch(ctx, podWatchHelper, []string{config.Config.SelfNamespace}, podWatchHelper)
eventChan, errorChan := kubernetes.FilteredWatch(ctx, podWatchHelper, []string{config.Config.Tap.Release.Namespace}, podWatchHelper)
isPodReady := false
timeAfter := time.After(120 * time.Second)
@@ -178,12 +190,23 @@ func watchHubPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, c
if modifiedPod.Status.Phase == core.PodRunning && !isPodReady {
isPodReady = true
hubPodReady = true
postHubStarted(ctx, kubernetesProvider, cancel)
ready.Lock()
ready.Hub = true
ready.Unlock()
postHubStarted(ctx, kubernetesProvider, cancel, false)
}
ready.Lock()
proxyDone := ready.Proxy
hubPodReady := ready.Hub
frontPodReady := ready.Front
ready.Unlock()
if !proxyDone && hubPodReady && frontPodReady {
proxyDone = true
ready.Lock()
ready.Proxy = true
ready.Unlock()
postFrontStarted(ctx, kubernetesProvider, cancel)
}
case kubernetes.EventBookmark:
@@ -199,7 +222,7 @@ func watchHubPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, c
log.Error().
Str("pod", kubernetes.HubPodName).
Str("namespace", config.Config.SelfNamespace).
Str("namespace", config.Config.Tap.Release.Namespace).
Err(err).
Msg("Failed creating pod.")
cancel()
@@ -223,7 +246,7 @@ func watchHubPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, c
func watchFrontPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc) {
podExactRegex := regexp.MustCompile(fmt.Sprintf("^%s$", kubernetes.FrontPodName))
podWatchHelper := kubernetes.NewPodWatchHelper(kubernetesProvider, podExactRegex)
eventChan, errorChan := kubernetes.FilteredWatch(ctx, podWatchHelper, []string{config.Config.SelfNamespace}, podWatchHelper)
eventChan, errorChan := kubernetes.FilteredWatch(ctx, podWatchHelper, []string{config.Config.Tap.Release.Namespace}, podWatchHelper)
isPodReady := false
timeAfter := time.After(120 * time.Second)
@@ -258,11 +281,21 @@ func watchFrontPod(ctx context.Context, kubernetesProvider *kubernetes.Provider,
if modifiedPod.Status.Phase == core.PodRunning && !isPodReady {
isPodReady = true
frontPodReady = true
ready.Lock()
ready.Front = true
ready.Unlock()
}
ready.Lock()
proxyDone := ready.Proxy
hubPodReady := ready.Hub
frontPodReady := ready.Front
ready.Unlock()
if !proxyDone && hubPodReady && frontPodReady {
proxyDone = true
ready.Lock()
ready.Proxy = true
ready.Unlock()
postFrontStarted(ctx, kubernetesProvider, cancel)
}
case kubernetes.EventBookmark:
@@ -278,10 +311,9 @@ func watchFrontPod(ctx context.Context, kubernetesProvider *kubernetes.Provider,
log.Error().
Str("pod", kubernetes.FrontPodName).
Str("namespace", config.Config.SelfNamespace).
Str("namespace", config.Config.Tap.Release.Namespace).
Err(err).
Msg("Failed creating pod.")
cancel()
case <-timeAfter:
if !isPodReady {
@@ -302,7 +334,7 @@ func watchFrontPod(ctx context.Context, kubernetesProvider *kubernetes.Provider,
func watchHubEvents(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc) {
podExactRegex := regexp.MustCompile(fmt.Sprintf("^%s", kubernetes.HubPodName))
eventWatchHelper := kubernetes.NewEventWatchHelper(kubernetesProvider, podExactRegex, "pod")
eventChan, errorChan := kubernetes.FilteredWatch(ctx, eventWatchHelper, []string{config.Config.SelfNamespace}, eventWatchHelper)
eventChan, errorChan := kubernetes.FilteredWatch(ctx, eventWatchHelper, []string{config.Config.Tap.Release.Namespace}, eventWatchHelper)
for {
select {
case wEvent, ok := <-eventChan:
@@ -368,38 +400,54 @@ func watchHubEvents(ctx context.Context, kubernetesProvider *kubernetes.Provider
}
}
func postHubStarted(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc) {
func postHubStarted(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc, update bool) {
startProxyReportErrorIfAny(
kubernetesProvider,
ctx,
kubernetes.HubServiceName,
kubernetes.HubPodName,
configStructs.ProxyHubPortLabel,
config.Config.Tap.Proxy.Hub.SrcPort,
config.Config.Tap.Proxy.Hub.DstPort,
config.Config.Tap.Proxy.Hub.Port,
configStructs.ContainerPort,
"/echo",
)
err := kubernetes.CreateWorkers(
kubernetesProvider,
state.selfServiceAccountExists,
ctx,
config.Config.SelfNamespace,
config.Config.Tap.Resources.Worker,
config.Config.ImagePullPolicy(),
config.Config.ImagePullSecrets(),
config.Config.Tap.ServiceMesh,
config.Config.Tap.Tls,
config.Config.Tap.Debug,
)
if err != nil {
log.Error().Err(err).Send()
if update {
// Pod regex
connector.PostRegexToHub(config.Config.Tap.PodRegexStr, state.targetNamespaces)
// License
if config.Config.License != "" {
connector.PostLicense(config.Config.License)
}
// Scripting
connector.PostEnv(config.Config.Scripting.Env)
scripts, err := config.Config.Scripting.GetScripts()
if err != nil {
log.Error().Err(err).Send()
}
for _, script := range scripts {
_, err = connector.PostScript(script)
if err != nil {
log.Error().Err(err).Send()
}
}
connector.PostScriptDone()
}
connector.PostRegexToHub(config.Config.Tap.PodRegexStr, state.targetNamespaces)
if !update && !config.Config.Tap.Ingress.Enabled {
// Hub proxy URL
url := kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Hub.Port)
log.Info().Str("url", url).Msg(fmt.Sprintf(utils.Green, "Hub is available at:"))
}
url := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort)
log.Info().Str("url", url).Msg(fmt.Sprintf(utils.Green, "Hub is available at:"))
if config.Config.Scripting.Source != "" && config.Config.Scripting.WatchScripts {
watchScripts(false)
}
}
func postFrontStarted(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc) {
@@ -409,29 +457,20 @@ func postFrontStarted(ctx context.Context, kubernetesProvider *kubernetes.Provid
kubernetes.FrontServiceName,
kubernetes.FrontPodName,
configStructs.ProxyFrontPortLabel,
config.Config.Tap.Proxy.Front.SrcPort,
config.Config.Tap.Proxy.Front.DstPort,
config.Config.Tap.Proxy.Front.Port,
configStructs.ContainerPort,
"",
)
url := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Front.SrcPort)
var url string
if config.Config.Tap.Ingress.Enabled {
url = fmt.Sprintf("http://%s", config.Config.Tap.Ingress.Host)
} else {
url = kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Front.Port)
}
log.Info().Str("url", url).Msg(fmt.Sprintf(utils.Green, fmt.Sprintf("%s is available at:", misc.Software)))
if !config.Config.HeadlessMode {
utils.OpenBrowser(url)
}
}
func getNamespaces(kubernetesProvider *kubernetes.Provider) []string {
if config.Config.Tap.AllNamespaces {
return []string{kubernetes.K8sAllNamespaces}
} else if len(config.Config.Tap.Namespaces) > 0 {
return utils.Unique(config.Config.Tap.Namespaces)
} else {
currentNamespace, err := kubernetesProvider.CurrentNamespace()
if err != nil {
log.Fatal().Err(err).Msg("Error getting current namespace!")
}
return []string{currentNamespace}
}
}
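For orientation, the in-process install above corresponds roughly to this Helm CLI invocation (standard Helm flags, using the ReleaseConfig defaults; not a command defined in this codebase):

$ helm install kubeshark kubeshark --repo https://helm.kubeshark.co --namespace default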

View File

@@ -5,11 +5,14 @@ import (
"fmt"
"io"
"os"
"path"
"path/filepath"
"reflect"
"strconv"
"strings"
"github.com/creasty/defaults"
"github.com/kubeshark/kubeshark/misc"
"github.com/kubeshark/kubeshark/misc/version"
"github.com/kubeshark/kubeshark/utils"
"github.com/rs/zerolog"
@@ -28,9 +31,10 @@ const (
)
var (
Config ConfigStruct
DebugMode bool
cmdName string
Config ConfigStruct
DebugMode bool
cmdName string
ConfigFilePath string
)
func InitConfig(cmd *cobra.Command) error {
@@ -48,21 +52,38 @@ func InitConfig(cmd *cobra.Command) error {
return nil
}
go version.CheckNewerVersion()
if !utils.Contains([]string{
"console",
"pro",
"manifests",
}, cmd.Use) {
go version.CheckNewerVersion()
}
Config = CreateDefaultConfig()
Config.Tap.Debug = DebugMode
cmdName = cmd.Name()
if utils.Contains([]string{
"clean",
"console",
"pro",
"proxy",
"scripts",
}, cmdName) {
cmdName = "tap"
}
if err := defaults.Set(&Config); err != nil {
return err
}
configFilePathFlag := cmd.Flags().Lookup(ConfigFilePathCommandName)
configFilePath := configFilePathFlag.Value.String()
if err := loadConfigFile(configFilePath, &Config); err != nil {
if configFilePathFlag.Changed || !os.IsNotExist(err) {
ConfigFilePath = path.Join(misc.GetDotFolderPath(), "config.yaml")
if err := loadConfigFile(&Config, utils.Contains([]string{
"manifests",
}, cmd.Use)); err != nil {
if !os.IsNotExist(err) {
return fmt.Errorf("invalid config, %w\n"+
"you can regenerate the file by removing it (%v) and using `kubeshark config -r`", err, configFilePath)
"you can regenerate the file by removing it (%v) and using `kubeshark config -r`", err, ConfigFilePath)
}
}
@@ -92,29 +113,50 @@ func WriteConfig(config *ConfigStruct) error {
}
data := []byte(template)
if err := os.WriteFile(Config.ConfigFilePath, data, 0644); err != nil {
if _, err := os.Stat(ConfigFilePath); os.IsNotExist(err) {
err = os.MkdirAll(filepath.Dir(ConfigFilePath), 0700)
if err != nil {
return fmt.Errorf("failed creating directories, err: %v", err)
}
}
if err := os.WriteFile(ConfigFilePath, data, 0644); err != nil {
return fmt.Errorf("failed writing config, err: %v", err)
}
return nil
}
func loadConfigFile(configFilePath string, config *ConfigStruct) error {
reader, openErr := os.Open(configFilePath)
if openErr != nil {
return openErr
func loadConfigFile(config *ConfigStruct, silent bool) error {
cwd, err := os.Getwd()
if err != nil {
return err
}
buf, readErr := io.ReadAll(reader)
if readErr != nil {
return readErr
cwdConfig := filepath.Join(cwd, fmt.Sprintf("%s.yaml", misc.Program))
reader, err := os.Open(cwdConfig)
if err != nil {
reader, err = os.Open(ConfigFilePath)
if err != nil {
return err
}
} else {
ConfigFilePath = cwdConfig
}
buf, err := io.ReadAll(reader)
if err != nil {
return err
}
if err := yaml.Unmarshal(buf, config); err != nil {
return err
}
log.Info().Str("path", configFilePath).Msg("Found config file!")
if !silent {
log.Info().Str("path", ConfigFilePath).Msg("Found config file!")
}
return nil
}
@@ -123,9 +165,7 @@ func initFlag(f *pflag.Flag) {
configElemValue := reflect.ValueOf(&Config).Elem()
var flagPath []string
if !utils.Contains([]string{ConfigFilePathCommandName}, f.Name) {
flagPath = append(flagPath, cmdName)
}
flagPath = append(flagPath, cmdName)
flagPath = append(flagPath, strings.Split(f.Name, "-")...)
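In effect the lookup order is: a kubeshark.yaml in the current working directory wins, otherwise the dot-folder config is used. A condensed, editorial sketch of that resolution (assuming misc.Program is "kubeshark" and the dot folder is ~/.kubeshark):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func resolveConfigPath(dotFolder string) string {
	cwd, err := os.Getwd()
	if err == nil {
		local := filepath.Join(cwd, "kubeshark.yaml")
		if _, statErr := os.Stat(local); statErr == nil {
			return local // a config file in the working directory wins
		}
	}
	return filepath.Join(dotFolder, "config.yaml") // e.g. ~/.kubeshark/config.yaml
}

func main() {
	fmt.Println(resolveConfigPath(filepath.Join(os.Getenv("HOME"), ".kubeshark")))
}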

View File

@@ -2,19 +2,15 @@ package config
import (
"os"
"path"
"path/filepath"
"github.com/kubeshark/kubeshark/config/configStructs"
"github.com/kubeshark/kubeshark/misc"
v1 "k8s.io/api/core/v1"
"k8s.io/client-go/util/homedir"
)
const (
SelfNamespaceConfigName = "selfnamespace"
ConfigFilePathCommandName = "configpath"
KubeConfigPathConfigName = "kube-configpath"
KubeConfigPathConfigName = "kube-configpath"
)
func CreateDefaultConfig() ConfigStruct {
@@ -22,23 +18,24 @@ func CreateDefaultConfig() ConfigStruct {
}
type KubeConfig struct {
ConfigPathStr string `yaml:"configpath"`
Context string `yaml:"context"`
ConfigPathStr string `yaml:"configpath" json:"configpath"`
Context string `yaml:"context" json:"context"`
}
type ManifestsConfig struct {
Dump bool `yaml:"dump" json:"dump"`
}
type ConfigStruct struct {
Tap configStructs.TapConfig `yaml:"tap"`
Logs configStructs.LogsConfig `yaml:"logs"`
Config configStructs.ConfigConfig `yaml:"config,omitempty"`
Kube KubeConfig `yaml:"kube"`
SelfNamespace string `yaml:"selfnamespace" default:"kubeshark"`
DumpLogs bool `yaml:"dumplogs" default:"false"`
ConfigFilePath string `yaml:"configpath,omitempty" readonly:""`
HeadlessMode bool `yaml:"headless" default:"false"`
}
func (config *ConfigStruct) SetDefaults() {
config.ConfigFilePath = path.Join(misc.GetDotFolderPath(), "config.yaml")
Tap configStructs.TapConfig `yaml:"tap" json:"tap"`
Logs configStructs.LogsConfig `yaml:"logs" json:"logs"`
Config configStructs.ConfigConfig `yaml:"config,omitempty" json:"config,omitempty"`
Kube KubeConfig `yaml:"kube" json:"kube"`
DumpLogs bool `yaml:"dumplogs" json:"dumplogs" default:"false"`
HeadlessMode bool `yaml:"headless" json:"headless" default:"false"`
License string `yaml:"license" json:"license" default:""`
Scripting configStructs.ScriptingConfig `yaml:"scripting" json:"scripting"`
Manifests ManifestsConfig `yaml:"manifests,omitempty" json:"manifests,omitempty"`
}
func (config *ConfigStruct) ImagePullPolicy() v1.PullPolicy {
@@ -54,10 +51,6 @@ func (config *ConfigStruct) ImagePullSecrets() []v1.LocalObjectReference {
return ref
}
func (config *ConfigStruct) IsNsRestrictedMode() bool {
return config.SelfNamespace != misc.Program // Notice "kubeshark" string must match the default SelfNamespace
}
func (config *ConfigStruct) KubeConfigPath() string {
if config.Kube.ConfigPathStr != "" {
return config.Kube.ConfigPathStr

View File

@@ -5,5 +5,5 @@ const (
)
type ConfigConfig struct {
Regenerate bool `yaml:"regenerate,omitempty" default:"false" readonly:""`
Regenerate bool `yaml:"regenerate,omitempty" json:"regenerate,omitempty" default:"false" readonly:""`
}

View File

@@ -13,7 +13,7 @@ const (
)
type LogsConfig struct {
FileStr string `yaml:"file"`
FileStr string `yaml:"file" json:"file"`
}
func (config *LogsConfig) Validate() error {

View File

@@ -0,0 +1,46 @@
package configStructs
import (
"io/fs"
"os"
"path/filepath"
"github.com/kubeshark/kubeshark/misc"
"github.com/rs/zerolog/log"
)
type ScriptingConfig struct {
Env map[string]interface{} `yaml:"env" json:"env"`
Source string `yaml:"source" json:"source" default:""`
WatchScripts bool `yaml:"watchScripts" json:"watchScripts" default:"true"`
}
func (config *ScriptingConfig) GetScripts() (scripts []*misc.Script, err error) {
if config.Source == "" {
return
}
var files []fs.DirEntry
files, err = os.ReadDir(config.Source)
if err != nil {
return
}
for _, f := range files {
if f.IsDir() {
continue
}
var script *misc.Script
path := filepath.Join(config.Source, f.Name())
script, err = misc.ReadScriptFile(path)
if err != nil {
return
}
scripts = append(scripts, script)
log.Debug().Str("path", path).Msg("Found script:")
}
return
}

View File

@@ -4,9 +4,8 @@ import (
"fmt"
"regexp"
"github.com/kubeshark/kubeshark/kubernetes"
"github.com/kubeshark/kubeshark/utils"
"github.com/rs/zerolog/log"
v1 "k8s.io/api/core/v1"
networking "k8s.io/api/networking/v1"
)
const (
@@ -18,63 +17,110 @@ const (
ProxyHubPortLabel = "proxy-hub-port"
ProxyHostLabel = "proxy-host"
NamespacesLabel = "namespaces"
AllNamespacesLabel = "allnamespaces"
ReleaseNamespaceLabel = "release-namespace"
PersistentStorageLabel = "persistentstorage"
StorageLimitLabel = "storagelimit"
StorageClassLabel = "storageclass"
DryRunLabel = "dryrun"
PcapLabel = "pcap"
ServiceMeshLabel = "servicemesh"
TlsLabel = "tls"
IgnoreTaintedLabel = "ignoretainted"
IngressEnabledLabel = "ingress-enabled"
DebugLabel = "debug"
ContainerPort = 80
ContainerPortStr = "80"
)
type ResourceLimits struct {
CPU string `yaml:"cpu" json:"cpu" default:"750m"`
Memory string `yaml:"memory" json:"memory" default:"1Gi"`
}
type ResourceRequests struct {
CPU string `yaml:"cpu" json:"cpu" default:"50m"`
Memory string `yaml:"memory" json:"memory" default:"50Mi"`
}
type ResourceRequirements struct {
Limits ResourceLimits `yaml:"limits" json:"limits"`
Requests ResourceRequests `yaml:"requests" json:"requests"`
}
type WorkerConfig struct {
SrcPort uint16 `yaml:"port" default:"8897"`
DstPort uint16 `yaml:"srvport" default:"8897"`
SrvPort uint16 `yaml:"srvport" json:"srvport" default:"8897"`
}
type HubConfig struct {
SrcPort uint16 `yaml:"port" default:"8898"`
DstPort uint16 `yaml:"srvport" default:"80"`
Port uint16 `yaml:"port" json:"port" default:"8898"`
SrvPort uint16 `yaml:"srvport" json:"srvport" default:"8898"`
}
type FrontConfig struct {
SrcPort uint16 `yaml:"port" default:"8899"`
DstPort uint16 `yaml:"srvport" default:"80"`
Port uint16 `yaml:"port" json:"port" default:"8899"`
SrvPort uint16 `yaml:"srvport" json:"srvport" default:"8899"`
}
type ProxyConfig struct {
Worker WorkerConfig `yaml:"worker"`
Hub HubConfig `yaml:"hub"`
Front FrontConfig `yaml:"front"`
Host string `yaml:"host" default:"127.0.0.1"`
Worker WorkerConfig `yaml:"worker" json:"worker"`
Hub HubConfig `yaml:"hub" json:"hub"`
Front FrontConfig `yaml:"front" json:"front"`
Host string `yaml:"host" json:"host" default:"127.0.0.1"`
}
type DockerConfig struct {
Registry string `yaml:"registry" default:"docker.io/kubeshark"`
Tag string `yaml:"tag" default:"latest"`
ImagePullPolicy string `yaml:"imagepullpolicy" default:"Always"`
ImagePullSecrets []string `yaml:"imagepullsecrets"`
Registry string `yaml:"registry" json:"registry" default:"docker.io/kubeshark"`
Tag string `yaml:"tag" json:"tag" default:"latest"`
ImagePullPolicy string `yaml:"imagepullpolicy" json:"imagepullpolicy" default:"Always"`
ImagePullSecrets []string `yaml:"imagepullsecrets" json:"imagepullsecrets"`
}
type ResourcesConfig struct {
Worker kubernetes.Resources `yaml:"worker"`
Hub kubernetes.Resources `yaml:"hub"`
Worker ResourceRequirements `yaml:"worker"`
Hub ResourceRequirements `yaml:"hub"`
}
type AuthConfig struct {
ApprovedDomains []string `yaml:"approveddomains" json:"approveddomains" default:"[]"`
}
type IngressConfig struct {
Enabled bool `yaml:"enabled" json:"enabled" default:"false"`
ClassName string `yaml:"classname" json:"classname" default:"kubeshark-ingress-class"`
Controller string `yaml:"controller" json:"controller" default:"k8s.io/ingress-nginx"`
Host string `yaml:"host" json:"host" default:"ks.svc.cluster.local"`
TLS []networking.IngressTLS `yaml:"tls" json:"tls"`
Auth AuthConfig `yaml:"auth" json:"auth"`
CertManager string `yaml:"certmanager" json:"certmanager" default:"letsencrypt-prod"`
}
type ReleaseConfig struct {
Repo string `yaml:"repo" json:"repo" default:"https://helm.kubeshark.co"`
Name string `yaml:"name" json:"name" default:"kubeshark"`
Namespace string `yaml:"namespace" json:"namespace" default:"default"`
}
type TapConfig struct {
Docker DockerConfig `yaml:"docker"`
Proxy ProxyConfig `yaml:"proxy"`
PodRegexStr string `yaml:"regex" default:".*"`
Namespaces []string `yaml:"namespaces"`
AllNamespaces bool `yaml:"allnamespaces" default:"false"`
StorageLimit string `yaml:"storagelimit" default:"200MB"`
DryRun bool `yaml:"dryrun" default:"false"`
Pcap string `yaml:"pcap" default:""`
Resources ResourcesConfig `yaml:"resources"`
ServiceMesh bool `yaml:"servicemesh" default:"true"`
Tls bool `yaml:"tls" default:"true"`
PacketCapture string `yaml:"packetcapture" default:"libpcap"`
Debug bool `yaml:"debug" default:"false"`
Docker DockerConfig `yaml:"docker" json:"docker"`
Proxy ProxyConfig `yaml:"proxy" json:"proxy"`
PodRegexStr string `yaml:"regex" json:"regex" default:".*"`
Namespaces []string `yaml:"namespaces" json:"namespaces" default:"[]"`
Release ReleaseConfig `yaml:"release" json:"release"`
PersistentStorage bool `yaml:"persistentstorage" json:"persistentstorage" default:"false"`
StorageLimit string `yaml:"storagelimit" json:"storagelimit" default:"200Mi"`
StorageClass string `yaml:"storageclass" json:"storageclass" default:"standard"`
DryRun bool `yaml:"dryrun" json:"dryrun" default:"false"`
Pcap string `yaml:"pcap" json:"pcap" default:""`
Resources ResourcesConfig `yaml:"resources" json:"resources"`
ServiceMesh bool `yaml:"servicemesh" json:"servicemesh" default:"true"`
Tls bool `yaml:"tls" json:"tls" default:"true"`
PacketCapture string `yaml:"packetcapture" json:"packetcapture" default:"libpcap"`
IgnoreTainted bool `yaml:"ignoretainted" json:"ignoretainted" default:"false"`
Labels map[string]string `yaml:"labels" json:"labels" default:"{}"`
Annotations map[string]string `yaml:"annotations" json:"annotations" default:"{}"`
NodeSelectorTerms []v1.NodeSelectorTerm `yaml:"nodeselectorterms" json:"nodeselectorterms" default:"[]"`
Ingress IngressConfig `yaml:"ingress" json:"ingress"`
Debug bool `yaml:"debug" json:"debug" default:"false"`
}
func (config *TapConfig) PodRegex() *regexp.Regexp {
@@ -82,24 +128,11 @@ func (config *TapConfig) PodRegex() *regexp.Regexp {
return podRegex
}
func (config *TapConfig) StorageLimitBytes() int64 {
storageLimitBytes, err := utils.HumanReadableToBytes(config.StorageLimit)
if err != nil {
log.Fatal().Err(err).Send()
}
return storageLimitBytes
}
func (config *TapConfig) Validate() error {
_, compileErr := regexp.Compile(config.PodRegexStr)
if compileErr != nil {
return fmt.Errorf("%s is not a valid regex %s", config.PodRegexStr, compileErr)
}
_, parseHumanDataSizeErr := utils.HumanReadableToBytes(config.StorageLimit)
if parseHumanDataSizeErr != nil {
return fmt.Errorf("Could not parse --%s value %s", StorageLimitLabel, config.StorageLimit)
}
return nil
}
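Put together, the new fields map onto a config file shaped like this (all values are the declared defaults, shown for illustration):

tap:
  release:
    repo: https://helm.kubeshark.co
    name: kubeshark
    namespace: default
  persistentstorage: false
  storagelimit: 200Mi
  storageclass: standard
  ingress:
    enabled: false
    classname: kubeshark-ingress-class
    host: ks.svc.cluster.local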

View File

@@ -1,6 +1,9 @@
package docker
import "fmt"
import (
"fmt"
"strings"
)
const (
hub = "hub"
@@ -9,7 +12,7 @@ const (
)
var (
registry = "docker.io/kubeshark"
registry = "docker.io/kubeshark/"
tag = "latest"
)
@@ -18,7 +21,11 @@ func GetRegistry() string {
}
func SetRegistry(value string) {
registry = value
if strings.HasPrefix(value, "docker.io/kubeshark") {
registry = "docker.io/kubeshark/"
} else {
registry = value
}
}
func GetTag() string {
@@ -30,7 +37,7 @@ func SetTag(value string) {
}
func getImage(image string) string {
return fmt.Sprintf("%s/%s:%s", registry, image, tag)
return fmt.Sprintf("%s%s:%s", registry, image, tag)
}
func GetHubImage() string {

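Taken together, the two hunks above change the image-name contract: `SetRegistry` now canonicalizes anything under `docker.io/kubeshark` to a trailing-slash prefix, and `getImage` concatenates without inserting its own slash, so a custom registry must carry its own separator. A small sketch of the resulting behavior (inferred from the diff, not additional repo code; the `ghcr.io/example/` registry is hypothetical):

```go
package main

import (
	"fmt"
	"strings"
)

var (
	registry = "docker.io/kubeshark/"
	tag      = "latest"
)

// SetRegistry pins any docker.io/kubeshark variant to the canonical prefix.
func SetRegistry(value string) {
	if strings.HasPrefix(value, "docker.io/kubeshark") {
		registry = "docker.io/kubeshark/"
	} else {
		registry = value
	}
}

// getImage concatenates directly; the registry supplies the separator.
func getImage(image string) string {
	return fmt.Sprintf("%s%s:%s", registry, image, tag)
}

func main() {
	fmt.Println(getImage("hub")) // docker.io/kubeshark/hub:latest

	SetRegistry("ghcr.io/example/") // hypothetical custom registry
	fmt.Println(getImage("worker")) // ghcr.io/example/worker:latest
}
```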

@@ -6,6 +6,7 @@ import (
regexpsyntax "regexp/syntax"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/config/configStructs"
"github.com/kubeshark/kubeshark/misc"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
@@ -21,9 +22,9 @@ func FormatError(err error) error {
"in the config file or setting the targeted namespace with --%s %s=<NAMEPSACE>",
err,
misc.Software,
config.SelfNamespaceConfigName,
configStructs.ReleaseNamespaceLabel,
config.SetCommandName,
config.SelfNamespaceConfigName)
configStructs.ReleaseNamespaceLabel)
} else if syntaxError, isSyntaxError := asRegexSyntaxError(err); isSyntaxError {
errorNew = fmt.Errorf("regex %s is invalid: %w", syntaxError.Expr, err)
} else {

go.mod

@@ -1,106 +1,177 @@
module github.com/kubeshark/kubeshark
go 1.17
go 1.19
require (
github.com/aws/aws-sdk-go-v2 v1.18.1
github.com/aws/aws-sdk-go-v2/config v1.18.27
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.70
github.com/aws/aws-sdk-go-v2/service/s3 v1.35.0
github.com/creasty/defaults v1.5.2
github.com/docker/docker v20.10.22+incompatible
github.com/docker/docker v20.10.24+incompatible
github.com/docker/go-connections v0.4.0
github.com/docker/go-units v0.4.0
github.com/fsnotify/fsnotify v1.6.0
github.com/gin-gonic/gin v1.7.7
github.com/google/go-github/v37 v37.0.0
github.com/kubeshark/base v0.5.1
github.com/gorilla/websocket v1.4.2
github.com/pkg/errors v0.9.1
github.com/robertkrimen/otto v0.2.1
github.com/rs/zerolog v1.28.0
github.com/spf13/cobra v1.3.0
github.com/spf13/cobra v1.6.1
github.com/spf13/pflag v1.0.5
gopkg.in/yaml.v3 v3.0.1
k8s.io/api v0.23.3
k8s.io/apimachinery v0.23.3
k8s.io/client-go v0.23.3
k8s.io/kubectl v0.23.3
helm.sh/helm/v3 v3.12.0
k8s.io/api v0.27.1
k8s.io/apimachinery v0.27.1
k8s.io/client-go v0.27.1
k8s.io/kubectl v0.27.1
)
require (
cloud.google.com/go/compute v1.2.0 // indirect
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1 // indirect
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
github.com/Azure/go-autorest/autorest v0.11.24 // indirect
github.com/Azure/go-autorest/autorest/adal v0.9.18 // indirect
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
github.com/Azure/go-autorest/logger v0.2.1 // indirect
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
github.com/BurntSushi/toml v1.2.1 // indirect
github.com/MakeNowJust/heredoc v1.0.0 // indirect
github.com/Masterminds/goutils v1.1.1 // indirect
github.com/Masterminds/semver/v3 v3.2.0 // indirect
github.com/Masterminds/sprig/v3 v3.2.3 // indirect
github.com/Masterminds/squirrel v1.5.3 // indirect
github.com/Microsoft/go-winio v0.6.0 // indirect
github.com/PuerkitoBio/purell v1.1.1 // indirect
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5 // indirect
github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.13.26 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.4 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.34 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.28 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.35 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.26 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.29 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.28 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.3 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.12.12 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.12 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.19.2 // indirect
github.com/aws/smithy-go v1.13.5 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/chai2010/gettext-go v1.0.2 // indirect
github.com/containerd/containerd v1.7.0 // indirect
github.com/cyphar/filepath-securejoin v0.2.3 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/docker/distribution v2.7.1+incompatible // indirect
github.com/docker/cli v20.10.21+incompatible // indirect
github.com/docker/distribution v2.8.1+incompatible // indirect
github.com/docker/docker-credential-helpers v0.7.0 // indirect
github.com/docker/go-metrics v0.0.1 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/emicklei/go-restful/v3 v3.10.1 // indirect
github.com/evanphx/json-patch v5.6.0+incompatible // indirect
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
github.com/fvbommel/sortorder v1.0.2 // indirect
github.com/fatih/color v1.13.0 // indirect
github.com/gin-contrib/sse v0.1.0 // indirect
github.com/go-errors/errors v1.4.2 // indirect
github.com/go-gorp/gorp/v3 v3.0.5 // indirect
github.com/go-logr/logr v1.2.3 // indirect
github.com/go-openapi/jsonpointer v0.19.5 // indirect
github.com/go-openapi/jsonreference v0.19.6 // indirect
github.com/go-openapi/swag v0.21.1 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-openapi/jsonpointer v0.19.6 // indirect
github.com/go-openapi/jsonreference v0.20.1 // indirect
github.com/go-openapi/swag v0.22.3 // indirect
github.com/go-playground/locales v0.13.0 // indirect
github.com/go-playground/universal-translator v0.17.0 // indirect
github.com/go-playground/validator/v10 v10.4.1 // indirect
github.com/gobwas/glob v0.2.3 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v4 v4.2.0 // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/google/btree v1.0.1 // indirect
github.com/google/go-cmp v0.5.7 // indirect
github.com/google/gnostic v0.5.7-v3refs // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/go-querystring v1.1.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/googleapis/gnostic v0.5.5 // indirect
github.com/gorilla/mux v1.8.0 // indirect
github.com/gosuri/uitable v0.0.4 // indirect
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
github.com/imdario/mergo v0.3.12 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/huandu/xstrings v1.4.0 // indirect
github.com/imdario/mergo v0.3.13 // indirect
github.com/inconshreveable/mousetrap v1.0.1 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/jmoiron/sqlx v1.3.5 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.16.0 // indirect
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
github.com/leodido/go-urn v1.2.0 // indirect
github.com/lib/pq v1.10.7 // indirect
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.16 // indirect
github.com/mattn/go-isatty v0.0.17 // indirect
github.com/mattn/go-runewidth v0.0.9 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/go-wordwrap v1.0.1 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/moby/locker v1.0.1 // indirect
github.com/moby/spdystream v0.2.0 // indirect
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
github.com/moby/term v0.0.0-20221205130635-1aeaba878587 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.0.2 // indirect
github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b // indirect
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/russross/blackfriday v1.6.0 // indirect
github.com/sirupsen/logrus v1.7.0 // indirect
github.com/stretchr/testify v1.8.1 // indirect
github.com/prometheus/client_golang v1.14.0 // indirect
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/common v0.37.0 // indirect
github.com/prometheus/procfs v0.8.0 // indirect
github.com/rubenv/sql-migrate v1.3.1 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/shopspring/decimal v1.3.1 // indirect
github.com/sirupsen/logrus v1.9.0 // indirect
github.com/spf13/cast v1.5.0 // indirect
github.com/ugorji/go/codec v1.1.7 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
github.com/xlab/treeprint v1.1.0 // indirect
go.opentelemetry.io/otel v1.14.0 // indirect
go.opentelemetry.io/otel/trace v1.14.0 // indirect
go.starlark.net v0.0.0-20220203230714-bb14e151c28f // indirect
golang.org/x/crypto v0.0.0-20220208050332-20e1d8d225ab // indirect
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
golang.org/x/net v0.2.0 // indirect
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect
golang.org/x/sys v0.2.0 // indirect
golang.org/x/term v0.2.0 // indirect
golang.org/x/text v0.4.0 // indirect
golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11 // indirect
golang.org/x/tools v0.1.12 // indirect
golang.org/x/crypto v0.5.0 // indirect
golang.org/x/mod v0.9.0 // indirect
golang.org/x/net v0.8.0 // indirect
golang.org/x/oauth2 v0.4.0 // indirect
golang.org/x/sync v0.1.0 // indirect
golang.org/x/sys v0.6.0 // indirect
golang.org/x/term v0.6.0 // indirect
golang.org/x/text v0.9.0 // indirect
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
golang.org/x/tools v0.7.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/protobuf v1.27.1 // indirect
google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect
google.golang.org/grpc v1.53.0 // indirect
google.golang.org/protobuf v1.28.1 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/sourcemap.v1 v1.0.5 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
k8s.io/cli-runtime v0.23.3 // indirect
k8s.io/component-base v0.23.3 // indirect
k8s.io/klog/v2 v2.40.1 // indirect
k8s.io/kube-openapi v0.0.0-20220124234850-424119656bbf // indirect
k8s.io/utils v0.0.0-20220127004650-9b3446523e65 // indirect
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect
sigs.k8s.io/kustomize/api v0.11.1 // indirect
sigs.k8s.io/kustomize/kyaml v0.13.3 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
k8s.io/apiextensions-apiserver v0.27.1 // indirect
k8s.io/apiserver v0.27.1 // indirect
k8s.io/cli-runtime v0.27.1 // indirect
k8s.io/component-base v0.27.1 // indirect
k8s.io/klog/v2 v2.90.1 // indirect
k8s.io/kube-openapi v0.0.0-20230308215209-15aac26d736a // indirect
k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5 // indirect
oras.land/oras-go v1.2.2 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/kustomize/api v0.13.2 // indirect
sigs.k8s.io/kustomize/kyaml v0.14.1 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
sigs.k8s.io/yaml v1.3.0 // indirect
)

go.sum

File diff suppressed because it is too large

helm-chart/Chart.yaml Normal file

@@ -0,0 +1,26 @@
apiVersion: v2
appVersion: "41.2"
description: The API Traffic Analyzer for Kubernetes
home: https://kubeshark.co
keywords:
- kubeshark
- packet capture
- traffic capture
- traffic analyzer
- network sniffer
- observability
- devops
- microservice
- forensics
- api
kubeVersion: '>= 1.16.0-0'
maintainers:
- email: info@kubeshark.co
name: Kubeshark
url: https://kubeshark.co
name: kubeshark
sources:
- https://github.com/kubeshark/kubeshark/tree/master/helm-chart
type: application
version: "41.2"
icon: https://raw.githubusercontent.com/kubeshark/assets/master/logo/vector/logo.svg

helm-chart/LICENSE Normal file

@@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
https://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2022 Kubeshark
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

helm-chart/README.md Normal file

@@ -0,0 +1,73 @@
# Helm Chart of Kubeshark
## Officially
Add the Helm repo for Kubeshark:
```shell
helm repo add kubeshark https://helm.kubeshark.co
```
then install Kubeshark:
```shell
helm install kubeshark kubeshark/kubeshark
```
## Locally
Clone the repo:
```shell
git clone git@github.com:kubeshark/kubeshark.git --depth 1
cd kubeshark/helm-chart
```
Render the templates:
```shell
helm template .
```
Install Kubeshark:
```shell
helm install kubeshark .
```
Uninstall Kubeshark:
```shell
helm uninstall kubeshark
```
## Accessing
Set up port forwarding:
```shell
kubectl port-forward -n kubeshark service/kubeshark-hub 8898:80 & \
kubectl port-forward -n kubeshark service/kubeshark-front 8899:80
```
Visit [localhost:8899](http://localhost:8899)
## Installing with Ingress Enabled
```shell
helm install kubeshark kubeshark/kubeshark \
--set tap.ingress.enabled=true \
--set tap.ingress.host=ks.svc.cluster.local \
--set "tap.ingress.auth.approveddomains={gmail.com}" \
--set license=LICENSE_GOES_HERE
```
You can get your license [here](https://console.kubeshark.co/).
## Installing with Persistent Storage Enabled
```shell
helm install kubeshark kubeshark/kubeshark \
--set tap.persistentstorage=true \
--set license=LICENSE_GOES_HERE
```
You can get your license [here](https://console.kubeshark.co/).


@@ -0,0 +1,15 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
{{- if .Values.tap.labels }}
{{- toYaml .Values.tap.labels | nindent 4 }}
{{- end }}
annotations:
{{- if .Values.tap.annotations }}
{{- toYaml .Values.tap.annotations | nindent 4 }}
{{- end }}
name: kubeshark-service-account
namespace: {{ .Release.Namespace }}


@@ -0,0 +1,31 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
{{- if .Values.tap.labels }}
{{- toYaml .Values.tap.labels | nindent 4 }}
{{- end }}
annotations:
{{- if .Values.tap.annotations }}
{{- toYaml .Values.tap.annotations | nindent 4 }}
{{- end }}
name: kubeshark-cluster-role
namespace: {{ .Release.Namespace }}
rules:
- apiGroups:
- ""
- extensions
- apps
- networking.k8s.io
resources:
- pods
- services
- endpoints
- persistentvolumeclaims
- ingresses
verbs:
- list
- get
- watch


@@ -0,0 +1,23 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
{{- if .Values.tap.labels }}
{{- toYaml .Values.tap.labels | nindent 4 }}
{{- end }}
annotations:
{{- if .Values.tap.annotations }}
{{- toYaml .Values.tap.annotations | nindent 4 }}
{{- end }}
name: kubeshark-cluster-role-binding
namespace: {{ .Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubeshark-cluster-role
subjects:
- kind: ServiceAccount
name: kubeshark-service-account
namespace: {{ .Release.Namespace }}


@@ -0,0 +1,62 @@
---
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: null
labels:
app: kubeshark-hub
{{- if .Values.tap.labels }}
{{- toYaml .Values.tap.labels | nindent 4 }}
{{- end }}
annotations:
{{- if .Values.tap.annotations }}
{{- toYaml .Values.tap.annotations | nindent 4 }}
{{- end }}
name: kubeshark-hub
namespace: {{ .Release.Namespace }}
spec:
containers:
- command:
- ./hub
{{ .Values.tap.debug | ternary "- -debug" "" }}
env:
- name: POD_REGEX
value: '{{ .Values.tap.regex }}'
- name: NAMESPACES
value: '{{ gt (len .Values.tap.namespaces) 0 | ternary (join "," .Values.tap.namespaces) "" }}'
- name: LICENSE
value: '{{ .Values.license }}'
- name: SCRIPTING_ENV
value: '{{ .Values.scripting.env | toJson }}'
- name: SCRIPTING_SCRIPTS
value: '[]'
- name: AUTH_APPROVED_DOMAINS
value: '{{ gt (len .Values.tap.ingress.auth.approveddomains) 0 | ternary (join "," .Values.tap.ingress.auth.approveddomains) "" }}'
image: '{{ .Values.tap.docker.registry }}/hub:{{ .Values.tap.docker.tag }}'
imagePullPolicy: {{ .Values.tap.docker.imagepullpolicy }}
name: kubeshark-hub
resources:
limits:
cpu: {{ .Values.tap.resources.hub.limits.cpu }}
memory: {{ .Values.tap.resources.hub.limits.memory }}
requests:
cpu: {{ .Values.tap.resources.hub.requests.cpu }}
memory: {{ .Values.tap.resources.hub.requests.memory }}
dnsPolicy: ClusterFirstWithHostNet
serviceAccountName: kubeshark-service-account
terminationGracePeriodSeconds: 0
tolerations:
- effect: NoExecute
operator: Exists
{{- if not .Values.tap.ignoretainted }}
- effect: NoSchedule
operator: Exists
{{- end }}
{{- if gt (len .Values.tap.nodeselectorterms) 0}}
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
{{- toYaml .Values.tap.nodeselectorterms | nindent 8 }}
{{- end }}
status: {}


@@ -0,0 +1,25 @@
---
apiVersion: v1
kind: Service
metadata:
creationTimestamp: null
labels:
{{- if .Values.tap.labels }}
{{- toYaml .Values.tap.labels | nindent 4 }}
{{- end }}
annotations:
{{- if .Values.tap.annotations }}
{{- toYaml .Values.tap.annotations | nindent 4 }}
{{- end }}
name: kubeshark-hub
namespace: {{ .Release.Namespace }}
spec:
ports:
- name: kubeshark-hub
port: 80
targetPort: 80
selector:
app: kubeshark-hub
type: NodePort
status:
loadBalancer: {}


@@ -0,0 +1,60 @@
---
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: null
labels:
app: kubeshark-front
{{- if .Values.tap.labels }}
{{- toYaml .Values.tap.labels | nindent 4 }}
{{- end }}
annotations:
{{- if .Values.tap.annotations }}
{{- toYaml .Values.tap.annotations | nindent 4 }}
{{- end }}
name: kubeshark-front
namespace: {{ .Release.Namespace }}
spec:
containers:
- env:
- name: REACT_APP_DEFAULT_FILTER
value: ' '
- name: REACT_APP_HUB_HOST
value: ' '
- name: REACT_APP_HUB_PORT
value: '{{ .Values.tap.ingress.enabled | ternary "/api" ":8898" }}'
image: '{{ .Values.tap.docker.registry }}/front:{{ .Values.tap.docker.tag }}'
imagePullPolicy: {{ .Values.tap.docker.imagepullpolicy }}
name: kubeshark-front
readinessProbe:
failureThreshold: 3
periodSeconds: 1
successThreshold: 1
tcpSocket:
port: 80
timeoutSeconds: 1
resources:
limits:
cpu: 750m
memory: 1Gi
requests:
cpu: 50m
memory: 50Mi
dnsPolicy: ClusterFirstWithHostNet
serviceAccountName: kubeshark-service-account
terminationGracePeriodSeconds: 0
tolerations:
- effect: NoExecute
operator: Exists
{{- if not .Values.tap.ignoretainted }}
- effect: NoSchedule
operator: Exists
{{- end }}
{{- if gt (len .Values.tap.nodeselectorterms) 0}}
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
{{- toYaml .Values.tap.nodeselectorterms | nindent 8 }}
{{- end }}
status: {}


@@ -0,0 +1,25 @@
---
apiVersion: v1
kind: Service
metadata:
creationTimestamp: null
labels:
{{- if .Values.tap.labels }}
{{- toYaml .Values.tap.labels | nindent 4 }}
{{- end }}
annotations:
{{- if .Values.tap.annotations }}
{{- toYaml .Values.tap.annotations | nindent 4 }}
{{- end }}
name: kubeshark-front
namespace: {{ .Release.Namespace }}
spec:
ports:
- name: kubeshark-front
port: 80
targetPort: 80
selector:
app: kubeshark-front
type: NodePort
status:
loadBalancer: {}


@@ -0,0 +1,25 @@
---
{{- if .Values.tap.persistentstorage }}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
creationTimestamp: null
labels:
{{- if .Values.tap.labels }}
{{- toYaml .Values.tap.labels | nindent 4 }}
{{- end }}
annotations:
{{- if .Values.tap.annotations }}
{{- toYaml .Values.tap.annotations | nindent 4 }}
{{- end }}
name: kubeshark-persistent-volume-claim
namespace: {{ .Release.Namespace }}
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: {{ .Values.tap.storagelimit }}
storageClassName: {{ .Values.tap.storageclass }}
status: {}
{{- end }}


@@ -0,0 +1,110 @@
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
creationTimestamp: null
labels:
app: kubeshark-worker-daemon-set
{{- if .Values.tap.labels }}
{{- toYaml .Values.tap.labels | nindent 4 }}
{{- end }}
annotations:
{{- if .Values.tap.annotations }}
{{- toYaml .Values.tap.annotations | nindent 4 }}
{{- end }}
name: kubeshark-worker-daemon-set
namespace: {{ .Release.Namespace }}
spec:
selector:
matchLabels:
app: kubeshark-worker-daemon-set
{{- if .Values.tap.labels }}
{{- toYaml .Values.tap.labels | nindent 6 }}
{{- end }}
template:
metadata:
creationTimestamp: null
labels:
app: kubeshark-worker-daemon-set
{{- if .Values.tap.labels }}
{{- toYaml .Values.tap.labels | nindent 8 }}
{{- end }}
name: kubeshark-worker-daemon-set
namespace: {{ .Release.Namespace }}
spec:
containers:
- command:
- ./worker
{{ .Values.tap.debug | ternary "- -debug" "" }}
- -i
- any
- -port
- '{{ .Values.tap.proxy.worker.srvport }}'
- -packet-capture
- '{{ .Values.tap.packetcapture }}'
- -servicemesh
- -tls
- -procfs
- /hostproc
image: '{{ .Values.tap.docker.registry }}/worker:{{ .Values.tap.docker.tag }}'
imagePullPolicy: {{ .Values.tap.docker.imagepullpolicy }}
name: kubeshark-worker-daemon-set
resources:
limits:
cpu: {{ .Values.tap.resources.worker.limits.cpu }}
memory: {{ .Values.tap.resources.worker.limits.memory }}
requests:
cpu: {{ .Values.tap.resources.worker.requests.cpu }}
memory: {{ .Values.tap.resources.worker.requests.memory }}
securityContext:
capabilities:
add:
- NET_RAW
- NET_ADMIN
- SYS_ADMIN
- SYS_PTRACE
- DAC_OVERRIDE
- SYS_RESOURCE
drop:
- ALL
volumeMounts:
- mountPath: /hostproc
name: proc
readOnly: true
- mountPath: /sys
name: sys
readOnly: true
{{- if .Values.tap.persistentstorage }}
- mountPath: /app/data
name: kubeshark-persistent-volume
{{- end }}
dnsPolicy: ClusterFirstWithHostNet
hostNetwork: true
serviceAccountName: kubeshark-service-account
terminationGracePeriodSeconds: 0
tolerations:
- effect: NoExecute
operator: Exists
{{- if not .Values.tap.ignoretainted }}
- effect: NoSchedule
operator: Exists
{{- end }}
{{- if gt (len .Values.tap.nodeselectorterms) 0}}
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
{{- toYaml .Values.tap.nodeselectorterms | nindent 12 }}
{{- end }}
volumes:
- hostPath:
path: /proc
name: proc
- hostPath:
path: /sys
name: sys
{{- if .Values.tap.persistentstorage }}
- name: kubeshark-persistent-volume
persistentVolumeClaim:
claimName: kubeshark-persistent-volume-claim
{{- end }}


@@ -0,0 +1,19 @@
---
{{- if .Values.tap.ingress.enabled }}
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
creationTimestamp: null
labels:
{{- if .Values.tap.labels }}
{{- toYaml .Values.tap.labels | nindent 4 }}
{{- end }}
annotations:
{{- if .Values.tap.annotations }}
{{- toYaml .Values.tap.annotations | nindent 4 }}
{{- end }}
name: kubeshark-ingress-class
namespace: {{ .Release.Namespace }}
spec:
controller: {{ .Values.tap.ingress.controller }}
{{- end }}


@@ -0,0 +1,45 @@
---
{{- if .Values.tap.ingress.enabled }}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
annotations:
certmanager.k8s.io/cluster-issuer: {{ .Values.tap.ingress.certmanager }}
nginx.ingress.kubernetes.io/rewrite-target: /$2
{{- if .Values.tap.annotations }}
{{- toYaml .Values.tap.annotations | nindent 4 }}
{{- end }}
creationTimestamp: null
labels:
{{- if .Values.tap.labels }}
{{- toYaml .Values.tap.labels | nindent 4 }}
{{- end }}
name: kubeshark-ingress
namespace: {{ .Release.Namespace }}
spec:
ingressClassName: {{ .Values.tap.ingress.classname }}
rules:
- host: {{ .Values.tap.ingress.host }}
http:
paths:
- backend:
service:
name: kubeshark-hub
port:
number: 80
path: /api(/|$)(.*)
pathType: Prefix
- backend:
service:
name: kubeshark-front
port:
number: 80
path: /()(.*)
pathType: Prefix
tls:
{{- if gt (len .Values.tap.ingress.tls) 0}}
{{- toYaml .Values.tap.ingress.tls | nindent 2 }}
{{- end }}
status:
loadBalancer: {}
{{- end }}

helm-chart/values.yaml Normal file

@@ -0,0 +1,71 @@
tap:
docker:
registry: docker.io/kubeshark
tag: latest
imagepullpolicy: Always
imagepullsecrets: []
proxy:
worker:
srvport: 8897
hub:
port: 8898
srvport: 8898
front:
port: 8899
srvport: 8899
host: 127.0.0.1
regex: .*
namespaces: []
release:
repo: https://helm.kubeshark.co
name: kubeshark
namespace: default
persistentstorage: false
storagelimit: 200Mi
storageclass: standard
dryrun: false
pcap: ""
resources:
worker:
limits:
cpu: 750m
memory: 1Gi
requests:
cpu: 50m
memory: 50Mi
hub:
limits:
cpu: 750m
memory: 1Gi
requests:
cpu: 50m
memory: 50Mi
servicemesh: true
tls: true
packetcapture: libpcap
ignoretainted: false
labels: {}
annotations: {}
nodeselectorterms: []
ingress:
enabled: false
classname: kubeshark-ingress-class
controller: k8s.io/ingress-nginx
host: ks.svc.cluster.local
tls: []
auth:
approveddomains: []
certmanager: letsencrypt-prod
debug: false
logs:
file: ""
kube:
configpath: ""
context: ""
dumplogs: false
headless: false
license: ""
scripting:
env: {}
source: ""
watchScripts: true


@@ -3,12 +3,14 @@ package connect
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"net/http"
"net/url"
"time"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/misc"
"github.com/kubeshark/kubeshark/utils"
"github.com/rs/zerolog/log"
@@ -23,6 +25,7 @@ type Connector struct {
const DefaultRetries = 3
const DefaultTimeout = 2 * time.Second
const DefaultSleep = 1 * time.Second
func NewConnector(url string, retries int, timeout time.Duration) *Connector {
return &Connector{
@@ -44,7 +47,7 @@ func (connector *Connector) TestConnection(path string) error {
break
}
retriesLeft -= 1
time.Sleep(time.Second)
time.Sleep(5 * DefaultSleep)
}
if retriesLeft == 0 {
@@ -70,46 +73,17 @@ func (connector *Connector) PostWorkerPodToHub(pod *v1.Pod) {
} else {
ok := false
for !ok {
if _, err = utils.Post(postWorkerUrl, "application/json", bytes.NewBuffer(podMarshalled), connector.client); err != nil {
var resp *http.Response
if resp, err = utils.Post(postWorkerUrl, "application/json", bytes.NewBuffer(podMarshalled), connector.client, config.Config.License); err != nil || resp.StatusCode != http.StatusOK {
if _, ok := err.(*url.Error); ok {
break
}
log.Debug().Err(err).Msg("Failed sending the Worker pod to Hub:")
log.Warn().Err(err).Msg("Failed sending the Worker pod to Hub. Retrying...")
} else {
ok = true
log.Debug().Interface("worker-pod", pod).Msg("Reported worker pod to Hub:")
connector.PostStorageLimitToHub(config.Config.Tap.StorageLimitBytes())
return
}
time.Sleep(time.Second)
}
}
}
type postStorageLimit struct {
Limit int64 `json:"limit"`
}
func (connector *Connector) PostStorageLimitToHub(limit int64) {
payload := &postStorageLimit{
Limit: limit,
}
postStorageLimitUrl := fmt.Sprintf("%s/pcaps/set-storage-limit", connector.url)
if payloadMarshalled, err := json.Marshal(payload); err != nil {
log.Error().Err(err).Msg("Failed to marshal the storage limit:")
} else {
ok := false
for !ok {
if _, err = utils.Post(postStorageLimitUrl, "application/json", bytes.NewBuffer(payloadMarshalled), connector.client); err != nil {
if _, ok := err.(*url.Error); ok {
break
}
log.Debug().Err(err).Msg("Failed sending the storage limit to Hub:")
} else {
ok = true
log.Debug().Int("limit", int(limit)).Msg("Reported storage limit to Hub:")
}
time.Sleep(time.Second)
time.Sleep(DefaultSleep)
}
}
}
@@ -127,21 +101,242 @@ func (connector *Connector) PostRegexToHub(regex string, namespaces []string) {
Namespaces: namespaces,
}
if payloadMarshalled, err := json.Marshal(payload); err != nil {
log.Error().Err(err).Msg("Failed to marshal the pod regex:")
} else {
ok := false
for !ok {
var resp *http.Response
if resp, err = utils.Post(postRegexUrl, "application/json", bytes.NewBuffer(payloadMarshalled), connector.client, config.Config.License); err != nil || resp.StatusCode != http.StatusOK {
if _, ok := err.(*url.Error); ok {
break
}
log.Warn().Err(err).Msg("Failed sending the pod regex to Hub. Retrying...")
} else {
log.Debug().Str("regex", regex).Strs("namespaces", namespaces).Msg("Reported pod regex to Hub:")
return
}
time.Sleep(DefaultSleep)
}
}
}
type postLicenseRequest struct {
License string `json:"license"`
}
func (connector *Connector) PostLicense(license string) {
postLicenseUrl := fmt.Sprintf("%s/license", connector.url)
payload := postLicenseRequest{
License: license,
}
if payloadMarshalled, err := json.Marshal(payload); err != nil {
log.Error().Err(err).Msg("Failed to marshal the payload:")
} else {
ok := false
for !ok {
if _, err = utils.Post(postRegexUrl, "application/json", bytes.NewBuffer(payloadMarshalled), connector.client); err != nil {
var resp *http.Response
if resp, err = utils.Post(postLicenseUrl, "application/json", bytes.NewBuffer(payloadMarshalled), connector.client, config.Config.License); err != nil || resp.StatusCode != http.StatusOK {
if _, ok := err.(*url.Error); ok {
break
}
log.Debug().Err(err).Msg("Failed sending the payload to Hub:")
log.Warn().Err(err).Msg("Failed sending the license to Hub. Retrying...")
} else {
ok = true
log.Debug().Str("regex", regex).Strs("namespaces", namespaces).Msg("Reported payload to Hub:")
log.Debug().Str("license", license).Msg("Reported license to Hub:")
return
}
time.Sleep(time.Second)
time.Sleep(DefaultSleep)
}
}
}
func (connector *Connector) PostLicenseSingle(license string) {
postLicenseUrl := fmt.Sprintf("%s/license", connector.url)
payload := postLicenseRequest{
License: license,
}
if payloadMarshalled, err := json.Marshal(payload); err != nil {
log.Error().Err(err).Msg("Failed to marshal the payload:")
} else {
var resp *http.Response
if resp, err = utils.Post(postLicenseUrl, "application/json", bytes.NewBuffer(payloadMarshalled), connector.client, config.Config.License); err != nil || resp.StatusCode != http.StatusOK {
log.Warn().Err(err).Msg("Failed sending the license to Hub.")
} else {
log.Debug().Str("license", license).Msg("Reported license to Hub:")
return
}
}
}
func (connector *Connector) PostEnv(env map[string]interface{}) {
if len(env) == 0 {
return
}
postEnvUrl := fmt.Sprintf("%s/scripts/env", connector.url)
if envMarshalled, err := json.Marshal(env); err != nil {
log.Error().Err(err).Msg("Failed to marshal the env:")
} else {
ok := false
for !ok {
var resp *http.Response
if resp, err = utils.Post(postEnvUrl, "application/json", bytes.NewBuffer(envMarshalled), connector.client, config.Config.License); err != nil || resp.StatusCode != http.StatusOK {
if _, ok := err.(*url.Error); ok {
break
}
log.Warn().Err(err).Msg("Failed sending the scripting environment variables to Hub. Retrying...")
} else {
log.Debug().Interface("env", env).Msg("Reported scripting environment variables to Hub:")
return
}
time.Sleep(DefaultSleep)
}
}
}
func (connector *Connector) PostScript(script *misc.Script) (index int64, err error) {
postScriptUrl := fmt.Sprintf("%s/scripts", connector.url)
var scriptMarshalled []byte
if scriptMarshalled, err = json.Marshal(script); err != nil {
log.Error().Err(err).Msg("Failed to marshal the script:")
} else {
ok := false
for !ok {
var resp *http.Response
if resp, err = utils.Post(postScriptUrl, "application/json", bytes.NewBuffer(scriptMarshalled), connector.client, config.Config.License); err != nil || resp.StatusCode != http.StatusOK {
if _, ok := err.(*url.Error); ok {
break
}
log.Warn().Err(err).Msg("Failed creating script Hub:")
} else {
var j map[string]interface{}
err = json.NewDecoder(resp.Body).Decode(&j)
if err != nil {
return
}
val, ok := j["index"]
if !ok {
err = errors.New("Response does not contain `index` field!")
return
}
index = int64(val.(float64))
log.Debug().Int("index", int(index)).Interface("script", script).Msg("Created script on Hub:")
return
}
time.Sleep(DefaultSleep)
}
}
return
}
func (connector *Connector) PutScript(script *misc.Script, index int64) (err error) {
putScriptUrl := fmt.Sprintf("%s/scripts/%d", connector.url, index)
var scriptMarshalled []byte
if scriptMarshalled, err = json.Marshal(script); err != nil {
log.Error().Err(err).Msg("Failed to marshal the script:")
} else {
ok := false
for !ok {
client := &http.Client{}
var req *http.Request
req, err = http.NewRequest(http.MethodPut, putScriptUrl, bytes.NewBuffer(scriptMarshalled))
if err != nil {
log.Error().Err(err).Send()
return
}
req.Header.Set("Content-Type", "application/json")
req.Header.Set("License-Key", config.Config.License)
var resp *http.Response
resp, err = client.Do(req)
if err != nil {
log.Error().Err(err).Send()
return
}
if resp.StatusCode != http.StatusOK {
if _, ok := err.(*url.Error); ok {
break
}
log.Warn().Err(err).Msg("Failed updating script on Hub:")
} else {
log.Debug().Int("index", int(index)).Interface("script", script).Msg("Updated script on Hub:")
return
}
time.Sleep(DefaultSleep)
}
}
return
}
func (connector *Connector) DeleteScript(index int64) (err error) {
deleteScriptUrl := fmt.Sprintf("%s/scripts/%d", connector.url, index)
ok := false
for !ok {
client := &http.Client{}
var req *http.Request
req, err = http.NewRequest(http.MethodDelete, deleteScriptUrl, nil)
if err != nil {
log.Error().Err(err).Send()
return
}
req.Header.Set("Content-Type", "application/json")
req.Header.Set("License-Key", config.Config.License)
var resp *http.Response
resp, err = client.Do(req)
if err != nil {
log.Error().Err(err).Send()
return
}
if resp.StatusCode != http.StatusOK {
if _, ok := err.(*url.Error); ok {
break
}
log.Warn().Err(err).Msg("Failed deleting script on Hub:")
} else {
log.Debug().Int("index", int(index)).Msg("Deleted script on Hub:")
return
}
time.Sleep(DefaultSleep)
}
return
}
func (connector *Connector) PostScriptDone() {
postScriptDoneUrl := fmt.Sprintf("%s/scripts/done", connector.url)
ok := false
var err error
for !ok {
var resp *http.Response
if resp, err = utils.Post(postScriptDoneUrl, "application/json", nil, connector.client, config.Config.License); err != nil || resp.StatusCode != http.StatusOK {
if _, ok := err.(*url.Error); ok {
break
}
log.Warn().Err(err).Msg("Failed sending the POST scripts done to Hub. Retrying...")
} else {
log.Debug().Msg("Reported POST scripts done to Hub.")
return
}
time.Sleep(DefaultSleep)
}
}
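Every POST helper above repeats the same loop: retry on failure, sleep `DefaultSleep` between attempts, and bail out immediately on a `*url.Error` (a connection-level failure that retrying will not cure). A hypothetical helper, not part of this diff, capturing that shared pattern:

```go
// A sketch of the loop the Connector methods above hand-write; it assumes
// the same package context (DefaultSleep, zerolog) shown in this diff.
import (
	"net/url"
	"time"

	"github.com/rs/zerolog/log"
)

func retryPost(what string, post func() error) {
	for {
		err := post()
		if err == nil {
			return
		}
		// Connection-level failures are not retried, matching the
		// `if _, ok := err.(*url.Error); ok { break }` checks above.
		if _, isURLErr := err.(*url.Error); isURLErr {
			return
		}
		log.Warn().Err(err).Msgf("Failed sending %s to Hub. Retrying...", what)
		time.Sleep(DefaultSleep)
	}
}
```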

kubectl.sh Executable file

@@ -0,0 +1,13 @@
#!/bin/bash
# Useful kubectl commands for Kubeshark development
# This command outputs all Kubernetes resources using YAML format and pipes it to VS Code
if [ $1 = "view-all-resources" ] ; then
kubectl get $(kubectl api-resources | awk '{print $1}' | tail -n +2 | tr '\n' ',' | sed s/,\$//) -o yaml | code -
fi
# This command outputs all Kubernetes resources in "kubeshark" namespace using YAML format and pipes it to VS Code
if [[ $1 = "view-kubeshark-resources" ]] ; then
kubectl get $(kubectl api-resources | awk '{print $1}' | tail -n +2 | tr '\n' ',' | sed s/,\$//) -n kubeshark -o yaml | code -
fi


@@ -14,11 +14,15 @@ const (
ServiceAccountName = SelfResourcesPrefix + "service-account"
WorkerDaemonSetName = SelfResourcesPrefix + "worker-daemon-set"
WorkerPodName = SelfResourcesPrefix + "worker"
PersistentVolumeName = SelfResourcesPrefix + "persistent-volume"
PersistentVolumeClaimName = SelfResourcesPrefix + "persistent-volume-claim"
IngressName = SelfResourcesPrefix + "ingress"
IngressClassName = SelfResourcesPrefix + "ingress-class"
PersistentVolumeHostPath = "/app/data"
MinKubernetesServerVersion = "1.16.0"
)
const (
LabelPrefixApp = "app.kubernetes.io/"
LabelManagedBy = LabelPrefixApp + "managed-by"
LabelCreatedBy = LabelPrefixApp + "created-by"
LabelManagedBy = SelfResourcesPrefix + "managed-by"
LabelCreatedBy = SelfResourcesPrefix + "created-by"
)
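Since the manifests above name resources `kubeshark-service-account`, `kubeshark-ingress-class`, and so on, `SelfResourcesPrefix` is evidently `"kubeshark-"`; this hunk therefore moves the bookkeeping labels off the shared `app.kubernetes.io/` prefix onto Kubeshark's own. A tiny sketch of the resulting label keys (the prefix value is an assumption inferred from those resource names):

```go
package main

import "fmt"

// Assumed value, inferred from the generated resource names above.
const SelfResourcesPrefix = "kubeshark-"

const (
	LabelManagedBy = SelfResourcesPrefix + "managed-by"
	LabelCreatedBy = SelfResourcesPrefix + "created-by"
)

func main() {
	fmt.Println(LabelManagedBy) // kubeshark-managed-by
	fmt.Println(LabelCreatedBy) // kubeshark-created-by
}
```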

kubernetes/helm/helm.go Normal file

@@ -0,0 +1,186 @@
package helm
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"regexp"
"strings"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/misc"
"github.com/pkg/errors"
"github.com/rs/zerolog/log"
"helm.sh/helm/v3/pkg/action"
"helm.sh/helm/v3/pkg/chart"
"helm.sh/helm/v3/pkg/chart/loader"
"helm.sh/helm/v3/pkg/cli"
"helm.sh/helm/v3/pkg/downloader"
"helm.sh/helm/v3/pkg/getter"
"helm.sh/helm/v3/pkg/kube"
"helm.sh/helm/v3/pkg/registry"
"helm.sh/helm/v3/pkg/release"
"helm.sh/helm/v3/pkg/repo"
)
const ENV_HELM_DRIVER = "HELM_DRIVER"
var settings = cli.New()
type Helm struct {
repo string
releaseName string
releaseNamespace string
}
func NewHelm(repo string, releaseName string, releaseNamespace string) *Helm {
return &Helm{
repo: repo,
releaseName: releaseName,
releaseNamespace: releaseNamespace,
}
}
func parseOCIRef(chartRef string) (string, string, error) {
refTagRegexp := regexp.MustCompile(`^(oci://[^:]+(:[0-9]{1,5})?[^:]+):(.*)$`)
caps := refTagRegexp.FindStringSubmatch(chartRef)
if len(caps) != 4 {
return "", "", errors.Errorf("improperly formatted oci chart reference: %s", chartRef)
}
chartRef = caps[1]
tag := caps[3]
return chartRef, tag, nil
}
func (h *Helm) Install() (rel *release.Release, err error) {
kubeConfigPath := config.Config.KubeConfigPath()
actionConfig := new(action.Configuration)
if err = actionConfig.Init(kube.GetConfig(kubeConfigPath, "", h.releaseNamespace), h.releaseNamespace, os.Getenv(ENV_HELM_DRIVER), func(format string, v ...interface{}) {
log.Info().Msgf(format, v...)
}); err != nil {
return
}
client := action.NewInstall(actionConfig)
client.Namespace = h.releaseNamespace
client.ReleaseName = h.releaseName
chartPath := os.Getenv(fmt.Sprintf("%s_HELM_CHART_PATH", strings.ToUpper(misc.Program)))
if chartPath == "" {
var chartURL string
chartURL, err = repo.FindChartInRepoURL(h.repo, h.releaseName, "", "", "", "", getter.All(&cli.EnvSettings{}))
if err != nil {
return
}
var cp string
cp, err = client.ChartPathOptions.LocateChart(chartURL, settings)
if err != nil {
return
}
m := &downloader.Manager{
Out: os.Stdout,
ChartPath: cp,
Keyring: client.ChartPathOptions.Keyring,
SkipUpdate: false,
Getters: getter.All(settings),
RepositoryConfig: settings.RepositoryConfig,
RepositoryCache: settings.RepositoryCache,
Debug: settings.Debug,
}
dl := downloader.ChartDownloader{
Out: m.Out,
Verify: m.Verify,
Keyring: m.Keyring,
RepositoryConfig: m.RepositoryConfig,
RepositoryCache: m.RepositoryCache,
RegistryClient: m.RegistryClient,
Getters: m.Getters,
Options: []getter.Option{
getter.WithInsecureSkipVerifyTLS(false),
},
}
repoPath := filepath.Dir(m.ChartPath)
err = os.MkdirAll(repoPath, os.ModePerm)
if err != nil {
return
}
version := ""
if registry.IsOCI(chartURL) {
chartURL, version, err = parseOCIRef(chartURL)
if err != nil {
return
}
dl.Options = append(dl.Options,
getter.WithRegistryClient(m.RegistryClient),
getter.WithTagName(version))
}
log.Info().
Str("url", chartURL).
Str("repo-path", repoPath).
Msg("Downloading Helm chart:")
if _, _, err = dl.DownloadTo(chartURL, version, repoPath); err != nil {
return
}
chartPath = m.ChartPath
}
var chart *chart.Chart
chart, err = loader.Load(chartPath)
if err != nil {
return
}
log.Info().
Str("release", chart.Metadata.Name).
Str("version", chart.Metadata.Version).
Strs("source", chart.Metadata.Sources).
Str("kube-version", chart.Metadata.KubeVersion).
Msg("Installing using Helm:")
var configMarshalled []byte
configMarshalled, err = json.Marshal(config.Config)
if err != nil {
return
}
var configUnmarshalled map[string]interface{}
err = json.Unmarshal(configMarshalled, &configUnmarshalled)
if err != nil {
return
}
rel, err = client.Run(chart, configUnmarshalled)
if err != nil {
return
}
return
}
func (h *Helm) Uninstall() (resp *release.UninstallReleaseResponse, err error) {
kubeConfigPath := config.Config.KubeConfigPath()
actionConfig := new(action.Configuration)
if err = actionConfig.Init(kube.GetConfig(kubeConfigPath, "", h.releaseNamespace), h.releaseNamespace, os.Getenv(ENV_HELM_DRIVER), func(format string, v ...interface{}) {
log.Info().Msgf(format, v...)
}); err != nil {
return
}
client := action.NewUninstall(actionConfig)
resp, err = client.Run(h.releaseName)
if err != nil {
return
}
return
}
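A minimal usage sketch of this new package (names taken from the file above; the repo, release name, and namespace mirror the ReleaseConfig defaults earlier in this diff, and error handling is kept short):

```go
package main

import (
	"github.com/kubeshark/kubeshark/kubernetes/helm"
	"github.com/rs/zerolog/log"
)

func main() {
	// Repo, release name, and namespace as in the ReleaseConfig defaults.
	h := helm.NewHelm("https://helm.kubeshark.co", "kubeshark", "default")

	rel, err := h.Install()
	if err != nil {
		log.Fatal().Err(err).Send()
	}
	log.Info().
		Str("name", rel.Name).
		Str("namespace", rel.Namespace).
		Msg("Helm release installed:")
}
```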


@@ -3,36 +3,26 @@ package kubernetes
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"net/url"
"path/filepath"
"regexp"
"github.com/kubeshark/kubeshark/docker"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/misc"
"github.com/kubeshark/kubeshark/semver"
"github.com/kubeshark/kubeshark/utils"
"github.com/rs/zerolog/log"
auth "k8s.io/api/authorization/v1"
core "k8s.io/api/core/v1"
rbac "k8s.io/api/rbac/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/version"
"k8s.io/apimachinery/pkg/watch"
applyconfapp "k8s.io/client-go/applyconfigurations/apps/v1"
applyconfcore "k8s.io/client-go/applyconfigurations/core/v1"
applyconfmeta "k8s.io/client-go/applyconfigurations/meta/v1"
"k8s.io/client-go/kubernetes"
_ "k8s.io/client-go/plugin/pkg/client/auth"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/clientcmd"
watchtools "k8s.io/client-go/tools/watch"
)
type Provider struct {
@@ -43,14 +33,6 @@ type Provider struct {
createdBy string
}
const (
fieldManagerName = "kubeshark-manager"
procfsVolumeName = "proc"
procfsMountPath = "/hostproc"
sysfsVolumeName = "sys"
sysfsMountPath = "/sys"
)
func NewProvider(kubeConfigPath string, contextName string) (*Provider, error) {
kubernetesConfig := loadKubernetesConfiguration(kubeConfigPath, contextName)
restClientConfig, err := kubernetesConfig.ClientConfig()
@@ -89,305 +71,6 @@ func NewProvider(kubeConfigPath string, contextName string) (*Provider, error) {
}, nil
}
// NewProviderInCluster is used by another repo that calls this function
func NewProviderInCluster() (*Provider, error) {
restClientConfig, err := rest.InClusterConfig()
if err != nil {
return nil, err
}
clientSet, err := getClientSet(restClientConfig)
if err != nil {
return nil, err
}
return &Provider{
clientSet: clientSet,
kubernetesConfig: nil, // not relevant in cluster
clientConfig: *restClientConfig,
managedBy: misc.Program,
createdBy: misc.Program,
}, nil
}
func (provider *Provider) CurrentNamespace() (string, error) {
if provider.kubernetesConfig == nil {
return "", errors.New("kubernetesConfig is nil, The CLI will not work with in-cluster kubernetes config, use a kubeconfig file when initializing the Provider")
}
ns, _, err := provider.kubernetesConfig.Namespace()
return ns, err
}
func (provider *Provider) WaitUtilNamespaceDeleted(ctx context.Context, name string) error {
fieldSelector := fmt.Sprintf("metadata.name=%s", name)
var limit int64 = 1
lw := &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
options.FieldSelector = fieldSelector
options.Limit = limit
return provider.clientSet.CoreV1().Namespaces().List(ctx, options)
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
options.FieldSelector = fieldSelector
options.Limit = limit
return provider.clientSet.CoreV1().Namespaces().Watch(ctx, options)
},
}
var preconditionFunc watchtools.PreconditionFunc = func(store cache.Store) (bool, error) {
_, exists, err := store.Get(&core.Namespace{ObjectMeta: metav1.ObjectMeta{Name: name}})
if err != nil {
return false, err
}
if exists {
return false, nil
}
return true, nil
}
conditionFunc := func(e watch.Event) (bool, error) {
if e.Type == watch.Deleted {
return true, nil
}
return false, nil
}
obj := &core.Namespace{}
_, err := watchtools.UntilWithSync(ctx, lw, obj, preconditionFunc, conditionFunc)
return err
}
func (provider *Provider) CreateNamespace(ctx context.Context, name string) (*core.Namespace, error) {
namespaceSpec := &core.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: map[string]string{
LabelManagedBy: provider.managedBy,
LabelCreatedBy: provider.createdBy,
},
},
}
return provider.clientSet.CoreV1().Namespaces().Create(ctx, namespaceSpec, metav1.CreateOptions{})
}
type PodOptions struct {
Namespace string
PodName string
PodImage string
ServiceAccountName string
Resources Resources
ImagePullPolicy core.PullPolicy
ImagePullSecrets []core.LocalObjectReference
Debug bool
}
func (provider *Provider) BuildHubPod(opts *PodOptions) (*core.Pod, error) {
cpuLimit, err := resource.ParseQuantity(opts.Resources.CpuLimit)
if err != nil {
return nil, fmt.Errorf("invalid cpu limit for %s container", opts.PodName)
}
memLimit, err := resource.ParseQuantity(opts.Resources.MemoryLimit)
if err != nil {
return nil, fmt.Errorf("invalid memory limit for %s container", opts.PodName)
}
cpuRequests, err := resource.ParseQuantity(opts.Resources.CpuRequests)
if err != nil {
return nil, fmt.Errorf("invalid cpu request for %s container", opts.PodName)
}
memRequests, err := resource.ParseQuantity(opts.Resources.MemoryRequests)
if err != nil {
return nil, fmt.Errorf("invalid memory request for %s container", opts.PodName)
}
command := []string{
"./hub",
}
if opts.Debug {
command = append(command, "-debug")
}
containers := []core.Container{
{
Name: opts.PodName,
Image: opts.PodImage,
ImagePullPolicy: opts.ImagePullPolicy,
Command: command,
Resources: core.ResourceRequirements{
Limits: core.ResourceList{
"cpu": cpuLimit,
"memory": memLimit,
},
Requests: core.ResourceList{
"cpu": cpuRequests,
"memory": memRequests,
},
},
},
}
pod := &core.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: opts.PodName,
Labels: map[string]string{
"app": opts.PodName,
LabelManagedBy: provider.managedBy,
LabelCreatedBy: provider.createdBy,
},
},
Spec: core.PodSpec{
Containers: containers,
DNSPolicy: core.DNSClusterFirstWithHostNet,
TerminationGracePeriodSeconds: new(int64),
Tolerations: []core.Toleration{
{
Operator: core.TolerationOpExists,
Effect: core.TaintEffectNoExecute,
},
{
Operator: core.TolerationOpExists,
Effect: core.TaintEffectNoSchedule,
},
},
ImagePullSecrets: opts.ImagePullSecrets,
},
}
//define the service account only when it exists to prevent pod crash
if opts.ServiceAccountName != "" {
pod.Spec.ServiceAccountName = opts.ServiceAccountName
}
return pod, nil
}
func (provider *Provider) BuildFrontPod(opts *PodOptions, hubHost string, hubPort string) (*core.Pod, error) {
cpuLimit, err := resource.ParseQuantity(opts.Resources.CpuLimit)
if err != nil {
return nil, fmt.Errorf("invalid cpu limit for %s container", opts.PodName)
}
memLimit, err := resource.ParseQuantity(opts.Resources.MemoryLimit)
if err != nil {
return nil, fmt.Errorf("invalid memory limit for %s container", opts.PodName)
}
cpuRequests, err := resource.ParseQuantity(opts.Resources.CpuRequests)
if err != nil {
return nil, fmt.Errorf("invalid cpu request for %s container", opts.PodName)
}
memRequests, err := resource.ParseQuantity(opts.Resources.MemoryRequests)
if err != nil {
return nil, fmt.Errorf("invalid memory request for %s container", opts.PodName)
}
volumeMounts := []core.VolumeMount{}
volumes := []core.Volume{}
containers := []core.Container{
{
Name: opts.PodName,
Image: docker.GetFrontImage(),
ImagePullPolicy: opts.ImagePullPolicy,
VolumeMounts: volumeMounts,
ReadinessProbe: &core.Probe{
FailureThreshold: 3,
ProbeHandler: core.ProbeHandler{
TCPSocket: &core.TCPSocketAction{
Port: intstr.Parse("80"),
},
},
PeriodSeconds: 1,
SuccessThreshold: 1,
TimeoutSeconds: 1,
},
Resources: core.ResourceRequirements{
Limits: core.ResourceList{
"cpu": cpuLimit,
"memory": memLimit,
},
Requests: core.ResourceList{
"cpu": cpuRequests,
"memory": memRequests,
},
},
Env: []core.EnvVar{
{
Name: "REACT_APP_DEFAULT_FILTER",
Value: " ",
},
{
Name: "REACT_APP_HUB_HOST",
Value: " ",
},
{
Name: "REACT_APP_HUB_PORT",
Value: hubPort,
},
},
},
}
pod := &core.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: opts.PodName,
Labels: map[string]string{
"app": opts.PodName,
LabelManagedBy: provider.managedBy,
LabelCreatedBy: provider.createdBy,
},
},
Spec: core.PodSpec{
Containers: containers,
Volumes: volumes,
DNSPolicy: core.DNSClusterFirstWithHostNet,
TerminationGracePeriodSeconds: new(int64),
Tolerations: []core.Toleration{
{
Operator: core.TolerationOpExists,
Effect: core.TaintEffectNoExecute,
},
{
Operator: core.TolerationOpExists,
Effect: core.TaintEffectNoSchedule,
},
},
ImagePullSecrets: opts.ImagePullSecrets,
},
}
// Set the service account only when it exists, to prevent the pod from crashing.
if opts.ServiceAccountName != "" {
pod.Spec.ServiceAccountName = opts.ServiceAccountName
}
return pod, nil
}
func (provider *Provider) CreatePod(ctx context.Context, namespace string, podSpec *core.Pod) (*core.Pod, error) {
return provider.clientSet.CoreV1().Pods(namespace).Create(ctx, podSpec, metav1.CreateOptions{})
}
func (provider *Provider) CreateService(ctx context.Context, namespace string, serviceName string, appLabelValue string, targetPort int, port int32) (*core.Service, error) {
service := core.Service{
ObjectMeta: metav1.ObjectMeta{
Name: serviceName,
Labels: map[string]string{
LabelManagedBy: provider.managedBy,
LabelCreatedBy: provider.createdBy,
},
},
Spec: core.ServiceSpec{
Ports: []core.ServicePort{
{
Name: serviceName,
TargetPort: intstr.FromInt(targetPort),
Port: port,
},
},
Type: core.ServiceTypeClusterIP,
Selector: map[string]string{"app": appLabelValue},
},
}
return provider.clientSet.CoreV1().Services(namespace).Create(ctx, &service, metav1.CreateOptions{})
}
func (provider *Provider) CanI(ctx context.Context, namespace string, resource string, verb string, group string) (bool, error) {
selfSubjectAccessReview := &auth.SelfSubjectAccessReview{
Spec: auth.SelfSubjectAccessReviewSpec{
@@ -456,419 +139,6 @@ func (provider *Provider) doesResourceExist(resource interface{}, err error) (bo
return resource != nil, nil
}
func (provider *Provider) CreateSelfRBAC(ctx context.Context, namespace string, serviceAccountName string, clusterRoleName string, clusterRoleBindingName string, version string, resources []string) error {
serviceAccount := &core.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: serviceAccountName,
Labels: map[string]string{
fmt.Sprintf("%s-cli-version", misc.Program): version,
LabelManagedBy: provider.managedBy,
LabelCreatedBy: provider.createdBy,
},
},
}
clusterRole := &rbac.ClusterRole{
ObjectMeta: metav1.ObjectMeta{
Name: clusterRoleName,
Labels: map[string]string{
fmt.Sprintf("%s-cli-version", misc.Program): version,
LabelManagedBy: provider.managedBy,
LabelCreatedBy: provider.createdBy,
},
},
Rules: []rbac.PolicyRule{
{
APIGroups: []string{"", "extensions", "apps"},
Resources: resources,
Verbs: []string{"list", "get", "watch"},
},
},
}
clusterRoleBinding := &rbac.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: clusterRoleBindingName,
Labels: map[string]string{
fmt.Sprintf("%s-cli-version", misc.Program): version,
LabelManagedBy: provider.managedBy,
LabelCreatedBy: provider.createdBy,
},
},
RoleRef: rbac.RoleRef{
Name: clusterRoleName,
Kind: "ClusterRole",
APIGroup: "rbac.authorization.k8s.io",
},
Subjects: []rbac.Subject{
{
Kind: "ServiceAccount",
Name: serviceAccountName,
Namespace: namespace,
},
},
}
_, err := provider.clientSet.CoreV1().ServiceAccounts(namespace).Create(ctx, serviceAccount, metav1.CreateOptions{})
if err != nil && !k8serrors.IsAlreadyExists(err) {
return err
}
_, err = provider.clientSet.RbacV1().ClusterRoles().Create(ctx, clusterRole, metav1.CreateOptions{})
if err != nil && !k8serrors.IsAlreadyExists(err) {
return err
}
_, err = provider.clientSet.RbacV1().ClusterRoleBindings().Create(ctx, clusterRoleBinding, metav1.CreateOptions{})
if err != nil && !k8serrors.IsAlreadyExists(err) {
return err
}
return nil
}
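Since `CreateSelfRBAC` needs cluster-scope permissions, a caller could first probe them with the `CanI` helper above and fall back to the namespace-restricted variant below. A sketch under assumed wiring (the resource names and version string are illustrative):

```go
// Sketch: choose between cluster-wide and namespace-restricted RBAC
// based on what the current user is allowed to create.
func ensureRBAC(ctx context.Context, provider *Provider, namespace string) error {
	allowed, err := provider.CanI(ctx, "", "clusterroles", "create", "rbac.authorization.k8s.io")
	if err != nil {
		return err
	}
	if !allowed {
		return provider.CreateSelfRBACNamespaceRestricted(ctx, namespace,
			"kubeshark-service-account", "kubeshark-role", "kubeshark-role-binding", "v1")
	}
	return provider.CreateSelfRBAC(ctx, namespace,
		"kubeshark-service-account", "kubeshark-cluster-role", "kubeshark-cluster-role-binding",
		"v1", []string{"pods", "services", "endpoints"})
}
```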
func (provider *Provider) CreateSelfRBACNamespaceRestricted(ctx context.Context, namespace string, serviceAccountName string, roleName string, roleBindingName string, version string) error {
serviceAccount := &core.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: serviceAccountName,
Labels: map[string]string{
fmt.Sprintf("%s-cli-version", misc.Program): version,
LabelManagedBy: provider.managedBy,
LabelCreatedBy: provider.createdBy,
},
},
}
role := &rbac.Role{
ObjectMeta: metav1.ObjectMeta{
Name: roleName,
Labels: map[string]string{
fmt.Sprintf("%s-cli-version", misc.Program): version,
LabelManagedBy: provider.managedBy,
LabelCreatedBy: provider.createdBy,
},
},
Rules: []rbac.PolicyRule{
{
APIGroups: []string{"", "extensions", "apps"},
Resources: []string{"pods", "services", "endpoints"},
Verbs: []string{"list", "get", "watch"},
},
},
}
roleBinding := &rbac.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: roleBindingName,
Labels: map[string]string{
fmt.Sprintf("%s-cli-version", misc.Program): version,
LabelManagedBy: provider.managedBy,
LabelCreatedBy: provider.createdBy,
},
},
RoleRef: rbac.RoleRef{
Name: roleName,
Kind: "Role",
APIGroup: "rbac.authorization.k8s.io",
},
Subjects: []rbac.Subject{
{
Kind: "ServiceAccount",
Name: serviceAccountName,
Namespace: namespace,
},
},
}
_, err := provider.clientSet.CoreV1().ServiceAccounts(namespace).Create(ctx, serviceAccount, metav1.CreateOptions{})
if err != nil && !k8serrors.IsAlreadyExists(err) {
return err
}
_, err = provider.clientSet.RbacV1().Roles(namespace).Create(ctx, role, metav1.CreateOptions{})
if err != nil && !k8serrors.IsAlreadyExists(err) {
return err
}
_, err = provider.clientSet.RbacV1().RoleBindings(namespace).Create(ctx, roleBinding, metav1.CreateOptions{})
if err != nil && !k8serrors.IsAlreadyExists(err) {
return err
}
return nil
}
func (provider *Provider) RemoveNamespace(ctx context.Context, name string) error {
err := provider.clientSet.CoreV1().Namespaces().Delete(ctx, name, metav1.DeleteOptions{})
return provider.handleRemovalError(err)
}
func (provider *Provider) RemoveClusterRole(ctx context.Context, name string) error {
err := provider.clientSet.RbacV1().ClusterRoles().Delete(ctx, name, metav1.DeleteOptions{})
return provider.handleRemovalError(err)
}
func (provider *Provider) RemoveClusterRoleBinding(ctx context.Context, name string) error {
err := provider.clientSet.RbacV1().ClusterRoleBindings().Delete(ctx, name, metav1.DeleteOptions{})
return provider.handleRemovalError(err)
}
func (provider *Provider) RemoveRoleBinding(ctx context.Context, namespace string, name string) error {
err := provider.clientSet.RbacV1().RoleBindings(namespace).Delete(ctx, name, metav1.DeleteOptions{})
return provider.handleRemovalError(err)
}
func (provider *Provider) RemoveRole(ctx context.Context, namespace string, name string) error {
err := provider.clientSet.RbacV1().Roles(namespace).Delete(ctx, name, metav1.DeleteOptions{})
return provider.handleRemovalError(err)
}
func (provider *Provider) RemoveServiceAccount(ctx context.Context, namespace string, name string) error {
err := provider.clientSet.CoreV1().ServiceAccounts(namespace).Delete(ctx, name, metav1.DeleteOptions{})
return provider.handleRemovalError(err)
}
func (provider *Provider) RemovePod(ctx context.Context, namespace string, podName string) error {
err := provider.clientSet.CoreV1().Pods(namespace).Delete(ctx, podName, metav1.DeleteOptions{})
return provider.handleRemovalError(err)
}
func (provider *Provider) RemoveConfigMap(ctx context.Context, namespace string, configMapName string) error {
err := provider.clientSet.CoreV1().ConfigMaps(namespace).Delete(ctx, configMapName, metav1.DeleteOptions{})
return provider.handleRemovalError(err)
}
func (provider *Provider) RemoveService(ctx context.Context, namespace string, serviceName string) error {
err := provider.clientSet.CoreV1().Services(namespace).Delete(ctx, serviceName, metav1.DeleteOptions{})
return provider.handleRemovalError(err)
}
func (provider *Provider) RemoveDaemonSet(ctx context.Context, namespace string, daemonSetName string) error {
err := provider.clientSet.AppsV1().DaemonSets(namespace).Delete(ctx, daemonSetName, metav1.DeleteOptions{})
return provider.handleRemovalError(err)
}
func (provider *Provider) handleRemovalError(err error) error {
// Ignore NotFound - There is nothing to delete.
// Ignore Forbidden - Assume that a user could not have created the resource in the first place.
if k8serrors.IsNotFound(err) || k8serrors.IsForbidden(err) {
return nil
}
return err
}
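Because `handleRemovalError` swallows NotFound and Forbidden, the `Remove*` helpers are safe to call unconditionally during teardown. A sketch with illustrative names:

```go
// Sketch: best-effort cleanup; resources that are missing, or that the
// user could never have created, are treated as already removed.
for _, name := range []string{"kubeshark-hub", "kubeshark-front"} {
	if err := provider.RemovePod(ctx, "kubeshark", name); err != nil {
		log.Warn().Err(err).Str("pod", name).Msg("Failed to remove pod.")
	}
}
```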
func (provider *Provider) ApplyWorkerDaemonSet(
ctx context.Context,
namespace string,
daemonSetName string,
podImage string,
workerPodName string,
serviceAccountName string,
resources Resources,
imagePullPolicy core.PullPolicy,
imagePullSecrets []core.LocalObjectReference,
serviceMesh bool,
tls bool,
debug bool,
) error {
log.Debug().
Str("namespace", namespace).
Str("daemonset-name", daemonSetName).
Str("image", podImage).
Str("pod", workerPodName).
Msg("Applying worker DaemonSets.")
command := []string{"./worker", "-i", "any", "-port", "8897"}
if debug {
command = append(command, "-debug")
}
if serviceMesh {
command = append(command, "-servicemesh")
}
if tls {
command = append(command, "-tls")
}
if serviceMesh || tls {
command = append(command, "-procfs", procfsMountPath)
}
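// With every flag enabled, the assembled command is:
//   ./worker -i any -port 8897 -debug -servicemesh -tls -procfs /hostproc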
workerContainer := applyconfcore.Container()
workerContainer.WithName(workerPodName)
workerContainer.WithImage(podImage)
workerContainer.WithImagePullPolicy(imagePullPolicy)
caps := applyconfcore.Capabilities().WithDrop("ALL")
caps = caps.WithAdd("NET_RAW").WithAdd("NET_ADMIN") // to listen to traffic using libpcap
if serviceMesh || tls {
caps = caps.WithAdd("SYS_ADMIN") // to read /proc/PID/net/ns + to install eBPF programs (kernel < 5.8)
caps = caps.WithAdd("SYS_PTRACE") // to set netns to other process + to open libssl.so of other process
if serviceMesh {
caps = caps.WithAdd("DAC_OVERRIDE") // to read /proc/PID/environ
}
if tls {
caps = caps.WithAdd("SYS_RESOURCE") // to change rlimits for eBPF
}
}
workerContainer.WithSecurityContext(applyconfcore.SecurityContext().WithCapabilities(caps))
workerContainer.WithCommand(command...)
if debug {
workerContainer.WithEnv(
applyconfcore.EnvVar().WithName("MEMORY_PROFILING_ENABLED").WithValue("true"),
applyconfcore.EnvVar().WithName("MEMORY_PROFILING_INTERVAL_SECONDS").WithValue("10"),
applyconfcore.EnvVar().WithName("MEMORY_USAGE_INTERVAL_MILLISECONDS").WithValue("500"),
)
}
cpuLimit, err := resource.ParseQuantity(resources.CpuLimit)
if err != nil {
return fmt.Errorf("invalid cpu limit for %s container", workerPodName)
}
memLimit, err := resource.ParseQuantity(resources.MemoryLimit)
if err != nil {
return fmt.Errorf("invalid memory limit for %s container", workerPodName)
}
cpuRequests, err := resource.ParseQuantity(resources.CpuRequests)
if err != nil {
return fmt.Errorf("invalid cpu request for %s container", workerPodName)
}
memRequests, err := resource.ParseQuantity(resources.MemoryRequests)
if err != nil {
return fmt.Errorf("invalid memory request for %s container", workerPodName)
}
workerResourceLimits := core.ResourceList{
"cpu": cpuLimit,
"memory": memLimit,
}
workerResourceRequests := core.ResourceList{
"cpu": cpuRequests,
"memory": memRequests,
}
workerResources := applyconfcore.ResourceRequirements().WithRequests(workerResourceRequests).WithLimits(workerResourceLimits)
workerContainer.WithResources(workerResources)
nodeAffinity := applyconfcore.NodeAffinity()
affinity := applyconfcore.Affinity()
affinity.WithNodeAffinity(nodeAffinity)
noExecuteToleration := applyconfcore.Toleration()
noExecuteToleration.WithOperator(core.TolerationOpExists)
noExecuteToleration.WithEffect(core.TaintEffectNoExecute)
noScheduleToleration := applyconfcore.Toleration()
noScheduleToleration.WithOperator(core.TolerationOpExists)
noScheduleToleration.WithEffect(core.TaintEffectNoSchedule)
// Host procfs is needed inside the container because we need access to
// the network namespaces of processes on the machine.
//
procfsVolume := applyconfcore.Volume()
procfsVolume.WithName(procfsVolumeName).WithHostPath(applyconfcore.HostPathVolumeSource().WithPath("/proc"))
procfsVolumeMount := applyconfcore.VolumeMount().WithName(procfsVolumeName).WithMountPath(procfsMountPath).WithReadOnly(true)
workerContainer.WithVolumeMounts(procfsVolumeMount)
// We need access to /sys in order to install certain eBPF tracepoints
//
sysfsVolume := applyconfcore.Volume()
sysfsVolume.WithName(sysfsVolumeName).WithHostPath(applyconfcore.HostPathVolumeSource().WithPath("/sys"))
sysfsVolumeMount := applyconfcore.VolumeMount().WithName(sysfsVolumeName).WithMountPath(sysfsMountPath).WithReadOnly(true)
workerContainer.WithVolumeMounts(sysfsVolumeMount)
podSpec := applyconfcore.PodSpec()
podSpec.WithHostNetwork(true)
podSpec.WithDNSPolicy(core.DNSClusterFirstWithHostNet)
podSpec.WithTerminationGracePeriodSeconds(0)
if serviceAccountName != "" {
podSpec.WithServiceAccountName(serviceAccountName)
}
podSpec.WithContainers(workerContainer)
podSpec.WithAffinity(affinity)
podSpec.WithTolerations(noExecuteToleration, noScheduleToleration)
podSpec.WithVolumes(procfsVolume, sysfsVolume)
if len(imagePullSecrets) > 0 {
// Build one reference per secret; reusing a single builder would keep
// only the last secret's name.
for _, secret := range imagePullSecrets {
podSpec.WithImagePullSecrets(applyconfcore.LocalObjectReference().WithName(secret.Name))
}
}
podTemplate := applyconfcore.PodTemplateSpec()
podTemplate.WithLabels(map[string]string{
"app": workerPodName,
LabelManagedBy: provider.managedBy,
LabelCreatedBy: provider.createdBy,
})
podTemplate.WithSpec(podSpec)
labelSelector := applyconfmeta.LabelSelector()
labelSelector.WithMatchLabels(map[string]string{"app": workerPodName})
applyOptions := metav1.ApplyOptions{
Force: true,
FieldManager: fieldManagerName,
}
daemonSet := applyconfapp.DaemonSet(daemonSetName, namespace)
daemonSet.
WithLabels(map[string]string{
LabelManagedBy: provider.managedBy,
LabelCreatedBy: provider.createdBy,
}).
WithSpec(applyconfapp.DaemonSetSpec().WithSelector(labelSelector).WithTemplate(podTemplate))
_, err = provider.clientSet.AppsV1().DaemonSets(namespace).Apply(ctx, daemonSet, applyOptions)
return err
}
func (provider *Provider) ResetWorkerDaemonSet(ctx context.Context, namespace string, daemonSetName string, podImage string, workerPodName string) error {
workerContainer := applyconfcore.Container()
workerContainer.WithName(workerPodName)
workerContainer.WithImage(podImage)
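// Pin the pods to a label that no node is expected to carry, so applying
// this spec drains the workers without deleting the DaemonSet object.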
nodeSelectorRequirement := applyconfcore.NodeSelectorRequirement()
nodeSelectorRequirement.WithKey(fmt.Sprintf("%s-non-existing-label", misc.Program))
nodeSelectorRequirement.WithOperator(core.NodeSelectorOpExists)
nodeSelectorTerm := applyconfcore.NodeSelectorTerm()
nodeSelectorTerm.WithMatchExpressions(nodeSelectorRequirement)
nodeSelector := applyconfcore.NodeSelector()
nodeSelector.WithNodeSelectorTerms(nodeSelectorTerm)
nodeAffinity := applyconfcore.NodeAffinity()
nodeAffinity.WithRequiredDuringSchedulingIgnoredDuringExecution(nodeSelector)
affinity := applyconfcore.Affinity()
affinity.WithNodeAffinity(nodeAffinity)
podSpec := applyconfcore.PodSpec()
podSpec.WithContainers(workerContainer)
podSpec.WithAffinity(affinity)
podTemplate := applyconfcore.PodTemplateSpec()
podTemplate.WithLabels(map[string]string{
"app": workerPodName,
LabelManagedBy: provider.managedBy,
LabelCreatedBy: provider.createdBy,
})
podTemplate.WithSpec(podSpec)
labelSelector := applyconfmeta.LabelSelector()
labelSelector.WithMatchLabels(map[string]string{"app": workerPodName})
applyOptions := metav1.ApplyOptions{
Force: true,
FieldManager: fieldManagerName,
}
daemonSet := applyconfapp.DaemonSet(daemonSetName, namespace)
daemonSet.
WithLabels(map[string]string{
LabelManagedBy: provider.managedBy,
LabelCreatedBy: provider.createdBy,
}).
WithSpec(applyconfapp.DaemonSetSpec().WithSelector(labelSelector).WithTemplate(podTemplate))
_, err := provider.clientSet.AppsV1().DaemonSets(namespace).Apply(ctx, daemonSet, applyOptions)
return err
}
func (provider *Provider) listPodsImpl(ctx context.Context, regex *regexp.Regexp, namespaces []string, listOptions metav1.ListOptions) ([]core.Pod, error) {
var pods []core.Pod
for _, namespace := range namespaces {
@@ -893,10 +163,6 @@ func (provider *Provider) ListAllPodsMatchingRegex(ctx context.Context, regex *r
return provider.listPodsImpl(ctx, regex, namespaces, metav1.ListOptions{})
}
func (provider *Provider) GetPod(ctx context.Context, namespaces string, podName string) (*core.Pod, error) {
return provider.clientSet.CoreV1().Pods(namespaces).Get(ctx, podName, metav1.GetOptions{})
}
func (provider *Provider) ListAllRunningPodsMatchingRegex(ctx context.Context, regex *regexp.Regexp, namespaces []string) ([]core.Pod, error) {
pods, err := provider.ListAllPodsMatchingRegex(ctx, regex, namespaces)
if err != nil {
@@ -955,41 +221,6 @@ func (provider *Provider) GetNamespaceEvents(ctx context.Context, namespace stri
return eventList.String(), nil
}
func (provider *Provider) ListManagedServiceAccounts(ctx context.Context, namespace string) (*core.ServiceAccountList, error) {
listOptions := metav1.ListOptions{
LabelSelector: fmt.Sprintf("%s=%s", LabelManagedBy, provider.managedBy),
}
return provider.clientSet.CoreV1().ServiceAccounts(namespace).List(ctx, listOptions)
}
func (provider *Provider) ListManagedClusterRoles(ctx context.Context) (*rbac.ClusterRoleList, error) {
listOptions := metav1.ListOptions{
LabelSelector: fmt.Sprintf("%s=%s", LabelManagedBy, provider.managedBy),
}
return provider.clientSet.RbacV1().ClusterRoles().List(ctx, listOptions)
}
func (provider *Provider) ListManagedClusterRoleBindings(ctx context.Context) (*rbac.ClusterRoleBindingList, error) {
listOptions := metav1.ListOptions{
LabelSelector: fmt.Sprintf("%s=%s", LabelManagedBy, provider.managedBy),
}
return provider.clientSet.RbacV1().ClusterRoleBindings().List(ctx, listOptions)
}
func (provider *Provider) ListManagedRoles(ctx context.Context, namespace string) (*rbac.RoleList, error) {
listOptions := metav1.ListOptions{
LabelSelector: fmt.Sprintf("%s=%s", LabelManagedBy, provider.managedBy),
}
return provider.clientSet.RbacV1().Roles(namespace).List(ctx, listOptions)
}
func (provider *Provider) ListManagedRoleBindings(ctx context.Context, namespace string) (*rbac.RoleBindingList, error) {
listOptions := metav1.ListOptions{
LabelSelector: fmt.Sprintf("%s=%s", LabelManagedBy, provider.managedBy),
}
return provider.clientSet.RbacV1().RoleBindings(namespace).List(ctx, listOptions)
}
// ValidateNotProxy We added this after a customer tried to run kubeshark from Lens, which used Lens's kube config, whose cluster server configuration points to Lens's local proxy.
// The workaround was to use the user's local default kube config.
// For now, we are blocking the option to run kubeshark through a proxy to the k8s server.
@@ -1029,6 +260,14 @@ func (provider *Provider) GetKubernetesVersion() (*semver.SemVersion, error) {
return &serverVersionSemVer, nil
}
func (provider *Provider) GetNamespaces() []string {
if len(config.Config.Tap.Namespaces) > 0 {
return utils.Unique(config.Config.Tap.Namespaces)
} else {
return []string{K8sAllNamespaces}
}
}
func getClientSet(config *rest.Config) (*kubernetes.Clientset, error) {
clientSet, err := kubernetes.NewForConfig(config)
if err != nil {


@@ -11,6 +11,7 @@ import (
"strings"
"time"
"github.com/kubeshark/kubeshark/config"
"github.com/rs/zerolog/log"
"k8s.io/apimachinery/pkg/util/httpstream"
"k8s.io/client-go/tools/portforward"
@@ -66,8 +67,8 @@ func getSelfHubProxiedHostAndPath(selfNamespace string, selfServiceName string)
return fmt.Sprintf("/api/v1/namespaces/%s/services/%s:%d/proxy", selfNamespace, selfServiceName, selfServicePort)
}
func GetLocalhostOnPort(port uint16) string {
return fmt.Sprintf("http://localhost:%d", port)
func GetProxyOnPort(port uint16) string {
return fmt.Sprintf("http://%s:%d", config.Config.Tap.Proxy.Host, port)
}
func getRerouteHttpHandlerSelfAPI(proxyHandler http.Handler, selfNamespace string, selfServiceName string) http.Handler {


@@ -1,8 +0,0 @@
package kubernetes
type Resources struct {
CpuLimit string `yaml:"cpu-limit" default:"750m"`
MemoryLimit string `yaml:"memory-limit" default:"1Gi"`
CpuRequests string `yaml:"cpu-requests" default:"50m"`
MemoryRequests string `yaml:"memory-requests" default:"50Mi"`
}


@@ -1,55 +0,0 @@
package kubernetes
import (
"github.com/kubeshark/base/pkg/models"
core "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func GetNodeHostToTargetedPodsMap(targetedPods []core.Pod) models.NodeToPodsMap {
nodeToTargetedPodsMap := make(models.NodeToPodsMap)
for _, pod := range targetedPods {
minimizedPod := getMinimizedPod(pod)
existingList := nodeToTargetedPodsMap[pod.Spec.NodeName]
if existingList == nil {
nodeToTargetedPodsMap[pod.Spec.NodeName] = []core.Pod{minimizedPod}
} else {
nodeToTargetedPodsMap[pod.Spec.NodeName] = append(nodeToTargetedPodsMap[pod.Spec.NodeName], minimizedPod)
}
}
return nodeToTargetedPodsMap
}
func getMinimizedPod(fullPod core.Pod) core.Pod {
return core.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: fullPod.Name,
Namespace: fullPod.Namespace,
},
Status: core.PodStatus{
PodIP: fullPod.Status.PodIP,
ContainerStatuses: getMinimizedContainerStatuses(fullPod),
},
}
}
func getMinimizedContainerStatuses(fullPod core.Pod) []core.ContainerStatus {
result := make([]core.ContainerStatus, len(fullPod.Status.ContainerStatuses))
for i, container := range fullPod.Status.ContainerStatuses {
result[i] = core.ContainerStatus{
ContainerID: container.ContainerID,
}
}
return result
}
func GetPodInfosForPods(pods []core.Pod) []*models.PodInfo {
podInfos := make([]*models.PodInfo, 0)
for _, pod := range pods {
podInfos = append(podInfos, &models.PodInfo{Name: pod.Name, Namespace: pod.Namespace, NodeName: pod.Spec.NodeName})
}
return podInfos
}


@@ -1,54 +0,0 @@
package kubernetes
import (
"context"
"github.com/kubeshark/kubeshark/docker"
"github.com/rs/zerolog/log"
core "k8s.io/api/core/v1"
)
func CreateWorkers(
kubernetesProvider *Provider,
selfServiceAccountExists bool,
ctx context.Context,
namespace string,
resources Resources,
imagePullPolicy core.PullPolicy,
imagePullSecrets []core.LocalObjectReference,
serviceMesh bool,
tls bool,
debug bool,
) error {
image := docker.GetWorkerImage()
var serviceAccountName string
if selfServiceAccountExists {
serviceAccountName = ServiceAccountName
} else {
serviceAccountName = ""
}
log.Info().Msg("Creating the worker DaemonSet...")
if err := kubernetesProvider.ApplyWorkerDaemonSet(
ctx,
namespace,
WorkerDaemonSetName,
image,
WorkerPodName,
serviceAccountName,
resources,
imagePullPolicy,
imagePullSecrets,
serviceMesh,
tls,
debug,
); err != nil {
return err
}
log.Info().Msg("Successfully created the worker DaemonSet.")
return nil
}

manifests/README.md Normal file

@@ -0,0 +1,35 @@
# Manifests
## Apply
Clone the repo:
```shell
git clone git@github.com:kubeshark/kubeshark.git --depth 1
cd kubeshark/manifests
```
To apply the manifests, run:
```shell
kubectl apply -f .
```
To clean up:
```shell
kubectl delete namespace kubeshark
kubectl delete clusterrolebinding kubeshark-cluster-role-binding
kubectl delete clusterrole kubeshark-cluster-role
```
## Accessing
Set up port forwarding:
```shell
kubectl port-forward -n kubeshark service/kubeshark-hub 8898:80 & \
kubectl port-forward -n kubeshark service/kubeshark-front 8899:80
```
Visit [localhost:8899](http://localhost:8899)

manifests/complete.yaml Normal file

@@ -0,0 +1,267 @@
---
# Source: kubeshark/templates/01-service-account.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
annotations:
name: kubeshark-service-account
namespace: default
---
# Source: kubeshark/templates/02-cluster-role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
annotations:
name: kubeshark-cluster-role
namespace: default
rules:
- apiGroups:
- ""
- extensions
- apps
- networking.k8s.io
resources:
- pods
- services
- endpoints
- persistentvolumeclaims
- ingresses
verbs:
- list
- get
- watch
---
# Source: kubeshark/templates/03-cluster-role-binding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
annotations:
name: kubeshark-cluster-role-binding
namespace: default
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubeshark-cluster-role
subjects:
- kind: ServiceAccount
name: kubeshark-service-account
namespace: default
---
# Source: kubeshark/templates/05-hub-service.yaml
apiVersion: v1
kind: Service
metadata:
creationTimestamp: null
labels:
annotations:
name: kubeshark-hub
namespace: default
spec:
ports:
- name: kubeshark-hub
port: 80
targetPort: 80
selector:
app: kubeshark-hub
type: NodePort
status:
loadBalancer: {}
---
# Source: kubeshark/templates/07-front-service.yaml
apiVersion: v1
kind: Service
metadata:
creationTimestamp: null
labels:
annotations:
name: kubeshark-front
namespace: default
spec:
ports:
- name: kubeshark-front
port: 80
targetPort: 80
selector:
app: kubeshark-front
type: NodePort
status:
loadBalancer: {}
---
# Source: kubeshark/templates/09-worker-daemon-set.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
creationTimestamp: null
labels:
app: kubeshark-worker-daemon-set
annotations:
name: kubeshark-worker-daemon-set
namespace: default
spec:
selector:
matchLabels:
app: kubeshark-worker-daemon-set
template:
metadata:
creationTimestamp: null
labels:
app: kubeshark-worker-daemon-set
name: kubeshark-worker-daemon-set
namespace: kubeshark
spec:
containers:
- command:
- './worker'
- -i
- any
- -port
- '8897'
- -packet-capture
- 'libpcap'
- -servicemesh
- -tls
- -procfs
- /hostproc
image: 'docker.io/kubeshark/worker:latest'
imagePullPolicy: Always
name: kubeshark-worker-daemon-set
resources:
limits:
cpu: 750m
memory: 1Gi
requests:
cpu: 50m
memory: 50Mi
securityContext:
capabilities:
add:
- NET_RAW
- NET_ADMIN
- SYS_ADMIN
- SYS_PTRACE
- DAC_OVERRIDE
- SYS_RESOURCE
drop:
- ALL
volumeMounts:
- mountPath: /hostproc
name: proc
readOnly: true
- mountPath: /sys
name: sys
readOnly: true
dnsPolicy: ClusterFirstWithHostNet
hostNetwork: true
serviceAccountName: kubeshark-service-account
terminationGracePeriodSeconds: 0
tolerations:
- effect: NoExecute
operator: Exists
- effect: NoSchedule
operator: Exists
volumes:
- hostPath:
path: /proc
name: proc
- hostPath:
path: /sys
name: sys
---
# Source: kubeshark/templates/04-hub-pod.yaml
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: null
labels:
app: kubeshark-hub
annotations:
name: kubeshark-hub
namespace: default
spec:
containers:
- command:
- ./hub
env:
- name: POD_REGEX
value: '.*'
- name: NAMESPACES
value: ''
- name: LICENSE
value: ''
- name: SCRIPTING_ENV
value: '{}'
- name: SCRIPTING_SCRIPTS
value: '[]'
- name: AUTH_APPROVED_DOMAINS
value: ''
image: 'docker.io/kubeshark/hub:latest'
imagePullPolicy: Always
name: kubeshark-hub
resources:
limits:
cpu: 750m
memory: 1Gi
requests:
cpu: 50m
memory: 50Mi
dnsPolicy: ClusterFirstWithHostNet
serviceAccountName: kubeshark-service-account
terminationGracePeriodSeconds: 0
tolerations:
- effect: NoExecute
operator: Exists
- effect: NoSchedule
operator: Exists
status: {}
---
# Source: kubeshark/templates/06-front-pod.yaml
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: null
labels:
app: kubeshark-front
annotations:
name: kubeshark-front
namespace: default
spec:
containers:
- env:
- name: REACT_APP_DEFAULT_FILTER
value: ' '
- name: REACT_APP_HUB_HOST
value: ' '
- name: REACT_APP_HUB_PORT
value: ':8898'
image: 'docker.io/kubeshark/front:latest'
imagePullPolicy: Always
name: kubeshark-front
readinessProbe:
failureThreshold: 3
periodSeconds: 1
successThreshold: 1
tcpSocket:
port: 80
timeoutSeconds: 1
resources:
limits:
cpu: 750m
memory: 1Gi
requests:
cpu: 50m
memory: 50Mi
dnsPolicy: ClusterFirstWithHostNet
serviceAccountName: kubeshark-service-account
terminationGracePeriodSeconds: 0
tolerations:
- effect: NoExecute
operator: Exists
- effect: NoSchedule
operator: Exists
status: {}


@@ -0,0 +1,12 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: kubeshark-tls
namespace: default
spec:
issuerRef:
name: letsencrypt-prod
kind: ClusterIssuer
secretName: cert-kubeshark
dnsNames:
- ks.svc.cluster.local


@@ -0,0 +1,14 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-prod
spec:
acme:
server: https://acme-v02.api.letsencrypt.org/directory
email: info@kubeshark.co
privateKeySecretRef:
name: letsencrypt-prod-key
solvers:
- http01:
ingress:
class: kubeshark-ingress-class

manifests/tls/run.sh Executable file

@@ -0,0 +1,15 @@
#!/bin/bash
__dir="$(cd -P -- "$(dirname -- "$0")" && pwd -P)"
helm repo add jetstack https://charts.jetstack.io
helm repo update
kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.9.1/cert-manager.crds.yaml
helm install \
cert-manager jetstack/cert-manager \
--namespace cert-manager \
--create-namespace \
--version v1.9.1
kubectl apply -f ${__dir}/cluster-issuer.yaml
kubectl apply -f ${__dir}/certificate.yaml


@@ -9,7 +9,9 @@ import (
var (
Software = "Kubeshark"
Program = "kubeshark"
Description = "The API Traffic Analyzer for Kubernetes"
Website = "https://kubeshark.co"
Email = "info@kubeshark.co"
Ver = "0.0"
Branch = "develop"
GitCommitHash = "" // this var is overridden using ldflags in makefile when building

misc/fsUtils/globUtils.go Normal file

@@ -0,0 +1,22 @@
package fsUtils
import (
"fmt"
"os"
"path/filepath"
)
func RemoveFilesByExtension(dirPath string, ext string) error {
files, err := filepath.Glob(filepath.Join(dirPath, fmt.Sprintf("*.%s", ext)))
if err != nil {
return err
}
for _, f := range files {
if err := os.Remove(f); err != nil {
return err
}
}
return nil
}
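A usage sketch for `RemoveFilesByExtension` (directory and extension are hypothetical):

```go
// Sketch: clear leftover .pcap files from a scratch directory.
// Assumes the "os" import and the fsUtils package above.
if err := fsUtils.RemoveFilesByExtension(os.TempDir(), "pcap"); err != nil {
	log.Error().Err(err).Msg("Failed to remove capture files.")
}
```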


@@ -15,13 +15,13 @@ import (
func DumpLogs(ctx context.Context, provider *kubernetes.Provider, filePath string) error {
podExactRegex := regexp.MustCompile("^" + kubernetes.SelfResourcesPrefix)
pods, err := provider.ListAllPodsMatchingRegex(ctx, podExactRegex, []string{config.Config.SelfNamespace})
pods, err := provider.ListAllPodsMatchingRegex(ctx, podExactRegex, []string{config.Config.Tap.Release.Namespace})
if err != nil {
return err
}
if len(pods) == 0 {
return fmt.Errorf("No %s pods found in namespace %s", misc.Software, config.Config.SelfNamespace)
return fmt.Errorf("No %s pods found in namespace %s", misc.Software, config.Config.Tap.Release.Namespace)
}
newZipFile, err := os.Create(filePath)
@@ -60,23 +60,23 @@ func DumpLogs(ctx context.Context, provider *kubernetes.Provider, filePath strin
}
}
events, err := provider.GetNamespaceEvents(ctx, config.Config.SelfNamespace)
events, err := provider.GetNamespaceEvents(ctx, config.Config.Tap.Release.Namespace)
if err != nil {
log.Error().Err(err).Msg("Failed to get k8b events!")
} else {
log.Debug().Str("namespace", config.Config.SelfNamespace).Msg("Successfully read events.")
log.Debug().Str("namespace", config.Config.Tap.Release.Namespace).Msg("Successfully read events.")
}
if err := AddStrToZip(zipWriter, events, fmt.Sprintf("%s_events.log", config.Config.SelfNamespace)); err != nil {
if err := AddStrToZip(zipWriter, events, fmt.Sprintf("%s_events.log", config.Config.Tap.Release.Namespace)); err != nil {
log.Error().Err(err).Msg("Failed write logs!")
} else {
log.Debug().Str("namespace", config.Config.SelfNamespace).Msg("Successfully added events.")
log.Debug().Str("namespace", config.Config.Tap.Release.Namespace).Msg("Successfully added events.")
}
if err := AddFileToZip(zipWriter, config.Config.ConfigFilePath); err != nil {
if err := AddFileToZip(zipWriter, config.ConfigFilePath); err != nil {
log.Error().Err(err).Msg("Failed write file!")
} else {
log.Debug().Str("file-path", config.Config.ConfigFilePath).Msg("Successfully added file.")
log.Debug().Str("file-path", config.ConfigFilePath).Msg("Successfully added file.")
}
log.Info().Str("path", filePath).Msg("You can find the ZIP file with all logs at:")

misc/scripting.go Normal file

@@ -0,0 +1,55 @@
package misc
import (
"os"
"path/filepath"
"github.com/robertkrimen/otto/ast"
"github.com/robertkrimen/otto/file"
"github.com/robertkrimen/otto/parser"
)
type Script struct {
Path string `json:"path"`
Title string `json:"title"`
Code string `json:"code"`
}
func ReadScriptFile(path string) (script *Script, err error) {
filename := filepath.Base(path)
var body []byte
body, err = os.ReadFile(path)
if err != nil {
return
}
content := string(body)
var program *ast.Program
program, err = parser.ParseFile(nil, filename, content, parser.StoreComments)
if err != nil {
return
}
var title string
var titleIsSet bool
code := content
var idx0 file.Idx
for node, comments := range program.Comments {
if (titleIsSet && node.Idx0() > idx0) || len(comments) == 0 {
continue
}
idx0 = node.Idx0()
title = comments[0].Text
titleIsSet = true
}
script = &Script{
Path: path,
Title: title,
Code: code,
}
return
}
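`ReadScriptFile` takes the earliest comment in the file as the script's title and keeps the full source in `Code`. A usage sketch (the path is hypothetical):

```go
// Sketch: load a script and log its title.
script, err := misc.ReadScriptFile("scripts/log_http.js")
if err != nil {
	log.Error().Err(err).Msg("Failed to read the script file.")
	return
}
log.Info().Str("title", script.Title).Int("bytes", len(script.Code)).Msg("Loaded script.")
```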


@@ -16,7 +16,7 @@ import (
)
func CheckNewerVersion() {
if os.Getenv("KUBESHARK_DISABLE_VERSION_CHECK") != "" {
if os.Getenv(fmt.Sprintf("%s_DISABLE_VERSION_CHECK", strings.ToUpper(misc.Program))) != "" {
return
}
@@ -44,7 +44,7 @@ func CheckNewerVersion() {
} else {
downloadCommand = fmt.Sprintf("sh <(curl -Ls %s/install)", misc.Website)
}
msg := fmt.Sprintf("There is a new release! %v -> %v run:", misc.Ver, latestVersion)
msg := fmt.Sprintf("There is a new release! %v -> %v Please upgrade to the latest release, as new releases are not always backward compatible. Run:", misc.Ver, latestVersion)
log.Warn().Str("command", downloadCommand).Msg(fmt.Sprintf(utils.Yellow, msg))
}
}


@@ -1,168 +0,0 @@
package resources
import (
"context"
"fmt"
"github.com/kubeshark/kubeshark/errormessage"
"github.com/kubeshark/kubeshark/kubernetes"
"github.com/kubeshark/kubeshark/misc"
"github.com/kubeshark/kubeshark/utils"
"github.com/rs/zerolog/log"
"k8s.io/apimachinery/pkg/util/wait"
)
func CleanUpSelfResources(ctx context.Context, cancel context.CancelFunc, kubernetesProvider *kubernetes.Provider, isNsRestrictedMode bool, selfResourcesNamespace string) {
log.Warn().Msg(fmt.Sprintf("Removing %s resources...", misc.Software))
var leftoverResources []string
if isNsRestrictedMode {
leftoverResources = cleanUpRestrictedMode(ctx, kubernetesProvider, selfResourcesNamespace)
} else {
leftoverResources = cleanUpNonRestrictedMode(ctx, cancel, kubernetesProvider, selfResourcesNamespace)
}
if len(leftoverResources) > 0 {
errMsg := "Failed to remove the following resources."
for _, resource := range leftoverResources {
errMsg += "\n- " + resource
}
log.Error().Msg(fmt.Sprintf(utils.Red, errMsg))
}
}
func cleanUpNonRestrictedMode(ctx context.Context, cancel context.CancelFunc, kubernetesProvider *kubernetes.Provider, selfResourcesNamespace string) []string {
leftoverResources := make([]string, 0)
if err := kubernetesProvider.RemoveNamespace(ctx, selfResourcesNamespace); err != nil {
resourceDesc := fmt.Sprintf("Namespace %s", selfResourcesNamespace)
handleDeletionError(err, resourceDesc, &leftoverResources)
} else {
defer waitUntilNamespaceDeleted(ctx, cancel, kubernetesProvider, selfResourcesNamespace)
}
if resources, err := kubernetesProvider.ListManagedClusterRoles(ctx); err != nil {
resourceDesc := "ClusterRoles"
handleDeletionError(err, resourceDesc, &leftoverResources)
} else {
for _, resource := range resources.Items {
if err := kubernetesProvider.RemoveClusterRole(ctx, resource.Name); err != nil {
resourceDesc := fmt.Sprintf("ClusterRole %s", resource.Name)
handleDeletionError(err, resourceDesc, &leftoverResources)
}
}
}
if resources, err := kubernetesProvider.ListManagedClusterRoleBindings(ctx); err != nil {
resourceDesc := "ClusterRoleBindings"
handleDeletionError(err, resourceDesc, &leftoverResources)
} else {
for _, resource := range resources.Items {
if err := kubernetesProvider.RemoveClusterRoleBinding(ctx, resource.Name); err != nil {
resourceDesc := fmt.Sprintf("ClusterRoleBinding %s", resource.Name)
handleDeletionError(err, resourceDesc, &leftoverResources)
}
}
}
return leftoverResources
}
func waitUntilNamespaceDeleted(ctx context.Context, cancel context.CancelFunc, kubernetesProvider *kubernetes.Provider, selfResourcesNamespace string) {
// Call cancel if a terminating signal was received. Allows user to skip the wait.
go func() {
utils.WaitForTermination(ctx, cancel)
}()
if err := kubernetesProvider.WaitUtilNamespaceDeleted(ctx, selfResourcesNamespace); err != nil {
switch {
case ctx.Err() == context.Canceled:
log.Warn().
Str("namespace", selfResourcesNamespace).
Msg("Did nothing. User interrupted the wait.")
case err == wait.ErrWaitTimeout:
log.Warn().
Str("namespace", selfResourcesNamespace).
Msg("Timed out while deleting the namespace.")
default:
log.Warn().
Err(errormessage.FormatError(err)).
Str("namespace", selfResourcesNamespace).
Msg("Unknown error while deleting the namespace.")
}
}
}
func cleanUpRestrictedMode(ctx context.Context, kubernetesProvider *kubernetes.Provider, selfResourcesNamespace string) []string {
leftoverResources := make([]string, 0)
if err := kubernetesProvider.RemoveService(ctx, selfResourcesNamespace, kubernetes.FrontServiceName); err != nil {
resourceDesc := fmt.Sprintf("Service %s in namespace %s", kubernetes.FrontServiceName, selfResourcesNamespace)
handleDeletionError(err, resourceDesc, &leftoverResources)
}
if err := kubernetesProvider.RemoveService(ctx, selfResourcesNamespace, kubernetes.HubServiceName); err != nil {
resourceDesc := fmt.Sprintf("Service %s in namespace %s", kubernetes.HubServiceName, selfResourcesNamespace)
handleDeletionError(err, resourceDesc, &leftoverResources)
}
if err := kubernetesProvider.RemoveDaemonSet(ctx, selfResourcesNamespace, kubernetes.WorkerDaemonSetName); err != nil {
resourceDesc := fmt.Sprintf("DaemonSet %s in namespace %s", kubernetes.WorkerDaemonSetName, selfResourcesNamespace)
handleDeletionError(err, resourceDesc, &leftoverResources)
}
if resources, err := kubernetesProvider.ListManagedServiceAccounts(ctx, selfResourcesNamespace); err != nil {
resourceDesc := fmt.Sprintf("ServiceAccounts in namespace %s", selfResourcesNamespace)
handleDeletionError(err, resourceDesc, &leftoverResources)
} else {
for _, resource := range resources.Items {
if err := kubernetesProvider.RemoveServiceAccount(ctx, selfResourcesNamespace, resource.Name); err != nil {
resourceDesc := fmt.Sprintf("ServiceAccount %s in namespace %s", resource.Name, selfResourcesNamespace)
handleDeletionError(err, resourceDesc, &leftoverResources)
}
}
}
if resources, err := kubernetesProvider.ListManagedRoles(ctx, selfResourcesNamespace); err != nil {
resourceDesc := fmt.Sprintf("Roles in namespace %s", selfResourcesNamespace)
handleDeletionError(err, resourceDesc, &leftoverResources)
} else {
for _, resource := range resources.Items {
if err := kubernetesProvider.RemoveRole(ctx, selfResourcesNamespace, resource.Name); err != nil {
resourceDesc := fmt.Sprintf("Role %s in namespace %s", resource.Name, selfResourcesNamespace)
handleDeletionError(err, resourceDesc, &leftoverResources)
}
}
}
if resources, err := kubernetesProvider.ListManagedRoleBindings(ctx, selfResourcesNamespace); err != nil {
resourceDesc := fmt.Sprintf("RoleBindings in namespace %s", selfResourcesNamespace)
handleDeletionError(err, resourceDesc, &leftoverResources)
} else {
for _, resource := range resources.Items {
if err := kubernetesProvider.RemoveRoleBinding(ctx, selfResourcesNamespace, resource.Name); err != nil {
resourceDesc := fmt.Sprintf("RoleBinding %s in namespace %s", resource.Name, selfResourcesNamespace)
handleDeletionError(err, resourceDesc, &leftoverResources)
}
}
}
if err := kubernetesProvider.RemovePod(ctx, selfResourcesNamespace, kubernetes.HubPodName); err != nil {
resourceDesc := fmt.Sprintf("Pod %s in namespace %s", kubernetes.HubPodName, selfResourcesNamespace)
handleDeletionError(err, resourceDesc, &leftoverResources)
}
if err := kubernetesProvider.RemovePod(ctx, selfResourcesNamespace, kubernetes.FrontPodName); err != nil {
resourceDesc := fmt.Sprintf("Pod %s in namespace %s", kubernetes.FrontPodName, selfResourcesNamespace)
handleDeletionError(err, resourceDesc, &leftoverResources)
}
return leftoverResources
}
func handleDeletionError(err error, resourceDesc string, leftoverResources *[]string) {
log.Warn().Err(errormessage.FormatError(err)).Msg(fmt.Sprintf("Error while removing %s", resourceDesc))
*leftoverResources = append(*leftoverResources, resourceDesc)
}


@@ -1,124 +0,0 @@
package resources
import (
"context"
"fmt"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/docker"
"github.com/kubeshark/kubeshark/errormessage"
"github.com/kubeshark/kubeshark/kubernetes"
"github.com/kubeshark/kubeshark/misc"
"github.com/rs/zerolog/log"
core "k8s.io/api/core/v1"
)
func CreateHubResources(ctx context.Context, kubernetesProvider *kubernetes.Provider, isNsRestrictedMode bool, selfNamespace string, hubResources kubernetes.Resources, imagePullPolicy core.PullPolicy, imagePullSecrets []core.LocalObjectReference, debug bool) (bool, error) {
if !isNsRestrictedMode {
if err := createSelfNamespace(ctx, kubernetesProvider, selfNamespace); err != nil {
return false, err
}
}
selfServiceAccountExists, err := createRBACIfNecessary(ctx, kubernetesProvider, isNsRestrictedMode, selfNamespace, []string{"pods", "services", "endpoints"})
if err != nil {
log.Warn().Err(errormessage.FormatError(err)).Msg(fmt.Sprintf("Failed to ensure the resources required for IP resolving. %s will not resolve target IPs to names.", misc.Software))
}
var serviceAccountName string
if selfServiceAccountExists {
serviceAccountName = kubernetes.ServiceAccountName
} else {
serviceAccountName = ""
}
opts := &kubernetes.PodOptions{
Namespace: selfNamespace,
PodName: kubernetes.HubPodName,
PodImage: docker.GetHubImage(),
ServiceAccountName: serviceAccountName,
Resources: hubResources,
ImagePullPolicy: imagePullPolicy,
ImagePullSecrets: imagePullSecrets,
Debug: debug,
}
frontOpts := &kubernetes.PodOptions{
Namespace: selfNamespace,
PodName: kubernetes.FrontPodName,
PodImage: docker.GetWorkerImage(),
ServiceAccountName: serviceAccountName,
Resources: hubResources,
ImagePullPolicy: imagePullPolicy,
ImagePullSecrets: imagePullSecrets,
Debug: debug,
}
if err := createSelfHubPod(ctx, kubernetesProvider, opts); err != nil {
return selfServiceAccountExists, err
}
if err := createFrontPod(ctx, kubernetesProvider, frontOpts); err != nil {
return selfServiceAccountExists, err
}
// TODO: Why do the port values need to be 80?
_, err = kubernetesProvider.CreateService(ctx, selfNamespace, kubernetes.HubServiceName, kubernetes.HubServiceName, 80, 80)
if err != nil {
return selfServiceAccountExists, err
}
log.Info().Str("service", kubernetes.HubServiceName).Msg("Successfully created a service.")
_, err = kubernetesProvider.CreateService(ctx, selfNamespace, kubernetes.FrontServiceName, kubernetes.FrontServiceName, 80, int32(config.Config.Tap.Proxy.Front.DstPort))
if err != nil {
return selfServiceAccountExists, err
}
log.Info().Str("service", kubernetes.FrontServiceName).Msg("Successfully created a service.")
return selfServiceAccountExists, nil
}
func createSelfNamespace(ctx context.Context, kubernetesProvider *kubernetes.Provider, selfNamespace string) error {
_, err := kubernetesProvider.CreateNamespace(ctx, selfNamespace)
return err
}
func createRBACIfNecessary(ctx context.Context, kubernetesProvider *kubernetes.Provider, isNsRestrictedMode bool, selfNamespace string, resources []string) (bool, error) {
if !isNsRestrictedMode {
if err := kubernetesProvider.CreateSelfRBAC(ctx, selfNamespace, kubernetes.ServiceAccountName, kubernetes.ClusterRoleName, kubernetes.ClusterRoleBindingName, misc.RBACVersion, resources); err != nil {
return false, err
}
} else {
if err := kubernetesProvider.CreateSelfRBACNamespaceRestricted(ctx, selfNamespace, kubernetes.ServiceAccountName, kubernetes.RoleName, kubernetes.RoleBindingName, misc.RBACVersion); err != nil {
return false, err
}
}
return true, nil
}
func createSelfHubPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, opts *kubernetes.PodOptions) error {
pod, err := kubernetesProvider.BuildHubPod(opts)
if err != nil {
return err
}
if _, err = kubernetesProvider.CreatePod(ctx, opts.Namespace, pod); err != nil {
return err
}
log.Info().Str("pod", pod.Name).Msg("Successfully created a pod.")
return nil
}
func createFrontPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, opts *kubernetes.PodOptions) error {
pod, err := kubernetesProvider.BuildFrontPod(opts, config.Config.Tap.Proxy.Host, fmt.Sprintf("%d", config.Config.Tap.Proxy.Hub.SrcPort))
if err != nil {
return err
}
if _, err = kubernetesProvider.CreatePod(ctx, opts.Namespace, pod); err != nil {
return err
}
log.Info().Str("pod", pod.Name).Msg("Successfully created a pod.")
return nil
}


@@ -16,8 +16,15 @@ func Get(url string, client *http.Client) (*http.Response, error) {
// Post - When err is nil, resp always contains a non-nil resp.Body.
// Caller should close resp.Body when done reading from it.
func Post(url, contentType string, body io.Reader, client *http.Client) (*http.Response, error) {
return checkError(client.Post(url, contentType, body))
func Post(url, contentType string, body io.Reader, client *http.Client, licenseKey string) (*http.Response, error) {
req, err := http.NewRequest(http.MethodPost, url, body)
if err != nil {
return nil, err
}
req.Header.Set("Content-Type", "application/json")
req.Header.Set("License-Key", licenseKey)
return checkError(client.Do(req))
}
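A usage sketch for the new `Post` signature (URL, payload, and key are placeholders):

```go
// Sketch: POST a JSON body with the license key attached as a header.
// Assumes the "bytes" and "net/http" imports.
body := bytes.NewReader([]byte(`{"status":"up"}`))
resp, err := utils.Post("https://api.example.com/v1/checkin", "application/json", body, http.DefaultClient, "LICENSE-KEY")
if err != nil {
	return err
}
defer resp.Body.Close()
```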
// Do - When err is nil, resp always contains a non-nil resp.Body.


@@ -2,7 +2,6 @@ package utils
import (
"bytes"
"encoding/json"
"gopkg.in/yaml.v3"
)
@@ -12,22 +11,10 @@ const (
tab = "\t"
)
func PrettyJson(data interface{}) (string, error) {
buffer := new(bytes.Buffer)
encoder := json.NewEncoder(buffer)
encoder.SetIndent(empty, tab)
err := encoder.Encode(data)
if err != nil {
return empty, err
}
return buffer.String(), nil
}
func PrettyYaml(data interface{}) (string, error) {
buffer := new(bytes.Buffer)
encoder := yaml.NewEncoder(buffer)
encoder.SetIndent(0)
encoder.SetIndent(2)
err := encoder.Encode(data)
if err != nil {


@@ -1,7 +0,0 @@
package utils
import "github.com/docker/go-units"
func HumanReadableToBytes(humanReadableSize string) (int64, error) {
return units.FromHumanSize(humanReadableSize)
}