Compare commits

381 Commits

M. Mert Yildiran
0b58558f70 🔖 Bump the Helm chart version to 52.2.0 2024-03-27 21:50:27 +03:00
Alon Girmonsky
cdd306b890 Update RELEASE.md.TEMPLATE 2024-03-26 15:21:41 -07:00
M. Mert Yildiran
3cc9ff8616 🔖 Bump the Helm chart version to 52.1.77 2024-03-19 18:55:27 +03:00
Serhii Ponomarenko
247498492a Set custom timezone (#1517)
* 🔨 Add timezone config

* 🔨 Update `complete.yaml`

* 📝 Document `timezone` config

* 📝 Update `timezone` config docs

* 📝 Update `timezone` config docs

* 🔥 Remove unused `TIMEZONE` field from `ConfigMap`

* 🦺 Handle empty `tap.timezone` case

* 🔨 Move `timezone` from `.Values.tap` to `.Values`

* 🔨 Add `timezone` field to helm values

* 🔨 Update `complete.yaml`

* 📝 Update `timezone` config docs

* 🔨 Add `TIMEZONE` field to `ConfigMap`

---------

Co-authored-by: Alon Girmonsky <1990761+alongir@users.noreply.github.com>
2024-03-19 12:06:50 +01:00
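
A minimal values sketch for the `timezone` setting introduced in #1517. Per the commit trail the key ended up at the top level of the chart values (moved out of `tap`); the zone name below is purely illustrative.

```yaml
# Sketch only: custom timezone for Kubeshark components (#1517).
# An empty value falls back to the container default (the "empty tap.timezone"
# case handled in the commits).
timezone: "America/Los_Angeles"   # illustrative IANA zone name
```
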
Volodymyr Stoiko
867c7058a0 👷 Remove kubeshark tap upgrades (#1519) 2024-03-18 17:32:56 +03:00
M. Mert Yildiran
f1021f61b6 👷 Change the Homebrew job's name 2024-03-15 21:16:14 +03:00
M. Mert Yildiran
9162c4fb64 🔖 Bump the Helm chart version to 52.1.75 2024-03-15 20:39:39 +03:00
Serhii Ponomarenko
e7fc7b791a 🐛 Fix front nginx and network policies ports (#1518)
* 🐛 Use `8080` listen port for front nginx config

* 🐛 Use `8080` ingress port for front/hub network policies
2024-03-14 15:18:24 -07:00
Volodymyr Stoiko
9914183d7d Move brew release into separate job (#1516) 2024-03-11 04:58:22 -07:00
Volodymyr Stoiko
c0751ad4cb Switch to lower ports (#1514)
Co-authored-by: Alon Girmonsky <1990761+alongir@users.noreply.github.com>
2024-03-08 21:02:05 -08:00
Serhii Ponomarenko
0aca81fbcb 🔨 Disable scripting, targeted pods update & recording via ConfigMap keys (#1515)
* 🔨 Add `SCRIPTING_DISABLED` key to `ConfigMap`

* 🔨 Add `TARGETED_PODS_UPDATE_DISABLED` config

* 🔨 Add `RECORDING_DISABLED` key to `ConfigMap`

* 🎨 Reformat `TapConfig`

* 🔨 Update `complete.yaml`
2024-03-08 20:49:07 -08:00
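
An illustrative fragment of the ConfigMap keys named in #1515. Only the three key names come from the commit messages; the metadata, the `"false"` defaults, and the string rendering are assumptions.

```yaml
# Sketch of the ConfigMap keys added in #1515 (defaults assumed, not confirmed)
apiVersion: v1
kind: ConfigMap
metadata:
  name: kubeshark-config-map        # assumed resource name
data:
  SCRIPTING_DISABLED: "false"
  TARGETED_PODS_UPDATE_DISABLED: "false"
  RECORDING_DISABLED: "false"
```
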
Shunsuke Suzuki
24dccab3e4 fix: fix the asset name of the checksum file for windows/amd64 (#1509)
Pre-built binaries and checksum files are released at GitHub Releases.

https://github.com/kubeshark/kubeshark/releases

But checksum files for windows/amd64 have the following issues.

kubeshark.exe
kubeshark_windows_amd64.sha256

- The executable file name and the checksum file name don't conform to the naming convention
- We can't verify the pre-built binaries with checksum files because the pre-built binary name is different from the actual binary name

```console
$ cat kubeshark_windows_amd64.sha256
ea8fffa952bc8047f493469d024887ed80f966c0d74cf5fb039ea12f71174629  kubeshark_windows_amd64
```

```console
$ sha256sum -c kubeshark_windows_amd64.sha256
sha256sum: kubeshark_windows_amd64: No such file or directory
kubeshark_windows_amd64: FAILED open or read
sha256sum: WARNING: 1 listed file could not be read
```

The cause of these issues is that the pre-built binaries were renamed after the checksum files were generated.

b125860d06/Makefile (L41)
b125860d06/Makefile (L61)

This commit resolves the issue by generating the checksum file after renaming the pre-built binary.

Co-authored-by: Volodymyr Stoiko <me@volodymyrstoiko.com>
2024-03-08 19:32:17 +03:00
Volodymyr Stoiko
db607aff16 Add network policies for kubeshark components (#1513)
* Add explicit network policies for kubeshark components

* allow exact ports

---------

Co-authored-by: Alon Girmonsky <1990761+alongir@users.noreply.github.com>
2024-03-07 06:37:13 -08:00
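
A rough sketch of what an explicit per-component NetworkPolicy from #1513 could look like. The selector label and resource name are assumptions, and the 8080 ingress port reflects the later fix in #1518 rather than this PR.

```yaml
# Sketch: explicit ingress policy for the hub component (labels/names assumed)
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: kubeshark-hub-network-policy
spec:
  podSelector:
    matchLabels:
      app.kubeshark.co/app: hub     # assumed label; the chart's real selector may differ
  policyTypes:
    - Ingress
  ingress:
    - ports:
        - protocol: TCP
          port: 8080                # exact port per the follow-up fix in #1518
```
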
Volodymyr Stoiko
ec1728ef91 Add kubeshark fork to use for homebrew release (#1512) 2024-03-06 11:02:08 +01:00
M. Mert Yildiran
93de6e8934 🔖 Bump the Helm chart version to 52.1.66 2024-03-06 00:12:02 +03:00
Alon Girmonsky
5998d00e6a Update README.md 2024-03-03 20:45:44 +02:00
Volodymyr Stoiko
afafb2c625 Add homebrew core version update release step (#1511) 2024-02-29 23:32:52 +02:00
M. Mert Yildiran
b125860d06 💚 Set prerelease to false 2024-02-29 01:53:32 +03:00
M. Mert Yildiran
68aabf262f 🔖 Bump the Helm chart version to 52.1.63 2024-02-29 01:45:41 +03:00
M. Mert Yildiran
d279b7272d 💚 Change ssh-key field to token 2024-02-29 01:45:11 +03:00
M. Mert Yildiran
d15e1cca54 🔖 Bump the Helm chart version to 52.1.62 2024-02-29 01:33:28 +03:00
M. Mert Yildiran
d8761e1e31 💚 Fix the secret name for Homebrew repo 2024-02-29 01:32:57 +03:00
M. Mert Yildiran
a9d2cb5ac2 🔖 Bump the Helm chart version to 52.1.61 2024-02-28 23:43:04 +03:00
M. Mert Yildiran
ddcf973e35 Revert "🔖 Bump the Helm chart version to 52.1.61"
This reverts commit b6d1804326.
2024-02-28 23:42:08 +03:00
M. Mert Yildiran
b6d1804326 🔖 Bump the Helm chart version to 52.1.61 2024-02-28 23:39:06 +03:00
Volodymyr Stoiko
6dc12af55b Add namespace prefix to cluster scope resources (#1506)
Co-authored-by: Alon Girmonsky <1990761+alongir@users.noreply.github.com>
2024-02-28 12:14:03 -08:00
Volodymyr Stoiko
d78b0b987a Remove brew version before installing with script (#1503)
Co-authored-by: Alon Girmonsky <1990761+alongir@users.noreply.github.com>
2024-02-28 11:48:43 -08:00
iluxa
9889787833 update comment for IPC_LOCK (#1507) 2024-02-27 11:52:07 -08:00
M. Mert Yildiran
8fe0544175 🔨 Remove CHECKPOINT_RESTORE capability from defaults 2024-02-26 21:40:14 +03:00
Volodymyr Stoiko
09afa1983a Add build-brew target for makefile (#1504) 2024-02-26 09:38:01 -08:00
Alon Girmonsky
669b5cb1f2 Update README.md 2024-02-25 13:55:08 -08:00
Volodymyr Stoiko
25e0949761 Template homebrew formulae (#1502) 2024-02-24 15:06:15 -08:00
Alon Girmonsky
fa07f973c0 Moving the installation script to the project's repo 2024-02-21 15:47:25 -08:00
M. Mert Yildiran
c38bdcd977 🔖 Bump the Helm chart version to 52.1.50 2024-02-20 21:25:10 +03:00
M. Mert Yildiran
51a4165304 🔧 Update the generate-helm-values Makefile rule 2024-02-15 19:54:40 +03:00
M. Mert Yildiran
c8cd1f57c4 🔖 Bump the Helm chart version to 52.1.45 2024-02-15 19:35:01 +03:00
M. Mert Yildiran
dfde87140a 🔧 Update the release Makefile rule 2024-02-15 19:34:09 +03:00
M. Mert Yildiran
64b6368e63 🔨 Update complete.yaml 2024-02-15 19:25:22 +03:00
Alon Girmonsky
6af2d11878 removed cloud URL from config map (#1499)
1. removed cloud URL from config map
2. added to hub's and worker's deployments
2024-02-14 13:06:24 -08:00
M. Mert Yildiran
2b552b5847 🔨 Update complete.yaml 2024-02-08 13:18:11 +03:00
Alon Girmonsky
72ec983b24 updated the top banner 2024-02-07 17:18:11 -08:00
M. Mert Yildiran
2f899a943c 🔖 Bump the Helm chart version to 52.1.30 2024-02-07 22:43:22 +03:00
M. Mert Yildiran
12f6b04a49 🔨 Update complete.yaml 2024-02-07 22:22:48 +03:00
Alon Girmonsky
f010f349a1 unixsocket for tracer (#1497)
- Added `-unixsocket` by default
- In DEBUG mode, added `-dumptracer 100000000`
2024-02-07 09:50:58 -08:00
iluxa
26e23dc94f add capability for tracer (#1496)
Co-authored-by: Alon Girmonsky <1990761+alongir@users.noreply.github.com>
2024-02-06 13:38:21 -08:00
Serhii Ponomarenko
6785f024e4 Feature-based SAML authorization (#49) (#1495)
* 🔨 Add `showAdminConsoleLink` to helm values

* 🔨 Add `ShowAdminConsoleLink` to `TapConfig`

* 🔨 Regenerate `complete.yaml` manifest

* 📝 Update helm-chart `README.md`

---------

Co-authored-by: Alon Girmonsky <1990761+alongir@users.noreply.github.com>
2024-02-06 13:36:32 -08:00
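
A one-line values sketch for the flag added in #1495; its placement under `tap` is assumed from the matching `ShowAdminConsoleLink` field on `TapConfig`.

```yaml
# Sketch: feature-based SAML authorization toggle from #1495
tap:
  showAdminConsoleLink: true   # placement under `tap` is an assumption
```
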
M. Mert Yildiran
92dab2e2f7 🔨 Add PcapErrorTTL field to MiscConfig 2024-02-06 01:32:07 +03:00
M. Mert Yildiran
4da51c40b9 🔨 Add kube_prometheus_stack.yaml manifest 2024-02-06 01:28:15 +03:00
Serhii Ponomarenko
18d051af28 🔥 Remove old Descope auth (#1490)
* 🔥 Remove Descope-related config updates

* 🔥 Remove Descope-related helm values

* 🔥 Remove Descope-related k8s configs

* 🔥 Remove Descope-related fields from `tapConfig`

---------

Co-authored-by: Alon Girmonsky <1990761+alongir@users.noreply.github.com>
2024-01-31 14:49:55 -08:00
M. Mert Yildiran
cef012d1f3 🐛 Fix the ConfigMap keys of JsonTTL and PcapTTL fields 2024-01-31 16:57:11 +03:00
M. Mert Yildiran
4802cca646 Add MiscConfig struct with has JsonTTL and PcapTTL fields 2024-01-30 02:25:04 +03:00
Alon Girmonsky
4117d008a9 Update README.md 2024-01-28 11:06:18 -08:00
Alon Girmonsky
91e3546196 added a link to the dashboard 2024-01-26 15:38:40 -08:00
Alon Girmonsky
4db2a80675 Add API cloud endpoint env var to hub deployment (#1489)
* Add API cloud endpoint env var to hub deployment

* Added an env var for api cloud endpoint
2024-01-26 00:24:38 -08:00
Serhii Ponomarenko
bfa3efd23a SAML authorization (#1487)
* 🔨 Add `AUTH_SAML_ROLE_ATTRIBUTE` field to `ConfigMap`

* 📝 Document `tap.auth.saml.roleAttribute/roles` values

* 🔧 Re-generate `complete.yaml`

* 🔥 Remove `default` tag from `SamlConfig.RoleAttribute`

---------

Co-authored-by: Alon Girmonsky <1990761+alongir@users.noreply.github.com>
2024-01-24 16:05:37 -08:00
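
A hedged sketch of the `tap.auth.saml.roleAttribute`/`roles` values documented in #1487. The attribute name is a placeholder, and the commit log does not spell out the per-role schema, so the map is left empty.

```yaml
# Sketch: SAML role mapping values from #1487
tap:
  auth:
    saml:
      roleAttribute: "role"   # placeholder attribute name
      roles: {}               # per-role schema not described in the commit log
```
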
M. Mert Yildiran
c48187a02e 🔖 Bump the Helm chart version to 52.1.9 2024-01-24 22:29:38 +03:00
Alon Girmonsky
f6d7510a14 fix the env variable / helm issue (#1486)
* fix the env variable / helm issue

Empty environment variables cannot be read by the front.

* change env variable to avoid an empty string
2024-01-23 21:30:24 -08:00
M. Mert Yildiran
f9e0c36d5f 🔨 Add AUTH_SAML_ROLES field to ConfigMap 2024-01-23 23:22:06 +03:00
Serhii Ponomarenko
a8dd332ff8 SAML integration prototype (#1475)
* 🔨 Add `AUTH_TYPE` field to `ConfigMap`

* 🔨 Add `AUTH_SAML_IDP_METADATA_URL` field to `ConfigMap`

* 🔨 Add `AUTH_SAML_X509_CRT` field to `Secret`

* 🔨 Add `AUTH_SAML_X509_KEY` field to `Secret`

* 🔨  Mount SAML X.509 key pair into `hub`

* 🔨 Add `REACT_APP_AUTH_TYPE` environment variable to `front`

* 🔧 Add Nginx path rewrite for `/saml`

* 🔧 Raise request size to accept big SAML responses

* 🔨 Add `REACT_APP_AUTH_TYPE` environment default value

* 📝 Update `README.md`

* 📝 Update `README.md`

* 🔨 Add `AUTH_TYPE` config map key

* 🔨 Add `AUTH_SAML_IDP_METADATA_URL` config map key

* ☸ Set `CONFIG_AUTH_TYPE` from `TapConfig`

* ☸ Set `CONFIG_AUTH_SAML_IDP_METADATA_URL` from `TapConfig`

*  Create `SamlConfig` in `TapConfig.AuthConfig`

* 🔨 Use updated `tap.auth.saml.idpMetadataUrl` tap config field

* 📝 Update `README.md`

* 🔨 Add `tap.ingress.enabled/host` to `ConfigMap`

* 🔨 Add `tap.proxy.front.port` to `ConfigMap`

* 🔨 Add `REACT_APP_AUTH_SAML_IDP_METADATA_URL` env to `front`

* 🔧 Supply `auth.saml` fields to `helm-chart/values.yaml`

* 🐛 Fix indentation for X.509 secrets

* 📝 Provide SAML setup docs

* 📝 Update SAML setup docs

* 📝 Update SAML setup docs

* Added callback URL indication

* 💥 Disable standard `Descope` auth

---------

Co-authored-by: Alon Girmonsky <1990761+alongir@users.noreply.github.com>
2024-01-23 10:47:29 -08:00
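
Pulling the #1475 commit bullets together, a values sketch of the SAML prototype might look roughly like the following. The `type` value, the `x509crt`/`x509key` key names, and all literal values are assumptions; only `tap.auth.saml.idpMetadataUrl` and the X.509 pair being mounted via a Secret come from the commits.

```yaml
# Sketch of the SAML auth values wired up in #1475 (literals are placeholders)
tap:
  auth:
    type: saml                                             # drives AUTH_TYPE / REACT_APP_AUTH_TYPE
    saml:
      idpMetadataUrl: "https://idp.example.com/metadata"   # placeholder URL
      x509crt: |                                           # key name assumed; stored in a Secret
        -----BEGIN CERTIFICATE-----
        ...
        -----END CERTIFICATE-----
      x509key: |                                           # key name assumed; stored in a Secret
        -----BEGIN PRIVATE KEY-----
        ...
        -----END PRIVATE KEY-----
```
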
M. Mert Yildiran
8e5df14f49 🔨 Run make generate-manifests 2024-01-23 20:54:58 +03:00
M. Mert Yildiran
6307871584 🔨 Add patch verb to kubeshark-self-config-role 2024-01-23 20:52:04 +03:00
Alon Girmonsky
7e77a76334 added custom release name support (-s) to the CLI's pro command (#1483) 2024-01-20 15:28:06 -08:00
Alon Girmonsky
f2b7df7e02 Global Filter, escaping doublequotes in strings (#1484)
* Global filter quote change

The global filter uses a single quote as opposed to a double quote. This limits the use of `'` inside the string, as it cannot be escaped. When a double quote (") is used, a single quote can appear inside the string and a double quote can be escaped as part of the string. An example of a global filter string: "redact(\"request.headers.Authorization\", \"request.headers['X-Aws-Ec2-Metadata-Token']\")"

* support escaping double quotes in  the global filter string
2024-01-19 16:51:33 -08:00
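
Rendered as a Helm value, the double-quoted form described in #1484 would look like the line below; the `tap.globalFilter` key name is an assumption based on the `GLOBAL_FILTER` ConfigMap field added elsewhere in this range, while the filter string itself is the example from the PR text.

```yaml
# Sketch: global filter with escaped double quotes (#1484); key name assumed
tap:
  globalFilter: "redact(\"request.headers.Authorization\", \"request.headers['X-Aws-Ec2-Metadata-Token']\")"
```
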
M. Mert Yildiran
b0af52ba9c 🔖 Bump the Helm chart version to 52.1.0 2024-01-18 02:22:20 +03:00
M. Mert Yildiran
ddc1dc3d71 🔨 Add TcpStreamChannelTimeoutMs field to TapConfig struct 2024-01-15 23:00:31 +03:00
M. Mert Yildiran
d99bfea0db 🔨 Rename worker resource requirement to sniffer 2024-01-15 21:14:06 +03:00
Volodymyr Stoiko
bed9d06c59 Pass kernel-module flag only if pf_ring enabled (#1480) 2024-01-14 14:39:32 -08:00
Volodymyr Stoiko
aaeb3ca1eb Load pf-ring kernel module in init container (#1476)
* Load kernel module in init container

* Update docs

* Update formatting

* Add pre-stop hook to unload pf_ring module

* Enable hook only on kernel module enabled

* fix template

* Use sidecontainer to unload pf_ring

* Add requirements for tracer into structs

* fix values

* fix typo

---------

Co-authored-by: Alon Girmonsky <1990761+alongir@users.noreply.github.com>
2024-01-12 15:49:39 -08:00
Alon Girmonsky
7df35e04a8 Update README.md
Changed `tap.tls` and `tap.serviceMesh` defaults to `true` following this commit: 8ba3e603a4
2024-01-12 09:36:34 -08:00
tgaliotto
a5be1a8eaa add request and limits for tracer container (#1459)
Co-authored-by: Alon Girmonsky <1990761+alongir@users.noreply.github.com>
2024-01-12 09:30:50 -08:00
M. Mert Yildiran
8ba3e603a4 Add trafficSampleRate field to TapConfig 2024-01-10 18:51:52 +03:00
Volodymyr Stoiko
db51e6dbc2 Add kubeshark-worker-metrics service and document it (#1474)
* Expose worker metrics

* Add metrics documentation

* upd

* Update metrics port configuration

* Update config/configStructs/tapConfig.go

Co-authored-by: M. Mert Yildiran <me@mertyildiran.com>

* Update helm-chart/README.md

Co-authored-by: M. Mert Yildiran <me@mertyildiran.com>

* Update helm-chart/templates/16-worker-service-metrics.yaml

Co-authored-by: M. Mert Yildiran <me@mertyildiran.com>

---------

Co-authored-by: M. Mert Yildiran <me@mertyildiran.com>
2024-01-04 16:17:22 +03:00
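
A sketch of the metrics Service introduced in #1474. Only the service name comes from the commit title; the selector label and port number are placeholders (the PR makes the port configurable).

```yaml
# Sketch: kubeshark-worker-metrics Service from #1474 (selector/port assumed)
apiVersion: v1
kind: Service
metadata:
  name: kubeshark-worker-metrics
spec:
  selector:
    app.kubeshark.co/app: worker   # assumed label
  ports:
    - name: metrics
      port: 9100                   # placeholder; set via the metrics port configuration
      targetPort: 9100
```
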
Alon Girmonsky
77878e97f5 Tracer, ServiceMesh - Disable by default and some docs updates (#1472)
* Disabled Tracer by default

As Tracer requires significantly more resources and elevated security capability, it is recommended to have it disabled by default and enabled on demand.

* Updated the tap.tls default value to false

* added description to the default and global KFL filters

* serviceMesh false by default

serviceMesh requires elevated security permissions. Furthermore, this capability is needed by only a fraction of the user base, and some service mesh versions/configurations aren't supported. Therefore, it is recommended to start with it disabled and enable it on demand.

* Update the readme related to the service mesh default value

Set the default value of serviceMesh to false as, among other things, it requires elevated security permissions and therefore should be enabled on demand.
2023-12-30 18:47:26 -08:00
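
For reference, opting back in to the features that #1472 turned off by default might look like the sketch below; `tap.tls` and `tap.serviceMesh` are named in the commits, while the tracer toggle's key is not, so it is only hinted at in a comment.

```yaml
# Sketch: re-enabling TLS/service-mesh capture after the #1472 default change
tap:
  tls: true
  serviceMesh: true
  # The tracer re-enable key isn't named in the commit log; something like
  # tap.tracer.enabled: true is assumed.
```
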
M. Mert Yildiran
36767eda27 🔨 Add KernelModuleConfig struct to TapConfig 2023-12-28 22:09:01 +03:00
Volodymyr Stoiko
6c01078f97 Add PF_RING related changes to docs and helm (#1471)
* Install pf-ring KMM Module and wait for it

* Add mode configuration

* save

* Update doc

* upd

* toc

* adjust template

* upd

* Add module cr verification job

* upd doc

* Fix binary name

* Add disable mode

* Update PF_RING.md

Some adjustments to the instructions.

* Update 15-pf-ring-kernel-module.yaml

Small syntax err

* upd

* merge master

---------

Co-authored-by: Alon Girmonsky <1990761+alongir@users.noreply.github.com>
2023-12-27 19:01:20 -08:00
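
A heavily hedged values sketch for the PF_RING settings from #1471/#1476; the key path is inferred from the `KernelModuleConfig` struct added to `TapConfig`, and both field names and values are assumptions.

```yaml
# Sketch: PF_RING kernel module settings (field names/values assumed)
tap:
  kernelModule:
    enabled: true     # when disabled, presumably maps to the templated -no-kernel-module flag
    mode: auto        # a "mode" setting is mentioned in #1471; valid values aren't listed
```
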
M. Mert Yildiran
6c06307d68 🔨 Add GLOBAL_FILTER field to ConfigMap 2023-12-27 23:58:17 +03:00
M. Mert Yildiran
2223cad038 🔨 Add REACT_APP_REPLAY_DISABLED environment variable to front 2023-12-27 22:30:17 +03:00
M. Mert Yildiran
c1fc4447ef 🔨 Move the list of Linux capabilities into values.yaml 2023-12-27 13:14:53 +03:00
M. Mert Yildiran
ea3eecfa04 🔨 Move SCRIPTING_ENV from ConfigMap to Secret 2023-12-25 20:55:26 +03:00
M. Mert Yildiran
51968f2aae 🔨 Add REPLAY_DISABLED field to ConfigMap 2023-12-25 17:34:38 +03:00
Alon Girmonsky
15f7a3559a Update README.md
Updated the banner
2023-12-20 12:59:48 +02:00
M. Mert Yildiran
cc9627c884 🔖 Bump the Helm chart version to 52.0.0 2023-12-19 20:20:43 +03:00
M. Mert Yildiran
d3f2cdbf0e Add DefaultFilter field to TapConfig 2023-12-18 16:51:55 +03:00
M. Mert Yildiran
28bfbf4186 🐛 Fix the type of EfsFileSytemIdAndPath field 2023-12-18 16:51:21 +03:00
Serhiy Berezin
d3c21a07bb EFS persistent volume helm deployment support (#1455)
* EFS persistent volume

docs/14

EFS static and dynamic provision added to default

* Update helm-chart/values.yaml

Co-authored-by: M. Mert Yildiran <mehmetmertyildiran@gmail.com>

* Update helm-chart/templates/08-persistent-volume-claim.yaml

Co-authored-by: M. Mert Yildiran <mehmetmertyildiran@gmail.com>

* Update config/configStructs/tapConfig.go

Fix format

Co-authored-by: M. Mert Yildiran <mehmetmertyildiran@gmail.com>

* Fix format config/configStructs/tapConfig.go

Co-authored-by: M. Mert Yildiran <mehmetmertyildiran@gmail.com>

* Improve formatting

---------

Co-authored-by: Alon Girmonsky <1990761+alongir@users.noreply.github.com>
Co-authored-by: M. Mert Yildiran <mehmetmertyildiran@gmail.com>
2023-12-11 10:52:58 -08:00
M. Mert Yildiran
510d5e5ed8 🔥 Remove REACT_APP_HUB_HOST and REACT_APP_HUB_PORT environment variables 2023-12-07 22:10:11 +03:00
Alon Girmonsky
1070d17e20 Update README.md
fixing syntax err
2023-12-05 00:17:36 -08:00
M. Mert Yildiran
6b8beb50ad 🔨 Update the capabilities 2023-12-04 23:31:15 +03:00
M. Mert Yildiran
68877b254b 🔨 Run make generate-helm-values && make generate-manifests 2023-12-04 22:50:19 +03:00
M. Mert Yildiran
dd91087157 Add comments to explain the required Linux capabilities 2023-12-04 22:49:31 +03:00
M. Mert Yildiran
cf3ce0180b 🔨 Remove the unnecessary Linux capabilities 2023-12-04 22:39:21 +03:00
M. Mert Yildiran
b4dc321829 🔖 Bump the Helm chart version to 51.0.39 2023-11-22 02:03:56 +03:00
M. Mert Yildiran
7e893a5b52 🔖 Bump the Helm chart version to 51.0.38 2023-11-22 01:03:27 +03:00
M. Mert Yildiran
33dabe8bbf 🔖 Bump the Helm chart version to 51.0.37 2023-11-21 23:20:16 +03:00
M. Mert Yildiran
ddf354f34e 🔧 Update port-forward-worker Makefile rule 2023-11-21 20:24:57 +03:00
M. Mert Yildiran
88f8998df3 🔨 Update the worker pod and run make generate-helm-values && make generate-manifests 2023-11-21 20:24:14 +03:00
Chin K
fc0f6a8452 🔨 Add SecurityContextConstraints for OpenShift (#1451)
* Added OS route

* Openshift Port-changes

* custom-scc

* custom-scc name update

* Revert "custom-scc name update"

This reverts commit 7e6d96c086.

* Added pre-install hook

* default port

* worker port update

* Update helm-chart/templates/14-kubeshark-scc.yaml

Co-authored-by: M. Mert Yildiran <mehmetmertyildiran@gmail.com>

* SCC only for openshift - capability added

---------

Co-authored-by: M. Mert Yildiran <mehmetmertyildiran@gmail.com>
Co-authored-by: M. Mert Yildiran <me@mertyildiran.com>
2023-11-21 20:01:34 +03:00
M. Mert Yildiran
cc9dbbef2e 🔥 Remove tapPcapRunner.go and --pcap flag 2023-11-21 07:17:43 +03:00
M. Mert Yildiran
696fed8345 🔊 Log the pod name in SPDYExecutor 2023-11-18 03:02:44 +03:00
M. Mert Yildiran
c03de2222d Add kube:// prefixed URI support (#1454)
*  Add `kube://` prefixed URI support

* 🐛 Fix the `a container name must be specified for pod <POD_NAME>, choose one of: [sniffer tracer]` error

* 🐛 Fix all of the issues in `kube://` prefixed URI support

* 🐛 Fix the `invalid reference format` error

* 🐛 Fix the `kubeUrl`
2023-11-18 02:56:07 +03:00
M. Mert Yildiran
a028211f0a 🔖 Bump the Helm chart version to 51.0.27 2023-11-16 21:31:23 +03:00
M. Mert Yildiran
c94a399bc3 🔖 Bump the Helm chart version to 2023-11-16 21:28:05 +03:00
M. Mert Yildiran
788bcd4846 🔧 Add release Makefile rule 2023-11-16 21:27:21 +03:00
M. Mert Yildiran
e2ef9eff05 🐛 Add CHECKPOINT_RESTORE Linux capability for kernel versions above 5.9 2023-11-04 00:06:27 +03:00
M. Mert Yildiran
7ce18ecaa9 🐛 Fix the POST script request 2023-11-03 01:09:22 +03:00
M. Mert Yildiran
7737bdf4fc 🐛 Start watching scripts after the hub and front are ready 2023-11-03 01:05:03 +03:00
M. Mert Yildiran
bed59e12ea 🔥 Delete the non-existing field references in _helpers.tpl file 2023-11-02 18:53:17 +03:00
M. Mert Yildiran
19723debb2 🐛 Fix GetHubUrl method 2023-11-01 20:35:19 +03:00
M. Mert Yildiran
d82df9d670 🔖 Bump the Helm chart version to 51.0.18 2023-10-31 01:13:11 +03:00
M. Mert Yildiran
d295cecfc2 🔨 Add REACT_APP_AUTH_ENABLED environment variable to kubeshark-front 2023-10-28 00:15:00 +03:00
M. Mert Yildiran
9c291bbf47 🔨 Disable auth by default 2023-10-27 22:06:07 +03:00
M. Mert Yildiran
cb0e89934d 🔨 Fix the AUTH_APPROVED_TENANTS config map key 2023-10-27 22:04:42 +03:00
M. Mert Yildiran
820fb64f8d 🔥 Delete envFrom fields from the Helm templates 2023-10-27 00:31:24 +03:00
M. Mert Yildiran
62d4c3a86e 🔨 Add ApprovedTenants field to AuthConfig and enable auth by default 2023-10-26 20:27:34 +03:00
M. Mert Yildiran
2757b7419f 🔨 Run make generate-manifests 2023-10-26 16:28:18 +03:00
M. Mert Yildiran
3b5cd6c77b 🔨 Make the config field tags camelCase 2023-10-25 18:00:32 +03:00
M. Mert Yildiran
7e56d45c6b 🔖 Bump the Helm chart version to 51.0.14 2023-10-25 03:02:16 +03:00
M. Mert Yildiran
0e2bca9729 Revert "🔨 Decrease the default storage limit back to 200Mi"
This reverts commit b1a40df069.
2023-10-24 03:10:23 +03:00
M. Mert Yildiran
b1a40df069 🔨 Decrease the default storage limit back to 200Mi 2023-10-24 03:06:02 +03:00
M. Mert Yildiran
773cf371f3 🩹 Exit if couldn't set the secret and log server init error instead of panic 2023-10-20 20:55:16 +03:00
Alon Girmonsky
1527f43396 Update README.md
updated the docker pull link
2023-10-19 14:20:34 -07:00
Alon Girmonsky
c9a2b9eb44 Update README.md
updated the announcement text
2023-10-19 14:18:08 -07:00
Alon Girmonsky
2b92bb74c7 📝 Update README.md (#1439)
* Update README.md

* Update helm-chart/README.md

Co-authored-by: M. Mert Yildiran <me@mertyildiran.com>

* Update helm-chart/README.md

Co-authored-by: M. Mert Yildiran <me@mertyildiran.com>

* Update README.md

fixed storage limit and change a title auth+eks

---------

Co-authored-by: M. Mert Yildiran <me@mertyildiran.com>
2023-10-17 01:42:29 +03:00
M. Mert Yildiran
fb08481909 🔥 Delete the unused docker package 2023-10-16 23:41:33 +03:00
M. Mert Yildiran
e215870b9d 🔨 Add v prefix to image tags 2023-10-16 23:30:12 +03:00
M. Mert Yildiran
426c6450ba 👷 Fix the CI trigger criteria 2023-10-16 23:25:02 +03:00
M. Mert Yildiran
a3383ee6cc 🔨 Template the Helm chart versions into Docker tags 2023-10-16 23:19:44 +03:00
Sergio Fernández
bdff836040 📝 Document the fields of values.yaml in Helm chart's README.md (#1437)
* Add Configuration parameters section

* Change proxy definition

* Update helm-chart/README.md

Co-authored-by: M. Mert Yildiran <mehmetmertyildiran@gmail.com>

* Update helm-chart/README.md

Co-authored-by: M. Mert Yildiran <mehmetmertyildiran@gmail.com>

* Update helm-chart/README.md

Co-authored-by: M. Mert Yildiran <mehmetmertyildiran@gmail.com>

* Update helm-chart/README.md

Co-authored-by: M. Mert Yildiran <mehmetmertyildiran@gmail.com>

* Update helm-chart/README.md

Co-authored-by: M. Mert Yildiran <mehmetmertyildiran@gmail.com>

* Update helm-chart/README.md

Co-authored-by: M. Mert Yildiran <mehmetmertyildiran@gmail.com>

* Update helm-chart/README.md

Co-authored-by: M. Mert Yildiran <mehmetmertyildiran@gmail.com>

* Update helm-chart/README.md

Co-authored-by: M. Mert Yildiran <mehmetmertyildiran@gmail.com>

* Update helm-chart/README.md

Co-authored-by: M. Mert Yildiran <mehmetmertyildiran@gmail.com>

* Update helm-chart/README.md

Co-authored-by: M. Mert Yildiran <mehmetmertyildiran@gmail.com>

* Update helm-chart/README.md

Co-authored-by: M. Mert Yildiran <mehmetmertyildiran@gmail.com>

* Update helm-chart/README.md

Co-authored-by: M. Mert Yildiran <mehmetmertyildiran@gmail.com>

* Update helm-chart/README.md

Co-authored-by: M. Mert Yildiran <mehmetmertyildiran@gmail.com>

* Update helm-chart/README.md

Co-authored-by: M. Mert Yildiran <mehmetmertyildiran@gmail.com>

* Update helm-chart/README.md

Co-authored-by: M. Mert Yildiran <mehmetmertyildiran@gmail.com>

* Update helm-chart/README.md

Co-authored-by: M. Mert Yildiran <mehmetmertyildiran@gmail.com>

* Update helm-chart/README.md

Co-authored-by: M. Mert Yildiran <mehmetmertyildiran@gmail.com>

* Update helm-chart/README.md

Co-authored-by: M. Mert Yildiran <mehmetmertyildiran@gmail.com>

* Update helm-chart/README.md

Co-authored-by: M. Mert Yildiran <mehmetmertyildiran@gmail.com>

* Update helm-chart/README.md

Co-authored-by: M. Mert Yildiran <mehmetmertyildiran@gmail.com>

* Update helm-chart/README.md

Co-authored-by: M. Mert Yildiran <mehmetmertyildiran@gmail.com>

* Update helm-chart/README.md

Co-authored-by: M. Mert Yildiran <mehmetmertyildiran@gmail.com>

* Update helm-chart/README.md

Co-authored-by: M. Mert Yildiran <mehmetmertyildiran@gmail.com>

* Update helm-chart/README.md

Co-authored-by: M. Mert Yildiran <mehmetmertyildiran@gmail.com>

* Update helm-chart/README.md

Co-authored-by: M. Mert Yildiran <mehmetmertyildiran@gmail.com>

* Update helm-chart/README.md

Co-authored-by: M. Mert Yildiran <mehmetmertyildiran@gmail.com>

* Update helm-chart/README.md

Co-authored-by: M. Mert Yildiran <mehmetmertyildiran@gmail.com>

* Update helm-chart/README.md

Co-authored-by: M. Mert Yildiran <mehmetmertyildiran@gmail.com>

* Update helm-chart/README.md

Co-authored-by: M. Mert Yildiran <mehmetmertyildiran@gmail.com>

* Update helm-chart/README.md

Co-authored-by: M. Mert Yildiran <mehmetmertyildiran@gmail.com>

* Update helm-chart/README.md

Co-authored-by: M. Mert Yildiran <mehmetmertyildiran@gmail.com>

* Update helm-chart/README.md

Co-authored-by: M. Mert Yildiran <mehmetmertyildiran@gmail.com>

* Update helm-chart/README.md

Co-authored-by: M. Mert Yildiran <mehmetmertyildiran@gmail.com>

* Update helm-chart/README.md

Co-authored-by: M. Mert Yildiran <mehmetmertyildiran@gmail.com>

---------

Co-authored-by: M. Mert Yildiran <mehmetmertyildiran@gmail.com>
2023-10-13 16:18:35 +03:00
M. Mert Yildiran
9f8ecc8e4e 🔨 Use if for commands 2023-10-13 02:09:59 +03:00
M. Mert Yildiran
9cdf1aa68b 🔥 Delete enabling profiler of the worker 2023-10-13 01:58:31 +03:00
M. Mert Yildiran
78481d4bcc 🔥 Delete unused packetcapture field 2023-10-12 18:31:59 +03:00
M. Mert Yildiran
706a2fc9b5 🔨 Template the -servicemesh flag 2023-10-12 18:28:42 +03:00
Alon Girmonsky
1064305934 Update README.md
fixed helm command
2023-10-11 17:09:17 -07:00
M. Mert Yildiran
9f1586ab50 🔨 Increase the default storage limit to 500Mi 2023-10-11 20:57:42 +03:00
M. Mert Yildiran
6d79598c5d 🔨 Template -no-kernel-module flag 2023-10-09 21:17:34 +03:00
M. Mert Yildiran
22bdbda718 🔨 Add NET_RAW and NET_ADMIN capabilities to tracer container 2023-10-09 19:56:29 +03:00
M. Mert Yildiran
23e2493890 🔨 Fix the issues in worker DaemonSet 2023-10-04 06:34:34 +03:00
M. Mert Yildiran
a7905bc1ba Revert "🔨 Add server container to worker DaemonSet"
This reverts commit 48adf86b25.
2023-10-04 06:02:24 +03:00
M. Mert Yildiran
4831b44dfa Revert "🔨 Separate the resources of each container in worker DaemonSet"
This reverts commit 6add6fb1ec.
2023-10-04 06:02:19 +03:00
M. Mert Yildiran
6817fd70ab Revert "🔨 Lower the resource limits"
This reverts commit d0b621070c.
2023-10-04 06:02:12 +03:00
M. Mert Yildiran
3803bad6a4 🔨 Run make generate-manifests 2023-09-28 20:40:56 +03:00
M. Mert Yildiran
d0b621070c 🔨 Lower the resource limits 2023-09-28 20:39:57 +03:00
M. Mert Yildiran
6add6fb1ec 🔨 Separate the resources of each container in worker DaemonSet 2023-09-28 01:36:56 +03:00
M. Mert Yildiran
e1106e25c4 Use emptyDir as the default type of data volume 2023-09-27 01:57:15 +03:00
M. Mert Yildiran
48adf86b25 🔨 Add server container to worker DaemonSet 2023-09-27 00:20:46 +03:00
M. Mert Yildiran
2ea5dc0df0 🔨 Remove SYS_MODULE from the capabilities of tracer 2023-09-26 21:45:30 +03:00
M. Mert Yildiran
bb0172b151 📝 Update the README.md(s) 2023-09-25 23:26:06 +03:00
M. Mert Yildiran
ef7c80df05 📝 Update Helm notes 2023-09-25 23:24:37 +03:00
M. Mert Yildiran
5bd44b57f4 In case of tap re-run, update the config and start a proxy 2023-09-25 23:21:38 +03:00
M. Mert Yildiran
41dacbff1a 🔨 Add tracer as a separate container to worker DaemonSet (#1428)
* 🔨 Add `tracer` as a separate container to worker `DaemonSet`

* 🔥 Delete some of the unused connector methods

* 🔨 Set `POD_NAME` and `POD_NAMESPACE` environment variables in worker `DeamonSet`

* 🔨 Set `POD_NAME` and `POD_NAMESPACE` environment variables in hub `Deployment`

* Fix the labels

* Fix the self config role

* Restrict it to specific resource names

* Run `make generate-manifests`
2023-09-24 04:23:32 +03:00
M. Mert Yildiran
d94ce4dce3 🔖 Bump the Helm chart version to 50.4 2023-09-21 21:56:09 +03:00
M. Mert Yildiran
65ab0ca668 🐛 Don't use encoding/json in config command 2023-09-21 21:52:43 +03:00
M. Mert Yildiran
9bc3ea5ffc 🐛 Generate truly the default config with config -r 2023-09-21 21:43:00 +03:00
M. Mert Yildiran
2d17d1a83d Replace gopkg.in/yaml.v3 with github.com/goccy/go-yaml 2023-09-21 21:36:08 +03:00
M. Mert Yildiran
78c89cc5b4 🔖 Bump the Helm chart version to 50.3 2023-09-17 00:09:37 +03:00
M. Mert Yildiran
b5c9a31380 🔧 Run make generate-manifests 2023-09-16 23:52:53 +03:00
Luiz Oliveira
3dfff2b7a5 ♻️ Turn the Ingress path rewrite for Hub into an Nginx location directive (#1426)
* fixes websocket for nginx-ingress

Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>

* update message when helm completes

Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>

* force react port to be a path

Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>

* include Authorization header to the proxy

Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>

* remove hub from proxy

Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>

* remove REACT_APP_HUB_PORT info

Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>

* include path back again to REACT_APP_HUB_PORT

Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>

---------

Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>
2023-09-15 21:43:34 +03:00
M. Mert Yildiran
583a5b97ee 🔧 Re-order the template filenames and re-generate values.yaml and complete.yaml 2023-09-04 02:25:33 +03:00
Luiz Oliveira
64aae06fe5 🛂 Add a new Role and RoleBinding resources to have write access for our own Secret resource (#1416)
* include role and rolebinding to write secrets

With this, the kubeshark service account has rights to update the value of the Secrets in the same namespace where kubeshark was deployed. This was necessary to keep the value of the license updated.

Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>

* Update helm-chart/templates/02-cluster-role.yaml

Co-authored-by: M. Mert Yildiran <me@mertyildiran.com>

* Update helm-chart/templates/03-cluster-role-binding.yaml

Co-authored-by: M. Mert Yildiran <me@mertyildiran.com>

* Update helm-chart/templates/03-cluster-role-binding.yaml

Co-authored-by: M. Mert Yildiran <me@mertyildiran.com>

* Update helm-chart/templates/03-cluster-role-binding.yaml

Co-authored-by: M. Mert Yildiran <me@mertyildiran.com>

* Update helm-chart/templates/02-cluster-role.yaml

Co-authored-by: M. Mert Yildiran <me@mertyildiran.com>

---------

Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>
Co-authored-by: M. Mert Yildiran <me@mertyildiran.com>
2023-09-04 02:20:26 +03:00
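
Based on the PR description and the later `patch`-verb commit (6307871584), the namespaced Role might look roughly like this; the resource name appears in a later commit message, but the verb list and apiGroup scoping are assumptions.

```yaml
# Sketch: Role letting the release's service account update its own Secret (#1416)
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: kubeshark-self-config-role   # name taken from a later commit message
rules:
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get", "update", "patch"]   # "patch" added later in 6307871584; rest assumed
```
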
Luiz Oliveira
1ccaa03fb2 🏗️ Give the user ability to set ingress as needed (#1417)
* Give the user the ability to set ingress as needed

- Removed unnecessary IngressClass.
- If no IngressClassName is passed, use the cluster's default class.
- Renamed `ingressclass` to `IngressClassName`, which is the standard name for it.
- Included custom annotations for Ingress. This way the user can set any custom annotation for the ingress only.

Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>

* Update helm-chart/templates/11-ingress.yaml

Co-authored-by: M. Mert Yildiran <me@mertyildiran.com>

* Update config/configStructs/tapConfig.go

Co-authored-by: M. Mert Yildiran <me@mertyildiran.com>

* Update helm-chart/templates/11-ingress.yaml

Co-authored-by: M. Mert Yildiran <me@mertyildiran.com>

* update default ingressClassName value

Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>

---------

Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>
Co-authored-by: M. Mert Yildiran <me@mertyildiran.com>
2023-09-04 02:18:43 +03:00
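
A values sketch reflecting the ingress behaviour described in #1417; `tap.ingress.enabled`/`host` appear elsewhere in this range, and the host and annotation shown are placeholders.

```yaml
# Sketch: user-configurable ingress from #1417 (host/annotation values are placeholders)
tap:
  ingress:
    enabled: true
    host: kubeshark.example.com
    ingressClassName: ""      # empty -> the cluster's default IngressClass is used
    annotations:
      nginx.ingress.kubernetes.io/proxy-body-size: "0"   # illustrative custom annotation
```
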
M. Mert Yildiran
3222212367 🔧 Update complete.yaml 2023-09-01 04:09:57 +03:00
M. Mert Yildiran
c5681871e4 🔖 Bump the Helm chart version to 50.2 2023-09-01 03:22:56 +03:00
M. Mert Yildiran
1ac3ba0a6d 🔧 Add a notice about telemetry into NOTES.txt of the Helm chart 2023-08-31 18:55:58 +03:00
M. Mert Yildiran
d3520765eb 🔥 Delete .dockerignore file 2023-08-31 06:16:52 +03:00
M. Mert Yildiran
fa1e7bcf01 🔧 Add TelemetryConfig struct and --telemetry-enabled flag to tap command 2023-08-31 03:50:14 +03:00
M. Mert Yildiran
bf182b6330 🐛 Template the -tls flag in worker DaemonSet 2023-08-29 03:51:08 +03:00
M. Mert Yildiran
f59f84af02 Add export command to download PCAP export 2023-08-28 22:00:36 +03:00
M. Mert Yildiran
cae5a92a13 🔖 Bump the Helm chart version to 50.1 2023-08-25 22:22:36 +03:00
M. Mert Yildiran
7afb1d8b9b Set the probing port of Hub back to 80 2023-08-24 23:51:47 +03:00
M. Mert Yildiran
f628192216 🚑 Add initialDelaySeconds to readiness and liveness probes of worker DaemonSet 2023-08-24 22:05:26 +03:00
M. Mert Yildiran
b1feb4e33f 🔧 Add port-forward-worker Makefile rule 2023-08-23 23:55:33 +03:00
M. Mert Yildiran
94dff24aed 🔥 Delete Chart.lock file 2023-08-23 02:02:29 +03:00
M. Mert Yildiran
d00d2eafa7 🔖 Bump the Helm chart version to 50.0 2023-08-22 23:25:48 +03:00
M. Mert Yildiran
63eb39b451 🚑 Fix the pod regex in the watch function for the recent changes related to pod names 2023-08-22 23:24:40 +03:00
M. Mert Yildiran
149a8b7efe 🔧 Remove the KMM related Makefile rules 2023-08-22 19:02:39 +03:00
M. Mert Yildiran
247fbc1291 🔥 Delete the module loader Dockerfile 2023-08-22 19:02:22 +03:00
M. Mert Yildiran
0e74238e56 🚀 Rename some of the recently added Kubernetes resources 2023-08-22 19:00:22 +03:00
M. Mert Yildiran
05ecef557f 🔧 Run make generate-manifests 2023-08-22 18:54:25 +03:00
Luiz Oliveira
63325ec890 🚀 Add readiness and liveness probes to worker DaemonSet (#1414)
Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>
2023-08-19 20:32:49 +03:00
M. Mert Yildiran
579cb47ecf 🔥 networking.k8s.io from apiGroups and ingresses from resources in ClusterRole 2023-08-17 17:29:54 +03:00
M. Mert Yildiran
7ed4088b4b Load the environment variables from kubeshark-hub-secret in worker DaemonSet 2023-08-17 00:56:16 +03:00
Luiz Oliveira
f95db49317 🚀 Change Hub's and Front's resource type from Pod to Deployment (#1412)
* change services to ClusterIP and update selector labels

Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>

* replace kind of hub and front to Deployments

Pod -> Deployments
hub config -> Uses a config-map
license -> Uses a secret

Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>

* uses map of labels to select pods and services

Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>

* remove ListAllNamespaces method

Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>

* include livenessProbe and readinessProbe for deployments

Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>

---------

Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>
2023-08-16 02:35:31 +03:00
M. Mert Yildiran
749b19512e Bring back the app labels 2023-08-15 18:33:00 +03:00
M. Mert Yildiran
746eff1e23 🔥 Remove the dead code in kubernetes package 2023-08-15 17:46:23 +03:00
M. Mert Yildiran
b7a8d9a41a Fix the label order 2023-08-15 17:44:39 +03:00
Luiz Oliveira
995fb96f24 🎨 Rename worker labels to the same pattern just like the other resources (#1410)
* rename worker labels to the same pattern as the other kubeshark components

Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>

* update matchLabels from daemonsets

Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>

---------

Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>
2023-08-15 16:56:43 +03:00
M. Mert Yildiran
5d4557d1dd Add SYS_MODULE Linux capability to the worker DaemonSet 2023-08-14 17:49:14 +03:00
M. Mert Yildiran
78c1c02fe6 🔥 Delete the recently added KMM related resources 2023-08-14 17:43:44 +03:00
M. Mert Yildiran
742a56272b 👕 Fix the linter error 2023-08-12 03:36:01 +03:00
M. Mert Yildiran
b7b3603e57 Add cert-manager Helm dependency 2023-08-12 03:29:12 +03:00
M. Mert Yildiran
54c5da2fcb Add a default NodeSelectorTerm that's matching Linux OS 2023-08-12 03:28:33 +03:00
M. Mert Yildiran
a5efb6b625 Fix the indentation 2023-08-12 03:09:37 +03:00
M. Mert Yildiran
7dcb2d23a0 Use the nodeselectorterms from values.yaml in the kmm-operator-controller-manager deployment 2023-08-12 02:44:35 +03:00
M. Mert Yildiran
f4ff4d4dd6 Add KMMConfig struct to TapConfig 2023-08-12 02:41:29 +03:00
M. Mert Yildiran
dd5761f112 🎨 Add a new line character at the end of values.yaml 2023-08-12 02:38:25 +03:00
M. Mert Yildiran
854836056d 🔨 Rename kernel-module-management.yaml to 15-kernel-module-management.yaml 2023-08-12 02:37:29 +03:00
Luiz Oliveira
090368295c Include kernel module management operator (#1409)
Files generated from https://github.com/kubernetes-sigs/kernel-module-management/tree/main/config/default
using kubectl kustomize
included kubeshark labels and checking

Attention, KMM requires cert-manager.

Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>
2023-08-12 02:36:30 +03:00
M. Mert Yildiran
67038e324b 🔧 Add logs-kmm-loader Makefile rule 2023-08-11 21:49:46 +03:00
M. Mert Yildiran
a5fb7e0474 Add .Capabilities.APIVersions.Has "kmm.sigs.x-k8s.io/v1beta1" check to module loader related Helm templates 2023-08-11 21:49:01 +03:00
M. Mert Yildiran
1a0625d37c Change the key from Dockerfile to dockerfile in module loader ConfigMap 2023-08-11 17:15:12 +03:00
M. Mert Yildiran
7ec1f595a1 Change the selector in module loader 2023-08-11 00:20:47 +03:00
M. Mert Yildiran
3998485944 🔨 Rename 12-nginx-config.yaml to 12-nginx-config-map.yaml 2023-08-11 00:15:41 +03:00
M. Mert Yildiran
e5de984acd 🔧 Add ssh-node Makefile rule 2023-08-11 00:14:04 +03:00
M. Mert Yildiran
18d6345e80 🔧 Add logs-kmm Makefile rule 2023-08-11 00:06:17 +03:00
M. Mert Yildiran
661e17ace9 Add 14-module-loader-config-map.yaml and a Makefile rule that generates it 2023-08-11 00:03:37 +03:00
M. Mert Yildiran
cc78b291af 🐳 Bring in module-loader Dockerfile 2023-08-10 23:50:53 +03:00
Luiz Oliveira
7c8adee7a8 🔨 Add _helpers.tpl and NOTES.txt to Helm chart and refactor labels (#1406)
* include kubernetes default labels

Using _helpers.tpl to define those labels

Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>

* include Notes with tips after the installs

Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>

* create a standard service account name

Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>

* Update helm-chart/templates/NOTES.txt

Co-authored-by: M. Mert Yildiran <me@mertyildiran.com>

* Update helm-chart/templates/NOTES.txt

Co-authored-by: M. Mert Yildiran <me@mertyildiran.com>

* fixes ingress and nginx labels

Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>

* fixes new label mapping from values

Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>

* update makefile to use the correct default namespace and release name to generate manifests

Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>

---------

Signed-off-by: Luiz Oliveira <ziuloliveira@gmail.com>
Co-authored-by: M. Mert Yildiran <me@mertyildiran.com>
2023-08-10 22:39:17 +03:00
M. Mert Yildiran
461ad1921e Add 13-module-loader.yaml Helm template which should load pf_ring.ko kernel module using KMM 2023-08-10 15:51:37 +03:00
M. Mert Yildiran
5ca90d70ff Have consistent case style in values.yaml 2023-08-09 20:16:49 +03:00
M. Mert Yildiran
65bda4e844 Add the IPv6 field to TapConfig struct 2023-08-09 01:24:08 +03:00
M. Mert Yildiran
c533bcd38c Add AUTH_ENABLED and AUTH_APPROVED_EMAILS environment variables to Hub's template 2023-08-09 01:22:10 +03:00
M. Mert Yildiran
1d17f83931 ⬆️ Bump the Helm chart version 2023-08-07 20:03:11 +03:00
M. Mert Yildiran
b9c3704bae Remove apiVersion field 2023-08-07 20:01:59 +03:00
M. Mert Yildiran
08602c75e0 Run make generate-manifests 2023-08-07 20:00:06 +03:00
M. Mert Yildiran
46799f6665 Revert " Let the user system:anonymous access the services/proxy resource"
This reverts commit acaa29f8eb.
2023-08-07 19:59:16 +03:00
Adrian Wyssmann
250a878407 Allow to disable IPv6 for nginx ingress (#1392)
Co-authored-by: M. Mert Yildiran <me@mertyildiran.com>
2023-08-05 18:43:13 +03:00
M. Mert Yildiran
b32f5f9e12 🔥 Remove the unused constants in kubernetes package 2023-08-04 20:49:21 +03:00
M. Mert Yildiran
5325f94f2b 🐛 Fix the flag redefined: release-namespace error 2023-08-01 23:00:36 +03:00
M. Mert Yildiran
fc3bf69348 Add -s flag to set release namespace into console, proxy and scripts 2023-07-31 23:09:04 +03:00
M. Mert Yildiran
7f41c348e6 ⬆️ Bump the Helm chart version 2023-07-30 23:39:59 +03:00
M. Mert Yildiran
eb69ebf008 Run make generate-manifests 2023-07-30 23:36:20 +03:00
M. Mert Yildiran
9f889a7a36 🔧 Add Makefile rules to do Helm install using canary and dev tags without the debug mode enabled 2023-07-30 05:46:53 +03:00
M. Mert Yildiran
909cc8de15 Change default PROFILING_INTERVAL_SECONDS to 60 2023-07-30 04:57:26 +03:00
M. Mert Yildiran
a0313e9e5a 🔧 Fix the recently added Makefile rules 2023-07-30 04:29:05 +03:00
M. Mert Yildiran
3aed354ab8 🔧 Add Makefile rules do Helm install/uninstall 2023-07-30 04:27:34 +03:00
M. Mert Yildiran
7fe9ecbca4 🔧 Add Makefile rules to exec into pods 2023-07-30 04:24:27 +03:00
M. Mert Yildiran
9e6af8c0bc Enable profiling in the worker when debug is enabled 2023-07-30 04:23:53 +03:00
M. Mert Yildiran
2c8f2e903f 🔧 Add Makefile rules to see the pod logs 2023-07-30 03:13:11 +03:00
dependabot[bot]
ca451e08f6 ⬆️ Bump github.com/docker/distribution (#1399)
Bumps [github.com/docker/distribution](https://github.com/docker/distribution) from 2.8.1+incompatible to 2.8.2+incompatible.
- [Release notes](https://github.com/docker/distribution/releases)
- [Commits](https://github.com/docker/distribution/compare/v2.8.1...v2.8.2)

---
updated-dependencies:
- dependency-name: github.com/docker/distribution
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-07-30 02:57:43 +03:00
M. Mert Yildiran
45bfebc956 Add sidecar.istio.io/inject: "false" label to all pods 2023-07-30 02:50:30 +03:00
M. Mert Yildiran
acaa29f8eb Let the user system:anonymous access the services/proxy resource 2023-07-30 02:48:32 +03:00
M. Mert Yildiran
470ab3d7ed Run make generate-manifests 2023-07-17 20:15:55 +03:00
M. Mert Yildiran
a259361a96 🔥 Remove -packet-capture flag 2023-07-17 20:14:53 +03:00
Alon Girmonsky
0350bcdd61 Update README.md (#1387)
Announcing latest features.
2023-07-16 08:12:40 +03:00
M. Mert Yildiran
cddc7d25fd ⬆️ Upgrade github.com/gin-gonic/gin to v1.9.1 2023-07-14 21:42:33 +03:00
M. Mert Yildiran
635a9d3256 ⬆️ Bump the Helm chart version 2023-07-10 20:44:14 +03:00
M. Mert Yildiran
2224d0e9f4 🐛 Fix the -debug flag of the worker in the Helm template 2023-07-10 20:43:07 +03:00
M. Mert Yildiran
db01c4e9e3 Revert the tap.proxy.hub.srvport usage in the Hub template 2023-07-09 23:11:02 +03:00
M. Mert Yildiran
0659d0fead ⬆️ Bump the Helm chart version 2023-07-09 23:01:19 +03:00
M. Mert Yildiran
988bb16260 Use the tap.proxy.hub.port and tap.proxy.hub.srvport in the Helm templates 2023-07-09 22:58:02 +03:00
M. Mert Yildiran
b4e8573634 Add license command 2023-07-06 21:57:21 +03:00
M. Mert Yildiran
cfa12ea45e 🐛 FIx the websocket: bad handshake error in console command in case Ingress is enabled 2023-07-06 21:48:03 +03:00
M. Mert Yildiran
9a7c23f070 🔥 Remove nodeSelectorTerms from hub and front pods 2023-07-06 21:28:32 +03:00
M. Mert Yildiran
0f1f832ddd 🐛 Add the missing json struct tags to ResourcesConfig 2023-07-03 23:26:18 +03:00
M. Mert Yildiran
dfe5605032 Update complete.yaml 2023-07-03 18:50:36 +03:00
M. Mert Yildiran
4c2884c40f Add KUBESHARK_HELM_CHART_PATH environment variable to set a local path for the Helm chart 2023-07-03 17:15:47 +03:00
M. Mert Yildiran
4fb179f623 ⬆️ Bump the Helm chart version 2023-07-03 17:05:20 +03:00
M. Mert Yildiran
796fc1453c Fix the hub and worker commands 2023-07-03 16:47:00 +03:00
M. Mert Yildiran
0ef3e2d018 Fix the issues related to release namespace 2023-07-03 16:33:50 +03:00
M. Mert Yildiran
77a14410f4 Revert " Rename releasenamespace field to selfnamespace"
This reverts commit d8ee89225c.
2023-07-03 15:11:21 +03:00
M. Mert Yildiran
f269a61842 Revert "Revert "🐛 Fix the commands in case of -debug flag enabled""
This reverts commit 64b22daa2a.
2023-07-03 15:11:04 +03:00
M. Mert Yildiran
51eddd3ae4 Fix the -r flag behavior in config command 2023-07-03 13:14:30 +03:00
M. Mert Yildiran
64b22daa2a Revert "🐛 Fix the commands in case of -debug flag enabled"
This reverts commit 3a2d34647e.
2023-07-03 12:32:15 +03:00
M. Mert Yildiran
3a2d34647e 🐛 Fix the commands in case of -debug flag enabled 2023-07-03 12:08:57 +03:00
M. Mert Yildiran
d8ee89225c Rename releasenamespace field to selfnamespace 2023-07-03 11:54:06 +03:00
M. Mert Yildiran
f7ce141d0d Remove an unnecessary check 2023-07-03 11:45:00 +03:00
M. Mert Yildiran
3c25cec633 Regenerate the complete.yaml 2023-06-30 16:57:44 +03:00
M. Mert Yildiran
7b86d32174 Remove the hostPort field from the manifests 2023-06-30 16:57:26 +03:00
M. Mert Yildiran
aeda619104 Download files in parallel 2023-06-29 16:45:59 +03:00
M. Mert Yildiran
98738cb5a6 Use Prefix field of ListObjectsV2Input instead of strings.HasPrefix check 2023-06-29 16:06:52 +03:00
M. Mert Yildiran
bf3285cb8b 🐛 Fix the collision of the -d flag in tap with the root level debug flag 2023-06-29 02:49:01 +03:00
M. Mert Yildiran
5f9084e497 Make the config command print the current config instead of the default config 2023-06-29 02:39:35 +03:00
M. Mert Yildiran
f2a384c8db Change the S3 URL to S3 URI in the flag description 2023-06-29 02:29:34 +03:00
M. Mert Yildiran
207d89fa17 🐛 Fix the cleanUpOldContainers method by adding All: true to ContainerListOptions 2023-06-29 02:22:57 +03:00
M. Mert Yildiran
3b758d15a0 Change the pattern of temporary file downloaded from S3 2023-06-29 02:21:32 +03:00
M. Mert Yildiran
261e850a59 Support folder URLs 2023-06-29 02:13:47 +03:00
M. Mert Yildiran
242a276c5f Download all the objects in bucket and TAR them in case of key is empty in the S3 URL 2023-06-29 01:42:51 +03:00
M. Mert Yildiran
b9f9e860b6 Change the default namespace from kubeshark to default and use .Release.Namespace in Helm templates 2023-06-27 21:06:44 +03:00
M. Mert Yildiran
1404c68a22 Fix the annotations in Ingress 2023-06-27 20:50:02 +03:00
M. Mert Yildiran
400c681369 Fix the issues in Ingress Helm template 2023-06-27 20:36:46 +03:00
M. Mert Yildiran
a4761e3262 Handle the column character in REACT_APP_HUB_PORT environment variable 2023-06-27 14:43:53 +03:00
M. Mert Yildiran
313d26670b Regenerate the manifests 2023-06-27 03:47:27 +03:00
M. Mert Yildiran
16f1e116c0 Template the annotations in all resources 2023-06-27 03:45:47 +03:00
M. Mert Yildiran
2d625eccaa Rename resourcelabels to labels 2023-06-27 03:33:46 +03:00
M. Mert Yildiran
19443501da Have consistent key style in values.yaml 2023-06-27 03:32:03 +03:00
M. Mert Yildiran
4ef91a2701 Template the controller field in IngressClass resource 2023-06-27 03:27:40 +03:00
M. Mert Yildiran
bc031be0fe 🔧 Add generate-helm-values Makefile rule 2023-06-27 03:26:20 +03:00
M. Mert Yildiran
f32a7d97ec Template the ingressClassName field in Ingress resource 2023-06-27 03:25:58 +03:00
M. Mert Yildiran
aeda024986 Remove the unnecessary single quotes from the Helm templates 2023-06-27 03:19:45 +03:00
M. Mert Yildiran
98198b9733 Remove the unused labels from the resources 2023-06-27 02:48:46 +03:00
M. Mert Yildiran
0bf7c83b86 Use toYaml and nindent instead of range in the Helm templates 2023-06-27 02:45:55 +03:00
M. Mert Yildiran
a8df589076 Bring back the functionality of nodeselectorterms field into the Helm chart 2023-06-27 01:32:16 +03:00
M. Mert Yildiran
c07f1851b3 🔥 Delete the manifests and add complete.yaml instead 2023-06-27 01:22:30 +03:00
M. Mert Yildiran
5c4c913a27 Bring back the functionality of resourcelabels field into the Helm chart 2023-06-27 01:12:04 +03:00
M. Mert Yildiran
71111248bd Add icon field to Chart.yaml 2023-06-27 00:30:53 +03:00
M. Mert Yildiran
5efb48f0c5 Bring back the functionality of ignoretainted field into the Helm chart 2023-06-27 00:15:04 +03:00
M. Mert Yildiran
cc980dbaf8 Print a warning if the storage limit modified while persistent storage is disabled and default its value 2023-06-23 02:08:42 +03:00
M. Mert Yildiran
1afe27e969 Add S3 URL support to --pcap flag 2023-06-22 20:59:14 +03:00
M. Mert Yildiran
8df5e015c5 Call os.Exit if the Helm install fails 2023-06-21 17:11:03 +03:00
M. Mert Yildiran
6b898077f1 ⬆️ Bump the Helm chart version 2023-06-21 17:04:25 +03:00
Victor Login
e93cd978e8 Update TLS for ingress (#1367) 2023-06-21 17:02:44 +03:00
M. Mert Yildiran
bada6dae68 🐛 Fix <len .Values.tap.namespaces>: error calling len: len of nil pointer Helm install error 2023-06-20 22:14:06 +03:00
M. Mert Yildiran
8814e08871 ⬆️ Bump the Helm chart version 2023-06-19 23:59:38 +03:00
M. Mert Yildiran
6b7a94a850 Revert values.yaml 2023-06-19 02:40:07 +03:00
M. Mert Yildiran
7b004e7a1f Change GetLocalhostOnPort method to GetProxyOnPort 2023-06-19 02:19:52 +03:00
M. Mert Yildiran
836b87d517 Template the SCRIPTING_ENV env in Hub pod (Helm) 2023-06-19 01:46:51 +03:00
M. Mert Yildiran
646da4810d Allow license key holders to bypass the auth 2023-06-19 01:44:01 +03:00
Alon Girmonsky
a6d349a8fa Update README.md
Changed the announcement part
2023-06-13 10:11:44 -07:00
Alon Girmonsky
9d58c662a8 Update README.md
Announcing Self-hosted Kubeshark
2023-06-13 10:01:28 -07:00
M. Mert Yildiran
e4a09be4e2 Change the PRO_URL constant 2023-06-07 01:09:30 +03:00
Alon Girmonsky
7208ed85d3 Update README.md
Adding a way to get the license where relevant.
2023-06-06 17:43:35 +03:00
M. Mert Yildiran
7a5bf83336 Use the Helm chart in tap command to install Kubeshark (#1362)
*  Use the Helm chart in `tap` command to install Kubeshark

* ⬆️ Set Go version to `1.19` in `go.mod` file

*  Add `Helm` struct, `NewHelm` and `NewHelmDefault` methods

*  Better logging and error return

*  Pass the config as `values.yaml` to Helm install

* 🔥 Remove `helm-chart`, `manifests` and `check` commands

*  Run `go mod tidy`

* 🎨 Move `helm` package into `kubernetes` package

* 🔥 Remove `# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!` notice from the manifests and Helm templates

* 🔥 Remove the unused `GenerateApplyConfiguration` and `buildWithDefaultLabels` methods
2023-06-06 12:16:03 +03:00
Alon Girmonskys
87b8a067c9 changed com to co 2023-05-28 20:54:57 -07:00
M. Mert Yildiran
3fe765e072 ⬆️ Bump the Helm chart version 2023-05-26 00:07:46 +03:00
M. Mert Yildiran
a163f9cc0e Change the new release warning 2023-05-25 20:42:57 +03:00
M. Mert Yildiran
2edb987c07 Template REACT_APP_HUB_PORT in the Helm chart 2023-05-25 20:24:29 +03:00
M. Mert Yildiran
c0d7d0fe80 Update Helm README.md 2023-05-25 05:46:10 +03:00
M. Mert Yildiran
be5bd6a372 Template the AUTH_APPROVED_DOMAINS and certmanager.k8s.io/cluster-issuer
Also add `networking.k8s.io` to `apiGroups` in `ClusterRole`
2023-05-25 05:07:42 +03:00
M. Mert Yildiran
42df7aa42f Update the Certificate resource name 2023-05-24 06:32:48 +03:00
M. Mert Yildiran
9a9052198f ⬆️ Bump the Helm chart version 2023-05-24 05:44:18 +03:00
M. Mert Yildiran
2fb83c3642 Fix the Bash script 2023-05-24 04:18:27 +03:00
M. Mert Yildiran
d44674fe86 Update the secret name of certificate 2023-05-24 04:10:46 +03:00
M. Mert Yildiran
c57ed1efd3 Run kubeshark manifests --dump && kubeshark helm-chart 2023-05-24 04:04:34 +03:00
M. Mert Yildiran
c19cd00c77 Add CertManager field to IngressConfig and add an Ingress TLS example 2023-05-24 04:01:45 +03:00
M. Mert Yildiran
39f8d40b76 Revert " Add Refresh-Token to the list of Access-Control-Allow-Headers"
This reverts commit bf731073c8.
2023-05-24 02:10:48 +03:00
M. Mert Yildiran
bf731073c8 Add Refresh-Token to the list of Access-Control-Allow-Headers 2023-05-24 02:04:56 +03:00
M. Mert Yildiran
4bb68afaaf Add AuthConfig struct and pass domains in AUTH_APPROVED_DOMAINS environment variable 2023-05-24 01:50:59 +03:00
M. Mert Yildiran
2126fc83a7 🐛 Remove the cancel() call 2023-05-22 19:20:39 +03:00
M. Mert Yildiran
d0c1dbcd5e Print and open a different URL in case of Ingress is enabled 2023-05-17 03:57:27 +03:00
M. Mert Yildiran
ad9dfbce40 Add Ingress (#1357)
*  Add `Ingress`

*  Rewrite the target in `Ingress`

*  Fix the path of front pod in `Ingress`

*  Add `IngressConfig` struct

*  Generate the correct Helm chart based on `tap.ingress` field of `values.yaml`
2023-05-16 19:46:47 +03:00
M. Mert Yildiran
139336d4ee Template hostPort(s) in the Helm chart 2023-05-10 14:38:38 +03:00
M. Mert Yildiran
f68fed0de8 🐛 Fix the effect of proxy config port changes 2023-05-10 01:28:43 +03:00
M. Mert Yildiran
1d7d242e6c Generate the missing new line in 08-persistent-volume-claim.yaml 2023-05-09 00:00:07 +03:00
M. Mert Yildiran
aa904e23c7 Add --persistentstorage option to tap command 2023-05-08 23:57:22 +03:00
M. Mert Yildiran
baf0e65337 Template the Helm chart based on persistentstorage value 2023-05-08 23:52:14 +03:00
M. Mert Yildiran
a33a3467fc Add persistentstorage option 2023-05-08 00:50:56 +03:00
M. Mert Yildiran
a9b598bc41 ⬆️ Bump the Helm chart version 2023-05-04 21:45:55 +03:00
M. Mert Yildiran
0aee367ad5 Omit the license string in helm-chart and manifests commands 2023-05-04 21:37:55 +03:00
M. Mert Yildiran
8c7d9ea8fd Fix the updateLicense method 2023-05-04 21:33:38 +03:00
M. Mert Yildiran
fab0f713ed 🐛 Pass the license string 2023-05-04 21:18:34 +03:00
M. Mert Yildiran
2563cc1922 🐛 Fix the imagePullPolicy to imagepullpolicy in helm-chart command 2023-04-24 02:03:58 +03:00
Jay R. Wren
26c9f42eba 📚 Remove kubeshark tap -A example from README.md (#1339) 2023-04-21 18:35:24 -07:00
M. Mert Yildiran
00dd3a93df Update the Helm chart version 2023-04-20 21:20:50 +03:00
M. Mert Yildiran
d02293ab55 Add a workflow for publishing Helm chart 2023-04-20 21:20:07 +03:00
M. Mert Yildiran
60cfa92efb Apply the same Kubernetes tolerations to all pods 2023-04-20 20:27:22 +03:00
M. Mert Yildiran
01b187aaa3 Update a log message 2023-04-20 20:22:33 +03:00
M. Mert Yildiran
38d121556c Add storageclass option to config.yaml 2023-04-20 20:20:24 +03:00
M. Mert Yildiran
2d73b46b44 Change the Docker Hub repository in the badges to kubeshark/worker 2023-04-20 20:10:52 +03:00
M. Mert Yildiran
466b9099bd Ignore the Kubernetes version check in certain commands while creating the Kubernetes provider 2023-04-20 20:09:19 +03:00
M. Mert Yildiran
bbe3338c3c Rename 08-persistent-volume.yaml to 08-persistent-volume-claim.yaml 2023-04-20 20:04:47 +03:00
M. Mert Yildiran
2780791068 Clean YAML files before generating the new ones in manifests and helm-chart command 2023-04-20 04:00:37 +03:00
M. Mert Yildiran
e65656c1df 🔥 Delete permissionFiles folder 2023-04-20 03:52:15 +03:00
M. Mert Yildiran
df7d1ac10c Give the permission of listing or watching the persistentvolumeclaims to the ClusterRole 2023-04-20 03:01:25 +03:00
M. Mert Yildiran
c342885cae Set the default storage limit to 200Mi 2023-04-20 02:48:18 +03:00
M. Mert Yildiran
44adb397c1 🔥 Remove the old DaemonSet manifests 2023-04-20 00:26:01 +03:00
M. Mert Yildiran
657ea8570c Add PersistentVolumeClaim and mount it to worker DaemonSet 2023-04-20 00:09:22 +03:00
M. Mert Yildiran
686dd5fba1 🔥 Remove the -A flag and allnamespaces field from config.yaml 2023-04-19 20:52:28 +03:00
M. Mert Yildiran
90e6e99386 Run the manifests --dump and helm-chart commands 2023-04-19 20:30:11 +03:00
M. Mert Yildiran
aa9109df12 Update helm-chart command 2023-04-19 20:29:17 +03:00
M. Mert Yildiran
9a37781355 🔥 Remove the grace period 2023-04-19 05:18:59 +03:00
M. Mert Yildiran
5ce10b626f Pass every config through environment variables and don't make HTTP calls in first tap command 2023-04-18 03:21:23 +03:00
M. Mert Yildiran
26d75da588 Set the Helm chart version to 39.6 2023-04-13 01:57:26 +03:00
M. Mert Yildiran
95edac9f8f 📚 Remove the WIP notice in the Helm chart README.md 2023-04-13 01:53:04 +03:00
M. Mert Yildiran
f6c4d43eb1 Update a README.md(s) 2023-04-12 03:12:32 +03:00
M. Mert Yildiran
47b9cd0c8d Update the README.md(s) 2023-04-12 03:10:36 +03:00
M. Mert Yildiran
fb06545887 Add a header comment to generated manifests and Helm chart templates 2023-04-12 03:10:23 +03:00
M. Mert Yildiran
ea594ea70a Update helm-chart/README.md 2023-04-12 02:51:34 +03:00
M. Mert Yildiran
3cc543827a Fix all of the remaining issues in the Helm chart 2023-04-12 02:50:12 +03:00
M. Mert Yildiran
18addbb980 Fix the issues in Helm chart such that helm template succeeds 2023-04-12 02:12:12 +03:00
M. Mert Yildiran
d2b9bddf78 Do more Helm templating 2023-04-12 01:33:41 +03:00
M. Mert Yildiran
3ebf816a68 Generate Helm chart templates 2023-04-12 01:06:39 +03:00
M. Mert Yildiran
504ecc4f83 Generate values.yaml out of config.yaml 2023-04-11 23:01:29 +03:00
dependabot[bot]
562dff0d6c ⬆️ Bump github.com/docker/docker (#1333)
Bumps [github.com/docker/docker](https://github.com/docker/docker) from 20.10.22+incompatible to 20.10.24+incompatible.
- [Release notes](https://github.com/docker/docker/releases)
- [Commits](https://github.com/docker/docker/compare/v20.10.22...v20.10.24)

---
updated-dependencies:
- dependency-name: github.com/docker/docker
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: M. Mert Yildiran <me@mertyildiran.com>
2023-04-11 23:00:11 +03:00
M. Mert Yildiran
02990912b7 Move ResourceLabels and NodeSelectorTerms fields into TapConfig 2023-04-11 22:37:29 +03:00
M. Mert Yildiran
0aedc023aa Add README.md to helm-chart directory 2023-04-11 22:31:48 +03:00
M. Mert Yildiran
0801ea8c74 Generate Chart.yaml 2023-04-11 22:17:37 +03:00
M. Mert Yildiran
83be3558ed Copy the license into Helm chart 2023-04-11 19:29:19 +03:00
M. Mert Yildiran
100b397cdf Run kubeshark helm-chart 2023-04-11 19:21:04 +03:00
M. Mert Yildiran
c2cad11e0a Add helm-chart command 2023-04-11 19:20:04 +03:00
M. Mert Yildiran
c42481deb8 Add POD_REGEX, NAMESPACES, STORAGE_LIMIT and LICENSE environment variables to Hub 2023-04-11 18:40:34 +03:00
M. Mert Yildiran
39d1b77045 Fix the issues in worker DaemonSet 2023-04-11 02:33:17 +03:00
M. Mert Yildiran
f19db77228 Fix more issues in manifests command 2023-04-11 02:18:23 +03:00
M. Mert Yildiran
077fc6c126 Set the apiVersion in the manifests 2023-04-11 02:09:03 +03:00
M. Mert Yildiran
aeeb9e6c9f Run kubeshark manifests --dump and add the manifest files 2023-04-11 02:02:29 +03:00
M. Mert Yildiran
384ed4e16b Set YAML indent to 2 2023-04-11 01:57:04 +03:00
M. Mert Yildiran
5dafc015bb Add manifests command to generate Kubernetes manifests 2023-04-11 01:54:06 +03:00
M. Mert Yildiran
d1b17d4534 Build worker DaemonSet separately then apply it by converting it to ApplyConfiguration 2023-04-10 22:24:54 +03:00
M. Mert Yildiran
b9333e4d67 🔧 Add some useful kubectl commands to Makefile 2023-04-10 01:09:34 +03:00
M. Mert Yildiran
c962864d0b 🐛 Fix the clean command causing leftover ClusterRole and ClusterRoleBinding 2023-04-10 00:48:22 +03:00
M. Mert Yildiran
07b080e97a 🔥 Remove the unused methods in the kubernetes package 2023-04-01 21:36:48 +03:00
M. Mert Yildiran
e4684a10af Add --ignoreTainted flag to tap command 2023-03-27 16:26:09 +03:00
96 changed files with 4563 additions and 3360 deletions

View File

@@ -1,16 +0,0 @@
# Files
.dockerignore
.editorconfig
.gitignore
Dockerfile
Makefile
LICENSE
**/*.md
**/*_test.go
*.out
# Folders
.git/
.github/
build/
**/node_modules/

46
.github/static/kubeshark.rb.tmpl vendored Normal file
View File

@@ -0,0 +1,46 @@
# typed: false
# frozen_string_literal: true
class Kubeshark < Formula
desc ""
homepage "https://github.com/kubeshark/kubeshark"
version "${CLEAN_VERSION}"
on_macos do
if Hardware::CPU.arm?
url "https://github.com/kubeshark/kubeshark/releases/download/${FULL_VERSION}/kubeshark_darwin_arm64"
sha256 "${DARWIN_ARM64_SHA256}"
def install
bin.install "kubeshark_darwin_arm64" => "kubeshark"
end
end
if Hardware::CPU.intel?
url "https://github.com/kubeshark/kubeshark/releases/download/${FULL_VERSION}/kubeshark_darwin_amd64"
sha256 "${DARWIN_AMD64_SHA256}"
def install
bin.install "kubeshark_darwin_amd64" => "kubeshark"
end
end
end
on_linux do
if Hardware::CPU.intel?
url "https://github.com/kubeshark/kubeshark/releases/download/${FULL_VERSION}/kubeshark_linux_amd64"
sha256 "${LINUX_AMD64_SHA256}"
def install
bin.install "kubeshark_linux_amd64" => "kubeshark"
end
end
if Hardware::CPU.arm? && Hardware::CPU.is_64_bit?
url "https://github.com/kubeshark/kubeshark/releases/download/${FULL_VERSION}/kubeshark_linux_arm64"
sha256 "${LINUX_ARM64_SHA256}"
def install
bin.install "kubeshark_linux_arm64" => "kubeshark"
end
end
end
end
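For context, the `${...}` placeholders in this formula template (`CLEAN_VERSION`, `FULL_VERSION`, the per-platform SHA256 values) are filled in at release time; the exact substitution tool is not shown in this diff, so the following is only an illustrative Go sketch assuming the values are exported as environment variables with the same names:

```go
package main

import (
	"log"
	"os"
)

// Expand ${VAR} placeholders in the Homebrew formula template using
// environment variables (e.g. FULL_VERSION, DARWIN_ARM64_SHA256, ...).
// Illustrative only; the real release pipeline may use envsubst/sed instead.
func main() {
	tmpl, err := os.ReadFile(".github/static/kubeshark.rb.tmpl")
	if err != nil {
		log.Fatal(err)
	}
	rendered := os.Expand(string(tmpl), os.Getenv)
	if err := os.WriteFile("kubeshark.rb", []byte(rendered), 0o644); err != nil {
		log.Fatal(err)
	}
}
```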

36
.github/workflows/helm.yml vendored Normal file
View File

@@ -0,0 +1,36 @@
on:
push:
# Sequence of patterns matched against refs/tags
tags:
- 'v*' # Push events to matching v*, i.e. v1.0, v20.15.10
name: Release Helm Charts
jobs:
release:
# depending on default permission settings for your org (contents being read-only or read-write for workloads), you will have to add permissions
# see: https://docs.github.com/en/actions/security-guides/automatic-token-authentication#modifying-the-permissions-for-the-github_token
permissions:
contents: write
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Configure Git
run: |
git config user.name "$GITHUB_ACTOR"
git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
- name: Install Helm
uses: azure/setup-helm@v3
- name: Run chart-releaser
uses: helm/chart-releaser-action@v1.5.0
with:
charts_dir: .
charts_repo_url: https://kubeshark.github.io/kubeshark
env:
CR_TOKEN: "${{ secrets.HELM_TOKEN }}"

View File

@@ -1,7 +1,8 @@
on:
push:
# Sequence of patterns matched against refs/tags
tags:
- '*'
- 'v*' # Push events to matching v*, i.e. v1.0, v20.15.10
name: Release
@@ -13,6 +14,8 @@ jobs:
release:
name: Build and publish a new release
runs-on: ubuntu-latest
outputs:
version: ${{ steps.version.outputs.tag }}
steps:
- name: Check out the repo
uses: actions/checkout@v3
@@ -46,44 +49,19 @@ jobs:
token: ${{ secrets.GITHUB_TOKEN }}
artifacts: "bin/*"
tag: ${{ steps.version.outputs.tag }}
prerelease: true
prerelease: false
bodyFile: 'bin/README.md'
brew-tap:
name: Create Homebrew formulae
runs-on: ubuntu-latest
brew:
name: Publish a new Homebrew formulae
needs: [release]
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Bump core homebrew formula
uses: mislav/bump-homebrew-formula-action@v3
with:
fetch-depth: 0
- name: Version
id: version
shell: bash
run: |
{
echo "tag=${GITHUB_REF#refs/*/}"
echo "build_timestamp=$(date +%s)"
echo "branch=${GITHUB_REF#refs/heads/}"
} >> "$GITHUB_OUTPUT"
- name: Fetch all tags
run: git fetch --force --tags
- name: Set up Go
uses: actions/setup-go@v4
with:
go-version-file: 'go.mod'
- name: Run GoReleaser
uses: goreleaser/goreleaser-action@v4
with:
distribution: goreleaser
version: ${{ env.GITHUB_REF_NAME }}
args: release --clean
# A PR will be sent to github.com/Homebrew/homebrew-core to update this formula:
formula-name: kubeshark
push-to: kubeshark/homebrew-core
env:
GITHUB_TOKEN: ${{ secrets.HOMEBREW_TOKEN }}
VER: ${{ steps.version.outputs.tag }}
BUILD_TIMESTAMP: ${{ steps.version.outputs.build_timestamp }}
COMMITTER_TOKEN: ${{ secrets.COMMITTER_TOKEN }}

121
Makefile
View File

@@ -9,7 +9,7 @@ COMMIT_HASH=$(shell git rev-parse HEAD)
GIT_BRANCH=$(shell git branch --show-current | tr '[:upper:]' '[:lower:]')
GIT_VERSION=$(shell git branch --show-current | tr '[:upper:]' '[:lower:]')
BUILD_TIMESTAMP=$(shell date +%s)
export VER?=0.0
export VER?=0.0.0
help: ## Print this help message.
@awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)
@@ -40,6 +40,21 @@ build-base: ## Build binary (select the platform via GOOS / GOARCH env variables
-o bin/kubeshark_$(SUFFIX) kubeshark.go && \
cd bin && shasum -a 256 kubeshark_${SUFFIX} > kubeshark_${SUFFIX}.sha256
build-brew: ## Build binary for brew/core CI
go build ${GCLFAGS} -ldflags="${LDFLAGS_EXT} \
-X 'github.com/kubeshark/kubeshark/misc.GitCommitHash=$(COMMIT_HASH)' \
-X 'github.com/kubeshark/kubeshark/misc.Branch=$(GIT_BRANCH)' \
-X 'github.com/kubeshark/kubeshark/misc.BuildTimestamp=$(BUILD_TIMESTAMP)' \
-X 'github.com/kubeshark/kubeshark/misc.Platform=$(SUFFIX)' \
-X 'github.com/kubeshark/kubeshark/misc.Ver=$(VER)'" \
-o kubeshark kubeshark.go
build-windows-amd64:
$(MAKE) build GOOS=windows GOARCH=amd64 && \
mv ./bin/kubeshark_windows_amd64 ./bin/kubeshark.exe && \
rm bin/kubeshark_windows_amd64.sha256 && \
cd bin && shasum -a 256 kubeshark.exe > kubeshark.exe.sha256
build-all: ## Build for all supported platforms.
export CGO_ENABLED=0
echo "Compiling for every OS and Platform" && \
@@ -48,8 +63,7 @@ build-all: ## Build for all supported platforms.
$(MAKE) build GOOS=linux GOARCH=arm64 && \
$(MAKE) build GOOS=darwin GOARCH=amd64 && \
$(MAKE) build GOOS=darwin GOARCH=arm64 && \
$(MAKE) build GOOS=windows GOARCH=amd64 && \
mv ./bin/kubeshark_windows_amd64 ./bin/kubeshark.exe && \
$(MAKE) build-windows-amd64 && \
echo "---------" && \
find ./bin -ls
@@ -62,3 +76,104 @@ test: ## Run cli tests.
lint: ## Lint the source code.
golangci-lint run
kubectl-view-all-resources: ## This command outputs all Kubernetes resources using YAML format and pipes it to VS Code
./kubectl.sh view-all-resources
kubectl-view-kubeshark-resources: ## This command outputs all Kubernetes resources in "kubeshark" namespace using YAML format and pipes it to VS Code
./kubectl.sh view-kubeshark-resources
generate-helm-values: ## Generate the Helm values from config.yaml
./bin/kubeshark__ config > ./helm-chart/values.yaml && sed -i 's/^license:.*/license: ""/' helm-chart/values.yaml
generate-manifests: ## Generate the manifests from the Helm chart using default configuration
helm template kubeshark -n default ./helm-chart > ./manifests/complete.yaml
logs-worker:
export LOGS_POD_PREFIX=kubeshark-worker-
export LOGS_FOLLOW=
${MAKE} logs
logs-worker-follow:
export LOGS_POD_PREFIX=kubeshark-worker-
export LOGS_FOLLOW=--follow
${MAKE} logs
logs-hub:
export LOGS_POD_PREFIX=kubeshark-hub
export LOGS_FOLLOW=
${MAKE} logs
logs-hub-follow:
export LOGS_POD_PREFIX=kubeshark-hub
export LOGS_FOLLOW=--follow
${MAKE} logs
logs-front:
export LOGS_POD_PREFIX=kubeshark-front
export LOGS_FOLLOW=
${MAKE} logs
logs-front-follow:
export LOGS_POD_PREFIX=kubeshark-front
export LOGS_FOLLOW=--follow
${MAKE} logs
logs:
kubectl logs $$(kubectl get pods | awk '$$1 ~ /^$(LOGS_POD_PREFIX)/' | awk 'END {print $$1}') $(LOGS_FOLLOW)
ssh-node:
kubectl ssh node $$(kubectl get nodes | awk 'END {print $$1}')
exec-worker:
export EXEC_POD_PREFIX=kubeshark-worker-
${MAKE} exec
exec-hub:
export EXEC_POD_PREFIX=kubeshark-hub
${MAKE} exec
exec-front:
export EXEC_POD_PREFIX=kubeshark-front
${MAKE} exec
exec:
kubectl exec --stdin --tty $$(kubectl get pods | awk '$$1 ~ /^$(EXEC_POD_PREFIX)/' | awk 'END {print $$1}') -- /bin/sh
helm-install:
cd helm-chart && helm install kubeshark . && cd ..
helm-install-canary:
cd helm-chart && helm install kubeshark . --set tap.docker.tag=canary && cd ..
helm-install-dev:
cd helm-chart && helm install kubeshark . --set tap.docker.tag=dev && cd ..
helm-install-debug:
cd helm-chart && helm install kubeshark . --set tap.debug=true && cd ..
helm-install-debug-canary:
cd helm-chart && helm install kubeshark . --set tap.debug=true --set tap.docker.tag=canary && cd ..
helm-install-debug-dev:
cd helm-chart && helm install kubeshark . --set tap.debug=true --set tap.docker.tag=dev && cd ..
helm-uninstall:
helm uninstall kubeshark
proxy:
kubeshark proxy
port-forward-worker:
kubectl port-forward $$(kubectl get pods | awk '$$1 ~ /^$(LOGS_POD_PREFIX)/' | awk 'END {print $$1}') $(LOGS_FOLLOW) 30001:30001
release:
@cd ../worker && git checkout master && git pull && git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags
@cd ../hub && git checkout master && git pull && git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags
@cd ../front && git checkout master && git pull && git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags
@cd ../kubeshark && sed -i 's/^version:.*/version: "$(VERSION)"/' helm-chart/Chart.yaml && make && make generate-helm-values && make generate-manifests
@git add -A . && git commit -m ":bookmark: Bump the Helm chart version to $(VERSION)" && git push
@git tag v$(VERSION) && git push origin --tags
@cd helm-chart && cp -r . ../../kubeshark.github.io/charts/chart
@cd ../../kubeshark.github.io/ && git add -A . && git commit -m ":sparkles: Update the Helm chart" && git push
@cd ../kubeshark
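The new `build-windows-amd64` target regenerates the checksum only after renaming the binary to `kubeshark.exe`, so the file name recorded inside the `.sha256` matches the shipped asset. As a reference point, the `<digest>  <filename>` line that `shasum -a 256` writes can be reproduced in Go like this (illustrative snippet, not part of the repository; the path is just an example):

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"io"
	"log"
	"os"
	"path/filepath"
)

// Print "<sha256>  <basename>" for a file, matching `shasum -a 256` output,
// so `sha256sum -c` can verify the renamed release asset.
func main() {
	path := "bin/kubeshark.exe" // hypothetical local path to the built binary
	f, err := os.Open(path)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	h := sha256.New()
	if _, err := io.Copy(h, f); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%x  %s\n", h.Sum(nil), filepath.Base(path))
}
```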

View File

@@ -3,16 +3,13 @@
</p>
<p align="center">
<a href="https://github.com/kubeshark/kubeshark/blob/main/LICENSE">
<img alt="GitHub License" src="https://img.shields.io/github/license/kubeshark/kubeshark?logo=GitHub&style=flat-square">
</a>
<a href="https://github.com/kubeshark/kubeshark/releases/latest">
<img alt="GitHub Latest Release" src="https://img.shields.io/github/v/release/kubeshark/kubeshark?logo=GitHub&style=flat-square">
</a>
<a href="https://hub.docker.com/r/kubeshark/kubeshark">
<img alt="Docker pulls" src="https://img.shields.io/docker/pulls/kubeshark/kubeshark?color=%23099cec&logo=Docker&style=flat-square">
<a href="https://hub.docker.com/r/kubeshark/worker">
<img alt="Docker pulls" src="https://img.shields.io/docker/pulls/kubeshark/worker?color=%23099cec&logo=Docker&style=flat-square">
</a>
<a href="https://hub.docker.com/r/kubeshark/kubeshark">
<a href="https://hub.docker.com/r/kubeshark/worker">
<img alt="Image size" src="https://img.shields.io/docker/image-size/kubeshark/kubeshark/latest?logo=Docker&style=flat-square">
</a>
<a href="https://discord.gg/WkvRGMUcx7">
@@ -25,9 +22,10 @@
<p align="center">
<b>
<span>NEW: </span><a href="https://github.com/kubeshark/kubeshark/releases/tag/39.4">Version 39.4</a> is out, introducing
<a href="https://docs.kubeshark.co/en/automation_scripting">Scripting</a>,
<a href="https://docs.kubeshark.co/en/automation_hooks">L4/L7 hooks</a>, and so much more...
NEW:
<a href="https://github.com/kubeshark/kubeshark/releases/latest">Version 52.1.63</a>
now available, featuring enhanced
<a href="https://docs.kubeshark.co/en/half_connections">Network Error Detection & Analysis</a>.
</b>
</p>
@@ -45,10 +43,6 @@ Download **Kubeshark**'s binary distribution [latest release](https://github.com
kubeshark tap
```
```shell
kubeshark tap -A
```
```shell
kubeshark tap -n sock-shop "(catalo*|front-end*)"
```

View File

@@ -1,5 +1,5 @@
# Kubeshark release _VER_
Kubeshark CHANGELOG is now part of [Kubeshark wiki](https://github.com/kubeshark/kubeshark/wiki/CHANGELOG)
Release notes coming soon...
## Download Kubeshark for your platform

View File

@@ -1,21 +0,0 @@
package cmd
import (
"fmt"
"github.com/kubeshark/kubeshark/misc"
"github.com/spf13/cobra"
)
var checkCmd = &cobra.Command{
Use: "check",
Short: fmt.Sprintf("Check the %s resources for potential problems", misc.Software),
RunE: func(cmd *cobra.Command, args []string) error {
runCheck()
return nil
},
}
func init() {
rootCmd.AddCommand(checkCmd)
}

View File

@@ -1,28 +0,0 @@
package check
import (
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/kubernetes"
"github.com/kubeshark/kubeshark/semver"
"github.com/rs/zerolog/log"
)
func KubernetesApi() (*kubernetes.Provider, *semver.SemVersion, bool) {
log.Info().Str("procedure", "kubernetes-api").Msg("Checking:")
kubernetesProvider, err := kubernetes.NewProvider(config.Config.KubeConfigPath(), config.Config.Kube.Context)
if err != nil {
log.Error().Err(err).Msg("Can't initialize the client!")
return nil, nil, false
}
log.Info().Msg("Initialization of the client is passed.")
kubernetesVersion, err := kubernetesProvider.GetKubernetesVersion()
if err != nil {
log.Error().Err(err).Msg("Can't query the Kubernetes API!")
return nil, nil, false
}
log.Info().Msg("Querying the Kubernetes API is passed.")
return kubernetesProvider, kubernetesVersion, true
}

View File

@@ -1,91 +0,0 @@
package check
import (
"context"
"embed"
"fmt"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/kubernetes"
"github.com/rs/zerolog/log"
rbac "k8s.io/api/rbac/v1"
"k8s.io/client-go/kubernetes/scheme"
)
func KubernetesPermissions(ctx context.Context, embedFS embed.FS, kubernetesProvider *kubernetes.Provider) bool {
log.Info().Str("procedure", "kubernetes-permissions").Msg("Checking:")
var filePath string
if config.Config.IsNsRestrictedMode() {
filePath = "permissionFiles/permissions-ns-tap.yaml"
} else {
filePath = "permissionFiles/permissions-all-namespaces-tap.yaml"
}
data, err := embedFS.ReadFile(filePath)
if err != nil {
log.Error().Err(err).Msg("While checking Kubernetes permissions!")
return false
}
decode := scheme.Codecs.UniversalDeserializer().Decode
obj, _, err := decode(data, nil, nil)
if err != nil {
log.Error().Err(err).Msg("While checking Kubernetes permissions!")
return false
}
switch resource := obj.(type) {
case *rbac.Role:
return checkRulesPermissions(ctx, kubernetesProvider, resource.Rules, config.Config.Tap.SelfNamespace)
case *rbac.ClusterRole:
return checkRulesPermissions(ctx, kubernetesProvider, resource.Rules, "")
}
log.Error().Msg("While checking Kubernetes permissions! Resource of types 'Role' or 'ClusterRole' are not found in permission files.")
return false
}
func checkRulesPermissions(ctx context.Context, kubernetesProvider *kubernetes.Provider, rules []rbac.PolicyRule, namespace string) bool {
permissionsExist := true
for _, rule := range rules {
for _, group := range rule.APIGroups {
for _, resource := range rule.Resources {
for _, verb := range rule.Verbs {
exist, err := kubernetesProvider.CanI(ctx, namespace, resource, verb, group)
permissionsExist = checkPermissionExist(group, resource, verb, namespace, exist, err) && permissionsExist
}
}
}
}
return permissionsExist
}
func checkPermissionExist(group string, resource string, verb string, namespace string, exist bool, err error) bool {
var groupAndNamespace string
if group != "" && namespace != "" {
groupAndNamespace = fmt.Sprintf("in api group '%v' and namespace '%v'", group, namespace)
} else if group != "" {
groupAndNamespace = fmt.Sprintf("in api group '%v'", group)
} else if namespace != "" {
groupAndNamespace = fmt.Sprintf("in namespace '%v'", namespace)
}
if err != nil {
log.Error().
Str("verb", verb).
Str("resource", resource).
Str("group-and-namespace", groupAndNamespace).
Err(err).
Msg("While checking Kubernetes permissions!")
return false
} else if !exist {
log.Error().Msg(fmt.Sprintf("Can't %v %v %v", verb, resource, groupAndNamespace))
return false
}
log.Info().Msg(fmt.Sprintf("Can %v %v %v", verb, resource, groupAndNamespace))
return true
}
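The permission check being deleted here walked every RBAC rule and called `kubernetesProvider.CanI` per verb/resource/group. For reference, such a check typically maps to a `SelfSubjectAccessReview` in client-go; this is a minimal sketch under the assumption that the provider wraps a standard clientset (the actual implementation in the kubernetes package may differ):

```go
package kubernetesx // hypothetical package name, for illustration only

import (
	"context"

	authorizationv1 "k8s.io/api/authorization/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// CanI asks the API server whether the current identity may perform
// verb/resource/group in the given namespace, via SelfSubjectAccessReview.
func CanI(ctx context.Context, clientset kubernetes.Interface, namespace, resource, verb, group string) (bool, error) {
	review := &authorizationv1.SelfSubjectAccessReview{
		Spec: authorizationv1.SelfSubjectAccessReviewSpec{
			ResourceAttributes: &authorizationv1.ResourceAttributes{
				Namespace: namespace,
				Verb:      verb,
				Group:     group,
				Resource:  resource,
			},
		},
	}
	resp, err := clientset.AuthorizationV1().SelfSubjectAccessReviews().Create(ctx, review, metav1.CreateOptions{})
	if err != nil {
		return false, err
	}
	return resp.Status.Allowed, nil
}
```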

View File

@@ -1,118 +0,0 @@
package check
import (
"context"
"fmt"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/kubernetes"
"github.com/rs/zerolog/log"
)
func KubernetesResources(ctx context.Context, kubernetesProvider *kubernetes.Provider) bool {
log.Info().Str("procedure", "k8s-components").Msg("Checking:")
exist, err := kubernetesProvider.DoesNamespaceExist(ctx, config.Config.Tap.SelfNamespace)
allResourcesExist := checkResourceExist(config.Config.Tap.SelfNamespace, "namespace", exist, err)
exist, err = kubernetesProvider.DoesServiceAccountExist(ctx, config.Config.Tap.SelfNamespace, kubernetes.ServiceAccountName)
allResourcesExist = checkResourceExist(kubernetes.ServiceAccountName, "service account", exist, err) && allResourcesExist
if config.Config.IsNsRestrictedMode() {
exist, err = kubernetesProvider.DoesRoleExist(ctx, config.Config.Tap.SelfNamespace, kubernetes.RoleName)
allResourcesExist = checkResourceExist(kubernetes.RoleName, "role", exist, err) && allResourcesExist
exist, err = kubernetesProvider.DoesRoleBindingExist(ctx, config.Config.Tap.SelfNamespace, kubernetes.RoleBindingName)
allResourcesExist = checkResourceExist(kubernetes.RoleBindingName, "role binding", exist, err) && allResourcesExist
} else {
exist, err = kubernetesProvider.DoesClusterRoleExist(ctx, kubernetes.ClusterRoleName)
allResourcesExist = checkResourceExist(kubernetes.ClusterRoleName, "cluster role", exist, err) && allResourcesExist
exist, err = kubernetesProvider.DoesClusterRoleBindingExist(ctx, kubernetes.ClusterRoleBindingName)
allResourcesExist = checkResourceExist(kubernetes.ClusterRoleBindingName, "cluster role binding", exist, err) && allResourcesExist
}
exist, err = kubernetesProvider.DoesServiceExist(ctx, config.Config.Tap.SelfNamespace, kubernetes.HubServiceName)
allResourcesExist = checkResourceExist(kubernetes.HubServiceName, "service", exist, err) && allResourcesExist
allResourcesExist = checkPodResourcesExist(ctx, kubernetesProvider) && allResourcesExist
return allResourcesExist
}
func checkPodResourcesExist(ctx context.Context, kubernetesProvider *kubernetes.Provider) bool {
if pods, err := kubernetesProvider.ListPodsByAppLabel(ctx, config.Config.Tap.SelfNamespace, kubernetes.HubPodName); err != nil {
log.Error().
Str("name", kubernetes.HubPodName).
Err(err).
Msg("While checking if pod is running!")
return false
} else if len(pods) == 0 {
log.Error().
Str("name", kubernetes.HubPodName).
Msg("Pod doesn't exist!")
return false
} else if !kubernetes.IsPodRunning(&pods[0]) {
log.Error().
Str("name", kubernetes.HubPodName).
Msg("Pod is not running!")
return false
}
log.Info().
Str("name", kubernetes.HubPodName).
Msg("Pod is running.")
if pods, err := kubernetesProvider.ListPodsByAppLabel(ctx, config.Config.Tap.SelfNamespace, kubernetes.WorkerPodName); err != nil {
log.Error().
Str("name", kubernetes.WorkerPodName).
Err(err).
Msg("While checking if pods are running!")
return false
} else {
workers := 0
notRunningWorkers := 0
for _, pod := range pods {
workers += 1
if !kubernetes.IsPodRunning(&pod) {
notRunningWorkers += 1
}
}
if notRunningWorkers > 0 {
log.Error().
Str("name", kubernetes.WorkerPodName).
Msg(fmt.Sprintf("%d/%d pods are not running!", notRunningWorkers, workers))
return false
}
log.Info().
Str("name", kubernetes.WorkerPodName).
Msg(fmt.Sprintf("All %d pods are running.", workers))
return true
}
}
func checkResourceExist(resourceName string, resourceType string, exist bool, err error) bool {
if err != nil {
log.Error().
Str("name", resourceName).
Str("type", resourceType).
Err(err).
Msg("Checking if resource exists!")
return false
} else if !exist {
log.Error().
Str("name", resourceName).
Str("type", resourceType).
Msg("Resource doesn't exist!")
return false
}
log.Info().
Str("name", resourceName).
Str("type", resourceType).
Msg("Resource exist.")
return true
}
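The removed `checkPodResourcesExist` relied on `ListPodsByAppLabel` and `IsPodRunning`. Roughly, those helpers boil down to a label-selector list plus a phase check with plain client-go; the sketch below uses hypothetical names and simplified logic:

```go
package kubernetesx // hypothetical package name, for illustration only

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// ListRunningPodsByAppLabel lists pods labeled app=<appName> in a namespace
// and reports how many of them are in the Running phase.
func ListRunningPodsByAppLabel(ctx context.Context, clientset kubernetes.Interface, namespace, appName string) (running, total int, err error) {
	pods, err := clientset.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{
		LabelSelector: fmt.Sprintf("app=%s", appName),
	})
	if err != nil {
		return 0, 0, err
	}
	for _, pod := range pods.Items {
		total++
		if pod.Status.Phase == corev1.PodRunning {
			running++
		}
	}
	return running, total, nil
}
```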

View File

@@ -1,22 +0,0 @@
package check
import (
"fmt"
"github.com/kubeshark/kubeshark/kubernetes"
"github.com/kubeshark/kubeshark/semver"
"github.com/kubeshark/kubeshark/utils"
"github.com/rs/zerolog/log"
)
func KubernetesVersion(kubernetesVersion *semver.SemVersion) bool {
log.Info().Str("procedure", "kubernetes-version").Msg("Checking:")
if err := kubernetes.ValidateKubernetesVersion(kubernetesVersion); err != nil {
log.Error().Str("k8s-version", string(*kubernetesVersion)).Err(err).Msg(fmt.Sprintf(utils.Red, "The cluster does not have the minimum required Kubernetes API version!"))
return false
}
log.Info().Str("k8s-version", string(*kubernetesVersion)).Msg("Minimum required Kubernetes API version is passed.")
return true
}

View File

@@ -1,40 +0,0 @@
package check
import (
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/internal/connect"
"github.com/kubeshark/kubeshark/kubernetes"
"github.com/rs/zerolog/log"
)
func ServerConnection(kubernetesProvider *kubernetes.Provider) bool {
log.Info().Str("procedure", "server-connectivity").Msg("Checking:")
var connectedToHub, connectedToFront bool
if err := checkProxy(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort), "/echo", kubernetesProvider); err != nil {
log.Error().Err(err).Msg("Couldn't connect to Hub using proxy!")
} else {
connectedToHub = true
log.Info().Msg("Connected successfully to Hub using proxy.")
}
if err := checkProxy(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Front.SrcPort), "", kubernetesProvider); err != nil {
log.Error().Err(err).Msg("Couldn't connect to Front using proxy!")
} else {
connectedToFront = true
log.Info().Msg("Connected successfully to Front using proxy.")
}
return connectedToHub && connectedToFront
}
func checkProxy(serverUrl string, path string, kubernetesProvider *kubernetes.Provider) error {
log.Info().Str("url", serverUrl).Msg("Connecting:")
connector := connect.NewConnector(serverUrl, connect.DefaultRetries, connect.DefaultTimeout)
if err := connector.TestConnection(path); err != nil {
return err
}
return nil
}
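The removed `checkProxy` delegated to `connector.TestConnection`, which is essentially a retried HTTP GET against the proxied URL. A self-contained sketch of that idea follows; the port is only an example and the real connector's retry/timeout behavior may differ:

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

// testConnection GETs serverURL+path up to `retries` times, treating any
// 2xx response as success, as a proxy health check typically does.
func testConnection(serverURL, path string, retries int, timeout time.Duration) error {
	client := &http.Client{Timeout: timeout}
	var lastErr error
	for i := 0; i < retries; i++ {
		resp, err := client.Get(serverURL + path)
		if err == nil {
			resp.Body.Close()
			if resp.StatusCode >= 200 && resp.StatusCode < 300 {
				return nil
			}
			lastErr = fmt.Errorf("unexpected status: %s", resp.Status)
		} else {
			lastErr = err
		}
		time.Sleep(time.Second)
	}
	return lastErr
}

func main() {
	if err := testConnection("http://localhost:8899", "/echo", 3, 5*time.Second); err != nil {
		fmt.Println("not reachable:", err)
		return
	}
	fmt.Println("reachable")
}
```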

View File

@@ -1,53 +0,0 @@
package cmd
import (
"context"
"embed"
"fmt"
"os"
"github.com/kubeshark/kubeshark/cmd/check"
"github.com/kubeshark/kubeshark/misc"
"github.com/kubeshark/kubeshark/utils"
"github.com/rs/zerolog/log"
)
var (
//go:embed permissionFiles
embedFS embed.FS
)
func runCheck() {
log.Info().Msg(fmt.Sprintf("Checking the %s resources...", misc.Software))
ctx, cancel := context.WithCancel(context.Background())
defer cancel() // cancel will be called when this function exits
kubernetesProvider, kubernetesVersion, checkPassed := check.KubernetesApi()
if checkPassed {
checkPassed = check.KubernetesVersion(kubernetesVersion)
}
if checkPassed {
checkPassed = check.KubernetesPermissions(ctx, embedFS, kubernetesProvider)
}
if checkPassed {
checkPassed = check.KubernetesResources(ctx, kubernetesProvider)
}
if checkPassed {
checkPassed = check.ServerConnection(kubernetesProvider)
}
if checkPassed {
log.Info().Msg(fmt.Sprintf(utils.Green, "All checks are passed."))
} else {
log.Error().
Str("command1", fmt.Sprintf("%s %s", misc.Program, cleanCmd.Use)).
Str("command2", fmt.Sprintf("%s %s", misc.Program, tapCmd.Use)).
Msg(fmt.Sprintf(utils.Red, fmt.Sprintf("There are issues in your %s resources! Run these commands:", misc.Software)))
os.Exit(1)
}
}

View File

@@ -4,7 +4,9 @@ import (
"fmt"
"github.com/creasty/defaults"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/config/configStructs"
"github.com/kubeshark/kubeshark/kubernetes/helm"
"github.com/kubeshark/kubeshark/misc"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
@@ -14,7 +16,16 @@ var cleanCmd = &cobra.Command{
Use: "clean",
Short: fmt.Sprintf("Removes all %s resources", misc.Software),
RunE: func(cmd *cobra.Command, args []string) error {
performCleanCommand()
resp, err := helm.NewHelm(
config.Config.Tap.Release.Repo,
config.Config.Tap.Release.Name,
config.Config.Tap.Release.Namespace,
).Uninstall()
if err != nil {
log.Error().Err(err).Send()
} else {
log.Info().Msgf("Uninstalled the Helm release: %s", resp.Release.Name)
}
return nil
},
}
@@ -27,5 +38,5 @@ func init() {
log.Debug().Err(err).Send()
}
cleanCmd.Flags().StringP(configStructs.SelfNamespaceLabel, "s", defaultTapConfig.SelfNamespace, "Self-namespace of Kubeshark")
cleanCmd.Flags().StringP(configStructs.ReleaseNamespaceLabel, "s", defaultTapConfig.Release.Namespace, "Release namespace of Kubeshark")
}
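With this change, `kubeshark clean` simply uninstalls the Helm release through `helm.NewHelm(...).Uninstall()`. Using the Helm v3 Go SDK, an uninstall along those lines typically looks like the sketch below, assuming the default Secret-backed release storage; the repository's helm wrapper may differ in details:

```go
package main

import (
	"log"

	"helm.sh/helm/v3/pkg/action"
	"helm.sh/helm/v3/pkg/cli"
)

func main() {
	namespace := "default"     // example release namespace
	releaseName := "kubeshark" // example release name

	settings := cli.New()
	actionConfig := new(action.Configuration)
	// "secret" selects the default Secret-backed release storage driver.
	if err := actionConfig.Init(settings.RESTClientGetter(), namespace, "secret", log.Printf); err != nil {
		log.Fatal(err)
	}

	uninstall := action.NewUninstall(actionConfig)
	resp, err := uninstall.Run(releaseName)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("uninstalled the Helm release: %s", resp.Release.Name)
}
```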

View File

@@ -1,14 +0,0 @@
package cmd
import (
"github.com/kubeshark/kubeshark/config"
)
func performCleanCommand() {
kubernetesProvider, err := getKubernetesProviderForCli()
if err != nil {
return
}
finishSelfExecution(kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.Tap.SelfNamespace, false)
}

View File

@@ -14,12 +14,11 @@ import (
"github.com/kubeshark/kubeshark/kubernetes"
"github.com/kubeshark/kubeshark/misc"
"github.com/kubeshark/kubeshark/misc/fsUtils"
"github.com/kubeshark/kubeshark/resources"
"github.com/rs/zerolog/log"
)
func startProxyReportErrorIfAny(kubernetesProvider *kubernetes.Provider, ctx context.Context, serviceName string, podName string, proxyPortLabel string, srcPort uint16, dstPort uint16, healthCheck string) {
httpServer, err := kubernetes.StartProxy(kubernetesProvider, config.Config.Tap.Proxy.Host, srcPort, config.Config.Tap.SelfNamespace, serviceName)
httpServer, err := kubernetes.StartProxy(kubernetesProvider, config.Config.Tap.Proxy.Host, srcPort, config.Config.Tap.Release.Namespace, serviceName)
if err != nil {
log.Error().
Err(errormessage.FormatError(err)).
@@ -27,7 +26,7 @@ func startProxyReportErrorIfAny(kubernetesProvider *kubernetes.Provider, ctx con
return
}
connector := connect.NewConnector(kubernetes.GetLocalhostOnPort(srcPort), connect.DefaultRetries, connect.DefaultTimeout)
connector := connect.NewConnector(kubernetes.GetProxyOnPort(srcPort), connect.DefaultRetries, connect.DefaultTimeout)
if err := connector.TestConnection(healthCheck); err != nil {
log.Warn().
Str("service", serviceName).
@@ -39,7 +38,7 @@ func startProxyReportErrorIfAny(kubernetesProvider *kubernetes.Provider, ctx con
}
podRegex, _ := regexp.Compile(podName)
if _, err := kubernetes.NewPortForward(kubernetesProvider, config.Config.Tap.SelfNamespace, podRegex, srcPort, dstPort, ctx); err != nil {
if _, err := kubernetes.NewPortForward(kubernetesProvider, config.Config.Tap.Release.Namespace, podRegex, srcPort, dstPort, ctx); err != nil {
log.Error().
Str("pod-regex", podRegex.String()).
Err(errormessage.FormatError(err)).
@@ -47,7 +46,7 @@ func startProxyReportErrorIfAny(kubernetesProvider *kubernetes.Provider, ctx con
return
}
connector = connect.NewConnector(kubernetes.GetLocalhostOnPort(srcPort), connect.DefaultRetries, connect.DefaultTimeout)
connector = connect.NewConnector(kubernetes.GetProxyOnPort(srcPort), connect.DefaultRetries, connect.DefaultTimeout)
if err := connector.TestConnection(healthCheck); err != nil {
log.Error().
Str("service", serviceName).
@@ -58,7 +57,7 @@ func startProxyReportErrorIfAny(kubernetesProvider *kubernetes.Provider, ctx con
}
}
func getKubernetesProviderForCli() (*kubernetes.Provider, error) {
func getKubernetesProviderForCli(silent bool, dontCheckVersion bool) (*kubernetes.Provider, error) {
kubeConfigPath := config.Config.KubeConfigPath()
kubernetesProvider, err := kubernetes.NewProvider(kubeConfigPath, config.Config.Kube.Context)
if err != nil {
@@ -66,22 +65,26 @@ func getKubernetesProviderForCli() (*kubernetes.Provider, error) {
return nil, err
}
log.Info().Str("path", kubeConfigPath).Msg("Using kubeconfig:")
if !silent {
log.Info().Str("path", kubeConfigPath).Msg("Using kubeconfig:")
}
if err := kubernetesProvider.ValidateNotProxy(); err != nil {
handleKubernetesProviderError(err)
return nil, err
}
kubernetesVersion, err := kubernetesProvider.GetKubernetesVersion()
if err != nil {
handleKubernetesProviderError(err)
return nil, err
}
if !dontCheckVersion {
kubernetesVersion, err := kubernetesProvider.GetKubernetesVersion()
if err != nil {
handleKubernetesProviderError(err)
return nil, err
}
if err := kubernetes.ValidateKubernetesVersion(kubernetesVersion); err != nil {
handleKubernetesProviderError(err)
return nil, err
if err := kubernetes.ValidateKubernetesVersion(kubernetesVersion); err != nil {
handleKubernetesProviderError(err)
return nil, err
}
}
return kubernetesProvider, nil
@@ -96,13 +99,10 @@ func handleKubernetesProviderError(err error) {
}
}
func finishSelfExecution(kubernetesProvider *kubernetes.Provider, isNsRestrictedMode bool, selfNamespace string, withoutCleanup bool) {
func finishSelfExecution(kubernetesProvider *kubernetes.Provider) {
removalCtx, cancel := context.WithTimeout(context.Background(), cleanupTimeout)
defer cancel()
dumpLogsIfNeeded(removalCtx, kubernetesProvider)
if !withoutCleanup {
resources.CleanUpSelfResources(removalCtx, cancel, kubernetesProvider, isNsRestrictedMode, selfNamespace)
}
}
func dumpLogsIfNeeded(ctx context.Context, kubernetesProvider *kubernetes.Provider) {

View File

@@ -17,21 +17,20 @@ var configCmd = &cobra.Command{
Use: "config",
Short: fmt.Sprintf("Generate %s config with default values", misc.Software),
RunE: func(cmd *cobra.Command, args []string) error {
configWithDefaults, err := config.GetConfigWithDefaults()
if err != nil {
log.Error().Err(err).Msg("Failed generating config with defaults.")
return nil
}
if config.Config.Config.Regenerate {
if err := config.WriteConfig(configWithDefaults); err != nil {
defaultConfig := config.CreateDefaultConfig()
if err := defaults.Set(&defaultConfig); err != nil {
log.Error().Err(err).Send()
return nil
}
if err := config.WriteConfig(&defaultConfig); err != nil {
log.Error().Err(err).Msg("Failed generating config with defaults.")
return nil
}
log.Info().Str("config-path", config.ConfigFilePath).Msg("Template file written to config path.")
} else {
template, err := utils.PrettyYaml(configWithDefaults)
template, err := utils.PrettyYaml(config.Config)
if err != nil {
log.Error().Err(err).Msg("Failed converting config with defaults to YAML.")
return nil
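The reworked `config` command builds the default configuration with `config.CreateDefaultConfig()` plus `defaults.Set` (github.com/creasty/defaults, already imported above) instead of the old `GetConfigWithDefaults` helper. A minimal illustration of how that library populates a struct from its `default:` tags, with hypothetical field names:

```go
package main

import (
	"fmt"
	"log"

	"github.com/creasty/defaults"
)

// Hypothetical config struct; the real TapConfig has many more fields.
type TapConfig struct {
	StorageLimit string `default:"200Mi"`
	Debug        bool   `default:"false"`
}

func main() {
	var cfg TapConfig
	// defaults.Set fills zero-valued fields from their `default` struct tags.
	if err := defaults.Set(&cfg); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", cfg) // {StorageLimit:200Mi Debug:false}
}
```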

View File

@@ -36,12 +36,13 @@ func init() {
log.Debug().Err(err).Send()
}
consoleCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.SrcPort, "Provide a custom port for the Hub")
consoleCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the Hub")
consoleCmd.Flags().Uint16(configStructs.ProxyFrontPortLabel, defaultTapConfig.Proxy.Front.Port, "Provide a custom port for the Kubeshark")
consoleCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the Kubeshark")
consoleCmd.Flags().StringP(configStructs.ReleaseNamespaceLabel, "s", defaultTapConfig.Release.Namespace, "Release namespace of Kubeshark")
}
func runConsole() {
hubUrl := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort)
hubUrl := kubernetes.GetHubUrl()
response, err := http.Get(fmt.Sprintf("%s/echo", hubUrl))
if err != nil || response.StatusCode != 200 {
log.Info().Msg(fmt.Sprintf(utils.Yellow, "Couldn't connect to Hub. Establishing proxy..."))
@@ -51,14 +52,16 @@ func runConsole() {
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
log.Info().Str("host", config.Config.Tap.Proxy.Host).Uint16("port", config.Config.Tap.Proxy.Hub.SrcPort).Msg("Connecting to:")
log.Info().Str("host", config.Config.Tap.Proxy.Host).Str("url", hubUrl).Msg("Connecting to:")
u := url.URL{
Scheme: "ws",
Host: fmt.Sprintf("%s:%d", config.Config.Tap.Proxy.Host, config.Config.Tap.Proxy.Hub.SrcPort),
Host: fmt.Sprintf("%s:%d/api", config.Config.Tap.Proxy.Host, config.Config.Tap.Proxy.Front.Port),
Path: "/scripts/logs",
}
headers := http.Header{}
headers.Set("License-Key", config.Config.License)
c, _, err := websocket.DefaultDialer.Dial(u.String(), nil)
c, _, err := websocket.DefaultDialer.Dial(u.String(), headers)
if err != nil {
log.Error().Err(err).Send()
return
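The console command now dials the scripts-log WebSocket through the front proxy and passes the license in a `License-Key` header, as the diff above shows. A stripped-down sketch of that dial-and-read loop with gorilla/websocket; the host, path, and license value here are placeholders:

```go
package main

import (
	"log"
	"net/http"
	"net/url"

	"github.com/gorilla/websocket"
)

func main() {
	u := url.URL{
		Scheme: "ws",
		Host:   "localhost:8899",    // example proxy host:port
		Path:   "/api/scripts/logs", // example path through the front proxy
	}

	headers := http.Header{}
	headers.Set("License-Key", "<license>") // placeholder value

	c, _, err := websocket.DefaultDialer.Dial(u.String(), headers)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// Print every message pushed by the Hub until the connection closes.
	for {
		_, msg, err := c.ReadMessage()
		if err != nil {
			log.Println("read:", err)
			return
		}
		log.Printf("%s", msg)
	}
}
```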

62
cmd/export.go Normal file
View File

@@ -0,0 +1,62 @@
package cmd
import (
"fmt"
"net/http"
"os"
"path/filepath"
"time"
"github.com/creasty/defaults"
"github.com/kubeshark/kubeshark/config/configStructs"
"github.com/kubeshark/kubeshark/internal/connect"
"github.com/kubeshark/kubeshark/kubernetes"
"github.com/kubeshark/kubeshark/utils"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
)
var exportCmd = &cobra.Command{
Use: "export",
Short: "Exports the captured traffic into a TAR file that contains PCAP files",
RunE: func(cmd *cobra.Command, args []string) error {
runExport()
return nil
},
}
func init() {
rootCmd.AddCommand(exportCmd)
defaultTapConfig := configStructs.TapConfig{}
if err := defaults.Set(&defaultTapConfig); err != nil {
log.Debug().Err(err).Send()
}
exportCmd.Flags().Uint16(configStructs.ProxyFrontPortLabel, defaultTapConfig.Proxy.Front.Port, "Provide a custom port for the Kubeshark")
exportCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the Kubeshark")
exportCmd.Flags().StringP(configStructs.ReleaseNamespaceLabel, "s", defaultTapConfig.Release.Namespace, "Release namespace of Kubeshark")
}
func runExport() {
hubUrl := kubernetes.GetHubUrl()
response, err := http.Get(fmt.Sprintf("%s/echo", hubUrl))
if err != nil || response.StatusCode != 200 {
log.Info().Msg(fmt.Sprintf(utils.Yellow, "Couldn't connect to Hub. Establishing proxy..."))
runProxy(false, true)
}
dstPath, err := filepath.Abs(fmt.Sprintf("./%d.tar.gz", time.Now().Unix()))
if err != nil {
panic(err)
}
out, err := os.Create(dstPath)
if err != nil {
panic(err)
}
defer out.Close()
connector := connect.NewConnector(kubernetes.GetHubUrl(), connect.DefaultRetries, connect.DefaultTimeout)
connector.PostPcapsMerge(out)
}
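`runExport` streams the merged PCAPs into a local `<timestamp>.tar.gz`. For reference, unpacking that archive afterwards needs nothing beyond the Go standard library; this snippet is illustrative and not part of the CLI, and the file name is just an example:

```go
package main

import (
	"archive/tar"
	"compress/gzip"
	"io"
	"log"
	"os"
	"path/filepath"
)

// Extract a .tar.gz produced by `kubeshark export` into ./pcaps.
func main() {
	f, err := os.Open("1700000000.tar.gz") // example file name
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	gz, err := gzip.NewReader(f)
	if err != nil {
		log.Fatal(err)
	}
	defer gz.Close()

	tr := tar.NewReader(gz)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		if hdr.Typeflag != tar.TypeReg {
			continue // skip directories and other entry types
		}
		dst := filepath.Join("pcaps", filepath.Base(hdr.Name)) // flatten paths
		if err := os.MkdirAll(filepath.Dir(dst), 0o755); err != nil {
			log.Fatal(err)
		}
		out, err := os.Create(dst)
		if err != nil {
			log.Fatal(err)
		}
		if _, err := io.Copy(out, tr); err != nil {
			out.Close()
			log.Fatal(err)
		}
		out.Close()
	}
}
```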

21
cmd/license.go Normal file
View File

@@ -0,0 +1,21 @@
package cmd
import (
"fmt"
"github.com/kubeshark/kubeshark/config"
"github.com/spf13/cobra"
)
var licenseCmd = &cobra.Command{
Use: "license",
Short: "Print the license loaded string",
RunE: func(cmd *cobra.Command, args []string) error {
fmt.Println(config.Config.License)
return nil
},
}
func init() {
rootCmd.AddCommand(licenseCmd)
}

View File

@@ -18,7 +18,7 @@ var logsCmd = &cobra.Command{
Use: "logs",
Short: "Create a ZIP file with logs for GitHub issues or troubleshooting",
RunE: func(cmd *cobra.Command, args []string) error {
kubernetesProvider, err := getKubernetesProviderForCli()
kubernetesProvider, err := getKubernetesProviderForCli(false, false)
if err != nil {
return nil
}

View File

@@ -1,25 +0,0 @@
# This example shows permissions that enrich the logs with additional info
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubeshark-runner-debug-clusterrole
rules:
- apiGroups: ["events.k8s.io"]
resources: ["events"]
verbs: ["watch"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["get"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubeshark-runner-debug-clusterrolebindings
subjects:
- kind: User
name: user-with-clusterwide-access
apiGroup: rbac.authorization.k8s.io
roleRef:
kind: ClusterRole
name: kubeshark-runner-debug-clusterrole
apiGroup: rbac.authorization.k8s.io

View File

@@ -1,37 +0,0 @@
# This example shows permissions that are required for Kubeshark to resolve IPs to service names
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubeshark-resolver-clusterrole
rules:
- apiGroups: [""]
resources: ["serviceaccounts"]
verbs: ["get", "create"]
- apiGroups: ["rbac.authorization.k8s.io"]
resources: ["clusterroles"]
verbs: ["get", "list", "create", "delete"]
- apiGroups: ["rbac.authorization.k8s.io"]
resources: ["clusterrolebindings"]
verbs: ["get", "list", "create", "delete"]
- apiGroups: ["", "apps", "extensions"]
resources: ["pods"]
verbs: ["get", "list", "watch"]
- apiGroups: ["", "apps", "extensions"]
resources: ["services"]
verbs: ["get", "list", "watch"]
- apiGroups: ["", "apps", "extensions"]
resources: ["endpoints"]
verbs: ["get", "list", "watch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubeshark-resolver-clusterrolebindings
subjects:
- kind: User
name: user-with-clusterwide-access
apiGroup: rbac.authorization.k8s.io
roleRef:
kind: ClusterRole
name: kubeshark-resolver-clusterrole
apiGroup: rbac.authorization.k8s.io

View File

@@ -1,40 +0,0 @@
# This example shows the permissions that are required in order to run the `kubeshark tap` command
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubeshark-runner-clusterrole
rules:
- apiGroups: [""]
resources: ["pods"]
verbs: ["list", "watch", "create"]
- apiGroups: [""]
resources: ["services"]
verbs: ["get", "create"]
- apiGroups: ["apps"]
resources: ["daemonsets"]
verbs: ["create", "patch"]
- apiGroups: [""]
resources: ["namespaces"]
verbs: ["list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["services/proxy"]
verbs: ["get", "create"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["create"]
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubeshark-runner-clusterrolebindings
subjects:
- kind: User
name: user-with-clusterwide-access
apiGroup: rbac.authorization.k8s.io
roleRef:
kind: ClusterRole
name: kubeshark-runner-clusterrole
apiGroup: rbac.authorization.k8s.io

View File

@@ -1,25 +0,0 @@
# This example shows permissions that enrich the logs with additional info in namespace-restricted mode
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubeshark-runner-debug-role
rules:
- apiGroups: ["events.k8s.io"]
resources: ["events"]
verbs: ["watch"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["get"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubeshark-runner-debug-rolebindings
subjects:
- kind: User
name: user-with-restricted-access
apiGroup: rbac.authorization.k8s.io
roleRef:
kind: Role
name: kubeshark-runner-debug-role
apiGroup: rbac.authorization.k8s.io

View File

@@ -1,37 +0,0 @@
# This example shows permissions that are required for Kubeshark to resolve IPs to service names in namespace-restricted mode
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubeshark-resolver-role
rules:
- apiGroups: [""]
resources: ["serviceaccounts"]
verbs: ["get", "list", "create", "delete"]
- apiGroups: ["rbac.authorization.k8s.io"]
resources: ["roles"]
verbs: ["get", "list", "create", "delete"]
- apiGroups: ["rbac.authorization.k8s.io"]
resources: ["rolebindings"]
verbs: ["get", "list", "create", "delete"]
- apiGroups: ["", "apps", "extensions"]
resources: ["pods"]
verbs: ["get", "list", "watch"]
- apiGroups: ["", "apps", "extensions"]
resources: ["services"]
verbs: ["get", "list", "watch"]
- apiGroups: ["", "apps", "extensions"]
resources: ["endpoints"]
verbs: ["get", "list", "watch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubeshark-resolver-rolebindings
subjects:
- kind: User
name: user-with-restricted-access
apiGroup: rbac.authorization.k8s.io
roleRef:
kind: Role
name: kubeshark-resolver-role
apiGroup: rbac.authorization.k8s.io

View File

@@ -1,37 +0,0 @@
# This example shows the permissions that are required in order to run the `kubeshark tap` command in namespace-restricted mode
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubeshark-runner-role
rules:
- apiGroups: [""]
resources: ["pods"]
verbs: ["list", "watch", "create"]
- apiGroups: [""]
resources: ["services"]
verbs: ["get", "create", "delete"]
- apiGroups: ["apps"]
resources: ["daemonsets"]
verbs: ["create", "patch", "delete"]
- apiGroups: [""]
resources: ["services/proxy"]
verbs: ["get", "create", "delete"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["create", "delete"]
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubeshark-runner-rolebindings
subjects:
- kind: User
name: user-with-restricted-access
apiGroup: rbac.authorization.k8s.io
roleRef:
kind: Role
name: kubeshark-runner-role
apiGroup: rbac.authorization.k8s.io

View File

@@ -2,7 +2,7 @@ package cmd
import (
"fmt"
"io/ioutil"
"io"
"net/http"
"os"
"time"
@@ -28,7 +28,7 @@ var proCmd = &cobra.Command{
}
const (
PRO_URL = "https://console.kubeshark.co"
PRO_URL = "https://console.kubeshark.co/cli"
PRO_PORT = 5252
)
@@ -40,19 +40,20 @@ func init() {
log.Debug().Err(err).Send()
}
proCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.SrcPort, "Provide a custom port for the Hub")
proCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the Hub")
proCmd.Flags().Uint16(configStructs.ProxyFrontPortLabel, defaultTapConfig.Proxy.Front.Port, "Provide a custom port for the Kubeshark")
proCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the Kubeshark")
proCmd.Flags().StringP(configStructs.ReleaseNamespaceLabel, "s", defaultTapConfig.Release.Namespace, "Release namespace of Kubeshark")
}
func acquireLicense() {
hubUrl := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort)
hubUrl := kubernetes.GetHubUrl()
response, err := http.Get(fmt.Sprintf("%s/echo", hubUrl))
if err != nil || response.StatusCode != 200 {
log.Info().Msg(fmt.Sprintf(utils.Yellow, "Couldn't connect to Hub. Establishing proxy..."))
runProxy(false, true)
}
connector = connect.NewConnector(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort), connect.DefaultRetries, connect.DefaultTimeout)
connector = connect.NewConnector(kubernetes.GetHubUrl(), connect.DefaultRetries, connect.DefaultTimeout)
log.Info().Str("url", PRO_URL).Msg("Opening in the browser:")
utils.OpenBrowser(PRO_URL)
@@ -61,17 +62,31 @@ func acquireLicense() {
}
func updateLicense(licenseKey string) {
log.Info().Str("key", licenseKey).Msg("Received license:")
config.Config.License = licenseKey
err := config.WriteConfig(&config.Config)
if err != nil {
log.Error().Err(err).Send()
}
kubernetesProvider, err := getKubernetesProviderForCli(false, false)
if err != nil {
log.Error().Err(err).Send()
return
}
updated, err := kubernetes.SetSecret(kubernetesProvider, kubernetes.SECRET_LICENSE, config.Config.License)
if err != nil {
log.Error().Err(err).Send()
}
if updated {
log.Info().Msg("Updated the license, exiting...")
} else {
log.Info().Msg("Exiting...")
}
go func() {
connector.PostLicense(config.Config.License)
log.Info().Msg("Updated the license. Exiting.")
time.Sleep(2 * time.Second)
os.Exit(0)
}()
@@ -96,7 +111,7 @@ func runLicenseRecieverServer() {
})
ginApp.POST("/", func(c *gin.Context) {
data, err := ioutil.ReadAll(c.Request.Body)
data, err := io.ReadAll(c.Request.Body)
if err != nil {
log.Error().Err(err).Send()
c.AbortWithStatus(http.StatusBadRequest)
@@ -105,14 +120,12 @@ func runLicenseRecieverServer() {
licenseKey := string(data)
log.Info().Str("key", licenseKey).Msg("Received license:")
updateLicense(licenseKey)
})
go func() {
if err := ginApp.Run(fmt.Sprintf(":%d", PRO_PORT)); err != nil {
panic(err)
log.Error().Err(err).Send()
}
}()
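`updateLicense` now persists the key both to the local config file and, via `kubernetes.SetSecret`, into the cluster. A create-or-update of a Secret value with plain client-go usually looks roughly like this hypothetical helper; the repository's `SetSecret` may behave differently:

```go
package kubernetesx // hypothetical package name, for illustration only

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// SetSecretValue writes key=value into a Secret, creating it when missing.
// It returns true when an existing Secret was updated.
func SetSecretValue(ctx context.Context, clientset kubernetes.Interface, namespace, name, key, value string) (bool, error) {
	secrets := clientset.CoreV1().Secrets(namespace)

	secret, err := secrets.Get(ctx, name, metav1.GetOptions{})
	if errors.IsNotFound(err) {
		_, err = secrets.Create(ctx, &corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
			StringData: map[string]string{key: value},
		}, metav1.CreateOptions{})
		return false, err
	}
	if err != nil {
		return false, err
	}

	if secret.StringData == nil {
		secret.StringData = map[string]string{}
	}
	secret.StringData[key] = value
	_, err = secrets.Update(ctx, secret, metav1.UpdateOptions{})
	return err == nil, err
}
```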

View File

@@ -24,7 +24,7 @@ func init() {
log.Debug().Err(err).Send()
}
proxyCmd.Flags().Uint16(configStructs.ProxyFrontPortLabel, defaultTapConfig.Proxy.Front.SrcPort, "Provide a custom port for the front-end proxy/port-forward")
proxyCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.SrcPort, "Provide a custom port for the Hub proxy/port-forward")
proxyCmd.Flags().Uint16(configStructs.ProxyFrontPortLabel, defaultTapConfig.Proxy.Front.Port, "Provide a custom port for the proxy/port-forward")
proxyCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the proxy/port-forward")
proxyCmd.Flags().StringP(configStructs.ReleaseNamespaceLabel, "s", defaultTapConfig.Release.Namespace, "Release namespace of Kubeshark")
}

View File

@@ -15,7 +15,7 @@ import (
)
func runProxy(block bool, noBrowser bool) {
kubernetesProvider, err := getKubernetesProviderForCli()
kubernetesProvider, err := getKubernetesProviderForCli(false, false)
if err != nil {
return
}
@@ -23,7 +23,7 @@ func runProxy(block bool, noBrowser bool) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
exists, err := kubernetesProvider.DoesServiceExist(ctx, config.Config.Tap.SelfNamespace, kubernetes.FrontServiceName)
exists, err := kubernetesProvider.DoesServiceExist(ctx, config.Config.Tap.Release.Namespace, kubernetes.FrontServiceName)
if err != nil {
log.Error().
Str("service", kubernetes.FrontServiceName).
@@ -42,7 +42,7 @@ func runProxy(block bool, noBrowser bool) {
return
}
exists, err = kubernetesProvider.DoesServiceExist(ctx, config.Config.Tap.SelfNamespace, kubernetes.HubServiceName)
exists, err = kubernetesProvider.DoesServiceExist(ctx, config.Config.Tap.Release.Namespace, kubernetes.HubServiceName)
if err != nil {
log.Error().
Str("service", kubernetes.HubServiceName).
@@ -63,42 +63,12 @@ func runProxy(block bool, noBrowser bool) {
var establishedProxy bool
hubUrl := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort)
response, err := http.Get(fmt.Sprintf("%s/echo", hubUrl))
if err == nil && response.StatusCode == 200 {
log.Info().
Str("service", kubernetes.HubServiceName).
Int("port", int(config.Config.Tap.Proxy.Hub.SrcPort)).
Msg("Found a running service.")
okToOpen("Hub", hubUrl, true)
} else {
startProxyReportErrorIfAny(
kubernetesProvider,
ctx,
kubernetes.HubServiceName,
kubernetes.HubPodName,
configStructs.ProxyHubPortLabel,
config.Config.Tap.Proxy.Hub.SrcPort,
config.Config.Tap.Proxy.Hub.DstPort,
"/echo",
)
connector := connect.NewConnector(hubUrl, connect.DefaultRetries, connect.DefaultTimeout)
if err := connector.TestConnection("/echo"); err != nil {
log.Error().Msg(fmt.Sprintf(utils.Red, "Couldn't connect to Hub."))
return
}
establishedProxy = true
okToOpen("Hub", hubUrl, true)
}
frontUrl := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Front.SrcPort)
response, err = http.Get(fmt.Sprintf("%s/", frontUrl))
frontUrl := kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Front.Port)
response, err := http.Get(fmt.Sprintf("%s/", frontUrl))
if err == nil && response.StatusCode == 200 {
log.Info().
Str("service", kubernetes.FrontServiceName).
Int("port", int(config.Config.Tap.Proxy.Front.SrcPort)).
Int("port", int(config.Config.Tap.Proxy.Front.Port)).
Msg("Found a running service.")
okToOpen("Kubeshark", frontUrl, noBrowser)
@@ -109,8 +79,8 @@ func runProxy(block bool, noBrowser bool) {
kubernetes.FrontServiceName,
kubernetes.FrontPodName,
configStructs.ProxyFrontPortLabel,
config.Config.Tap.Proxy.Front.SrcPort,
config.Config.Tap.Proxy.Front.DstPort,
config.Config.Tap.Proxy.Front.Port,
configStructs.ContainerPort,
"",
)
connector := connect.NewConnector(frontUrl, connect.DefaultRetries, connect.DefaultTimeout)

View File

@@ -12,10 +12,10 @@ import (
var rootCmd = &cobra.Command{
Use: "kubeshark",
Short: fmt.Sprintf("%s: The API Traffic Analyzer for Kubernetes", misc.Software),
Long: fmt.Sprintf(`%s: The API Traffic Analyzer for Kubernetes
Short: fmt.Sprintf("%s: %s", misc.Software, misc.Description),
Long: fmt.Sprintf(`%s: %s
An extensible Kubernetes-aware network sniffer and kernel tracer.
For more info: %s`, misc.Software, misc.Website),
For more info: %s`, misc.Software, misc.Description, misc.Website),
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
if err := config.InitConfig(cmd); err != nil {
log.Fatal().Err(err).Send()

View File

@@ -34,8 +34,9 @@ func init() {
log.Debug().Err(err).Send()
}
scriptsCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.SrcPort, "Provide a custom port for the Hub")
scriptsCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the Hub")
scriptsCmd.Flags().Uint16(configStructs.ProxyFrontPortLabel, defaultTapConfig.Proxy.Front.Port, "Provide a custom port for the Kubeshark")
scriptsCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the Kubeshark")
scriptsCmd.Flags().StringP(configStructs.ReleaseNamespaceLabel, "s", defaultTapConfig.Release.Namespace, "Release namespace of Kubeshark")
}
func runScripts() {
@@ -44,14 +45,14 @@ func runScripts() {
return
}
hubUrl := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort)
hubUrl := kubernetes.GetHubUrl()
response, err := http.Get(fmt.Sprintf("%s/echo", hubUrl))
if err != nil || response.StatusCode != 200 {
log.Info().Msg(fmt.Sprintf(utils.Yellow, "Couldn't connect to Hub. Establishing proxy..."))
runProxy(false, true)
}
connector = connect.NewConnector(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort), connect.DefaultRetries, connect.DefaultTimeout)
connector = connect.NewConnector(kubernetes.GetHubUrl(), connect.DefaultRetries, connect.DefaultTimeout)
watchScripts(true)
}

View File

@@ -2,13 +2,11 @@ package cmd
import (
"errors"
"fmt"
"github.com/creasty/defaults"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/config/configStructs"
"github.com/kubeshark/kubeshark/errormessage"
"github.com/kubeshark/kubeshark/misc"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
)
@@ -47,16 +45,19 @@ func init() {
tapCmd.Flags().StringP(configStructs.DockerTagLabel, "t", defaultTapConfig.Docker.Tag, "The tag of the Docker images that are going to be pulled")
tapCmd.Flags().String(configStructs.DockerImagePullPolicy, defaultTapConfig.Docker.ImagePullPolicy, "ImagePullPolicy for the Docker images")
tapCmd.Flags().StringSlice(configStructs.DockerImagePullSecrets, defaultTapConfig.Docker.ImagePullSecrets, "ImagePullSecrets for the Docker images")
tapCmd.Flags().Uint16(configStructs.ProxyFrontPortLabel, defaultTapConfig.Proxy.Front.SrcPort, "Provide a custom port for the front-end proxy/port-forward")
tapCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.SrcPort, "Provide a custom port for the Hub proxy/port-forward")
tapCmd.Flags().Uint16(configStructs.ProxyFrontPortLabel, defaultTapConfig.Proxy.Front.Port, "Provide a custom port for the proxy/port-forward")
tapCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the proxy/port-forward")
tapCmd.Flags().StringSliceP(configStructs.NamespacesLabel, "n", defaultTapConfig.Namespaces, "Namespaces selector")
tapCmd.Flags().BoolP(configStructs.AllNamespacesLabel, "A", defaultTapConfig.AllNamespaces, "Tap all namespaces")
tapCmd.Flags().StringP(configStructs.SelfNamespaceLabel, "s", defaultTapConfig.SelfNamespace, "Self-namespace of Kubeshark")
tapCmd.Flags().String(configStructs.StorageLimitLabel, defaultTapConfig.StorageLimit, "Override the default storage limit. (per node)")
tapCmd.Flags().StringP(configStructs.ReleaseNamespaceLabel, "s", defaultTapConfig.Release.Namespace, "Release namespace of Kubeshark")
tapCmd.Flags().Bool(configStructs.PersistentStorageLabel, defaultTapConfig.PersistentStorage, "Enable persistent storage (PersistentVolumeClaim)")
tapCmd.Flags().Bool(configStructs.PersistentStorageStaticLabel, defaultTapConfig.PersistentStorageStatic, "Persistent storage static provision")
tapCmd.Flags().String(configStructs.EfsFileSytemIdAndPathLabel, defaultTapConfig.EfsFileSytemIdAndPath, "EFS file system ID")
tapCmd.Flags().String(configStructs.StorageLimitLabel, defaultTapConfig.StorageLimit, "Override the default storage limit (per node)")
tapCmd.Flags().String(configStructs.StorageClassLabel, defaultTapConfig.StorageClass, "Override the default storage class of the PersistentVolumeClaim (per node)")
tapCmd.Flags().Bool(configStructs.DryRunLabel, defaultTapConfig.DryRun, "Preview of all pods matching the regex, without tapping them")
tapCmd.Flags().StringP(configStructs.PcapLabel, "p", defaultTapConfig.Pcap, fmt.Sprintf("Capture from a PCAP snapshot of %s (.tar.gz) using your Docker Daemon instead of Kubernetes", misc.Software))
tapCmd.Flags().Bool(configStructs.ServiceMeshLabel, defaultTapConfig.ServiceMesh, "Capture the encrypted traffic if the cluster is configured with a service mesh and with mTLS")
tapCmd.Flags().Bool(configStructs.TlsLabel, defaultTapConfig.Tls, "Capture the traffic that's encrypted with OpenSSL or Go crypto/tls libraries")
tapCmd.Flags().Bool(configStructs.DebugLabel, defaultTapConfig.Debug, "Enable the debug mode")
tapCmd.Flags().Bool(configStructs.IgnoreTaintedLabel, defaultTapConfig.IgnoreTainted, "Ignore tainted pods while running Worker DaemonSet")
tapCmd.Flags().Bool(configStructs.IngressEnabledLabel, defaultTapConfig.Ingress.Enabled, "Enable Ingress")
tapCmd.Flags().Bool(configStructs.TelemetryEnabledLabel, defaultTapConfig.Telemetry.Enabled, "Enable/disable Telemetry")
}
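All of these flags mirror fields of the default `TapConfig` and are merged back into the configuration by the config package at startup. A bare-bones illustration of the same cobra pattern, with flag defaults seeded from a defaults-populated struct (simplified, hypothetical flag names):

```go
package main

import (
	"fmt"
	"log"

	"github.com/creasty/defaults"
	"github.com/spf13/cobra"
)

type tapConfig struct {
	StorageLimit string `default:"200Mi"`
	Debug        bool   `default:"false"`
}

func main() {
	var def tapConfig
	if err := defaults.Set(&def); err != nil {
		log.Fatal(err)
	}

	tapCmd := &cobra.Command{
		Use:   "tap",
		Short: "Capture traffic (illustrative stub)",
		RunE: func(cmd *cobra.Command, args []string) error {
			limit, _ := cmd.Flags().GetString("storageLimit")
			fmt.Println("storage limit:", limit)
			return nil
		},
	}
	// Flag defaults come from the defaults-populated struct, matching the
	// pattern used by the real tap command above.
	tapCmd.Flags().String("storageLimit", def.StorageLimit, "Override the default storage limit (per node)")
	tapCmd.Flags().Bool("debug", def.Debug, "Enable the debug mode")

	if err := tapCmd.Execute(); err != nil {
		log.Fatal(err)
	}
}
```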

View File

@@ -1,353 +0,0 @@
package cmd
import (
"bufio"
"context"
"encoding/json"
"fmt"
"io"
"os"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/client"
"github.com/docker/go-connections/nat"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/docker"
"github.com/kubeshark/kubeshark/internal/connect"
"github.com/kubeshark/kubeshark/kubernetes"
"github.com/kubeshark/kubeshark/misc"
"github.com/kubeshark/kubeshark/utils"
"github.com/rs/zerolog/log"
v1 "k8s.io/api/core/v1"
)
func logPullingImage(image string, reader io.ReadCloser) {
scanner := bufio.NewScanner(reader)
for scanner.Scan() {
text := scanner.Text()
var data map[string]interface{}
if err := json.Unmarshal([]byte(text), &data); err != nil {
log.Error().Err(err).Send()
continue
}
var id string
if val, ok := data["id"]; ok {
id = val.(string)
}
var status string
if val, ok := data["status"]; ok {
status = val.(string)
}
var progress string
if val, ok := data["progress"]; ok {
progress = val.(string)
}
e := log.Info()
if image != "" {
e = e.Str("image", image)
}
if progress != "" {
e = e.Str("progress", progress)
}
e.Msg(fmt.Sprintf("[%-12s] %-18s", id, status))
}
}
func pullImages(ctx context.Context, cli *client.Client, imageFront string, imageHub string, imageWorker string) error {
readerFront, err := cli.ImagePull(ctx, imageFront, types.ImagePullOptions{})
if err != nil {
return err
}
defer readerFront.Close()
logPullingImage(imageFront, readerFront)
readerHub, err := cli.ImagePull(ctx, imageHub, types.ImagePullOptions{})
if err != nil {
return err
}
defer readerHub.Close()
logPullingImage(imageHub, readerHub)
readerWorker, err := cli.ImagePull(ctx, imageWorker, types.ImagePullOptions{})
if err != nil {
return err
}
defer readerWorker.Close()
logPullingImage(imageWorker, readerWorker)
return nil
}
func cleanUpOldContainers(
ctx context.Context,
cli *client.Client,
nameFront string,
nameHub string,
nameWorker string,
) error {
containers, err := cli.ContainerList(ctx, types.ContainerListOptions{})
if err != nil {
return err
}
for _, container := range containers {
f := fmt.Sprintf("/%s", nameFront)
h := fmt.Sprintf("/%s", nameHub)
w := fmt.Sprintf("/%s", nameWorker)
if utils.Contains(container.Names, f) || utils.Contains(container.Names, h) || utils.Contains(container.Names, w) {
err = cli.ContainerRemove(ctx, container.ID, types.ContainerRemoveOptions{Force: true})
if err != nil {
return err
}
}
}
return nil
}
func createAndStartContainers(
ctx context.Context,
cli *client.Client,
imageFront string,
imageHub string,
imageWorker string,
tarReader io.Reader,
) (
respFront container.ContainerCreateCreatedBody,
respHub container.ContainerCreateCreatedBody,
respWorker container.ContainerCreateCreatedBody,
workerIPAddr string,
err error,
) {
log.Info().Msg("Creating containers...")
nameFront := fmt.Sprintf("%s-front", misc.Program)
nameHub := fmt.Sprintf("%s-hub", misc.Program)
nameWorker := fmt.Sprintf("%s-worker", misc.Program)
err = cleanUpOldContainers(ctx, cli, nameFront, nameHub, nameWorker)
if err != nil {
return
}
hostIP := "0.0.0.0"
hostConfigFront := &container.HostConfig{
PortBindings: nat.PortMap{
nat.Port(fmt.Sprintf("%d/tcp", config.Config.Tap.Proxy.Front.DstPort)): []nat.PortBinding{
{
HostIP: hostIP,
HostPort: fmt.Sprintf("%d", config.Config.Tap.Proxy.Front.SrcPort),
},
},
},
}
respFront, err = cli.ContainerCreate(ctx, &container.Config{
Image: imageFront,
Tty: false,
Env: []string{
"REACT_APP_DEFAULT_FILTER= ",
"REACT_APP_HUB_HOST= ",
fmt.Sprintf("REACT_APP_HUB_PORT=%d", config.Config.Tap.Proxy.Hub.SrcPort),
},
}, hostConfigFront, nil, nil, nameFront)
if err != nil {
return
}
hostConfigHub := &container.HostConfig{
PortBindings: nat.PortMap{
nat.Port(fmt.Sprintf("%d/tcp", config.Config.Tap.Proxy.Hub.DstPort)): []nat.PortBinding{
{
HostIP: hostIP,
HostPort: fmt.Sprintf("%d", config.Config.Tap.Proxy.Hub.SrcPort),
},
},
},
}
cmdHub := []string{"-port", fmt.Sprintf("%d", config.Config.Tap.Proxy.Hub.DstPort)}
if config.DebugMode {
cmdHub = append(cmdHub, fmt.Sprintf("-%s", config.DebugFlag))
}
respHub, err = cli.ContainerCreate(ctx, &container.Config{
Image: imageHub,
Cmd: cmdHub,
Tty: false,
ExposedPorts: nat.PortSet{nat.Port(fmt.Sprintf("%d/tcp", config.Config.Tap.Proxy.Hub.DstPort)): {}},
}, hostConfigHub, nil, nil, nameHub)
if err != nil {
return
}
cmdWorker := []string{"-f", "./import", "-port", fmt.Sprintf("%d", config.Config.Tap.Proxy.Worker.DstPort)}
if config.DebugMode {
cmdWorker = append(cmdWorker, fmt.Sprintf("-%s", config.DebugFlag))
}
respWorker, err = cli.ContainerCreate(ctx, &container.Config{
Image: imageWorker,
Cmd: cmdWorker,
Tty: false,
}, nil, nil, nil, nameWorker)
if err != nil {
return
}
if err = cli.CopyToContainer(ctx, respWorker.ID, "/app/import", tarReader, types.CopyToContainerOptions{}); err != nil {
return
}
log.Info().Msg("Starting containers...")
if err = cli.ContainerStart(ctx, respFront.ID, types.ContainerStartOptions{}); err != nil {
return
}
if err = cli.ContainerStart(ctx, respHub.ID, types.ContainerStartOptions{}); err != nil {
return
}
if err = cli.ContainerStart(ctx, respWorker.ID, types.ContainerStartOptions{}); err != nil {
return
}
var containerWorker types.ContainerJSON
containerWorker, err = cli.ContainerInspect(ctx, respWorker.ID)
if err != nil {
return
}
workerIPAddr = containerWorker.NetworkSettings.IPAddress
return
}
func stopAndRemoveContainers(
ctx context.Context,
cli *client.Client,
respFront container.ContainerCreateCreatedBody,
respHub container.ContainerCreateCreatedBody,
respWorker container.ContainerCreateCreatedBody,
) (err error) {
log.Warn().Msg("Stopping containers...")
err = cli.ContainerStop(ctx, respFront.ID, nil)
if err != nil {
return
}
err = cli.ContainerStop(ctx, respHub.ID, nil)
if err != nil {
return
}
err = cli.ContainerStop(ctx, respWorker.ID, nil)
if err != nil {
return
}
log.Warn().Msg("Removing containers...")
err = cli.ContainerRemove(ctx, respFront.ID, types.ContainerRemoveOptions{})
if err != nil {
return
}
err = cli.ContainerRemove(ctx, respHub.ID, types.ContainerRemoveOptions{})
if err != nil {
return
}
err = cli.ContainerRemove(ctx, respWorker.ID, types.ContainerRemoveOptions{})
if err != nil {
return
}
return
}
func pcap(tarPath string) {
docker.SetRegistry(config.Config.Tap.Docker.Registry)
docker.SetTag(config.Config.Tap.Docker.Tag)
ctx := context.Background()
cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
if err != nil {
log.Error().Err(err).Send()
return
}
defer cli.Close()
imageFront := docker.GetFrontImage()
imageHub := docker.GetHubImage()
imageWorker := docker.GetWorkerImage()
err = pullImages(ctx, cli, imageFront, imageHub, imageWorker)
if err != nil {
log.Error().Err(err).Send()
return
}
tarFile, err := os.Open(tarPath)
if err != nil {
log.Error().Err(err).Send()
return
}
defer tarFile.Close()
tarReader := bufio.NewReader(tarFile)
respFront, respHub, respWorker, workerIPAddr, err := createAndStartContainers(
ctx,
cli,
imageFront,
imageHub,
imageWorker,
tarReader,
)
if err != nil {
log.Error().Err(err).Send()
return
}
workerPod := &v1.Pod{
Spec: v1.PodSpec{
NodeName: "docker",
},
Status: v1.PodStatus{
PodIP: workerIPAddr,
Phase: v1.PodRunning,
ContainerStatuses: []v1.ContainerStatus{
{
Ready: true,
},
},
},
}
connector = connect.NewConnector(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort), connect.DefaultRetries, connect.DefaultTimeout)
connector.PostWorkerPodToHub(workerPod)
log.Info().
Str("url", kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort)).
Msg(fmt.Sprintf(utils.Green, "Hub is available at:"))
url := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Front.SrcPort)
log.Info().Str("url", url).Msg(fmt.Sprintf(utils.Green, fmt.Sprintf("%s is available at:", misc.Software)))
if !config.Config.HeadlessMode {
utils.OpenBrowser(url)
}
ctxC, cancel := context.WithCancel(context.Background())
defer cancel()
utils.WaitForTermination(ctxC, cancel)
err = stopAndRemoveContainers(ctx, cli, respFront, respHub, respWorker)
if err != nil {
log.Error().Err(err).Send()
}
}


@@ -2,21 +2,20 @@ package cmd
import (
"context"
"errors"
"encoding/json"
"fmt"
"os"
"regexp"
"strings"
"sync"
"time"
"github.com/kubeshark/kubeshark/docker"
"github.com/kubeshark/kubeshark/internal/connect"
"github.com/kubeshark/kubeshark/kubernetes/helm"
"github.com/kubeshark/kubeshark/misc"
"github.com/kubeshark/kubeshark/resources"
"github.com/kubeshark/kubeshark/utils"
core "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/config/configStructs"
@@ -28,9 +27,8 @@ import (
const cleanupTimeout = time.Minute
type tapState struct {
startTime time.Time
targetNamespaces []string
selfServiceAccountExists bool
startTime time.Time
targetNamespaces []string
}
var state tapState
@@ -48,36 +46,29 @@ var ready *Readiness
func tap() {
ready = &Readiness{}
state.startTime = time.Now()
docker.SetRegistry(config.Config.Tap.Docker.Registry)
docker.SetTag(config.Config.Tap.Docker.Tag)
log.Info().Str("registry", docker.GetRegistry()).Str("tag", docker.GetTag()).Msg("Using Docker:")
if config.Config.Tap.Pcap != "" {
pcap(config.Config.Tap.Pcap)
return
}
log.Info().Str("registry", config.Config.Tap.Docker.Registry).Str("tag", config.Config.Tap.Docker.Tag).Msg("Using Docker:")
log.Info().
Str("limit", config.Config.Tap.StorageLimit).
Msg(fmt.Sprintf("%s will store the traffic up to a limit (per node). Oldest TCP streams will be removed once the limit is reached.", misc.Software))
Msg(fmt.Sprintf("%s will store the traffic up to a limit (per node). Oldest TCP/UDP streams will be removed once the limit is reached.", misc.Software))
connector = connect.NewConnector(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort), connect.DefaultRetries, connect.DefaultTimeout)
connector = connect.NewConnector(kubernetes.GetHubUrl(), connect.DefaultRetries, connect.DefaultTimeout)
kubernetesProvider, err := getKubernetesProviderForCli()
kubernetesProvider, err := getKubernetesProviderForCli(false, false)
if err != nil {
log.Error().Err(err).Send()
return
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel() // cancel will be called when this function exits
state.targetNamespaces = getNamespaces(kubernetesProvider)
state.targetNamespaces = kubernetesProvider.GetNamespaces()
if config.Config.IsNsRestrictedMode() {
if len(state.targetNamespaces) != 1 || !utils.Contains(state.targetNamespaces, config.Config.Tap.SelfNamespace) {
log.Error().Msg(fmt.Sprintf("%s can't resolve IPs in other namespaces when running in namespace restricted mode. You can use the same namespace for --%s and --%s", misc.Software, configStructs.NamespacesLabel, configStructs.SelfNamespaceLabel))
return
}
}
log.Info().
Bool("enabled", config.Config.Tap.Telemetry.Enabled).
Str("notice", "Telemetry can be disabled by setting the flag: --telemetry-enabled=false").
Msg("Telemetry")
log.Info().Strs("namespaces", state.targetNamespaces).Msg("Targeting pods in:")
@@ -90,30 +81,37 @@ func tap() {
}
log.Info().Msg(fmt.Sprintf("Waiting for the creation of %s resources...", misc.Software))
if state.selfServiceAccountExists, err = resources.CreateHubResources(ctx, kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.Tap.SelfNamespace, config.Config.Tap.Resources.Hub, config.Config.ImagePullPolicy(), config.Config.ImagePullSecrets(), config.Config.Tap.Debug); err != nil {
var statusError *k8serrors.StatusError
if errors.As(err, &statusError) && (statusError.ErrStatus.Reason == metav1.StatusReasonAlreadyExists) {
log.Info().Msg(fmt.Sprintf("%s is already running in this namespace, change the `selfnamespace` configuration or run `%s clean` to remove the currently running %s instance.", misc.Software, misc.Program, misc.Software))
postHubStarted(ctx, kubernetesProvider, cancel, true)
log.Info().Msg("Updated Hub about the changes in the config. Exiting.")
printProxyCommandSuggestion()
} else {
defer resources.CleanUpSelfResources(ctx, cancel, kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.Tap.SelfNamespace)
log.Error().Err(errormessage.FormatError(err)).Msg("Error creating resources!")
}
return
rel, err := helm.NewHelm(
config.Config.Tap.Release.Repo,
config.Config.Tap.Release.Name,
config.Config.Tap.Release.Namespace,
).Install()
if err != nil {
if err.Error() != "cannot re-use a name that is still in use" {
log.Error().Err(err).Send()
os.Exit(1)
}
log.Info().Msg("Found an existing installation, skipping Helm install...")
updateConfig(kubernetesProvider)
postFrontStarted(ctx, kubernetesProvider, cancel)
} else {
log.Info().Msgf("Installed the Helm release: %s", rel.Name)
go watchHubEvents(ctx, kubernetesProvider, cancel)
go watchHubPod(ctx, kubernetesProvider, cancel)
go watchFrontPod(ctx, kubernetesProvider, cancel)
}
defer finishTapExecution(kubernetesProvider)
go watchHubEvents(ctx, kubernetesProvider, cancel)
go watchHubPod(ctx, kubernetesProvider, cancel)
go watchFrontPod(ctx, kubernetesProvider, cancel)
// block until exit signal or error
utils.WaitForTermination(ctx, cancel)
printProxyCommandSuggestion()
if !config.Config.Tap.Ingress.Enabled {
printProxyCommandSuggestion()
}
}
func printProxyCommandSuggestion() {
@@ -123,7 +121,7 @@ func printProxyCommandSuggestion() {
}
func finishTapExecution(kubernetesProvider *kubernetes.Provider) {
finishSelfExecution(kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.Tap.SelfNamespace, true)
finishSelfExecution(kubernetesProvider)
}
/*
@@ -154,9 +152,9 @@ func printNoPodsFoundSuggestion(targetNamespaces []string) {
}
func watchHubPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc) {
podExactRegex := regexp.MustCompile(fmt.Sprintf("^%s$", kubernetes.HubPodName))
podExactRegex := regexp.MustCompile(fmt.Sprintf("^%s", kubernetes.HubPodName))
podWatchHelper := kubernetes.NewPodWatchHelper(kubernetesProvider, podExactRegex)
eventChan, errorChan := kubernetes.FilteredWatch(ctx, podWatchHelper, []string{config.Config.Tap.SelfNamespace}, podWatchHelper)
eventChan, errorChan := kubernetes.FilteredWatch(ctx, podWatchHelper, []string{config.Config.Tap.Release.Namespace}, podWatchHelper)
isPodReady := false
timeAfter := time.After(120 * time.Second)
@@ -195,7 +193,6 @@ func watchHubPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, c
ready.Lock()
ready.Hub = true
ready.Unlock()
postHubStarted(ctx, kubernetesProvider, cancel, false)
}
ready.Lock()
@@ -223,7 +220,7 @@ func watchHubPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, c
log.Error().
Str("pod", kubernetes.HubPodName).
Str("namespace", config.Config.Tap.SelfNamespace).
Str("namespace", config.Config.Tap.Release.Namespace).
Err(err).
Msg("Failed creating pod.")
cancel()
@@ -245,9 +242,9 @@ func watchHubPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, c
}
func watchFrontPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc) {
podExactRegex := regexp.MustCompile(fmt.Sprintf("^%s$", kubernetes.FrontPodName))
podExactRegex := regexp.MustCompile(fmt.Sprintf("^%s", kubernetes.FrontPodName))
podWatchHelper := kubernetes.NewPodWatchHelper(kubernetesProvider, podExactRegex)
eventChan, errorChan := kubernetes.FilteredWatch(ctx, podWatchHelper, []string{config.Config.Tap.SelfNamespace}, podWatchHelper)
eventChan, errorChan := kubernetes.FilteredWatch(ctx, podWatchHelper, []string{config.Config.Tap.Release.Namespace}, podWatchHelper)
isPodReady := false
timeAfter := time.After(120 * time.Second)
@@ -312,10 +309,9 @@ func watchFrontPod(ctx context.Context, kubernetesProvider *kubernetes.Provider,
log.Error().
Str("pod", kubernetes.FrontPodName).
Str("namespace", config.Config.Tap.SelfNamespace).
Str("namespace", config.Config.Tap.Release.Namespace).
Err(err).
Msg("Failed creating pod.")
cancel()
case <-timeAfter:
if !isPodReady {
@@ -336,7 +332,7 @@ func watchFrontPod(ctx context.Context, kubernetesProvider *kubernetes.Provider,
func watchHubEvents(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc) {
podExactRegex := regexp.MustCompile(fmt.Sprintf("^%s", kubernetes.HubPodName))
eventWatchHelper := kubernetes.NewEventWatchHelper(kubernetesProvider, podExactRegex, "pod")
eventChan, errorChan := kubernetes.FilteredWatch(ctx, eventWatchHelper, []string{config.Config.Tap.SelfNamespace}, eventWatchHelper)
eventChan, errorChan := kubernetes.FilteredWatch(ctx, eventWatchHelper, []string{config.Config.Tap.Release.Namespace}, eventWatchHelper)
for {
select {
case wEvent, ok := <-eventChan:
@@ -402,80 +398,6 @@ func watchHubEvents(ctx context.Context, kubernetesProvider *kubernetes.Provider
}
}
func postHubStarted(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc, update bool) {
startProxyReportErrorIfAny(
kubernetesProvider,
ctx,
kubernetes.HubServiceName,
kubernetes.HubPodName,
configStructs.ProxyHubPortLabel,
config.Config.Tap.Proxy.Hub.SrcPort,
config.Config.Tap.Proxy.Hub.DstPort,
"/echo",
)
if !update {
// Create workers
err := kubernetes.CreateWorkers(
kubernetesProvider,
state.selfServiceAccountExists,
ctx,
config.Config.Tap.SelfNamespace,
config.Config.Tap.Resources.Worker,
config.Config.ImagePullPolicy(),
config.Config.ImagePullSecrets(),
config.Config.Tap.ServiceMesh,
config.Config.Tap.Tls,
config.Config.Tap.Debug,
)
if err != nil {
log.Error().Err(err).Send()
}
// Grace period
log.Info().Msg("Waiting for worker containers...")
time.Sleep(5 * time.Second)
}
// Storage limit
connector.PostStorageLimitToHub(config.Config.Tap.StorageLimitBytes())
// Pod regex
connector.PostRegexToHub(config.Config.Tap.PodRegexStr, state.targetNamespaces)
// License
if config.Config.License != "" {
connector.PostLicense(config.Config.License)
}
// Scripting
connector.PostEnv(config.Config.Scripting.Env)
scripts, err := config.Config.Scripting.GetScripts()
if err != nil {
log.Error().Err(err).Send()
}
for _, script := range scripts {
_, err = connector.PostScript(script)
if err != nil {
log.Error().Err(err).Send()
}
}
connector.PostScriptDone()
if !update {
// Hub proxy URL
url := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort)
log.Info().Str("url", url).Msg(fmt.Sprintf(utils.Green, "Hub is available at:"))
}
if config.Config.Scripting.Source != "" && config.Config.Scripting.WatchScripts {
watchScripts(false)
}
}
func postFrontStarted(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc) {
startProxyReportErrorIfAny(
kubernetesProvider,
@@ -483,29 +405,61 @@ func postFrontStarted(ctx context.Context, kubernetesProvider *kubernetes.Provid
kubernetes.FrontServiceName,
kubernetes.FrontPodName,
configStructs.ProxyFrontPortLabel,
config.Config.Tap.Proxy.Front.SrcPort,
config.Config.Tap.Proxy.Front.DstPort,
config.Config.Tap.Proxy.Front.Port,
configStructs.ContainerPort,
"",
)
url := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Front.SrcPort)
var url string
if config.Config.Tap.Ingress.Enabled {
url = fmt.Sprintf("http://%s", config.Config.Tap.Ingress.Host)
} else {
url = kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Front.Port)
}
log.Info().Str("url", url).Msg(fmt.Sprintf(utils.Green, fmt.Sprintf("%s is available at:", misc.Software)))
if !config.Config.HeadlessMode {
utils.OpenBrowser(url)
}
}
func getNamespaces(kubernetesProvider *kubernetes.Provider) []string {
if config.Config.Tap.AllNamespaces {
return []string{kubernetes.K8sAllNamespaces}
} else if len(config.Config.Tap.Namespaces) > 0 {
return utils.Unique(config.Config.Tap.Namespaces)
} else {
currentNamespace, err := kubernetesProvider.CurrentNamespace()
if err != nil {
log.Fatal().Err(err).Msg("Error getting current namespace!")
}
return []string{currentNamespace}
for !ready.Hub {
time.Sleep(100 * time.Millisecond)
}
if config.Config.Scripting.Source != "" && config.Config.Scripting.WatchScripts {
watchScripts(false)
}
}
func updateConfig(kubernetesProvider *kubernetes.Provider) {
_, _ = kubernetes.SetSecret(kubernetesProvider, kubernetes.SECRET_LICENSE, config.Config.License)
_, _ = kubernetes.SetConfig(kubernetesProvider, kubernetes.CONFIG_POD_REGEX, config.Config.Tap.PodRegexStr)
_, _ = kubernetes.SetConfig(kubernetesProvider, kubernetes.CONFIG_NAMESPACES, strings.Join(config.Config.Tap.Namespaces, ","))
data, err := json.Marshal(config.Config.Scripting.Env)
if err != nil {
log.Error().Str("config", kubernetes.CONFIG_SCRIPTING_ENV).Err(err).Send()
return
} else {
_, _ = kubernetes.SetConfig(kubernetesProvider, kubernetes.CONFIG_SCRIPTING_ENV, string(data))
}
ingressEnabled := ""
if config.Config.Tap.Ingress.Enabled {
ingressEnabled = "true"
}
authEnabled := ""
if config.Config.Tap.Auth.Enabled {
authEnabled = "true"
}
_, _ = kubernetes.SetConfig(kubernetesProvider, kubernetes.CONFIG_INGRESS_ENABLED, ingressEnabled)
_, _ = kubernetes.SetConfig(kubernetesProvider, kubernetes.CONFIG_INGRESS_HOST, config.Config.Tap.Ingress.Host)
_, _ = kubernetes.SetConfig(kubernetesProvider, kubernetes.CONFIG_PROXY_FRONT_PORT, fmt.Sprint(config.Config.Tap.Proxy.Front.Port))
_, _ = kubernetes.SetConfig(kubernetesProvider, kubernetes.CONFIG_AUTH_ENABLED, authEnabled)
_, _ = kubernetes.SetConfig(kubernetesProvider, kubernetes.CONFIG_AUTH_TYPE, config.Config.Tap.Auth.Type)
_, _ = kubernetes.SetConfig(kubernetesProvider, kubernetes.CONFIG_AUTH_SAML_IDP_METADATA_URL, config.Config.Tap.Auth.Saml.IdpMetadataUrl)
}
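
A stand-alone sketch of the URL selection performed in postFrontStarted above. It assumes, as the Ingress and Front defaults later in this diff suggest, that the proxy URL resolves to localhost on the configured front port (8899) and that the Ingress host defaults to ks.svc.cluster.local.

```go
package main

import "fmt"

// frontURL mirrors the branch in postFrontStarted: prefer the Ingress host
// when Ingress is enabled, otherwise fall back to the local proxy URL.
// The localhost form is an assumption about what GetProxyOnPort returns.
func frontURL(ingressEnabled bool, ingressHost string, frontPort uint16) string {
	if ingressEnabled {
		return fmt.Sprintf("http://%s", ingressHost)
	}
	return fmt.Sprintf("http://localhost:%d", frontPort)
}

func main() {
	fmt.Println(frontURL(false, "ks.svc.cluster.local", 8899)) // http://localhost:8899
	fmt.Println(frontURL(true, "ks.svc.cluster.local", 8899))  // http://ks.svc.cluster.local
}
```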


@@ -12,6 +12,7 @@ import (
"strings"
"github.com/creasty/defaults"
"github.com/goccy/go-yaml"
"github.com/kubeshark/kubeshark/misc"
"github.com/kubeshark/kubeshark/misc/version"
"github.com/kubeshark/kubeshark/utils"
@@ -19,7 +20,6 @@ import (
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"gopkg.in/yaml.v3"
)
const (
@@ -52,13 +52,25 @@ func InitConfig(cmd *cobra.Command) error {
return nil
}
if cmd.Use != "console" && cmd.Use != "pro" {
if !utils.Contains([]string{
"console",
"pro",
"manifests",
"license",
}, cmd.Use) {
go version.CheckNewerVersion()
}
Config = CreateDefaultConfig()
Config.Tap.Debug = DebugMode
cmdName = cmd.Name()
if utils.Contains([]string{"clean", "console", "pro", "proxy", "scripts"}, cmdName) {
if utils.Contains([]string{
"clean",
"console",
"pro",
"proxy",
"scripts",
}, cmdName) {
cmdName = "tap"
}
@@ -67,7 +79,10 @@ func InitConfig(cmd *cobra.Command) error {
}
ConfigFilePath = path.Join(misc.GetDotFolderPath(), "config.yaml")
if err := loadConfigFile(&Config); err != nil {
if err := loadConfigFile(&Config, utils.Contains([]string{
"manifests",
"license",
}, cmd.Use)); err != nil {
if !os.IsNotExist(err) {
return fmt.Errorf("invalid config, %w\n"+
"you can regenerate the file by removing it (%v) and using `kubeshark config -r`", err, ConfigFilePath)
@@ -115,7 +130,7 @@ func WriteConfig(config *ConfigStruct) error {
return nil
}
func loadConfigFile(config *ConfigStruct) error {
func loadConfigFile(config *ConfigStruct, silent bool) error {
cwd, err := os.Getwd()
if err != nil {
return err
@@ -141,7 +156,9 @@ func loadConfigFile(config *ConfigStruct) error {
return err
}
log.Info().Str("path", ConfigFilePath).Msg("Found config file!")
if !silent {
log.Info().Str("path", ConfigFilePath).Msg("Found config file!")
}
return nil
}


@@ -5,34 +5,98 @@ import (
"path/filepath"
"github.com/kubeshark/kubeshark/config/configStructs"
"github.com/kubeshark/kubeshark/misc"
v1 "k8s.io/api/core/v1"
"k8s.io/client-go/util/homedir"
)
const (
KubeConfigPathConfigName = "kube-configpath"
KubeConfigPathConfigName = "kube-configPath"
)
func CreateDefaultConfig() ConfigStruct {
return ConfigStruct{}
return ConfigStruct{
Tap: configStructs.TapConfig{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: "kubernetes.io/os",
Operator: v1.NodeSelectorOpIn,
Values: []string{"linux"},
},
},
},
},
Capabilities: configStructs.CapabilitiesConfig{
NetworkCapture: []string{
// NET_RAW is required to listen to the network traffic
"NET_RAW",
// NET_ADMIN is required to listen to the network traffic
"NET_ADMIN",
},
ServiceMeshCapture: []string{
// SYS_ADMIN is required to read /proc/PID/net/ns + to install eBPF programs (kernel < 5.8)
"SYS_ADMIN",
// SYS_PTRACE is required to set the netns of another process + to open libssl.so of another process
"SYS_PTRACE",
// DAC_OVERRIDE is required to read /proc/PID/environ
"DAC_OVERRIDE",
},
KernelModule: []string{
// SYS_MODULE is required to install kernel modules
"SYS_MODULE",
},
EBPFCapture: []string{
// SYS_ADMIN is required to read /proc/PID/net/ns + to install eBPF programs (kernel < 5.8)
"SYS_ADMIN",
// SYS_PTRACE is required to set the netns of another process + to open libssl.so of another process
"SYS_PTRACE",
// SYS_RESOURCE is required to change rlimits for eBPF
"SYS_RESOURCE",
// IPC_LOCK is required for eBPF perf buffer allocations once the buffer size grows beyond a certain amount:
// https://github.com/kubeshark/tracer/blob/13e24725ba8b98216dd0e553262e6d9c56dce5fa/main.go#L82)
"IPC_LOCK",
},
},
Auth: configStructs.AuthConfig{
Saml: configStructs.SamlConfig{
RoleAttribute: "role",
Roles: map[string]configStructs.Role{
"admin": {
Filter: "",
CanReplayTraffic: true,
CanDownloadPCAP: true,
CanUseScripting: true,
CanUpdateTargetedPods: true,
ShowAdminConsoleLink: true,
},
},
},
},
},
}
}
type KubeConfig struct {
ConfigPathStr string `yaml:"configpath"`
Context string `yaml:"context"`
ConfigPathStr string `yaml:"configPath" json:"configPath"`
Context string `yaml:"context" json:"context"`
}
type ManifestsConfig struct {
Dump bool `yaml:"dump" json:"dump"`
}
type ConfigStruct struct {
Tap configStructs.TapConfig `yaml:"tap"`
Logs configStructs.LogsConfig `yaml:"logs"`
Config configStructs.ConfigConfig `yaml:"config,omitempty"`
Kube KubeConfig `yaml:"kube"`
DumpLogs bool `yaml:"dumplogs" default:"false"`
HeadlessMode bool `yaml:"headless" default:"false"`
License string `yaml:"license" default:""`
Scripting configStructs.ScriptingConfig `yaml:"scripting"`
ResourceLabels map[string]string `yaml:"resourceLabels" default:"{}"`
Tap configStructs.TapConfig `yaml:"tap" json:"tap"`
Logs configStructs.LogsConfig `yaml:"logs" json:"logs"`
Config configStructs.ConfigConfig `yaml:"config,omitempty" json:"config,omitempty"`
Kube KubeConfig `yaml:"kube" json:"kube"`
DumpLogs bool `yaml:"dumpLogs" json:"dumpLogs" default:"false"`
HeadlessMode bool `yaml:"headless" json:"headless" default:"false"`
License string `yaml:"license" json:"license" default:""`
Scripting configStructs.ScriptingConfig `yaml:"scripting" json:"scripting"`
Manifests ManifestsConfig `yaml:"manifests,omitempty" json:"manifests,omitempty"`
Timezone string `yaml:"timezone" json:"timezone"`
}
func (config *ConfigStruct) ImagePullPolicy() v1.PullPolicy {
@@ -48,10 +112,6 @@ func (config *ConfigStruct) ImagePullSecrets() []v1.LocalObjectReference {
return ref
}
func (config *ConfigStruct) IsNsRestrictedMode() bool {
return config.Tap.SelfNamespace != misc.Program // Notice "kubeshark" string must match the default SelfNamespace
}
func (config *ConfigStruct) KubeConfigPath() string {
if config.Kube.ConfigPathStr != "" {
return config.Kube.ConfigPathStr
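
The new CreateDefaultConfig pre-populates the Linux node selector, the capture capability lists, and a default SAML admin role instead of returning an empty struct. Below is a minimal sketch, relying only on the exported API shown above, that dumps those built-in defaults to YAML with the goccy/go-yaml package this file now imports; values driven purely by `default:` struct tags are presumably filled in elsewhere (creasty/defaults) and will not appear in this output.

```go
package main

import (
	"fmt"

	"github.com/goccy/go-yaml"
	"github.com/kubeshark/kubeshark/config"
)

func main() {
	// Inspect the hard-coded defaults: tap.nodeSelectorTerms, tap.capabilities.*,
	// and tap.auth.saml.roles.admin should all be present in the output.
	cfg := config.CreateDefaultConfig()
	out, err := yaml.Marshal(cfg)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```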


@@ -5,5 +5,5 @@ const (
)
type ConfigConfig struct {
Regenerate bool `yaml:"regenerate,omitempty" default:"false" readonly:""`
Regenerate bool `yaml:"regenerate,omitempty" json:"regenerate,omitempty" default:"false" readonly:""`
}


@@ -13,7 +13,7 @@ const (
)
type LogsConfig struct {
FileStr string `yaml:"file"`
FileStr string `yaml:"file" json:"file"`
}
func (config *LogsConfig) Validate() error {


@@ -2,7 +2,7 @@ package configStructs
import (
"io/fs"
"io/ioutil"
"os"
"path/filepath"
"github.com/kubeshark/kubeshark/misc"
@@ -10,9 +10,9 @@ import (
)
type ScriptingConfig struct {
Env map[string]interface{} `yaml:"env"`
Source string `yaml:"source" default:""`
WatchScripts bool `yaml:"watchScripts" default:"true"`
Env map[string]interface{} `yaml:"env" json:"env" default:"{}"`
Source string `yaml:"source" json:"source" default:""`
WatchScripts bool `yaml:"watchScripts" json:"watchScripts" default:"true"`
}
func (config *ScriptingConfig) GetScripts() (scripts []*misc.Script, err error) {
@@ -20,8 +20,8 @@ func (config *ScriptingConfig) GetScripts() (scripts []*misc.Script, err error)
return
}
var files []fs.FileInfo
files, err = ioutil.ReadDir(config.Source)
var files []fs.DirEntry
files, err = os.ReadDir(config.Source)
if err != nil {
return
}
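
The change from ioutil.ReadDir to os.ReadDir swaps fs.FileInfo for the cheaper fs.DirEntry. A small stand-alone sketch of that pattern, calling Info() only when file metadata is actually needed:

```go
package main

import (
	"fmt"
	"log"
	"os"
)

func main() {
	entries, err := os.ReadDir(".") // replaces the deprecated ioutil.ReadDir
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range entries {
		if e.IsDir() {
			continue
		}
		info, err := e.Info() // fetch fs.FileInfo lazily, only for regular files
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s\t%d bytes\n", e.Name(), info.Size())
	}
}
```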


@@ -4,85 +4,184 @@ import (
"fmt"
"regexp"
"github.com/kubeshark/kubeshark/utils"
"github.com/rs/zerolog/log"
v1 "k8s.io/api/core/v1"
networking "k8s.io/api/networking/v1"
)
const (
DockerRegistryLabel = "docker-registry"
DockerTagLabel = "docker-tag"
DockerImagePullPolicy = "docker-imagepullpolicy"
DockerImagePullSecrets = "docker-imagepullsecrets"
ProxyFrontPortLabel = "proxy-front-port"
ProxyHubPortLabel = "proxy-hub-port"
ProxyHostLabel = "proxy-host"
NamespacesLabel = "namespaces"
AllNamespacesLabel = "allnamespaces"
SelfNamespaceLabel = "selfnamespace"
StorageLimitLabel = "storagelimit"
DryRunLabel = "dryrun"
PcapLabel = "pcap"
ServiceMeshLabel = "servicemesh"
TlsLabel = "tls"
DebugLabel = "debug"
DockerRegistryLabel = "docker-registry"
DockerTagLabel = "docker-tag"
DockerImagePullPolicy = "docker-imagePullPolicy"
DockerImagePullSecrets = "docker-imagePullSecrets"
ProxyFrontPortLabel = "proxy-front-port"
ProxyHubPortLabel = "proxy-hub-port"
ProxyHostLabel = "proxy-host"
NamespacesLabel = "namespaces"
ReleaseNamespaceLabel = "release-namespace"
PersistentStorageLabel = "persistentStorage"
PersistentStorageStaticLabel = "persistentStorageStatic"
EfsFileSytemIdAndPathLabel = "efsFileSytemIdAndPath"
StorageLimitLabel = "storageLimit"
StorageClassLabel = "storageClass"
DryRunLabel = "dryRun"
PcapLabel = "pcap"
ServiceMeshLabel = "serviceMesh"
TlsLabel = "tls"
IgnoreTaintedLabel = "ignoreTainted"
IngressEnabledLabel = "ingress-enabled"
TelemetryEnabledLabel = "telemetry-enabled"
DebugLabel = "debug"
ContainerPort = 80
ContainerPortStr = "80"
)
type Resources struct {
CpuLimit string `yaml:"cpu-limit" default:"750m"`
MemoryLimit string `yaml:"memory-limit" default:"1Gi"`
CpuRequests string `yaml:"cpu-requests" default:"50m"`
MemoryRequests string `yaml:"memory-requests" default:"50Mi"`
type ResourceLimits struct {
CPU string `yaml:"cpu" json:"cpu" default:"750m"`
Memory string `yaml:"memory" json:"memory" default:"1Gi"`
}
type ResourceRequests struct {
CPU string `yaml:"cpu" json:"cpu" default:"50m"`
Memory string `yaml:"memory" json:"memory" default:"50Mi"`
}
type ResourceRequirements struct {
Limits ResourceLimits `yaml:"limits" json:"limits"`
Requests ResourceRequests `yaml:"requests" json:"requests"`
}
type WorkerConfig struct {
SrcPort uint16 `yaml:"port" default:"8897"`
DstPort uint16 `yaml:"srvport" default:"8897"`
SrvPort uint16 `yaml:"srvPort" json:"srvPort" default:"30001"`
}
type HubConfig struct {
SrcPort uint16 `yaml:"port" default:"8898"`
DstPort uint16 `yaml:"srvport" default:"80"`
SrvPort uint16 `yaml:"srvPort" json:"srvPort" default:"8898"`
}
type FrontConfig struct {
SrcPort uint16 `yaml:"port" default:"8899"`
DstPort uint16 `yaml:"srvport" default:"80"`
Port uint16 `yaml:"port" json:"port" default:"8899"`
}
type ProxyConfig struct {
Worker WorkerConfig `yaml:"worker"`
Hub HubConfig `yaml:"hub"`
Front FrontConfig `yaml:"front"`
Host string `yaml:"host" default:"127.0.0.1"`
Worker WorkerConfig `yaml:"worker" json:"worker"`
Hub HubConfig `yaml:"hub" json:"hub"`
Front FrontConfig `yaml:"front" json:"front"`
Host string `yaml:"host" json:"host" default:"127.0.0.1"`
}
type DockerConfig struct {
Registry string `yaml:"registry" default:"docker.io/kubeshark"`
Tag string `yaml:"tag" default:"latest"`
ImagePullPolicy string `yaml:"imagepullpolicy" default:"Always"`
ImagePullSecrets []string `yaml:"imagepullsecrets"`
Registry string `yaml:"registry" json:"registry" default:"docker.io/kubeshark"`
Tag string `yaml:"tag" json:"tag" default:""`
ImagePullPolicy string `yaml:"imagePullPolicy" json:"imagePullPolicy" default:"Always"`
ImagePullSecrets []string `yaml:"imagePullSecrets" json:"imagePullSecrets"`
}
type ResourcesConfig struct {
Worker Resources `yaml:"worker"`
Hub Resources `yaml:"hub"`
Hub ResourceRequirements `yaml:"hub" json:"hub"`
Sniffer ResourceRequirements `yaml:"sniffer" json:"sniffer"`
Tracer ResourceRequirements `yaml:"tracer" json:"tracer"`
}
type Role struct {
Filter string `yaml:"filter" json:"filter" default:""`
CanReplayTraffic bool `yaml:"canReplayTraffic" json:"canReplayTraffic" default:"false"`
CanDownloadPCAP bool `yaml:"canDownloadPCAP" json:"canDownloadPCAP" default:"false"`
CanUseScripting bool `yaml:"canUseScripting" json:"canUseScripting" default:"false"`
CanUpdateTargetedPods bool `yaml:"canUpdateTargetedPods" json:"canUpdateTargetedPods" default:"false"`
ShowAdminConsoleLink bool `yaml:"showAdminConsoleLink" json:"showAdminConsoleLink" default:"false"`
}
type SamlConfig struct {
IdpMetadataUrl string `yaml:"idpMetadataUrl" json:"idpMetadataUrl"`
X509crt string `yaml:"x509crt" json:"x509crt"`
X509key string `yaml:"x509key" json:"x509key"`
RoleAttribute string `yaml:"roleAttribute" json:"roleAttribute"`
Roles map[string]Role `yaml:"roles" json:"roles"`
}
type AuthConfig struct {
Enabled bool `yaml:"enabled" json:"enabled" default:"false"`
Type string `yaml:"type" json:"type" default:"saml"`
Saml SamlConfig `yaml:"saml" json:"saml"`
}
type IngressConfig struct {
Enabled bool `yaml:"enabled" json:"enabled" default:"false"`
ClassName string `yaml:"className" json:"className" default:""`
Host string `yaml:"host" json:"host" default:"ks.svc.cluster.local"`
TLS []networking.IngressTLS `yaml:"tls" json:"tls" default:"[]"`
Annotations map[string]string `yaml:"annotations" json:"annotations" default:"{}"`
}
type ReleaseConfig struct {
Repo string `yaml:"repo" json:"repo" default:"https://helm.kubeshark.co"`
Name string `yaml:"name" json:"name" default:"kubeshark"`
Namespace string `yaml:"namespace" json:"namespace" default:"default"`
}
type TelemetryConfig struct {
Enabled bool `yaml:"enabled" json:"enabled" default:"true"`
}
type CapabilitiesConfig struct {
NetworkCapture []string `yaml:"networkCapture" json:"networkCapture" default:"[]"`
ServiceMeshCapture []string `yaml:"serviceMeshCapture" json:"serviceMeshCapture" default:"[]"`
KernelModule []string `yaml:"kernelModule" json:"kernelModule" default:"[]"`
EBPFCapture []string `yaml:"ebpfCapture" json:"ebpfCapture" default:"[]"`
}
type KernelModuleConfig struct {
Enabled bool `yaml:"enabled" json:"enabled" default:"true"`
Image string `yaml:"image" json:"image" default:"kubeshark/pf-ring-module:all"`
UnloadOnDestroy bool `yaml:"unloadOnDestroy" json:"unloadOnDestroy" default:"false"`
}
type MetricsConfig struct {
Port uint16 `yaml:"port" json:"port" default:"49100"`
}
type MiscConfig struct {
JsonTTL string `yaml:"jsonTTL" json:"jsonTTL" default:"5m"`
PcapTTL string `yaml:"pcapTTL" json:"pcapTTL" default:"10s"`
PcapErrorTTL string `yaml:"pcapErrorTTL" json:"pcapErrorTTL" default:"60s"`
}
type TapConfig struct {
Docker DockerConfig `yaml:"docker"`
Proxy ProxyConfig `yaml:"proxy"`
PodRegexStr string `yaml:"regex" default:".*"`
Namespaces []string `yaml:"namespaces"`
AllNamespaces bool `yaml:"allnamespaces" default:"false"`
SelfNamespace string `yaml:"selfnamespace" default:"kubeshark"`
StorageLimit string `yaml:"storagelimit" default:"200MB"`
DryRun bool `yaml:"dryrun" default:"false"`
Pcap string `yaml:"pcap" default:""`
Resources ResourcesConfig `yaml:"resources"`
ServiceMesh bool `yaml:"servicemesh" default:"true"`
Tls bool `yaml:"tls" default:"true"`
PacketCapture string `yaml:"packetcapture" default:"libpcap"`
Debug bool `yaml:"debug" default:"false"`
Docker DockerConfig `yaml:"docker" json:"docker"`
Proxy ProxyConfig `yaml:"proxy" json:"proxy"`
PodRegexStr string `yaml:"regex" json:"regex" default:".*"`
Namespaces []string `yaml:"namespaces" json:"namespaces" default:"[]"`
Release ReleaseConfig `yaml:"release" json:"release"`
PersistentStorage bool `yaml:"persistentStorage" json:"persistentStorage" default:"false"`
PersistentStorageStatic bool `yaml:"persistentStorageStatic" json:"persistentStorageStatic" default:"false"`
EfsFileSytemIdAndPath string `yaml:"efsFileSytemIdAndPath" json:"efsFileSytemIdAndPath" default:""`
StorageLimit string `yaml:"storageLimit" json:"storageLimit" default:"500Mi"`
StorageClass string `yaml:"storageClass" json:"storageClass" default:"standard"`
DryRun bool `yaml:"dryRun" json:"dryRun" default:"false"`
Resources ResourcesConfig `yaml:"resources" json:"resources"`
ServiceMesh bool `yaml:"serviceMesh" json:"serviceMesh" default:"true"`
Tls bool `yaml:"tls" json:"tls" default:"true"`
IgnoreTainted bool `yaml:"ignoreTainted" json:"ignoreTainted" default:"false"`
Labels map[string]string `yaml:"labels" json:"labels" default:"{}"`
Annotations map[string]string `yaml:"annotations" json:"annotations" default:"{}"`
NodeSelectorTerms []v1.NodeSelectorTerm `yaml:"nodeSelectorTerms" json:"nodeSelectorTerms" default:"[]"`
Auth AuthConfig `yaml:"auth" json:"auth"`
Ingress IngressConfig `yaml:"ingress" json:"ingress"`
IPv6 bool `yaml:"ipv6" json:"ipv6" default:"true"`
Debug bool `yaml:"debug" json:"debug" default:"false"`
KernelModule KernelModuleConfig `yaml:"kernelModule" json:"kernelModule"`
Telemetry TelemetryConfig `yaml:"telemetry" json:"telemetry"`
DefaultFilter string `yaml:"defaultFilter" json:"defaultFilter"`
ReplayDisabled bool `yaml:"replayDisabled" json:"replayDisabled" default:"false"`
ScriptingDisabled bool `yaml:"scriptingDisabled" json:"scriptingDisabled" default:"false"`
TargetedPodsUpdateDisabled bool `yaml:"targetedPodsUpdateDisabled" json:"targetedPodsUpdateDisabled" default:"false"`
RecordingDisabled bool `yaml:"recordingDisabled" json:"recordingDisabled" default:"false"`
Capabilities CapabilitiesConfig `yaml:"capabilities" json:"capabilities"`
GlobalFilter string `yaml:"globalFilter" json:"globalFilter"`
Metrics MetricsConfig `yaml:"metrics" json:"metrics"`
TrafficSampleRate int `yaml:"trafficSampleRate" json:"trafficSampleRate" default:"100"`
TcpStreamChannelTimeoutMs int `yaml:"tcpStreamChannelTimeoutMs" json:"tcpStreamChannelTimeoutMs" default:"10000"`
Misc MiscConfig `yaml:"misc" json:"misc"`
}
func (config *TapConfig) PodRegex() *regexp.Regexp {
@@ -90,24 +189,11 @@ func (config *TapConfig) PodRegex() *regexp.Regexp {
return podRegex
}
func (config *TapConfig) StorageLimitBytes() int64 {
storageLimitBytes, err := utils.HumanReadableToBytes(config.StorageLimit)
if err != nil {
log.Fatal().Err(err).Send()
}
return storageLimitBytes
}
func (config *TapConfig) Validate() error {
_, compileErr := regexp.Compile(config.PodRegexStr)
if compileErr != nil {
return fmt.Errorf("%s is not a valid regex %s", config.PodRegexStr, compileErr)
}
_, parseHumanDataSizeErr := utils.HumanReadableToBytes(config.StorageLimit)
if parseHumanDataSizeErr != nil {
return fmt.Errorf("Could not parse --%s value %s", StorageLimitLabel, config.StorageLimit)
}
return nil
}
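
TapConfig.Validate compiles the pod regex before it is used elsewhere. A stand-alone sketch of that check; the error wording copies the message above, everything else is illustrative.

```go
package main

import (
	"fmt"
	"regexp"
)

func validatePodRegex(expr string) error {
	// Mirrors the regex validation in TapConfig.Validate.
	if _, err := regexp.Compile(expr); err != nil {
		return fmt.Errorf("%s is not a valid regex %s", expr, err)
	}
	return nil
}

func main() {
	fmt.Println(validatePodRegex(".*"))           // <nil>
	fmt.Println(validatePodRegex("(front-end.*")) // unbalanced parenthesis, returns an error
}
```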


@@ -1,53 +0,0 @@
package docker
import (
"fmt"
"strings"
)
const (
hub = "hub"
worker = "worker"
front = "front"
)
var (
registry = "docker.io/kubeshark/"
tag = "latest"
)
func GetRegistry() string {
return registry
}
func SetRegistry(value string) {
if strings.HasPrefix(value, "docker.io/kubeshark") {
registry = "docker.io/kubeshark/"
} else {
registry = value
}
}
func GetTag() string {
return tag
}
func SetTag(value string) {
tag = value
}
func getImage(image string) string {
return fmt.Sprintf("%s%s:%s", registry, image, tag)
}
func GetHubImage() string {
return getImage(hub)
}
func GetWorkerImage() string {
return getImage(worker)
}
func GetFrontImage() string {
return getImage(front)
}


@@ -22,9 +22,9 @@ func FormatError(err error) error {
"in the config file or setting the targeted namespace with --%s %s=<NAMEPSACE>",
err,
misc.Software,
configStructs.SelfNamespaceLabel,
configStructs.ReleaseNamespaceLabel,
config.SetCommandName,
configStructs.SelfNamespaceLabel)
configStructs.ReleaseNamespaceLabel)
} else if syntaxError, isSyntaxError := asRegexSyntaxError(err); isSyntaxError {
errorNew = fmt.Errorf("regex %s is invalid: %w", syntaxError.Expr, err)
} else {

188
go.mod

@@ -1,117 +1,165 @@
module github.com/kubeshark/kubeshark
go 1.17
go 1.20
require (
github.com/creasty/defaults v1.5.2
github.com/docker/docker v20.10.22+incompatible
github.com/docker/go-connections v0.4.0
github.com/docker/go-units v0.4.0
github.com/fsnotify/fsnotify v1.5.1
github.com/gin-gonic/gin v1.7.7
github.com/fsnotify/fsnotify v1.6.0
github.com/gin-gonic/gin v1.9.1
github.com/goccy/go-yaml v1.11.2
github.com/google/go-github/v37 v37.0.0
github.com/gorilla/websocket v1.4.2
github.com/kubeshark/base v0.6.3
github.com/pkg/errors v0.9.1
github.com/robertkrimen/otto v0.2.1
github.com/rs/zerolog v1.28.0
github.com/spf13/cobra v1.3.0
github.com/spf13/cobra v1.7.0
github.com/spf13/pflag v1.0.5
gopkg.in/yaml.v3 v3.0.1
k8s.io/api v0.23.3
k8s.io/apimachinery v0.23.3
k8s.io/client-go v0.23.3
k8s.io/kubectl v0.23.3
helm.sh/helm/v3 v3.12.0
k8s.io/api v0.28.3
k8s.io/apimachinery v0.28.3
k8s.io/client-go v0.28.3
k8s.io/kubectl v0.28.3
)
require (
cloud.google.com/go/compute v1.2.0 // indirect
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1 // indirect
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
github.com/Azure/go-autorest/autorest v0.11.24 // indirect
github.com/Azure/go-autorest/autorest/adal v0.9.18 // indirect
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
github.com/Azure/go-autorest/logger v0.2.1 // indirect
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
github.com/BurntSushi/toml v1.2.1 // indirect
github.com/MakeNowJust/heredoc v1.0.0 // indirect
github.com/Microsoft/go-winio v0.6.0 // indirect
github.com/PuerkitoBio/purell v1.1.1 // indirect
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5 // indirect
github.com/Masterminds/goutils v1.1.1 // indirect
github.com/Masterminds/semver/v3 v3.2.0 // indirect
github.com/Masterminds/sprig/v3 v3.2.3 // indirect
github.com/Masterminds/squirrel v1.5.3 // indirect
github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bytedance/sonic v1.9.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/chai2010/gettext-go v1.0.2 // indirect
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect
github.com/containerd/containerd v1.7.0 // indirect
github.com/cyphar/filepath-securejoin v0.2.3 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/docker/distribution v2.8.0+incompatible // indirect
github.com/docker/cli v20.10.21+incompatible // indirect
github.com/docker/distribution v2.8.2+incompatible // indirect
github.com/docker/docker v20.10.24+incompatible // indirect
github.com/docker/docker-credential-helpers v0.7.0 // indirect
github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-metrics v0.0.1 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/emicklei/go-restful/v3 v3.10.1 // indirect
github.com/evanphx/json-patch v5.6.0+incompatible // indirect
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
github.com/fvbommel/sortorder v1.0.2 // indirect
github.com/fatih/color v1.13.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.2 // indirect
github.com/gin-contrib/sse v0.1.0 // indirect
github.com/go-errors/errors v1.4.2 // indirect
github.com/go-logr/logr v1.2.3 // indirect
github.com/go-openapi/jsonpointer v0.19.5 // indirect
github.com/go-openapi/jsonreference v0.19.6 // indirect
github.com/go-openapi/swag v0.21.1 // indirect
github.com/go-playground/locales v0.13.0 // indirect
github.com/go-playground/universal-translator v0.17.0 // indirect
github.com/go-playground/validator/v10 v10.4.1 // indirect
github.com/go-gorp/gorp/v3 v3.0.5 // indirect
github.com/go-logr/logr v1.2.4 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-openapi/jsonpointer v0.19.6 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.22.3 // indirect
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/go-playground/validator/v10 v10.14.0 // indirect
github.com/gobwas/glob v0.2.3 // indirect
github.com/goccy/go-json v0.10.2 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v4 v4.2.0 // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/google/btree v1.0.1 // indirect
github.com/google/go-cmp v0.5.7 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/go-querystring v1.1.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/googleapis/gnostic v0.5.5 // indirect
github.com/gorilla/mux v1.8.0 // indirect
github.com/gosuri/uitable v0.0.4 // indirect
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
github.com/imdario/mergo v0.3.12 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/huandu/xstrings v1.4.0 // indirect
github.com/imdario/mergo v0.3.13 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jmoiron/sqlx v1.3.5 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/leodido/go-urn v1.2.0 // indirect
github.com/klauspost/compress v1.16.0 // indirect
github.com/klauspost/cpuid/v2 v2.2.4 // indirect
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
github.com/leodido/go-urn v1.2.4 // indirect
github.com/lib/pq v1.10.7 // indirect
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.16 // indirect
github.com/mattn/go-isatty v0.0.19 // indirect
github.com/mattn/go-runewidth v0.0.9 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/go-wordwrap v1.0.1 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/moby/locker v1.0.1 // indirect
github.com/moby/spdystream v0.2.0 // indirect
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
github.com/moby/term v0.0.0-20221205130635-1aeaba878587 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.0.2 // indirect
github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b // indirect
github.com/pelletier/go-toml/v2 v2.0.8 // indirect
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/russross/blackfriday v1.6.0 // indirect
github.com/sirupsen/logrus v1.7.0 // indirect
github.com/stretchr/testify v1.8.1 // indirect
github.com/ugorji/go/codec v1.1.7 // indirect
github.com/xlab/treeprint v1.1.0 // indirect
go.starlark.net v0.0.0-20220203230714-bb14e151c28f // indirect
golang.org/x/crypto v0.1.0 // indirect
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
golang.org/x/net v0.7.0 // indirect
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect
golang.org/x/sys v0.5.0 // indirect
golang.org/x/term v0.5.0 // indirect
golang.org/x/text v0.7.0 // indirect
golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11 // indirect
golang.org/x/tools v0.1.12 // indirect
github.com/prometheus/client_golang v1.16.0 // indirect
github.com/prometheus/client_model v0.4.0 // indirect
github.com/prometheus/common v0.44.0 // indirect
github.com/prometheus/procfs v0.10.1 // indirect
github.com/rubenv/sql-migrate v1.3.1 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/shopspring/decimal v1.3.1 // indirect
github.com/sirupsen/logrus v1.9.0 // indirect
github.com/spf13/cast v1.5.0 // indirect
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
github.com/ugorji/go/codec v1.2.11 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
github.com/xlab/treeprint v1.2.0 // indirect
go.opentelemetry.io/otel v1.14.0 // indirect
go.opentelemetry.io/otel/trace v1.14.0 // indirect
go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect
golang.org/x/arch v0.3.0 // indirect
golang.org/x/crypto v0.14.0 // indirect
golang.org/x/mod v0.10.0 // indirect
golang.org/x/net v0.17.0 // indirect
golang.org/x/oauth2 v0.8.0 // indirect
golang.org/x/sync v0.2.0 // indirect
golang.org/x/sys v0.13.0 // indirect
golang.org/x/term v0.13.0 // indirect
golang.org/x/text v0.13.0 // indirect
golang.org/x/time v0.3.0 // indirect
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/protobuf v1.27.1 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect
google.golang.org/grpc v1.54.0 // indirect
google.golang.org/protobuf v1.30.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/sourcemap.v1 v1.0.5 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
k8s.io/cli-runtime v0.23.3 // indirect
k8s.io/component-base v0.23.3 // indirect
k8s.io/klog/v2 v2.40.1 // indirect
k8s.io/kube-openapi v0.0.0-20220124234850-424119656bbf // indirect
k8s.io/utils v0.0.0-20220127004650-9b3446523e65 // indirect
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect
sigs.k8s.io/kustomize/api v0.11.1 // indirect
sigs.k8s.io/kustomize/kyaml v0.13.3 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/apiextensions-apiserver v0.27.1 // indirect
k8s.io/apiserver v0.27.1 // indirect
k8s.io/cli-runtime v0.28.3 // indirect
k8s.io/component-base v0.28.3 // indirect
k8s.io/klog/v2 v2.100.1 // indirect
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect
k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 // indirect
oras.land/oras-go v1.2.2 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 // indirect
sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
sigs.k8s.io/yaml v1.3.0 // indirect
)

837
go.sum

File diff suppressed because it is too large

25
helm-chart/Chart.yaml Normal file

@@ -0,0 +1,25 @@
apiVersion: v2
name: kubeshark
version: "52.2.0"
description: The API Traffic Analyzer for Kubernetes
home: https://kubeshark.co
keywords:
- kubeshark
- packet capture
- traffic capture
- traffic analyzer
- network sniffer
- observability
- devops
- microservice
- forensics
- api
kubeVersion: '>= 1.16.0-0'
maintainers:
- email: info@kubeshark.co
name: Kubeshark
url: https://kubeshark.co
sources:
- https://github.com/kubeshark/kubeshark/tree/master/helm-chart
type: application
icon: https://raw.githubusercontent.com/kubeshark/assets/master/logo/vector/logo.svg

191
helm-chart/LICENSE Normal file

@@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
https://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2022 Kubeshark
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

152
helm-chart/PF_RING.md Normal file

@@ -0,0 +1,152 @@
# PF_RING
<!-- TOC -->
- [PF\_RING](#pf_ring)
- [Overview](#overview)
- [Loading PF\_RING module on Kubernetes nodes](#loading-pf_ring-module-on-kubernetes-nodes)
- [Pre-built kernel module exists and external egress allowed](#pre-built-kernel-module-exists-and-external-egress-allowed)
- [Pre-built kernel module doesn't exist or external egress isn't allowed](#pre-built-kernel-module-doesnt-exist-or-external-egress-isnt-allowed)
- [Appendix A: PF\_RING kernel module compilation](#appendix-a-pf_ring-kernel-module-compilation)
- [Automated compilation](#automated-compilation)
- [Manual compilation](#manual-compilation)
<!-- /TOC -->
## Overview
PF_RING™ is an advanced Linux kernel module and user-space framework designed for high-speed packet processing. It offers a uniform API for packet processing applications, enabling efficient handling of large volumes of network data.
For comprehensive information on PF_RING™, please visit the [User's Guide](https://www.ntop.org/guides/pf_ring/) and the detailed [API Documentation](http://www.ntop.org/guides/pf_ring_api/files.html).
## Loading PF_RING module on Kubernetes nodes
PF_RING kernel module loading is performed by an init container of the `worker` component pod.
The target container `tap.kernelModule.image` must contain a `pf_ring.ko` file under the path `/opt/lib/modules/<kernel version>/pf_ring.ko`.
Kubeshark provides ready-to-use containers with kernel modules for the most popular kernel versions running in the various managed clouds.
Prior to deploying `kubeshark` with PF_RING enabled, verify that a PF_RING kernel module is already built for your kernel version.
Kubeshark provides an additional CLI tool for this purpose: [pf-ring-compiler](https://github.com/kubeshark/pf-ring-compiler).
Compatibility verification can be done by running:
```bash
pfring-compiler compatibility
```
This command checks for the availability of kernel modules for the kernel versions running across all nodes in the Kubernetes cluster.
Example output for a compatible cluster:
```bash
Node Kernel Version Supported
ip-192-168-77-230.us-west-2.compute.internal 5.10.199-190.747.amzn2.x86_64 true
ip-192-168-34-216.us-west-2.compute.internal 5.10.199-190.747.amzn2.x86_64 true
Cluster is compatible
```
Alternatively, you can inspect the list of available kernel module versions directly:
```bash
curl https://api.kubeshark.co/kernel-modules/meta/versions.json
```
Based on cluster compatibility and external connectivity, the user has two options:
1. Use the Kubeshark-provided container `kubeshark/pf-ring-module`.
2. Build a custom container with the required kernel module version.
### Pre-built kernel module exists and external egress allowed
In this case, no additional configuration is required.
Kubeshark loads the PF_RING kernel module from the default `kubeshark/pf-ring-module:all` container.
### Pre-built kernel module doesn't exist or external egress isn't allowed
In this case, building a custom Docker image is required.
1. Compile the PF_RING kernel module for the target kernel version.
Skip this step if you already have `pf_ring.ko` for the target kernel version.
Otherwise, follow [Appendix A](#appendix-a-pf_ring-kernel-module-compilation) for details.
2. Build the container.
Kubeshark's own build process can be reused (see [pf-ring-compiler](https://github.com/kubeshark/pf-ring-compiler/tree/main/modules) for details).
3. Configure the Helm values:
```yaml
tap:
kernelModule:
image: <container from stage 2>
```
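Equivalently, the image can be passed at install time via `--set`; the image reference below is only illustrative:
```bash
helm install kubeshark kubeshark/kubeshark \
  --set tap.kernelModule.image=registry.example.com/pf-ring-module:my-kernel
```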
## Appendix A: PF_RING kernel module compilation
PF_RING kernel module compilation can be completed automatically or manually.
### Automated compilation
If your Kubernetes workers run a supported Linux distribution, the `pfring-compiler` CLI can be used to build the PF_RING module:
```bash
pfring-compiler compile --target <distro>
```
This command requires:
- `kubectl` installed and configured with the proper context
- an egress connection to the Internet
This command:
1. Runs a Kubernetes job with the build container.
2. Waits for the job to complete.
3. Downloads the `pf-ring-<kernel version>.ko` file into the current folder.
4. Cleans up the created job.
Currently supported distros:
- Ubuntu
- RHEL 9
- Amazon Linux 2
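For example, to build the module for Ubuntu-based nodes (assuming `ubuntu` is the accepted `--target` value):
```bash
pfring-compiler compile --target ubuntu
```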
### Manual compilation
The following process is based on the Ubuntu 22.04 distribution.
1. Get terminal access to a node running the target kernel version.
This can be done either via SSH directly to the node or with a debug container running on the target node:
```bash
kubectl debug node/<target node> -it --attach=true --image=ubuntu:22.04
```
2. Install build tools and kernel headers
```bash
apt update
apt install -y gcc build-essential make git wget tar gzip
apt install -y linux-headers-$(uname -r)
```
3. Download PF_RING source code
```bash
wget https://github.com/ntop/PF_RING/archive/refs/tags/8.4.0.tar.gz
tar -xf 8.4.0.tar.gz
cd PF_RING-8.4.0/kernel
```
4. Compile the kernel module
```bash
make KERNEL_SRC=/usr/src/linux-headers-$(uname -r)
```
5. Copy `pf_ring.ko` to the local file system.
Use `scp` or `kubectl cp` depending on the type of access (SSH or debug pod), as in the sketch below.
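For example, when using a debug pod, something like the following copies the module out of the pod; the pod name and source path are illustrative:
```bash
# Run from a second terminal while the debug session is still open.
# Adjust the pod name (see `kubectl get pods`) and the source path to
# wherever you extracted the PF_RING sources inside the debug container.
kubectl cp default/node-debugger-my-node-abcde:/PF_RING-8.4.0/kernel/pf_ring.ko ./pf_ring.ko
```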

246
helm-chart/README.md Normal file

@@ -0,0 +1,246 @@
# Helm Chart of Kubeshark
## Official
Add the Helm repo for Kubeshark:
```shell
helm repo add kubeshark https://helm.kubeshark.co
```
then install Kubeshark:
```shell
helm install kubeshark kubeshark/kubeshark
```
## Local
Clone the repo:
```shell
git clone git@github.com:kubeshark/kubeshark.git --depth 1
cd kubeshark/helm-chart
```
Render the templates:
```shell
helm template .
```
Install Kubeshark:
```shell
helm install kubeshark .
```
Uninstall Kubeshark:
```shell
helm uninstall kubeshark
```
## Port-forward
Do the port forwarding:
```shell
kubectl port-forward service/kubeshark-front 8899:80
```
Visit [localhost:8899](http://localhost:8899)
## Increase the Worker's Storage Limit
For example, change from the default 500Mi to 5Gi:
```shell
--set tap.storageLimit=5Gi
```
## Add a License
When it's necessary, you can use:
```shell
--set license=YOUR_LICENSE_GOES_HERE
```
Get your license from Kubeshark's [Admin Console](https://console.kubeshark.co/).
## Installing with Ingress (EKS) enabled
```shell
helm install kubeshark kubeshark/kubeshark -f values.yaml
```
Set this in `values.yaml`:
```yaml
tap:
ingress:
enabled: true
className: "alb"
host: ks.example.com
tls: []
annotations:
alb.ingress.kubernetes.io/certificate-arn: arn:aws:acm:us-east-1:7..8:certificate/b...65c
alb.ingress.kubernetes.io/target-type: ip
alb.ingress.kubernetes.io/scheme: internet-facing
```
## Disabling IPV6
Not all clusters have IPv6 enabled; in such cases it can be disabled as follows:
```shell
helm install kubeshark kubeshark/kubeshark \
--set tap.ipv6=false
```
## Metrics
Please refer to the [metrics](./metrics.md) documentation for details.
## Configuration
| Parameter | Description | Default |
|-------------------------------------------|-----------------------------------------------|---------------------------------------------------------|
| `tap.docker.registry` | Docker registry to pull from | `docker.io/kubeshark` |
| `tap.docker.tag` | Tag of the Docker images | `latest` |
| `tap.docker.imagePullPolicy` | Kubernetes image pull policy | `Always` |
| `tap.docker.imagePullSecrets` | Kubernetes secrets to pull the images | `[]` |
| `tap.proxy.worker.srvPort` | Worker server port | `30001` |
| `tap.proxy.hub.port` | Hub service port | `8898` |
| `tap.proxy.hub.srvPort` | Hub server port | `8898` |
| `tap.proxy.front.port` | Front-facing service port | `8899` |
| `tap.proxy.host` | Proxy server's IP | `127.0.0.1` |
| `tap.namespaces` | List of namespaces for the traffic capture | `[]` |
| `tap.release.repo` | URL of the Helm chart repository | `https://helm.kubeshark.co` |
| `tap.release.name` | Helm release name | `kubeshark` |
| `tap.release.namespace` | Helm release namespace | `default` |
| `tap.persistentStorage` | Use `persistentVolumeClaim` instead of `emptyDir` | `false` |
| `tap.persistentStorageStatic` | Use static persistent volume provisioning (explicitly defined `PersistentVolume` ) | `false` |
| `tap.efsFileSytemIdAndPath` | [EFS file system ID and, optionally, subpath and/or access point](https://github.com/kubernetes-sigs/aws-efs-csi-driver/blob/master/examples/kubernetes/access_points/README.md) `<FileSystemId>:<Path>:<AccessPointId>` | "" |
| `tap.storageLimit` | Limit of either the `emptyDir` or `persistentVolumeClaim` | `500Mi` |
| `tap.storageClass` | Storage class of the `PersistentVolumeClaim` | `standard` |
| `tap.dryRun` | Preview of all pods matching the regex, without tapping them | `false` |
| `tap.pcap` | | `""` |
| `tap.resources.worker.limits.cpu` | CPU limit for worker | `750m` |
| `tap.resources.worker.limits.memory` | Memory limit for worker | `1Gi` |
| `tap.resources.worker.requests.cpu` | CPU request for worker | `50m` |
| `tap.resources.worker.requests.memory` | Memory request for worker | `50Mi` |
| `tap.resources.hub.limits.cpu` | CPU limit for hub | `750m` |
| `tap.resources.hub.limits.memory` | Memory limit for hub | `1Gi` |
| `tap.resources.hub.requests.cpu` | CPU request for hub | `50m` |
| `tap.resources.hub.requests.memory` | Memory request for hub | `50Mi` |
| `tap.serviceMesh` | Capture traffic from service meshes like Istio, Linkerd, Consul, etc. | `true` |
| `tap.tls` | Capture the encrypted/TLS traffic from cryptography libraries like OpenSSL | `true` |
| `tap.ignoreTainted` | Whether to ignore tainted nodes | `false` |
| `tap.labels` | Kubernetes labels to apply to all Kubeshark resources | `{}` |
| `tap.annotations` | Kubernetes annotations to apply to all Kubeshark resources | `{}` |
| `tap.nodeSelectorTerms` | Node selector terms | `[{"matchExpressions":[{"key":"kubernetes.io/os","operator":"In","values":["linux"]}]}]` |
| `tap.auth.enabled` | Enable authentication | `false` |
| `tap.auth.type` | Authentication type (1 option available: `saml`) | `saml` |
| `tap.auth.approvedEmails` | List of approved email addresses for authentication | `[]` |
| `tap.auth.approvedDomains` | List of approved email domains for authentication | `[]` |
| `tap.auth.saml.idpMetadataUrl` | SAML IDP metadata URL <br/>(effective, if `tap.auth.type = saml`) | `` |
| `tap.auth.saml.x509crt` | A self-signed X.509 `.cert` contents <br/>(effective, if `tap.auth.type = saml`) | `` |
| `tap.auth.saml.x509key` | A self-signed X.509 `.key` contents <br/>(effective, if `tap.auth.type = saml`) | `` |
| `tap.auth.saml.roleAttribute` | A SAML attribute name corresponding to user's authorization role <br/>(effective, if `tap.auth.type = saml`) | `role` |
| `tap.auth.saml.roles` | A list of SAML authorization roles and their permissions <br/>(effective, if `tap.auth.type = saml`) | `{"admin":{"canDownloadPCAP":true,"canReplayTraffic":true,"canUpdateTargetedPods":true,"canUseScripting":true,"filter":"","showAdminConsoleLink":true}}` |
| `tap.ingress.enabled` | Enable `Ingress` | `false` |
| `tap.ingress.className` | Ingress class name | `""` |
| `tap.ingress.host` | Host of the `Ingress` | `ks.svc.cluster.local` |
| `tap.ingress.tls` | `Ingress` TLS configuration | `[]` |
| `tap.ingress.annotations` | `Ingress` annotations | `{}` |
| `tap.ipv6` | Enable IPv6 support for the front-end | `true` |
| `tap.debug` | Enable debug mode | `false` |
| `tap.kernelModule.enabled` | Use the PF_RING kernel module ([details](PF_RING.md)) | `true` |
| `tap.kernelModule.image` | Container image containing the PF_RING kernel module for the supported kernel versions ([details](PF_RING.md)) | `kubeshark/pf-ring-module:all` |
| `tap.kernelModule.unloadOnDestroy` | Create an additional container which watches for pod termination and unloads the PF_RING kernel module | `false` |
| `tap.telemetry.enabled` | Enable anonymous usage statistics collection | `true` |
| `tap.defaultFilter` | Sets the default dashboard KFL filter (e.g. `http`) | `""` |
| `tap.globalFilter` | Prepends to any KFL filter and can be used to limit what is visible in the dashboard. For example, `redact("request.headers.Authorization")` will redact the appropriate field. | `""` |
| `logs.file` | Logs dump path | `""` |
| `kube.configPath` | Path to the `kubeconfig` file (`$HOME/.kube/config`) | `""` |
| `kube.context` | Kubernetes context to use for the deployment | `""` |
| `dumpLogs` | Enable dumping of logs | `false` |
| `headless` | Enable running in headless mode | `false` |
| `license` | License key for the Pro/Enterprise edition | `""` |
| `scripting.env` | Environment variables for the scripting | `{}` |
| `scripting.source` | Source directory of the scripts | `""` |
| `scripting.watchScripts` | Enable watch mode for the scripts in source directory | `true` |
| `tap.metrics.port` | Pod port used to expose Prometheus metrics | `49100` |
| `timezone` | IANA time zone applied to time shown in the front-end | `""` (local time zone applies) |
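For example, a few of these parameters can be overridden at install time; the values below are purely illustrative:
```shell
helm install kubeshark kubeshark/kubeshark \
  --set tap.storageLimit=1Gi \
  --set tap.telemetry.enabled=false \
  --set timezone=America/New_York
```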
## Installing with SAML enabled
### Prerequisites:
##### 1. Generate X.509 certificate & key (TL;DR: https://ubuntu.com/server/docs/security-certificates)
**Example:**
```
openssl genrsa -out mykey.key 2048
openssl req -new -key mykey.key -out mycsr.csr
openssl x509 -signkey mykey.key -in mycsr.csr -req -days 365 -out mycert.crt
```
**What you get:**
- `mycert.crt` - use it for `tap.auth.saml.x509crt`
- `mykey.key` - use it for `tap.auth.saml.x509key`
##### 2. Prepare your SAML IDP
Set up your SAML IDP of choice (Google, Auth0, a custom IDP, etc.).
During setup, the IDP will typically ask you to enter:
- Metadata URL
- ACS URL (Assertion Consumer Service URL, aka Callback URL)
- SLO URL (Single Logout URL)
Correspondingly, you will enter these (assuming the default Kubeshark setup):
- [http://localhost:8899/saml/metadata](http://localhost:8899/saml/metadata)
- [http://localhost:8899/saml/acs](http://localhost:8899/saml/acs)
- [http://localhost:8899/saml/slo](http://localhost:8899/saml/slo)
Otherwise, if you have `tap.ingress.enabled: true`, change the protocol and domain accordingly (example domain shown):
- [https://kubeshark.example.com/saml/metadata](https://kubeshark.example.com/saml/metadata)
- [https://kubeshark.example.com/saml/acs](https://kubeshark.example.com/saml/acs)
- [https://kubeshark.example.com/saml/slo](https://kubeshark.example.com/saml/slo)
```shell
helm install kubeshark kubeshark/kubeshark -f values.yaml
```
Set this in `values.yaml`:
```yaml
tap:
auth:
enabled: true
type: saml
saml:
idpMetadataUrl: "https://tiptophelmet.us.auth0.com/samlp/metadata/MpWiDCMMB5ShU1HRnhdb1sHM6VWqdnDG"
x509crt: |
-----BEGIN CERTIFICATE-----
MIIDlTCCAn0CFFRUzMh+dZvp+FvWd4gRaiBVN8EvMA0GCSqGSIb3DQEBCwUAMIGG
MSQwIgYJKoZIhvcNAQkBFhV3ZWJtYXN0ZXJAZXhhbXBsZS5jb20wHhcNMjMxMjI4
........<redacted: please, generate your own X.509 cert>........
ZMzM7YscqZwoVhTOhrD4/5nIfOD/hTWG/MBe2Um1V1IYF8aVEllotTKTgsF6ZblA
miCOgl6lIlZy
-----END CERTIFICATE-----
x509key: |
-----BEGIN PRIVATE KEY-----
MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDlgDFKsRHj+mok
euOF0IpwToOEpQGtafB75ytv3psD/tQAzEIug+rkDriVvsfcvafj0qcaTeYvnCoz
........<redacted: please, generate your own X.509 key>.........
sUpBCu0E3nRJM/QB2ui5KhNR7uvPSL+kSsaEq19/mXqsL+mRi9aqy2wMEvUSU/kt
UaV5sbRtTzYLxpOSQyi8CEFA+A==
-----END PRIVATE KEY-----
```
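If Kubeshark is already installed, the same values can be applied to the existing release with a standard Helm upgrade (the release name `kubeshark` is assumed):
```shell
helm upgrade kubeshark kubeshark/kubeshark -f values.yaml
```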

55
helm-chart/metrics.md Normal file

@@ -0,0 +1,55 @@
# Metrics
Kubeshark exposes metrics from its `worker` components.
They can be useful for monitoring and debugging purposes.
## Configuration
By default, Kubeshark exposes metrics on port `49100` via the `kubeshark-worker-metrics` service.
If you use the [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack) community Helm chart, an additional scrape configuration for the Kubeshark worker metrics endpoint can be added with the following values:
```yaml
prometheus:
enabled: true
prometheusSpec:
additionalScrapeConfigs: |
- job_name: 'kubeshark-worker-metrics'
kubernetes_sd_configs:
- role: endpoints
relabel_configs:
- source_labels: [__meta_kubernetes_pod_name]
target_label: pod
- source_labels: [__meta_kubernetes_pod_node_name]
target_label: node
- source_labels: [__meta_kubernetes_endpoint_port_name]
action: keep
regex: ^metrics$
- source_labels: [__address__, __meta_kubernetes_endpoint_port_number]
action: replace
regex: ([^:]+)(?::\d+)?
replacement: $1:49100
target_label: __address__
- action: labelmap
regex: __meta_kubernetes_service_label_(.+)
```
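To spot-check that the worker metrics endpoint is serving data, you can port-forward the metrics service and query it directly (this assumes the default port `49100` and the standard Prometheus `/metrics` path; replace `<namespace>` with the namespace Kubeshark is deployed in):
```shell
kubectl port-forward -n <namespace> service/kubeshark-worker-metrics 49100:49100 &
curl -s http://localhost:49100/metrics | grep '^kubeshark_'
```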
## Available metrics
| Name | Type | Description |
| --- | --- | --- |
| kubeshark_received_packets_total | Counter | Total number of packets received |
| kubeshark_dropped_packets_total | Counter | Total number of packets dropped |
| kubeshark_processed_bytes_total | Counter | Total number of bytes processed |
| kubeshark_tcp_packets_total | Counter | Total number of TCP packets |
| kubeshark_dns_packets_total | Counter | Total number of DNS packets |
| kubeshark_icmp_packets_total | Counter | Total number of ICMP packets |
| kubeshark_reassembled_tcp_payloads_total | Counter | Total number of reassembled TCP payloads |
| kubeshark_matched_pairs_total | Counter | Total number of matched pairs |
| kubeshark_dropped_tcp_streams_total | Counter | Total number of dropped TCP streams |
| kubeshark_live_tcp_streams | Gauge | Number of live TCP streams |
## Ready-to-use Dashboard
You can import a ready-to-use dashboard from [Grafana's Dashboards Portal](https://grafana.com/grafana/dashboards/20359-kubeshark-dashboard-v1-0-003/).


@@ -0,0 +1,12 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
{{- include "kubeshark.labels" . | nindent 4 }}
annotations:
{{- if .Values.tap.annotations }}
{{- toYaml .Values.tap.annotations | nindent 4 }}
{{- end }}
name: {{ include "kubeshark.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}


@@ -0,0 +1,53 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
{{- include "kubeshark.labels" . | nindent 4 }}
annotations:
{{- if .Values.tap.annotations }}
{{- toYaml .Values.tap.annotations | nindent 4 }}
{{- end }}
name: kubeshark-cluster-role-{{ .Release.Namespace }}
namespace: {{ .Release.Namespace }}
rules:
- apiGroups:
- ""
- extensions
- apps
resources:
- pods
- services
- endpoints
- persistentvolumeclaims
verbs:
- list
- get
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
{{- include "kubeshark.labels" . | nindent 4 }}
annotations:
{{- if .Values.tap.annotations }}
{{- toYaml .Values.tap.annotations | nindent 4 }}
{{- end }}
name: kubeshark-self-config-role
namespace: {{ .Release.Namespace }}
rules:
- apiGroups:
- ""
- v1
resourceNames:
- kubeshark-secret
- kubeshark-config-map
resources:
- secrets
- configmaps
verbs:
- get
- watch
- update
- patch


@@ -0,0 +1,40 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
{{- include "kubeshark.labels" . | nindent 4 }}
annotations:
{{- if .Values.tap.annotations }}
{{- toYaml .Values.tap.annotations | nindent 4 }}
{{- end }}
name: kubeshark-cluster-role-binding-{{ .Release.Namespace }}
namespace: {{ .Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubeshark-cluster-role-{{ .Release.Namespace }}
subjects:
- kind: ServiceAccount
name: {{ include "kubeshark.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
{{- include "kubeshark.labels" . | nindent 4 }}
annotations:
{{- if .Values.tap.annotations }}
{{- toYaml .Values.tap.annotations | nindent 4 }}
{{- end }}
name: kubeshark-self-config-role-binding
namespace: {{ .Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubeshark-self-config-role
subjects:
- kind: ServiceAccount
name: {{ include "kubeshark.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}


@@ -0,0 +1,88 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app.kubeshark.co/app: hub
{{- include "kubeshark.labels" . | nindent 4 }}
annotations:
{{- if .Values.tap.annotations }}
{{- toYaml .Values.tap.annotations | nindent 4 }}
{{- end }}
name: {{ include "kubeshark.name" . }}-hub
namespace: {{ .Release.Namespace }}
spec:
replicas: 1 # Set the desired number of replicas
selector:
matchLabels:
app.kubeshark.co/app: hub
{{- include "kubeshark.labels" . | nindent 6 }}
template:
metadata:
labels:
app.kubeshark.co/app: hub
{{- include "kubeshark.labels" . | nindent 8 }}
spec:
dnsPolicy: ClusterFirstWithHostNet
serviceAccountName: {{ include "kubeshark.serviceAccountName" . }}
containers:
- name: kubeshark-hub
command:
- ./hub
- -port
- "8080"
{{- if .Values.tap.debug }}
- -debug
{{- end }}
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: KUBESHARK_CLOUD_API_URL
value: 'https://api.kubeshark.co'
image: '{{ .Values.tap.docker.registry }}/hub:{{ not (eq .Values.tap.docker.tag "") | ternary .Values.tap.docker.tag (printf "v%s" .Chart.Version) }}'
imagePullPolicy: {{ .Values.tap.docker.imagePullPolicy }}
readinessProbe:
periodSeconds: 1
failureThreshold: 3
successThreshold: 1
initialDelaySeconds: 3
tcpSocket:
port: 8080
livenessProbe:
periodSeconds: 1
failureThreshold: 3
successThreshold: 1
initialDelaySeconds: 3
tcpSocket:
port: 8080
resources:
limits:
cpu: {{ .Values.tap.resources.hub.limits.cpu }}
memory: {{ .Values.tap.resources.hub.limits.memory }}
requests:
cpu: {{ .Values.tap.resources.hub.requests.cpu }}
memory: {{ .Values.tap.resources.hub.requests.memory }}
volumeMounts:
- name: saml-x509-volume
mountPath: "/etc/saml/x509"
readOnly: true
volumes:
- name: saml-x509-volume
projected:
sources:
- secret:
name: kubeshark-saml-x509-crt-secret
items:
- key: AUTH_SAML_X509_CRT
path: kubeshark.crt
- secret:
name: kubeshark-saml-x509-key-secret
items:
- key: AUTH_SAML_X509_KEY
path: kubeshark.key


@@ -0,0 +1,21 @@
---
apiVersion: v1
kind: Service
metadata:
labels:
app.kubeshark.co/app: hub
{{- include "kubeshark.labels" . | nindent 4 }}
annotations:
{{- if .Values.tap.annotations }}
{{- toYaml .Values.tap.annotations | nindent 4 }}
{{- end }}
name: kubeshark-hub
namespace: {{ .Release.Namespace }}
spec:
ports:
- name: kubeshark-hub
port: 80
targetPort: 8080
selector:
app.kubeshark.co/app: hub
type: ClusterIP


@@ -0,0 +1,80 @@
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app.kubeshark.co/app: front
{{- include "kubeshark.labels" . | nindent 4 }}
annotations:
{{- if .Values.tap.annotations }}
{{- toYaml .Values.tap.annotations | nindent 4 }}
{{- end }}
name: {{ include "kubeshark.name" . }}-front
namespace: {{ .Release.Namespace }}
spec:
replicas: 1 # Set the desired number of replicas
selector:
matchLabels:
app.kubeshark.co/app: front
{{- include "kubeshark.labels" . | nindent 6 }}
template:
metadata:
labels:
app.kubeshark.co/app: front
{{- include "kubeshark.labels" . | nindent 8 }}
spec:
containers:
- env:
- name: REACT_APP_DEFAULT_FILTER
value: '{{ not (eq .Values.tap.defaultFilter "") | ternary .Values.tap.defaultFilter " " }}'
- name: REACT_APP_AUTH_ENABLED
value: '{{ .Values.tap.auth.enabled }}'
- name: REACT_APP_AUTH_TYPE
value: '{{ not (eq .Values.tap.auth.type "") | ternary .Values.tap.auth.type " " }}'
- name: REACT_APP_AUTH_SAML_IDP_METADATA_URL
value: '{{ not (eq .Values.tap.auth.saml.idpMetadataUrl "") | ternary .Values.tap.auth.saml.idpMetadataUrl " " }}'
- name: REACT_APP_TIMEZONE
value: '{{ not (eq .Values.timezone "") | ternary .Values.timezone " " }}'
- name: REACT_APP_REPLAY_DISABLED
value: '{{ .Values.tap.replayDisabled }}'
- name: REACT_APP_SCRIPTING_DISABLED
value: '{{ .Values.tap.scriptingDisabled }}'
- name: REACT_APP_TARGETED_PODS_UPDATE_DISABLED
value: '{{ .Values.tap.targetedPodsUpdateDisabled }}'
- name: REACT_APP_RECORDING_DISABLED
value: '{{ .Values.tap.recordingDisabled }}'
image: '{{ .Values.tap.docker.registry }}/front:{{ not (eq .Values.tap.docker.tag "") | ternary .Values.tap.docker.tag (printf "v%s" .Chart.Version) }}'
imagePullPolicy: {{ .Values.tap.docker.imagePullPolicy }}
name: kubeshark-front
livenessProbe:
periodSeconds: 1
failureThreshold: 3
successThreshold: 1
initialDelaySeconds: 3
tcpSocket:
port: 8080
readinessProbe:
periodSeconds: 1
failureThreshold: 3
successThreshold: 1
initialDelaySeconds: 3
tcpSocket:
port: 8080
timeoutSeconds: 1
resources:
limits:
cpu: 750m
memory: 1Gi
requests:
cpu: 50m
memory: 50Mi
volumeMounts:
- name: nginx-config
mountPath: /etc/nginx/conf.d/default.conf
subPath: default.conf
readOnly: true
volumes:
- name: nginx-config
configMap:
name: kubeshark-nginx-config-map
dnsPolicy: ClusterFirstWithHostNet
serviceAccountName: {{ include "kubeshark.serviceAccountName" . }}


@@ -0,0 +1,20 @@
---
apiVersion: v1
kind: Service
metadata:
labels:
{{- include "kubeshark.labels" . | nindent 4 }}
annotations:
{{- if .Values.tap.annotations }}
{{- toYaml .Values.tap.annotations | nindent 4 }}
{{- end }}
name: kubeshark-front
namespace: {{ .Release.Namespace }}
spec:
ports:
- name: kubeshark-front
port: 80
targetPort: 8080
selector:
app.kubeshark.co/app: front
type: ClusterIP


@@ -0,0 +1,43 @@
---
{{- if .Values.tap.persistentStorageStatic }}
apiVersion: v1
kind: PersistentVolume
metadata:
name: kubeshark-persistent-volume
namespace: {{ .Release.Namespace }}
spec:
capacity:
storage: {{ .Values.tap.storageLimit }}
volumeMode: Filesystem
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
storageClassName: {{ .Values.tap.storageClass }}
{{- if .Values.tap.efsFileSytemIdAndPath }}
csi:
driver: efs.csi.aws.com
volumeHandle: {{ .Values.tap.efsFileSytemIdAndPath }}
{{ end }}
---
{{ end }}
{{- if .Values.tap.persistentStorage }}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
labels:
{{- include "kubeshark.labels" . | nindent 4 }}
annotations:
{{- if .Values.tap.annotations }}
{{- toYaml .Values.tap.annotations | nindent 4 }}
{{- end }}
name: kubeshark-persistent-volume-claim
namespace: {{ .Release.Namespace }}
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: {{ .Values.tap.storageLimit }}
storageClassName: {{ .Values.tap.storageClass }}
status: {}
{{- end }}


@@ -0,0 +1,225 @@
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
labels:
app.kubeshark.co/app: worker
sidecar.istio.io/inject: "false"
{{- include "kubeshark.labels" . | nindent 4 }}
annotations:
{{- if .Values.tap.annotations }}
{{- toYaml .Values.tap.annotations | nindent 4 }}
{{- end }}
name: kubeshark-worker-daemon-set
namespace: {{ .Release.Namespace }}
spec:
selector:
matchLabels:
app.kubeshark.co/app: worker
{{- include "kubeshark.labels" . | nindent 6 }}
template:
metadata:
labels:
app.kubeshark.co/app: worker
{{- include "kubeshark.labels" . | nindent 8 }}
name: kubeshark-worker-daemon-set
namespace: kubeshark
spec:
{{- if .Values.tap.kernelModule.enabled }}
initContainers:
- name: load-pf-ring
image: {{ .Values.tap.kernelModule.image }}
imagePullPolicy: {{ .Values.tap.docker.imagePullPolicy }}
securityContext:
capabilities:
add:
{{- range .Values.tap.capabilities.kernelModule }}
{{ print "- " . }}
{{- end }}
drop:
- ALL
volumeMounts:
- name: lib-modules
mountPath: /lib/modules
{{- end }}
containers:
- command:
- ./worker
- -i
- any
- -port
- '{{ .Values.tap.proxy.worker.srvPort }}'
- -metrics-port
- '{{ .Values.tap.metrics.port }}'
- -unixsocket
{{- if .Values.tap.serviceMesh }}
- -servicemesh
{{- end }}
- -procfs
- /hostproc
{{- if .Values.tap.kernelModule.enabled }}
- -kernel-module
{{- end }}
{{- if .Values.tap.debug }}
- -debug
- -dumptracer
- "100000000"
{{- end }}
image: '{{ .Values.tap.docker.registry }}/worker:{{ not (eq .Values.tap.docker.tag "") | ternary .Values.tap.docker.tag (printf "v%s" .Chart.Version) }}'
imagePullPolicy: {{ .Values.tap.docker.imagePullPolicy }}
name: sniffer
ports:
- containerPort: {{ .Values.tap.metrics.port }}
protocol: TCP
name: metrics
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: TCP_STREAM_CHANNEL_TIMEOUT_MS
value: '{{ .Values.tap.tcpStreamChannelTimeoutMs }}'
- name: KUBESHARK_CLOUD_API_URL
value: 'https://api.kubeshark.co'
resources:
limits:
cpu: {{ .Values.tap.resources.sniffer.limits.cpu }}
memory: {{ .Values.tap.resources.sniffer.limits.memory }}
requests:
cpu: {{ .Values.tap.resources.sniffer.requests.cpu }}
memory: {{ .Values.tap.resources.sniffer.requests.memory }}
securityContext:
capabilities:
add:
{{- range .Values.tap.capabilities.networkCapture }}
{{ print "- " . }}
{{- end }}
{{- if .Values.tap.serviceMesh }}
{{- range .Values.tap.capabilities.serviceMeshCapture }}
{{ print "- " . }}
{{- end }}
{{- end }}
drop:
- ALL
readinessProbe:
periodSeconds: 1
failureThreshold: 3
successThreshold: 1
initialDelaySeconds: 5
tcpSocket:
port: {{ .Values.tap.proxy.worker.srvPort }}
livenessProbe:
periodSeconds: 1
failureThreshold: 3
successThreshold: 1
initialDelaySeconds: 5
tcpSocket:
port: {{ .Values.tap.proxy.worker.srvPort }}
volumeMounts:
- mountPath: /hostproc
name: proc
readOnly: true
- mountPath: /sys
name: sys
readOnly: true
- mountPath: /app/data
name: data
{{- if and (eq .Values.tap.kernelModule.enabled true) (eq .Values.tap.kernelModule.unloadOnDestroy true) }}
- name: unload-pf-ring
image: {{ .Values.tap.kernelModule.image }}
command: ["/bin/sh"]
args: ["-c", "trap 'rmmod pf_ring && sleep 3' SIGTERM; while true; do sleep 1; done"]
securityContext:
capabilities:
add:
{{- range .Values.tap.capabilities.kernelModule }}
{{ print "- " . }}
{{- end }}
drop:
- ALL
{{- end }}
{{- if .Values.tap.tls }}
- command:
- ./tracer
- -procfs
- /hostproc
{{- if .Values.tap.debug }}
- -debug
{{- end }}
image: '{{ .Values.tap.docker.registry }}/worker:{{ not (eq .Values.tap.docker.tag "") | ternary .Values.tap.docker.tag (printf "v%s" .Chart.Version) }}'
imagePullPolicy: {{ .Values.tap.docker.imagePullPolicy }}
name: tracer
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
resources:
limits:
cpu: {{ .Values.tap.resources.tracer.limits.cpu }}
memory: {{ .Values.tap.resources.tracer.limits.memory }}
requests:
cpu: {{ .Values.tap.resources.tracer.requests.cpu }}
memory: {{ .Values.tap.resources.tracer.requests.memory }}
securityContext:
capabilities:
add:
{{- range .Values.tap.capabilities.ebpfCapture }}
{{ print "- " . }}
{{- end }}
drop:
- ALL
volumeMounts:
- mountPath: /hostproc
name: proc
readOnly: true
- mountPath: /sys
name: sys
readOnly: true
- mountPath: /app/data
name: data
{{- end }}
dnsPolicy: ClusterFirstWithHostNet
hostNetwork: true
serviceAccountName: {{ include "kubeshark.serviceAccountName" . }}
terminationGracePeriodSeconds: 0
tolerations:
- effect: NoExecute
operator: Exists
{{- if not .Values.tap.ignoreTainted }}
- effect: NoSchedule
operator: Exists
{{- end }}
{{- if gt (len .Values.tap.nodeSelectorTerms) 0}}
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
{{- toYaml .Values.tap.nodeSelectorTerms | nindent 12 }}
{{- end }}
volumes:
- hostPath:
path: /proc
name: proc
- hostPath:
path: /sys
name: sys
- name: lib-modules
hostPath:
path: /lib/modules
- name: data
{{- if .Values.tap.persistentStorage }}
persistentVolumeClaim:
claimName: kubeshark-persistent-volume-claim
{{- else }}
emptyDir:
sizeLimit: {{ .Values.tap.storageLimit }}
{{- end }}


@@ -0,0 +1,39 @@
---
{{- if .Values.tap.ingress.enabled }}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
annotations:
nginx.org/websocket-services: "kubeshark-front"
{{- if .Values.tap.annotations }}
{{- toYaml .Values.tap.annotations | nindent 4 }}
{{- end }}
{{- if .Values.tap.ingress.annotations }}
{{- toYaml .Values.tap.ingress.annotations | nindent 4 }}
{{- end }}
labels:
{{- include "kubeshark.labels" . | nindent 4 }}
name: kubeshark-ingress
namespace: {{ .Release.Namespace }}
spec:
{{- if .Values.tap.ingress.className }}
ingressClassName: {{ .Values.tap.ingress.className }}
{{- end }}
rules:
- host: {{ .Values.tap.ingress.host }}
http:
paths:
- backend:
service:
name: kubeshark-front
port:
number: 80
path: /
pathType: Prefix
{{- if .Values.tap.ingress.tls }}
tls:
{{- toYaml .Values.tap.ingress.tls | nindent 2 }}
{{- end }}
status:
loadBalancer: {}
{{- end }}


@@ -0,0 +1,61 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: kubeshark-nginx-config-map
namespace: {{ .Release.Namespace }}
labels:
{{- include "kubeshark.labels" . | nindent 4 }}
data:
default.conf: |
server {
listen 8080;
{{- if .Values.tap.ipv6 }}
listen [::]:8080;
{{- end }}
access_log /dev/stdout;
error_log /dev/stdout;
client_body_buffer_size 64k;
client_header_buffer_size 32k;
large_client_header_buffers 8 64k;
location /api {
rewrite ^/api(.*)$ $1 break;
proxy_pass http://kubeshark-hub;
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header Host $http_host;
proxy_set_header Upgrade websocket;
proxy_set_header Connection Upgrade;
proxy_set_header Authorization $http_authorization;
proxy_pass_header Authorization;
proxy_connect_timeout 4s;
proxy_read_timeout 120s;
proxy_send_timeout 12s;
proxy_pass_request_headers on;
}
location /saml {
rewrite ^/saml(.*)$ /saml$1 break;
proxy_pass http://kubeshark-hub;
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header Host $http_host;
proxy_connect_timeout 4s;
proxy_read_timeout 120s;
proxy_send_timeout 12s;
proxy_pass_request_headers on;
}
location / {
root /usr/share/nginx/html;
index index.html index.htm;
try_files $uri $uri/ /index.html;
expires -1;
add_header Cache-Control no-cache;
}
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root /usr/share/nginx/html;
}
}


@@ -0,0 +1,31 @@
kind: ConfigMap
apiVersion: v1
metadata:
name: kubeshark-config-map
namespace: {{ .Release.Namespace }}
labels:
app.kubeshark.co/app: hub
{{- include "kubeshark.labels" . | nindent 4 }}
data:
POD_REGEX: '{{ .Values.tap.regex }}'
NAMESPACES: '{{ gt (len .Values.tap.namespaces) 0 | ternary (join "," .Values.tap.namespaces) "" }}'
SCRIPTING_SCRIPTS: '{}'
INGRESS_ENABLED: '{{ .Values.tap.ingress.enabled }}'
INGRESS_HOST: '{{ .Values.tap.ingress.host }}'
PROXY_FRONT_PORT: '{{ .Values.tap.proxy.front.port }}'
AUTH_ENABLED: '{{ .Values.tap.auth.enabled | ternary "true" "" }}'
AUTH_TYPE: '{{ .Values.tap.auth.type }}'
AUTH_SAML_IDP_METADATA_URL: '{{ .Values.tap.auth.saml.idpMetadataUrl }}'
AUTH_SAML_ROLE_ATTRIBUTE: '{{ .Values.tap.auth.saml.roleAttribute }}'
AUTH_SAML_ROLES: '{{ .Values.tap.auth.saml.roles | toJson }}'
TELEMETRY_DISABLED: '{{ not .Values.tap.telemetry.enabled | ternary "true" "" }}'
REPLAY_DISABLED: '{{ .Values.tap.replayDisabled | ternary "true" "" }}'
SCRIPTING_DISABLED: '{{ .Values.tap.scriptingDisabled | ternary "true" "" }}'
TARGETED_PODS_UPDATE_DISABLED: '{{ .Values.tap.targetedPodsUpdateDisabled | ternary "true" "" }}'
RECORDING_DISABLED: '{{ .Values.tap.recordingDisabled | ternary "true" "" }}'
GLOBAL_FILTER: {{ include "kubeshark.escapeDoubleQuotes" .Values.tap.globalFilter | quote }}
TRAFFIC_SAMPLE_RATE: '{{ .Values.tap.trafficSampleRate }}'
JSON_TTL: '{{ .Values.tap.misc.jsonTTL }}'
PCAP_TTL: '{{ .Values.tap.misc.pcapTTL }}'
PCAP_ERROR_TTL: '{{ .Values.tap.misc.pcapErrorTTL }}'
TIMEZONE: '{{ not (eq .Values.timezone "") | ternary .Values.timezone " " }}'


@@ -0,0 +1,41 @@
kind: Secret
apiVersion: v1
metadata:
name: kubeshark-secret
namespace: {{ .Release.Namespace }}
labels:
app.kubeshark.co/app: hub
{{- include "kubeshark.labels" . | nindent 4 }}
stringData:
LICENSE: '{{ .Values.license }}'
SCRIPTING_ENV: '{{ .Values.scripting.env | toJson }}'
---
kind: Secret
apiVersion: v1
metadata:
name: kubeshark-saml-x509-crt-secret
namespace: {{ .Release.Namespace }}
labels:
app.kubeshark.co/app: hub
{{- include "kubeshark.labels" . | nindent 4 }}
stringData:
AUTH_SAML_X509_CRT: |
{{ .Values.tap.auth.saml.x509crt | nindent 4 }}
---
kind: Secret
apiVersion: v1
metadata:
name: kubeshark-saml-x509-key-secret
namespace: {{ .Release.Namespace }}
labels:
app.kubeshark.co/app: hub
{{- include "kubeshark.labels" . | nindent 4 }}
stringData:
AUTH_SAML_X509_KEY: |
{{ .Values.tap.auth.saml.x509key | nindent 4 }}
---


@@ -0,0 +1,52 @@
{{- if .Capabilities.APIVersions.Has "security.openshift.io/v1/SecurityContextConstraints" }}
apiVersion: security.openshift.io/v1
kind: SecurityContextConstraints
metadata:
labels:
{{- include "kubeshark.labels" . | nindent 4 }}
annotations:
"helm.sh/hook": pre-install
{{- if .Values.tap.annotations }}
{{- toYaml .Values.tap.annotations | nindent 4 }}
{{- end }}
name: kubeshark-scc
priority: 10
allowPrivilegedContainer: true
allowHostDirVolumePlugin: true
allowHostNetwork: true
allowHostPorts: true
allowHostPID: true
allowHostIPC: true
readOnlyRootFilesystem: false
requiredDropCapabilities:
- MKNOD
allowedCapabilities:
- NET_RAW
- NET_ADMIN
- SYS_ADMIN
- SYS_PTRACE
- DAC_OVERRIDE
- SYS_RESOURCE
- SYS_MODULE
runAsUser:
type: RunAsAny
fsGroup:
type: MustRunAs
seLinuxContext:
type: RunAsAny
supplementalGroups:
type: RunAsAny
seccompProfiles:
- '*'
volumes:
- configMap
- downwardAPI
- emptyDir
- persistentVolumeClaim
- secret
- hostPath
- projected
- ephemeral
users:
- system:serviceaccount:{{ .Release.Namespace }}:kubeshark-service-account
{{- end }}


@@ -0,0 +1,18 @@
---
kind: Service
apiVersion: v1
metadata:
name: kubeshark-worker-metrics
namespace: {{ .Release.Namespace }}
annotations:
prometheus.io/scrape: 'true'
prometheus.io/port: '{{ .Values.tap.metrics.port }}'
spec:
selector:
app.kubeshark.co/app: worker
{{- include "kubeshark.labels" . | nindent 4 }}
ports:
- name: metrics
protocol: TCP
port: {{ .Values.tap.metrics.port }}
targetPort: {{ .Values.tap.metrics.port }}


@@ -0,0 +1,58 @@
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: kubeshark-hub-network-policy
namespace: {{ .Release.Namespace }}
spec:
podSelector:
matchLabels:
app.kubeshark.co/app: hub
policyTypes:
- Ingress
- Egress
ingress:
- ports:
- protocol: TCP
port: 8080
egress:
- {}
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: kubeshark-front-network-policy
namespace: {{ .Release.Namespace }}
spec:
podSelector:
matchLabels:
app.kubeshark.co/app: front
policyTypes:
- Ingress
- Egress
ingress:
- ports:
- protocol: TCP
port: 8080
egress:
- {}
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: kubeshark-worker-network-policy
namespace: {{ .Release.Namespace }}
spec:
podSelector:
matchLabels:
app.kubeshark.co/app: worker
policyTypes:
- Ingress
- Egress
ingress:
- ports:
- protocol: TCP
port: {{ .Values.tap.proxy.worker.srvPort }}
- protocol: TCP
port: {{ .Values.tap.metrics.port }}
egress:
- {}


@@ -0,0 +1,27 @@
Thank you for installing {{ title .Chart.Name }}.
Registry: {{ .Values.tap.docker.registry }}
Tag: {{ not (eq .Values.tap.docker.tag "") | ternary .Values.tap.docker.tag (printf "v%s" .Chart.Version) }}
Your deployment has been successful. The release is named `{{ .Release.Name }}` and it has been deployed in the `{{ .Release.Namespace }}` namespace.
{{- if .Values.tap.telemetry.enabled }}
Notice: Telemetry is enabled. Kubeshark will collect anonymous usage statistics.
{{ end }}
{{- if .Values.tap.ingress.enabled }}
You can now access the application through the following URL:
http{{ if .Values.tap.ingress.tls }}s{{ end }}://{{ .Values.tap.ingress.host }}
{{- else }}
To access the application, follow these steps:
1. Perform port forwarding with the following commands:
kubectl port-forward -n {{ .Release.Namespace }} service/kubeshark-front 8899:80
2. Once port forwarding is done, you can access the application by visiting the following URL in your web browser:
http://0.0.0.0:8899
{{ end }}


@@ -0,0 +1,58 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "kubeshark.name" -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "kubeshark.fullname" -}}
{{- printf "%s-%s" .Release.Name .Chart.Name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "kubeshark.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "kubeshark.labels" -}}
helm.sh/chart: {{ include "kubeshark.chart" . }}
{{ include "kubeshark.selectorLabels" . }}
app.kubernetes.io/version: {{ .Chart.Version | quote }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- if .Values.tap.labels }}
{{ toYaml .Values.tap.labels }}
{{- end }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "kubeshark.selectorLabels" -}}
app.kubernetes.io/name: {{ include "kubeshark.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "kubeshark.serviceAccountName" -}}
{{- printf "%s-service-account" .Release.Name }}
{{- end }}
{{/*
Escape double quotes in a string
*/}}
{{- define "kubeshark.escapeDoubleQuotes" -}}
{{- regexReplaceAll "\"" . "\\\"" -}}
{{- end -}}

131
helm-chart/values.yaml Normal file

@@ -0,0 +1,131 @@
tap:
docker:
registry: docker.io/kubeshark
tag: ""
imagePullPolicy: Always
imagePullSecrets: []
proxy:
worker:
srvPort: 31001
hub:
srvPort: 8898
front:
port: 8899
host: 127.0.0.1
regex: .*
namespaces: []
release:
repo: https://helm.kubeshark.co
name: kubeshark
namespace: default
persistentStorage: false
persistentStorageStatic: false
efsFileSytemIdAndPath: ""
storageLimit: 500Mi
storageClass: standard
dryRun: false
resources:
hub:
limits:
cpu: 750m
memory: 1Gi
requests:
cpu: 50m
memory: 50Mi
sniffer:
limits:
cpu: 750m
memory: 1Gi
requests:
cpu: 50m
memory: 50Mi
tracer:
limits:
cpu: 750m
memory: 1Gi
requests:
cpu: 50m
memory: 50Mi
serviceMesh: true
tls: true
ignoreTainted: false
labels: {}
annotations: {}
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- linux
auth:
enabled: false
type: saml
saml:
idpMetadataUrl: ""
x509crt: ""
x509key: ""
roleAttribute: role
roles:
admin:
filter: ""
canReplayTraffic: true
canDownloadPCAP: true
canUseScripting: true
canUpdateTargetedPods: true
showAdminConsoleLink: true
ingress:
enabled: false
className: ""
host: ks.svc.cluster.local
tls: []
annotations: {}
ipv6: true
debug: false
kernelModule:
enabled: true
image: kubeshark/pf-ring-module:all
unloadOnDestroy: false
telemetry:
enabled: true
defaultFilter: ""
replayDisabled: false
scriptingDisabled: false
targetedPodsUpdateDisabled: false
recordingDisabled: false
capabilities:
networkCapture:
- NET_RAW
- NET_ADMIN
serviceMeshCapture:
- SYS_ADMIN
- SYS_PTRACE
- DAC_OVERRIDE
kernelModule:
- SYS_MODULE
ebpfCapture:
- SYS_ADMIN
- SYS_PTRACE
- SYS_RESOURCE
- IPC_LOCK
globalFilter: ""
metrics:
port: 49100
trafficSampleRate: 100
tcpStreamChannelTimeoutMs: 10000
misc:
jsonTTL: 5m
pcapTTL: 10s
pcapErrorTTL: 60s
logs:
file: ""
kube:
configPath: ""
context: ""
dumpLogs: false
headless: false
license: ""
scripting:
env: {}
source: ""
watchScripts: true
timezone: ""

94
install.sh Normal file

@@ -0,0 +1,94 @@
#!/bin/sh
EXE_NAME=kubeshark
ALIAS_NAME=ks
PROG_NAME=Kubeshark
INSTALL_PATH=/usr/local/bin/$EXE_NAME
ALIAS_PATH=/usr/local/bin/$ALIAS_NAME
REPO=https://github.com/kubeshark/kubeshark
OS=$(echo $(uname -s) | tr '[:upper:]' '[:lower:]')
ARCH=$(echo $(uname -m) | tr '[:upper:]' '[:lower:]')
SUPPORTED_PAIRS="linux_amd64 linux_arm64 darwin_amd64 darwin_arm64"
ESC="\033["
F_DEFAULT=39
F_RED=31
F_GREEN=32
F_YELLOW=33
B_DEFAULT=49
B_RED=41
B_BLUE=44
B_LIGHT_BLUE=104
if [ "$ARCH" = "x86_64" ]; then
ARCH="amd64"
fi
if [ "$ARCH" = "aarch64" ]; then
ARCH="arm64"
fi
echo $SUPPORTED_PAIRS | grep -w -q "${OS}_${ARCH}"
if [ $? != 0 ] ; then
echo "\n${ESC}${F_RED}m🛑 Unsupported OS \"$OS\" or architecture \"$ARCH\". Failed to install $PROG_NAME.${ESC}${F_DEFAULT}m"
echo "${ESC}${B_RED}mPlease report 🐛 to $REPO/issues${ESC}${F_DEFAULT}m"
exit 1
fi
# Check for Homebrew and kubeshark installation
if command -v brew >/dev/null; then
if brew list kubeshark >/dev/null 2>&1; then
echo "📦 Found $PROG_NAME instance installed with Homebrew"
echo "${ESC}${F_GREEN}m⬇ Removing before installation with script${ESC}${F_DEFAULT}m"
brew uninstall kubeshark
fi
fi
echo "\n🦈 ${ESC}${F_DEFAULT};${B_BLUE}m Started to download $PROG_NAME ${ESC}${B_DEFAULT};${F_DEFAULT}m"
if curl -# --fail -Lo $EXE_NAME ${REPO}/releases/latest/download/${EXE_NAME}_${OS}_${ARCH} ; then
chmod +x $PWD/$EXE_NAME
echo "\n${ESC}${F_GREEN}m⬇ $PROG_NAME is downloaded into $PWD/$EXE_NAME${ESC}${F_DEFAULT}m"
else
echo "\n${ESC}${F_RED}m🛑 Couldn't download ${REPO}/releases/latest/download/${EXE_NAME}_${OS}_${ARCH}\n\
⚠️ Check your internet connection.\n\
⚠️ Make sure 'curl' command is available.\n\
⚠️ Make sure there is no directory named '${EXE_NAME}' in ${PWD}\n\
${ESC}${F_DEFAULT}m"
echo "${ESC}${B_RED}mPlease report 🐛 to $REPO/issues${ESC}${F_DEFAULT}m"
exit 1
fi
use_cmd=$EXE_NAME
printf "Do you want to install system-wide? Requires sudo 😇 (y/N)? "
old_stty_cfg=$(stty -g)
stty raw -echo ; answer=$(head -c 1) ; stty $old_stty_cfg
if echo "$answer" | grep -iq "^y" ;then
echo "$answer"
sudo mv ./$EXE_NAME $INSTALL_PATH || exit 1
echo "${ESC}${F_GREEN}m$PROG_NAME is installed into $INSTALL_PATH${ESC}${F_DEFAULT}m\n"
ls $ALIAS_PATH >> /dev/null 2>&1
if [ $? != 0 ] ; then
printf "Do you want to add 'ks' alias for Kubeshark? (y/N)? "
old_stty_cfg=$(stty -g)
stty raw -echo ; answer=$(head -c 1) ; stty $old_stty_cfg
if echo "$answer" | grep -iq "^y" ; then
echo "$answer"
sudo ln -s $INSTALL_PATH $ALIAS_PATH
use_cmd=$ALIAS_NAME
else
echo "$answer"
fi
else
use_cmd=$ALIAS_NAME
fi
else
echo "$answer"
use_cmd="./$EXE_NAME"
fi
echo "${ESC}${F_GREEN}m✅ You can use the ${ESC}${F_DEFAULT};${B_LIGHT_BLUE}m $use_cmd ${ESC}${B_DEFAULT};${F_GREEN}m command now.${ESC}${F_DEFAULT}m"
echo "\n${ESC}${F_YELLOW}mPlease give us a star 🌟 on ${ESC}${F_DEFAULT}m$REPO${ESC}${F_YELLOW}m if you ❤️ $PROG_NAME!${ESC}${F_DEFAULT}m"


@@ -5,10 +5,13 @@ import (
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"os"
"time"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/misc"
"github.com/kubeshark/kubeshark/utils"
@@ -73,7 +76,7 @@ func (connector *Connector) PostWorkerPodToHub(pod *v1.Pod) {
ok := false
for !ok {
var resp *http.Response
if resp, err = utils.Post(postWorkerUrl, "application/json", bytes.NewBuffer(podMarshalled), connector.client); err != nil || resp.StatusCode != http.StatusOK {
if resp, err = utils.Post(postWorkerUrl, "application/json", bytes.NewBuffer(podMarshalled), connector.client, config.Config.License); err != nil || resp.StatusCode != http.StatusOK {
if _, ok := err.(*url.Error); ok {
break
}
@@ -87,69 +90,6 @@ func (connector *Connector) PostWorkerPodToHub(pod *v1.Pod) {
}
}
type postStorageLimit struct {
Limit int64 `json:"limit"`
}
func (connector *Connector) PostStorageLimitToHub(limit int64) {
payload := &postStorageLimit{
Limit: limit,
}
postStorageLimitUrl := fmt.Sprintf("%s/pcaps/set-storage-limit", connector.url)
if payloadMarshalled, err := json.Marshal(payload); err != nil {
log.Error().Err(err).Msg("Failed to marshal the storage limit:")
} else {
ok := false
for !ok {
var resp *http.Response
if resp, err = utils.Post(postStorageLimitUrl, "application/json", bytes.NewBuffer(payloadMarshalled), connector.client); err != nil || resp.StatusCode != http.StatusOK {
if _, ok := err.(*url.Error); ok {
break
}
log.Warn().Err(err).Msg("Failed sending the storage limit to Hub. Retrying...")
} else {
log.Debug().Int("limit", int(limit)).Msg("Reported storage limit to Hub:")
return
}
time.Sleep(DefaultSleep)
}
}
}
type postRegexRequest struct {
Regex string `json:"regex"`
Namespaces []string `json:"namespaces"`
}
func (connector *Connector) PostRegexToHub(regex string, namespaces []string) {
postRegexUrl := fmt.Sprintf("%s/pods/regex", connector.url)
payload := postRegexRequest{
Regex: regex,
Namespaces: namespaces,
}
if payloadMarshalled, err := json.Marshal(payload); err != nil {
log.Error().Err(err).Msg("Failed to marshal the pod regex:")
} else {
ok := false
for !ok {
var resp *http.Response
if resp, err = utils.Post(postRegexUrl, "application/json", bytes.NewBuffer(payloadMarshalled), connector.client); err != nil || resp.StatusCode != http.StatusOK {
if _, ok := err.(*url.Error); ok {
break
}
log.Warn().Err(err).Msg("Failed sending the pod regex to Hub. Retrying...")
} else {
log.Debug().Str("regex", regex).Strs("namespaces", namespaces).Msg("Reported pod regex to Hub:")
return
}
time.Sleep(DefaultSleep)
}
}
}
type postLicenseRequest struct {
License string `json:"license"`
}
@@ -167,7 +107,7 @@ func (connector *Connector) PostLicense(license string) {
ok := false
for !ok {
var resp *http.Response
if resp, err = utils.Post(postLicenseUrl, "application/json", bytes.NewBuffer(payloadMarshalled), connector.client); err != nil || resp.StatusCode != http.StatusOK {
if resp, err = utils.Post(postLicenseUrl, "application/json", bytes.NewBuffer(payloadMarshalled), connector.client, config.Config.License); err != nil || resp.StatusCode != http.StatusOK {
if _, ok := err.(*url.Error); ok {
break
}
@@ -181,44 +121,27 @@ func (connector *Connector) PostLicense(license string) {
}
}
func (connector *Connector) PostEnv(env map[string]interface{}) {
if len(env) == 0 {
return
}
postEnvUrl := fmt.Sprintf("%s/scripts/env", connector.url)
if envMarshalled, err := json.Marshal(env); err != nil {
log.Error().Err(err).Msg("Failed to marshal the env:")
} else {
ok := false
for !ok {
var resp *http.Response
if resp, err = utils.Post(postEnvUrl, "application/json", bytes.NewBuffer(envMarshalled), connector.client); err != nil || resp.StatusCode != http.StatusOK {
if _, ok := err.(*url.Error); ok {
break
}
log.Warn().Err(err).Msg("Failed sending the scripting environment variables to Hub. Retrying...")
} else {
log.Debug().Interface("env", env).Msg("Reported scripting environment variables to Hub:")
return
}
time.Sleep(DefaultSleep)
}
}
type postScriptRequest struct {
Title string `json:"title"`
Code string `json:"code"`
}
func (connector *Connector) PostScript(script *misc.Script) (index int64, err error) {
postScriptUrl := fmt.Sprintf("%s/scripts", connector.url)
payload := postScriptRequest{
Title: script.Title,
Code: script.Code,
}
var scriptMarshalled []byte
if scriptMarshalled, err = json.Marshal(script); err != nil {
if scriptMarshalled, err = json.Marshal(payload); err != nil {
log.Error().Err(err).Msg("Failed to marshal the script:")
} else {
ok := false
for !ok {
var resp *http.Response
if resp, err = utils.Post(postScriptUrl, "application/json", bytes.NewBuffer(scriptMarshalled), connector.client); err != nil || resp.StatusCode != http.StatusOK {
if resp, err = utils.Post(postScriptUrl, "application/json", bytes.NewBuffer(scriptMarshalled), connector.client, config.Config.License); err != nil || resp.StatusCode != http.StatusOK {
if _, ok := err.(*url.Error); ok {
break
}
@@ -267,6 +190,7 @@ func (connector *Connector) PutScript(script *misc.Script, index int64) (err err
return
}
req.Header.Set("Content-Type", "application/json")
req.Header.Set("License-Key", config.Config.License)
var resp *http.Response
resp, err = client.Do(req)
@@ -305,6 +229,7 @@ func (connector *Connector) DeleteScript(index int64) (err error) {
return
}
req.Header.Set("Content-Type", "application/json")
req.Header.Set("License-Key", config.Config.License)
var resp *http.Response
resp, err = client.Do(req)
@@ -328,22 +253,39 @@ func (connector *Connector) DeleteScript(index int64) (err error) {
return
}
func (connector *Connector) PostScriptDone() {
postScripDonetUrl := fmt.Sprintf("%s/scripts/done", connector.url)
func (connector *Connector) PostPcapsMerge(out *os.File) {
postEnvUrl := fmt.Sprintf("%s/pcaps/merge", connector.url)
ok := false
var err error
for !ok {
var resp *http.Response
if resp, err = utils.Post(postScripDonetUrl, "application/json", nil, connector.client); err != nil || resp.StatusCode != http.StatusOK {
if _, ok := err.(*url.Error); ok {
break
if envMarshalled, err := json.Marshal(map[string]string{"query": ""}); err != nil {
log.Error().Err(err).Msg("Failed to marshal the env:")
} else {
ok := false
for !ok {
var resp *http.Response
if resp, err = utils.Post(postEnvUrl, "application/json", bytes.NewBuffer(envMarshalled), connector.client, config.Config.License); err != nil || resp.StatusCode != http.StatusOK {
if _, ok := err.(*url.Error); ok {
break
}
log.Warn().Err(err).Msg("Failed exported PCAP download. Retrying...")
} else {
defer resp.Body.Close()
// Check server response
if resp.StatusCode != http.StatusOK {
log.Error().Str("status", resp.Status).Err(err).Msg("Failed exported PCAP download.")
return
}
// Writer the body to file
_, err = io.Copy(out, resp.Body)
if err != nil {
log.Error().Err(err).Msg("Failed writing PCAP export:")
return
}
log.Info().Str("path", out.Name()).Msg("Downloaded exported PCAP:")
return
}
log.Warn().Err(err).Msg("Failed sending the POST scripts done to Hub. Retrying...")
} else {
log.Debug().Msg("Reported POST scripts done to Hub.")
return
time.Sleep(DefaultSleep)
}
time.Sleep(DefaultSleep)
}
}

13
kubectl.sh Executable file

@@ -0,0 +1,13 @@
#!/bin/bash
# Useful kubectl commands for Kubeshark development
# This command outputs all Kubernetes resources using YAML format and pipes it to VS Code
if [ "$1" = "view-all-resources" ] ; then
kubectl get $(kubectl api-resources | awk '{print $1}' | tail -n +2 | tr '\n' ',' | sed s/,\$//) -o yaml | code -
fi
# This command outputs all Kubernetes resources in "kubeshark" namespace using YAML format and pipes it to VS Code
if [[ $1 = "view-kubeshark-resources" ]] ; then
kubectl get $(kubectl api-resources | awk '{print $1}' | tail -n +2 | tr '\n' ',' | sed s/,\$//) -n kubeshark -o yaml | code -
fi
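A quick usage example (not part of the script): invoked from the repo root as `./kubectl.sh view-all-resources` or `./kubectl.sh view-kubeshark-resources`, it dumps the matching resources as YAML into a VS Code buffer via `code -`, so the `code` CLI must be on the PATH.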

71
kubernetes/config.go Normal file
View File

@@ -0,0 +1,71 @@
package kubernetes
import (
"context"
"github.com/kubeshark/kubeshark/config"
"github.com/rs/zerolog/log"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
SUFFIX_SECRET = "secret"
SUFFIX_CONFIG_MAP = "config-map"
SECRET_LICENSE = "LICENSE"
CONFIG_POD_REGEX = "POD_REGEX"
CONFIG_NAMESPACES = "NAMESPACES"
CONFIG_SCRIPTING_ENV = "SCRIPTING_ENV"
CONFIG_INGRESS_ENABLED = "INGRESS_ENABLED"
CONFIG_INGRESS_HOST = "INGRESS_HOST"
CONFIG_PROXY_FRONT_PORT = "PROXY_FRONT_PORT"
CONFIG_AUTH_ENABLED = "AUTH_ENABLED"
CONFIG_AUTH_TYPE = "AUTH_TYPE"
CONFIG_AUTH_SAML_IDP_METADATA_URL = "AUTH_SAML_IDP_METADATA_URL"
)
func SetSecret(provider *Provider, key string, value string) (updated bool, err error) {
var secret *v1.Secret
secret, err = provider.clientSet.CoreV1().Secrets(config.Config.Tap.Release.Namespace).Get(context.TODO(), SELF_RESOURCES_PREFIX+SUFFIX_SECRET, metav1.GetOptions{})
if err != nil {
return
}
if secret.StringData[key] != value {
updated = true
}
secret.Data[key] = []byte(value)
_, err = provider.clientSet.CoreV1().Secrets(config.Config.Tap.Release.Namespace).Update(context.TODO(), secret, metav1.UpdateOptions{})
if err == nil {
if updated {
log.Info().Str("secret", key).Str("value", value).Msg("Updated:")
}
} else {
log.Error().Str("secret", key).Err(err).Send()
}
return
}
func SetConfig(provider *Provider, key string, value string) (updated bool, err error) {
var configMap *v1.ConfigMap
configMap, err = provider.clientSet.CoreV1().ConfigMaps(config.Config.Tap.Release.Namespace).Get(context.TODO(), SELF_RESOURCES_PREFIX+SUFFIX_CONFIG_MAP, metav1.GetOptions{})
if err != nil {
return
}
if configMap.Data[key] != value {
updated = true
}
configMap.Data[key] = value
_, err = provider.clientSet.CoreV1().ConfigMaps(config.Config.Tap.Release.Namespace).Update(context.TODO(), configMap, metav1.UpdateOptions{})
if err == nil {
if updated {
log.Info().Str("config", key).Str("value", value).Msg("Updated:")
}
} else {
log.Error().Str("config", key).Err(err).Send()
}
return
}
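A short sketch of how these helpers might be called from the CLI, assuming a `*kubernetes.Provider` built from the local kubeconfig; the values written below are placeholders:
```go
// Sketch only: provider is assumed to come from kubernetes.NewProvider(...).
func applySelfConfig(provider *kubernetes.Provider) {
	// ConfigMap keys go through SetConfig; it only logs when the value actually changed.
	if updated, err := kubernetes.SetConfig(provider, kubernetes.CONFIG_POD_REGEX, ".*"); err != nil {
		log.Error().Err(err).Send()
	} else if updated {
		log.Info().Msg("POD_REGEX updated in the self ConfigMap.")
	}

	// Secret keys go through the sibling SetSecret helper (the license value is a placeholder).
	if _, err := kubernetes.SetSecret(provider, kubernetes.SECRET_LICENSE, "<license-key>"); err != nil {
		log.Error().Err(err).Send()
	}
}
```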

View File

@@ -1,24 +1,11 @@
package kubernetes
const (
SelfResourcesPrefix = "kubeshark-"
FrontPodName = SelfResourcesPrefix + "front"
SELF_RESOURCES_PREFIX = "kubeshark-"
FrontPodName = SELF_RESOURCES_PREFIX + "front"
FrontServiceName = FrontPodName
HubPodName = SelfResourcesPrefix + "hub"
HubPodName = SELF_RESOURCES_PREFIX + "hub"
HubServiceName = HubPodName
ClusterRoleBindingName = SelfResourcesPrefix + "cluster-role-binding"
ClusterRoleName = SelfResourcesPrefix + "cluster-role"
K8sAllNamespaces = ""
RoleBindingName = SelfResourcesPrefix + "role-binding"
RoleName = SelfResourcesPrefix + "role"
ServiceAccountName = SelfResourcesPrefix + "service-account"
WorkerDaemonSetName = SelfResourcesPrefix + "worker-daemon-set"
WorkerPodName = SelfResourcesPrefix + "worker"
MinKubernetesServerVersion = "1.16.0"
)
const (
LabelPrefixApp = "app.kubernetes.io/"
LabelManagedBy = LabelPrefixApp + "managed-by"
LabelCreatedBy = LabelPrefixApp + "created-by"
)

192
kubernetes/cp.go Normal file
View File

@@ -0,0 +1,192 @@
package kubernetes
import (
"archive/tar"
"bufio"
"context"
"fmt"
"io"
"os"
"path"
"path/filepath"
"strings"
"github.com/rs/zerolog/log"
v1 "k8s.io/api/core/v1"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/remotecommand"
)
func CopyFromPod(ctx context.Context, provider *Provider, pod v1.Pod, srcPath string, dstPath string) error {
const containerName = "sniffer"
cmdArr := []string{"tar", "cf", "-", srcPath}
req := provider.clientSet.CoreV1().RESTClient().
Post().
Namespace(pod.Namespace).
Resource("pods").
Name(pod.Name).
SubResource("exec").
VersionedParams(&v1.PodExecOptions{
Container: containerName,
Command: cmdArr,
Stdin: true,
Stdout: true,
Stderr: true,
TTY: false,
}, scheme.ParameterCodec)
exec, err := remotecommand.NewSPDYExecutor(&provider.clientConfig, "POST", req.URL())
if err != nil {
return err
}
reader, outStream := io.Pipe()
errReader, errStream := io.Pipe()
go logErrors(errReader, pod)
go func() {
defer outStream.Close()
err = exec.StreamWithContext(ctx, remotecommand.StreamOptions{
Stdin: os.Stdin,
Stdout: outStream,
Stderr: errStream,
Tty: false,
})
if err != nil {
log.Error().Err(err).Str("pod", pod.Name).Msg("SPDYExecutor:")
}
}()
prefix := getPrefix(srcPath)
prefix = path.Clean(prefix)
prefix = stripPathShortcuts(prefix)
dstPath = path.Join(dstPath, path.Base(prefix))
err = untarAll(reader, dstPath, prefix)
// fo(reader)
return err
}
// func fo(fi io.Reader) {
// fo, err := os.Create("output.tar")
// if err != nil {
// panic(err)
// }
// // make a buffer to keep chunks that are read
// buf := make([]byte, 1024)
// for {
// // read a chunk
// n, err := fi.Read(buf)
// if err != nil && err != io.EOF {
// panic(err)
// }
// if n == 0 {
// break
// }
// // write a chunk
// if _, err := fo.Write(buf[:n]); err != nil {
// panic(err)
// }
// }
// }
func logErrors(reader io.Reader, pod v1.Pod) {
r := bufio.NewReader(reader)
for {
msg, _, err := r.ReadLine()
log.Warn().Str("pod", pod.Name).Str("msg", string(msg)).Msg("SPDYExecutor:")
if err != nil {
if err != io.EOF {
log.Error().Err(err).Send()
}
return
}
}
}
func untarAll(reader io.Reader, destDir, prefix string) error {
tarReader := tar.NewReader(reader)
for {
header, err := tarReader.Next()
if err != nil {
if err != io.EOF {
return err
}
break
}
if !strings.HasPrefix(header.Name, prefix) {
return fmt.Errorf("tar contents corrupted")
}
mode := header.FileInfo().Mode()
destFileName := filepath.Join(destDir, header.Name[len(prefix):])
baseName := filepath.Dir(destFileName)
if err := os.MkdirAll(baseName, 0755); err != nil {
return err
}
if header.FileInfo().IsDir() {
if err := os.MkdirAll(destFileName, 0755); err != nil {
return err
}
continue
}
evaledPath, err := filepath.EvalSymlinks(baseName)
if err != nil {
return err
}
if mode&os.ModeSymlink != 0 {
linkname := header.Linkname
if !filepath.IsAbs(linkname) {
_ = filepath.Join(evaledPath, linkname)
}
if err := os.Symlink(linkname, destFileName); err != nil {
return err
}
} else {
outFile, err := os.Create(destFileName)
if err != nil {
return err
}
defer outFile.Close()
if _, err := io.Copy(outFile, tarReader); err != nil {
return err
}
if err := outFile.Close(); err != nil {
return err
}
}
}
return nil
}
func getPrefix(file string) string {
return strings.TrimLeft(file, "/")
}
func stripPathShortcuts(p string) string {
newPath := p
trimmed := strings.TrimPrefix(newPath, "../")
for trimmed != newPath {
newPath = trimmed
trimmed = strings.TrimPrefix(newPath, "../")
}
// trim leftover {".", ".."}
if newPath == "." || newPath == ".." {
newPath = ""
}
if len(newPath) > 0 && string(newPath[0]) == "/" {
return newPath[1:]
}
return newPath
}
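A brief sketch of pulling data off a worker with `CopyFromPod`, assuming `provider` and `workerPod` were obtained through the provider's listing helpers; the paths are illustrative:
```go
// Sketch only: CopyFromPod execs `tar cf - <srcPath>` in the hard-coded "sniffer"
// container and untars the stream under dstPath on the local machine.
func pullWorkerData(ctx context.Context, provider *kubernetes.Provider, workerPod v1.Pod) error {
	return kubernetes.CopyFromPod(ctx, provider, workerPod, "/app/data", "./data")
}
```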

186
kubernetes/helm/helm.go Normal file
View File

@@ -0,0 +1,186 @@
package helm
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"regexp"
"strings"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/misc"
"github.com/pkg/errors"
"github.com/rs/zerolog/log"
"helm.sh/helm/v3/pkg/action"
"helm.sh/helm/v3/pkg/chart"
"helm.sh/helm/v3/pkg/chart/loader"
"helm.sh/helm/v3/pkg/cli"
"helm.sh/helm/v3/pkg/downloader"
"helm.sh/helm/v3/pkg/getter"
"helm.sh/helm/v3/pkg/kube"
"helm.sh/helm/v3/pkg/registry"
"helm.sh/helm/v3/pkg/release"
"helm.sh/helm/v3/pkg/repo"
)
const ENV_HELM_DRIVER = "HELM_DRIVER"
var settings = cli.New()
type Helm struct {
repo string
releaseName string
releaseNamespace string
}
func NewHelm(repo string, releaseName string, releaseNamespace string) *Helm {
return &Helm{
repo: repo,
releaseName: releaseName,
releaseNamespace: releaseNamespace,
}
}
func parseOCIRef(chartRef string) (string, string, error) {
refTagRegexp := regexp.MustCompile(`^(oci://[^:]+(:[0-9]{1,5})?[^:]+):(.*)$`)
caps := refTagRegexp.FindStringSubmatch(chartRef)
if len(caps) != 4 {
return "", "", errors.Errorf("improperly formatted oci chart reference: %s", chartRef)
}
chartRef = caps[1]
tag := caps[3]
return chartRef, tag, nil
}
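For reference, what `parseOCIRef` yields for a tagged OCI chart reference (same-package sketch; the registry path is made up):
```go
// Sketch only (same package): splits a tagged OCI chart reference into ref and tag.
func exampleParseOCIRef() {
	ref, tag, err := parseOCIRef("oci://ghcr.io/example/charts/kubeshark:52.2.0")
	if err != nil {
		log.Error().Err(err).Send()
		return
	}
	// ref == "oci://ghcr.io/example/charts/kubeshark", tag == "52.2.0"
	log.Info().Str("ref", ref).Str("tag", tag).Msg("Parsed OCI chart reference:")
}
```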
func (h *Helm) Install() (rel *release.Release, err error) {
kubeConfigPath := config.Config.KubeConfigPath()
actionConfig := new(action.Configuration)
if err = actionConfig.Init(kube.GetConfig(kubeConfigPath, "", h.releaseNamespace), h.releaseNamespace, os.Getenv(ENV_HELM_DRIVER), func(format string, v ...interface{}) {
log.Info().Msgf(format, v...)
}); err != nil {
return
}
client := action.NewInstall(actionConfig)
client.Namespace = h.releaseNamespace
client.ReleaseName = h.releaseName
chartPath := os.Getenv(fmt.Sprintf("%s_HELM_CHART_PATH", strings.ToUpper(misc.Program)))
if chartPath == "" {
var chartURL string
chartURL, err = repo.FindChartInRepoURL(h.repo, h.releaseName, "", "", "", "", getter.All(&cli.EnvSettings{}))
if err != nil {
return
}
var cp string
cp, err = client.ChartPathOptions.LocateChart(chartURL, settings)
if err != nil {
return
}
m := &downloader.Manager{
Out: os.Stdout,
ChartPath: cp,
Keyring: client.ChartPathOptions.Keyring,
SkipUpdate: false,
Getters: getter.All(settings),
RepositoryConfig: settings.RepositoryConfig,
RepositoryCache: settings.RepositoryCache,
Debug: settings.Debug,
}
dl := downloader.ChartDownloader{
Out: m.Out,
Verify: m.Verify,
Keyring: m.Keyring,
RepositoryConfig: m.RepositoryConfig,
RepositoryCache: m.RepositoryCache,
RegistryClient: m.RegistryClient,
Getters: m.Getters,
Options: []getter.Option{
getter.WithInsecureSkipVerifyTLS(false),
},
}
repoPath := filepath.Dir(m.ChartPath)
err = os.MkdirAll(repoPath, os.ModePerm)
if err != nil {
return
}
version := ""
if registry.IsOCI(chartURL) {
chartURL, version, err = parseOCIRef(chartURL)
if err != nil {
return
}
dl.Options = append(dl.Options,
getter.WithRegistryClient(m.RegistryClient),
getter.WithTagName(version))
}
log.Info().
Str("url", chartURL).
Str("repo-path", repoPath).
Msg("Downloading Helm chart:")
if _, _, err = dl.DownloadTo(chartURL, version, repoPath); err != nil {
return
}
chartPath = m.ChartPath
}
var chart *chart.Chart
chart, err = loader.Load(chartPath)
if err != nil {
return
}
log.Info().
Str("release", chart.Metadata.Name).
Str("version", chart.Metadata.Version).
Strs("source", chart.Metadata.Sources).
Str("kube-version", chart.Metadata.KubeVersion).
Msg("Installing using Helm:")
var configMarshalled []byte
configMarshalled, err = json.Marshal(config.Config)
if err != nil {
return
}
var configUnmarshalled map[string]interface{}
err = json.Unmarshal(configMarshalled, &configUnmarshalled)
if err != nil {
return
}
rel, err = client.Run(chart, configUnmarshalled)
if err != nil {
return
}
return
}
func (h *Helm) Uninstall() (resp *release.UninstallReleaseResponse, err error) {
kubeConfigPath := config.Config.KubeConfigPath()
actionConfig := new(action.Configuration)
if err = actionConfig.Init(kube.GetConfig(kubeConfigPath, "", h.releaseNamespace), h.releaseNamespace, os.Getenv(ENV_HELM_DRIVER), func(format string, v ...interface{}) {
log.Info().Msgf(format, v...)
}); err != nil {
return
}
client := action.NewUninstall(actionConfig)
resp, err = client.Run(h.releaseName)
if err != nil {
return
}
return
}
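A compact sketch of the install/uninstall flow these helpers expose; the repo URL, release name, and namespace below are placeholders:
```go
// Sketch only: roughly mirrors how the CLI drives a chart install and teardown.
func installAndRemove() error {
	h := helm.NewHelm("https://helm.kubeshark.co", "kubeshark", "kubeshark")
	rel, err := h.Install()
	if err != nil {
		return err
	}
	log.Info().Str("release", rel.Name).Str("status", string(rel.Info.Status)).Msg("Installed:")

	// Later, tear the release down through the matching helper.
	_, err = h.Uninstall()
	return err
}
```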

View File

@@ -3,38 +3,25 @@ package kubernetes
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"net/url"
"path/filepath"
"regexp"
"github.com/kubeshark/kubeshark/config/configStructs"
"github.com/kubeshark/kubeshark/docker"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/misc"
"github.com/kubeshark/kubeshark/semver"
"github.com/kubeshark/kubeshark/utils"
"github.com/rs/zerolog/log"
auth "k8s.io/api/authorization/v1"
core "k8s.io/api/core/v1"
rbac "k8s.io/api/rbac/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/version"
"k8s.io/apimachinery/pkg/watch"
applyconfapp "k8s.io/client-go/applyconfigurations/apps/v1"
applyconfcore "k8s.io/client-go/applyconfigurations/core/v1"
v1 "k8s.io/client-go/applyconfigurations/core/v1"
applyconfmeta "k8s.io/client-go/applyconfigurations/meta/v1"
"k8s.io/client-go/kubernetes"
_ "k8s.io/client-go/plugin/pkg/client/auth"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/clientcmd"
watchtools "k8s.io/client-go/tools/watch"
)
type Provider struct {
@@ -45,14 +32,6 @@ type Provider struct {
createdBy string
}
const (
fieldManagerName = "kubeshark-manager"
procfsVolumeName = "proc"
procfsMountPath = "/hostproc"
sysfsVolumeName = "sys"
sysfsMountPath = "/sys"
)
func NewProvider(kubeConfigPath string, contextName string) (*Provider, error) {
kubernetesConfig := loadKubernetesConfiguration(kubeConfigPath, contextName)
restClientConfig, err := kubernetesConfig.ClientConfig()
@@ -91,350 +70,11 @@ func NewProvider(kubeConfigPath string, contextName string) (*Provider, error) {
}, nil
}
//NewProviderInCluster Used in another repo that calls this function
func NewProviderInCluster() (*Provider, error) {
restClientConfig, err := rest.InClusterConfig()
if err != nil {
return nil, err
}
clientSet, err := getClientSet(restClientConfig)
if err != nil {
return nil, err
}
return &Provider{
clientSet: clientSet,
kubernetesConfig: nil, // not relevant in cluster
clientConfig: *restClientConfig,
managedBy: misc.Program,
createdBy: misc.Program,
}, nil
}
func (provider *Provider) CurrentNamespace() (string, error) {
if provider.kubernetesConfig == nil {
return "", errors.New("kubernetesConfig is nil, The CLI will not work with in-cluster kubernetes config, use a kubeconfig file when initializing the Provider")
}
ns, _, err := provider.kubernetesConfig.Namespace()
return ns, err
}
func (provider *Provider) WaitUtilNamespaceDeleted(ctx context.Context, name string) error {
fieldSelector := fmt.Sprintf("metadata.name=%s", name)
var limit int64 = 1
lw := &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
options.FieldSelector = fieldSelector
options.Limit = limit
return provider.clientSet.CoreV1().Namespaces().List(ctx, options)
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
options.FieldSelector = fieldSelector
options.Limit = limit
return provider.clientSet.CoreV1().Namespaces().Watch(ctx, options)
},
}
var preconditionFunc watchtools.PreconditionFunc = func(store cache.Store) (bool, error) {
_, exists, err := store.Get(&core.Namespace{ObjectMeta: metav1.ObjectMeta{Name: name}})
if err != nil {
return false, err
}
if exists {
return false, nil
}
return true, nil
}
conditionFunc := func(e watch.Event) (bool, error) {
if e.Type == watch.Deleted {
return true, nil
}
return false, nil
}
obj := &core.Namespace{}
_, err := watchtools.UntilWithSync(ctx, lw, obj, preconditionFunc, conditionFunc)
return err
}
func (provider *Provider) CreateNamespace(ctx context.Context, name string) (*core.Namespace, error) {
namespaceSpec := &core.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: buildWithDefaultLabels(map[string]string{}, provider),
},
}
return provider.clientSet.CoreV1().Namespaces().Create(ctx, namespaceSpec, metav1.CreateOptions{})
}
type PodOptions struct {
Namespace string
PodName string
PodImage string
ServiceAccountName string
Resources configStructs.Resources
ImagePullPolicy core.PullPolicy
ImagePullSecrets []core.LocalObjectReference
Debug bool
}
func (provider *Provider) BuildHubPod(opts *PodOptions) (*core.Pod, error) {
cpuLimit, err := resource.ParseQuantity(opts.Resources.CpuLimit)
if err != nil {
return nil, fmt.Errorf("invalid cpu limit for %s container", opts.PodName)
}
memLimit, err := resource.ParseQuantity(opts.Resources.MemoryLimit)
if err != nil {
return nil, fmt.Errorf("invalid memory limit for %s container", opts.PodName)
}
cpuRequests, err := resource.ParseQuantity(opts.Resources.CpuRequests)
if err != nil {
return nil, fmt.Errorf("invalid cpu request for %s container", opts.PodName)
}
memRequests, err := resource.ParseQuantity(opts.Resources.MemoryRequests)
if err != nil {
return nil, fmt.Errorf("invalid memory request for %s container", opts.PodName)
}
command := []string{
"./hub",
}
if opts.Debug {
command = append(command, "-debug")
}
containers := []core.Container{
{
Name: opts.PodName,
Image: opts.PodImage,
ImagePullPolicy: opts.ImagePullPolicy,
Command: command,
Resources: core.ResourceRequirements{
Limits: core.ResourceList{
"cpu": cpuLimit,
"memory": memLimit,
},
Requests: core.ResourceList{
"cpu": cpuRequests,
"memory": memRequests,
},
},
},
}
pod := &core.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: opts.PodName,
Labels: buildWithDefaultLabels(map[string]string{
"app": opts.PodName,
}, provider),
},
Spec: core.PodSpec{
Containers: containers,
DNSPolicy: core.DNSClusterFirstWithHostNet,
TerminationGracePeriodSeconds: new(int64),
Tolerations: []core.Toleration{
{
Operator: core.TolerationOpExists,
Effect: core.TaintEffectNoExecute,
},
{
Operator: core.TolerationOpExists,
Effect: core.TaintEffectNoSchedule,
},
},
ImagePullSecrets: opts.ImagePullSecrets,
},
}
//define the service account only when it exists to prevent pod crash
if opts.ServiceAccountName != "" {
pod.Spec.ServiceAccountName = opts.ServiceAccountName
}
return pod, nil
}
func (provider *Provider) BuildFrontPod(opts *PodOptions, hubHost string, hubPort string) (*core.Pod, error) {
cpuLimit, err := resource.ParseQuantity(opts.Resources.CpuLimit)
if err != nil {
return nil, fmt.Errorf("invalid cpu limit for %s container", opts.PodName)
}
memLimit, err := resource.ParseQuantity(opts.Resources.MemoryLimit)
if err != nil {
return nil, fmt.Errorf("invalid memory limit for %s container", opts.PodName)
}
cpuRequests, err := resource.ParseQuantity(opts.Resources.CpuRequests)
if err != nil {
return nil, fmt.Errorf("invalid cpu request for %s container", opts.PodName)
}
memRequests, err := resource.ParseQuantity(opts.Resources.MemoryRequests)
if err != nil {
return nil, fmt.Errorf("invalid memory request for %s container", opts.PodName)
}
volumeMounts := []core.VolumeMount{}
volumes := []core.Volume{}
containers := []core.Container{
{
Name: opts.PodName,
Image: docker.GetFrontImage(),
ImagePullPolicy: opts.ImagePullPolicy,
VolumeMounts: volumeMounts,
ReadinessProbe: &core.Probe{
FailureThreshold: 3,
ProbeHandler: core.ProbeHandler{
TCPSocket: &core.TCPSocketAction{
Port: intstr.Parse("80"),
},
},
PeriodSeconds: 1,
SuccessThreshold: 1,
TimeoutSeconds: 1,
},
Resources: core.ResourceRequirements{
Limits: core.ResourceList{
"cpu": cpuLimit,
"memory": memLimit,
},
Requests: core.ResourceList{
"cpu": cpuRequests,
"memory": memRequests,
},
},
Env: []core.EnvVar{
{
Name: "REACT_APP_DEFAULT_FILTER",
Value: " ",
},
{
Name: "REACT_APP_HUB_HOST",
Value: " ",
},
{
Name: "REACT_APP_HUB_PORT",
Value: hubPort,
},
},
},
}
pod := &core.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: opts.PodName,
Labels: buildWithDefaultLabels(map[string]string{
"app": opts.PodName,
}, provider),
},
Spec: core.PodSpec{
Containers: containers,
Volumes: volumes,
DNSPolicy: core.DNSClusterFirstWithHostNet,
TerminationGracePeriodSeconds: new(int64),
Tolerations: []core.Toleration{
{
Operator: core.TolerationOpExists,
Effect: core.TaintEffectNoExecute,
},
{
Operator: core.TolerationOpExists,
Effect: core.TaintEffectNoSchedule,
},
},
ImagePullSecrets: opts.ImagePullSecrets,
},
}
//define the service account only when it exists to prevent pod crash
if opts.ServiceAccountName != "" {
pod.Spec.ServiceAccountName = opts.ServiceAccountName
}
return pod, nil
}
func (provider *Provider) CreatePod(ctx context.Context, namespace string, podSpec *core.Pod) (*core.Pod, error) {
return provider.clientSet.CoreV1().Pods(namespace).Create(ctx, podSpec, metav1.CreateOptions{})
}
func (provider *Provider) CreateService(ctx context.Context, namespace string, serviceName string, appLabelValue string, targetPort int, port int32) (*core.Service, error) {
service := core.Service{
ObjectMeta: metav1.ObjectMeta{
Name: serviceName,
Labels: buildWithDefaultLabels(map[string]string{}, provider),
},
Spec: core.ServiceSpec{
Ports: []core.ServicePort{
{
Name: serviceName,
TargetPort: intstr.FromInt(targetPort),
Port: port,
},
},
Type: core.ServiceTypeClusterIP,
Selector: map[string]string{"app": appLabelValue},
},
}
return provider.clientSet.CoreV1().Services(namespace).Create(ctx, &service, metav1.CreateOptions{})
}
func (provider *Provider) CanI(ctx context.Context, namespace string, resource string, verb string, group string) (bool, error) {
selfSubjectAccessReview := &auth.SelfSubjectAccessReview{
Spec: auth.SelfSubjectAccessReviewSpec{
ResourceAttributes: &auth.ResourceAttributes{
Namespace: namespace,
Resource: resource,
Verb: verb,
Group: group,
},
},
}
response, err := provider.clientSet.AuthorizationV1().SelfSubjectAccessReviews().Create(ctx, selfSubjectAccessReview, metav1.CreateOptions{})
if err != nil {
return false, err
}
return response.Status.Allowed, nil
}
func (provider *Provider) DoesNamespaceExist(ctx context.Context, name string) (bool, error) {
namespaceResource, err := provider.clientSet.CoreV1().Namespaces().Get(ctx, name, metav1.GetOptions{})
return provider.doesResourceExist(namespaceResource, err)
}
func (provider *Provider) DoesServiceAccountExist(ctx context.Context, namespace string, name string) (bool, error) {
serviceAccountResource, err := provider.clientSet.CoreV1().ServiceAccounts(namespace).Get(ctx, name, metav1.GetOptions{})
return provider.doesResourceExist(serviceAccountResource, err)
}
func (provider *Provider) DoesServiceExist(ctx context.Context, namespace string, name string) (bool, error) {
serviceResource, err := provider.clientSet.CoreV1().Services(namespace).Get(ctx, name, metav1.GetOptions{})
return provider.doesResourceExist(serviceResource, err)
}
func (provider *Provider) DoesClusterRoleExist(ctx context.Context, name string) (bool, error) {
clusterRoleResource, err := provider.clientSet.RbacV1().ClusterRoles().Get(ctx, name, metav1.GetOptions{})
return provider.doesResourceExist(clusterRoleResource, err)
}
func (provider *Provider) DoesClusterRoleBindingExist(ctx context.Context, name string) (bool, error) {
clusterRoleBindingResource, err := provider.clientSet.RbacV1().ClusterRoleBindings().Get(ctx, name, metav1.GetOptions{})
return provider.doesResourceExist(clusterRoleBindingResource, err)
}
func (provider *Provider) DoesRoleExist(ctx context.Context, namespace string, name string) (bool, error) {
roleResource, err := provider.clientSet.RbacV1().Roles(namespace).Get(ctx, name, metav1.GetOptions{})
return provider.doesResourceExist(roleResource, err)
}
func (provider *Provider) DoesRoleBindingExist(ctx context.Context, namespace string, name string) (bool, error) {
roleBindingResource, err := provider.clientSet.RbacV1().RoleBindings(namespace).Get(ctx, name, metav1.GetOptions{})
return provider.doesResourceExist(roleBindingResource, err)
}
func (provider *Provider) doesResourceExist(resource interface{}, err error) (bool, error) {
// Getting NotFound error is the expected behavior when a resource does not exist.
if k8serrors.IsNotFound(err) {
@@ -448,402 +88,6 @@ func (provider *Provider) doesResourceExist(resource interface{}, err error) (bo
return resource != nil, nil
}
func (provider *Provider) CreateSelfRBAC(ctx context.Context, namespace string, serviceAccountName string, clusterRoleName string, clusterRoleBindingName string, version string, resources []string) error {
serviceAccount := &core.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: serviceAccountName,
Labels: buildWithDefaultLabels(map[string]string{
fmt.Sprintf("%s-cli-version", misc.Program): version,
}, provider),
},
}
clusterRole := &rbac.ClusterRole{
ObjectMeta: metav1.ObjectMeta{
Name: clusterRoleName,
Labels: buildWithDefaultLabels(map[string]string{
fmt.Sprintf("%s-cli-version", misc.Program): version,
}, provider),
},
Rules: []rbac.PolicyRule{
{
APIGroups: []string{"", "extensions", "apps"},
Resources: resources,
Verbs: []string{"list", "get", "watch"},
},
},
}
clusterRoleBinding := &rbac.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: clusterRoleBindingName,
Labels: buildWithDefaultLabels(map[string]string{
fmt.Sprintf("%s-cli-version", misc.Program): version,
}, provider),
},
RoleRef: rbac.RoleRef{
Name: clusterRoleName,
Kind: "ClusterRole",
APIGroup: "rbac.authorization.k8s.io",
},
Subjects: []rbac.Subject{
{
Kind: "ServiceAccount",
Name: serviceAccountName,
Namespace: namespace,
},
},
}
_, err := provider.clientSet.CoreV1().ServiceAccounts(namespace).Create(ctx, serviceAccount, metav1.CreateOptions{})
if err != nil && !k8serrors.IsAlreadyExists(err) {
return err
}
_, err = provider.clientSet.RbacV1().ClusterRoles().Create(ctx, clusterRole, metav1.CreateOptions{})
if err != nil && !k8serrors.IsAlreadyExists(err) {
return err
}
_, err = provider.clientSet.RbacV1().ClusterRoleBindings().Create(ctx, clusterRoleBinding, metav1.CreateOptions{})
if err != nil && !k8serrors.IsAlreadyExists(err) {
return err
}
return nil
}
func (provider *Provider) CreateSelfRBACNamespaceRestricted(ctx context.Context, namespace string, serviceAccountName string, roleName string, roleBindingName string, version string) error {
serviceAccount := &core.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: serviceAccountName,
Labels: buildWithDefaultLabels(map[string]string{
fmt.Sprintf("%s-cli-version", misc.Program): version,
}, provider),
},
}
role := &rbac.Role{
ObjectMeta: metav1.ObjectMeta{
Name: roleName,
Labels: buildWithDefaultLabels(map[string]string{
fmt.Sprintf("%s-cli-version", misc.Program): version,
}, provider),
},
Rules: []rbac.PolicyRule{
{
APIGroups: []string{"", "extensions", "apps"},
Resources: []string{"pods", "services", "endpoints"},
Verbs: []string{"list", "get", "watch"},
},
},
}
roleBinding := &rbac.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: roleBindingName,
Labels: buildWithDefaultLabels(map[string]string{
fmt.Sprintf("%s-cli-version", misc.Program): version,
}, provider),
},
RoleRef: rbac.RoleRef{
Name: roleName,
Kind: "Role",
APIGroup: "rbac.authorization.k8s.io",
},
Subjects: []rbac.Subject{
{
Kind: "ServiceAccount",
Name: serviceAccountName,
Namespace: namespace,
},
},
}
_, err := provider.clientSet.CoreV1().ServiceAccounts(namespace).Create(ctx, serviceAccount, metav1.CreateOptions{})
if err != nil && !k8serrors.IsAlreadyExists(err) {
return err
}
_, err = provider.clientSet.RbacV1().Roles(namespace).Create(ctx, role, metav1.CreateOptions{})
if err != nil && !k8serrors.IsAlreadyExists(err) {
return err
}
_, err = provider.clientSet.RbacV1().RoleBindings(namespace).Create(ctx, roleBinding, metav1.CreateOptions{})
if err != nil && !k8serrors.IsAlreadyExists(err) {
return err
}
return nil
}
func (provider *Provider) RemoveNamespace(ctx context.Context, name string) error {
err := provider.clientSet.CoreV1().Namespaces().Delete(ctx, name, metav1.DeleteOptions{})
return provider.handleRemovalError(err)
}
func (provider *Provider) RemoveClusterRole(ctx context.Context, name string) error {
err := provider.clientSet.RbacV1().ClusterRoles().Delete(ctx, name, metav1.DeleteOptions{})
return provider.handleRemovalError(err)
}
func (provider *Provider) RemoveClusterRoleBinding(ctx context.Context, name string) error {
err := provider.clientSet.RbacV1().ClusterRoleBindings().Delete(ctx, name, metav1.DeleteOptions{})
return provider.handleRemovalError(err)
}
func (provider *Provider) RemoveRoleBinding(ctx context.Context, namespace string, name string) error {
err := provider.clientSet.RbacV1().RoleBindings(namespace).Delete(ctx, name, metav1.DeleteOptions{})
return provider.handleRemovalError(err)
}
func (provider *Provider) RemoveRole(ctx context.Context, namespace string, name string) error {
err := provider.clientSet.RbacV1().Roles(namespace).Delete(ctx, name, metav1.DeleteOptions{})
return provider.handleRemovalError(err)
}
func (provider *Provider) RemoveServiceAccount(ctx context.Context, namespace string, name string) error {
err := provider.clientSet.CoreV1().ServiceAccounts(namespace).Delete(ctx, name, metav1.DeleteOptions{})
return provider.handleRemovalError(err)
}
func (provider *Provider) RemovePod(ctx context.Context, namespace string, podName string) error {
err := provider.clientSet.CoreV1().Pods(namespace).Delete(ctx, podName, metav1.DeleteOptions{})
return provider.handleRemovalError(err)
}
func (provider *Provider) RemoveConfigMap(ctx context.Context, namespace string, configMapName string) error {
err := provider.clientSet.CoreV1().ConfigMaps(namespace).Delete(ctx, configMapName, metav1.DeleteOptions{})
return provider.handleRemovalError(err)
}
func (provider *Provider) RemoveService(ctx context.Context, namespace string, serviceName string) error {
err := provider.clientSet.CoreV1().Services(namespace).Delete(ctx, serviceName, metav1.DeleteOptions{})
return provider.handleRemovalError(err)
}
func (provider *Provider) RemoveDaemonSet(ctx context.Context, namespace string, daemonSetName string) error {
err := provider.clientSet.AppsV1().DaemonSets(namespace).Delete(ctx, daemonSetName, metav1.DeleteOptions{})
return provider.handleRemovalError(err)
}
func (provider *Provider) handleRemovalError(err error) error {
// Ignore NotFound - There is nothing to delete.
// Ignore Forbidden - Assume that a user could not have created the resource in the first place.
if k8serrors.IsNotFound(err) || k8serrors.IsForbidden(err) {
return nil
}
return err
}
func (provider *Provider) ApplyWorkerDaemonSet(
ctx context.Context,
namespace string,
daemonSetName string,
podImage string,
workerPodName string,
serviceAccountName string,
resources configStructs.Resources,
imagePullPolicy core.PullPolicy,
imagePullSecrets []core.LocalObjectReference,
serviceMesh bool,
tls bool,
debug bool,
) error {
log.Debug().
Str("namespace", namespace).
Str("daemonset-name", daemonSetName).
Str("image", podImage).
Str("pod", workerPodName).
Msg("Applying worker DaemonSets.")
command := []string{"./worker", "-i", "any", "-port", "8897"}
if debug {
command = append(command, "-debug")
}
if serviceMesh {
command = append(command, "-servicemesh")
}
if tls {
command = append(command, "-tls")
}
if serviceMesh || tls {
command = append(command, "-procfs", procfsMountPath)
}
workerContainer := applyconfcore.Container()
workerContainer.WithName(workerPodName)
workerContainer.WithImage(podImage)
workerContainer.WithImagePullPolicy(imagePullPolicy)
caps := applyconfcore.Capabilities().WithDrop("ALL")
caps = caps.WithAdd("NET_RAW").WithAdd("NET_ADMIN") // to listen to traffic using libpcap
if serviceMesh || tls {
caps = caps.WithAdd("SYS_ADMIN") // to read /proc/PID/net/ns + to install eBPF programs (kernel < 5.8)
caps = caps.WithAdd("SYS_PTRACE") // to set netns to other process + to open libssl.so of other process
if serviceMesh {
caps = caps.WithAdd("DAC_OVERRIDE") // to read /proc/PID/environ
}
if tls {
caps = caps.WithAdd("SYS_RESOURCE") // to change rlimits for eBPF
}
}
workerContainer.WithSecurityContext(applyconfcore.SecurityContext().WithCapabilities(caps))
workerContainer.WithCommand(command...)
var envvars []*v1.EnvVarApplyConfiguration
// Worker build with -race flag requires the GODEBUG=netdns=go
// envvars = append(envvars, applyconfcore.EnvVar().WithName("GODEBUG").WithValue("netdns=go"))
if debug {
envvars = append(envvars, applyconfcore.EnvVar().WithName("MEMORY_PROFILING_ENABLED").WithValue("true"))
envvars = append(envvars, applyconfcore.EnvVar().WithName("MEMORY_PROFILING_INTERVAL_SECONDS").WithValue("10"))
envvars = append(envvars, applyconfcore.EnvVar().WithName("MEMORY_USAGE_INTERVAL_MILLISECONDS").WithValue("500"))
}
workerContainer.WithEnv(envvars...)
cpuLimit, err := resource.ParseQuantity(resources.CpuLimit)
if err != nil {
return fmt.Errorf("invalid cpu limit for %s container", workerPodName)
}
memLimit, err := resource.ParseQuantity(resources.MemoryLimit)
if err != nil {
return fmt.Errorf("invalid memory limit for %s container", workerPodName)
}
cpuRequests, err := resource.ParseQuantity(resources.CpuRequests)
if err != nil {
return fmt.Errorf("invalid cpu request for %s container", workerPodName)
}
memRequests, err := resource.ParseQuantity(resources.MemoryRequests)
if err != nil {
return fmt.Errorf("invalid memory request for %s container", workerPodName)
}
workerResourceLimits := core.ResourceList{
"cpu": cpuLimit,
"memory": memLimit,
}
workerResourceRequests := core.ResourceList{
"cpu": cpuRequests,
"memory": memRequests,
}
workerResources := applyconfcore.ResourceRequirements().WithRequests(workerResourceRequests).WithLimits(workerResourceLimits)
workerContainer.WithResources(workerResources)
nodeAffinity := applyconfcore.NodeAffinity()
affinity := applyconfcore.Affinity()
affinity.WithNodeAffinity(nodeAffinity)
noExecuteToleration := applyconfcore.Toleration()
noExecuteToleration.WithOperator(core.TolerationOpExists)
noExecuteToleration.WithEffect(core.TaintEffectNoExecute)
noScheduleToleration := applyconfcore.Toleration()
noScheduleToleration.WithOperator(core.TolerationOpExists)
noScheduleToleration.WithEffect(core.TaintEffectNoSchedule)
// Host procfs is needed inside the container because we need access to
// the network namespaces of processes on the machine.
//
procfsVolume := applyconfcore.Volume()
procfsVolume.WithName(procfsVolumeName).WithHostPath(applyconfcore.HostPathVolumeSource().WithPath("/proc"))
procfsVolumeMount := applyconfcore.VolumeMount().WithName(procfsVolumeName).WithMountPath(procfsMountPath).WithReadOnly(true)
workerContainer.WithVolumeMounts(procfsVolumeMount)
// We need access to /sys in order to install certain eBPF tracepoints
//
sysfsVolume := applyconfcore.Volume()
sysfsVolume.WithName(sysfsVolumeName).WithHostPath(applyconfcore.HostPathVolumeSource().WithPath("/sys"))
sysfsVolumeMount := applyconfcore.VolumeMount().WithName(sysfsVolumeName).WithMountPath(sysfsMountPath).WithReadOnly(true)
workerContainer.WithVolumeMounts(sysfsVolumeMount)
podSpec := applyconfcore.PodSpec()
podSpec.WithHostNetwork(true)
podSpec.WithDNSPolicy(core.DNSClusterFirstWithHostNet)
podSpec.WithTerminationGracePeriodSeconds(0)
if serviceAccountName != "" {
podSpec.WithServiceAccountName(serviceAccountName)
}
podSpec.WithContainers(workerContainer)
podSpec.WithAffinity(affinity)
podSpec.WithTolerations(noExecuteToleration, noScheduleToleration)
podSpec.WithVolumes(procfsVolume, sysfsVolume)
if len(imagePullSecrets) > 0 {
localObjectReference := applyconfcore.LocalObjectReference()
for _, secret := range imagePullSecrets {
localObjectReference.WithName(secret.Name)
}
podSpec.WithImagePullSecrets(localObjectReference)
}
podTemplate := applyconfcore.PodTemplateSpec()
podTemplate.WithLabels(buildWithDefaultLabels(map[string]string{
"app": workerPodName,
}, provider))
podTemplate.WithSpec(podSpec)
labelSelector := applyconfmeta.LabelSelector()
labelSelector.WithMatchLabels(map[string]string{"app": workerPodName})
applyOptions := metav1.ApplyOptions{
Force: true,
FieldManager: fieldManagerName,
}
daemonSet := applyconfapp.DaemonSet(daemonSetName, namespace)
daemonSet.
WithLabels(buildWithDefaultLabels(map[string]string{}, provider)).
WithSpec(applyconfapp.DaemonSetSpec().WithSelector(labelSelector).WithTemplate(podTemplate))
_, err = provider.clientSet.AppsV1().DaemonSets(namespace).Apply(ctx, daemonSet, applyOptions)
return err
}
func (provider *Provider) ResetWorkerDaemonSet(ctx context.Context, namespace string, daemonSetName string, podImage string, workerPodName string) error {
workerContainer := applyconfcore.Container()
workerContainer.WithName(workerPodName)
workerContainer.WithImage(podImage)
nodeSelectorRequirement := applyconfcore.NodeSelectorRequirement()
nodeSelectorRequirement.WithKey(fmt.Sprintf("%s-non-existing-label", misc.Program))
nodeSelectorRequirement.WithOperator(core.NodeSelectorOpExists)
nodeSelectorTerm := applyconfcore.NodeSelectorTerm()
nodeSelectorTerm.WithMatchExpressions(nodeSelectorRequirement)
nodeSelector := applyconfcore.NodeSelector()
nodeSelector.WithNodeSelectorTerms(nodeSelectorTerm)
nodeAffinity := applyconfcore.NodeAffinity()
nodeAffinity.WithRequiredDuringSchedulingIgnoredDuringExecution(nodeSelector)
affinity := applyconfcore.Affinity()
affinity.WithNodeAffinity(nodeAffinity)
podSpec := applyconfcore.PodSpec()
podSpec.WithContainers(workerContainer)
podSpec.WithAffinity(affinity)
podTemplate := applyconfcore.PodTemplateSpec()
podTemplate.WithLabels(buildWithDefaultLabels(map[string]string{
"app": workerPodName,
}, provider))
podTemplate.WithSpec(podSpec)
labelSelector := applyconfmeta.LabelSelector()
labelSelector.WithMatchLabels(map[string]string{"app": workerPodName})
applyOptions := metav1.ApplyOptions{
Force: true,
FieldManager: fieldManagerName,
}
daemonSet := applyconfapp.DaemonSet(daemonSetName, namespace)
daemonSet.
WithLabels(buildWithDefaultLabels(map[string]string{}, provider)).
WithSpec(applyconfapp.DaemonSetSpec().WithSelector(labelSelector).WithTemplate(podTemplate))
_, err := provider.clientSet.AppsV1().DaemonSets(namespace).Apply(ctx, daemonSet, applyOptions)
return err
}
func (provider *Provider) listPodsImpl(ctx context.Context, regex *regexp.Regexp, namespaces []string, listOptions metav1.ListOptions) ([]core.Pod, error) {
var pods []core.Pod
for _, namespace := range namespaces {
@@ -868,10 +112,6 @@ func (provider *Provider) ListAllPodsMatchingRegex(ctx context.Context, regex *r
return provider.listPodsImpl(ctx, regex, namespaces, metav1.ListOptions{})
}
func (provider *Provider) GetPod(ctx context.Context, namespaces string, podName string) (*core.Pod, error) {
return provider.clientSet.CoreV1().Pods(namespaces).Get(ctx, podName, metav1.GetOptions{})
}
func (provider *Provider) ListAllRunningPodsMatchingRegex(ctx context.Context, regex *regexp.Regexp, namespaces []string) ([]core.Pod, error) {
pods, err := provider.ListAllPodsMatchingRegex(ctx, regex, namespaces)
if err != nil {
@@ -887,8 +127,14 @@ func (provider *Provider) ListAllRunningPodsMatchingRegex(ctx context.Context, r
return matchingPods, nil
}
func (provider *Provider) ListPodsByAppLabel(ctx context.Context, namespaces string, labelName string) ([]core.Pod, error) {
pods, err := provider.clientSet.CoreV1().Pods(namespaces).List(ctx, metav1.ListOptions{LabelSelector: fmt.Sprintf("app=%s", labelName)})
func (provider *Provider) ListPodsByAppLabel(ctx context.Context, namespaces string, labels map[string]string) ([]core.Pod, error) {
pods, err := provider.clientSet.CoreV1().Pods(namespaces).List(ctx, metav1.ListOptions{
LabelSelector: metav1.FormatLabelSelector(
&metav1.LabelSelector{
MatchLabels: labels,
},
),
})
if err != nil {
return nil, err
}
@@ -896,15 +142,6 @@ func (provider *Provider) ListPodsByAppLabel(ctx context.Context, namespaces str
return pods.Items, err
}
func (provider *Provider) ListAllNamespaces(ctx context.Context) ([]core.Namespace, error) {
namespaces, err := provider.clientSet.CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
if err != nil {
return nil, err
}
return namespaces.Items, err
}
func (provider *Provider) GetPodLogs(ctx context.Context, namespace string, podName string, containerName string) (string, error) {
podLogOpts := core.PodLogOptions{Container: containerName}
req := provider.clientSet.CoreV1().Pods(namespace).GetLogs(podName, &podLogOpts)
@@ -930,41 +167,6 @@ func (provider *Provider) GetNamespaceEvents(ctx context.Context, namespace stri
return eventList.String(), nil
}
func (provider *Provider) ListManagedServiceAccounts(ctx context.Context, namespace string) (*core.ServiceAccountList, error) {
listOptions := metav1.ListOptions{
LabelSelector: fmt.Sprintf("%s=%s", LabelManagedBy, provider.managedBy),
}
return provider.clientSet.CoreV1().ServiceAccounts(namespace).List(ctx, listOptions)
}
func (provider *Provider) ListManagedClusterRoles(ctx context.Context) (*rbac.ClusterRoleList, error) {
listOptions := metav1.ListOptions{
LabelSelector: fmt.Sprintf("%s=%s", LabelManagedBy, provider.managedBy),
}
return provider.clientSet.RbacV1().ClusterRoles().List(ctx, listOptions)
}
func (provider *Provider) ListManagedClusterRoleBindings(ctx context.Context) (*rbac.ClusterRoleBindingList, error) {
listOptions := metav1.ListOptions{
LabelSelector: fmt.Sprintf("%s=%s", LabelManagedBy, provider.managedBy),
}
return provider.clientSet.RbacV1().ClusterRoleBindings().List(ctx, listOptions)
}
func (provider *Provider) ListManagedRoles(ctx context.Context, namespace string) (*rbac.RoleList, error) {
listOptions := metav1.ListOptions{
LabelSelector: fmt.Sprintf("%s=%s", LabelManagedBy, provider.managedBy),
}
return provider.clientSet.RbacV1().Roles(namespace).List(ctx, listOptions)
}
func (provider *Provider) ListManagedRoleBindings(ctx context.Context, namespace string) (*rbac.RoleBindingList, error) {
listOptions := metav1.ListOptions{
LabelSelector: fmt.Sprintf("%s=%s", LabelManagedBy, provider.managedBy),
}
return provider.clientSet.RbacV1().RoleBindings(namespace).List(ctx, listOptions)
}
// ValidateNotProxy We added this after a customer tried to run kubeshark from Lens, which used Lens's kube config, which has a cluster server configuration that points to Lens's local proxy.
// The workaround was to use the user's local default kube config.
// For now we are blocking the option to run kubeshark through a proxy to the k8s server.
@@ -1004,6 +206,14 @@ func (provider *Provider) GetKubernetesVersion() (*semver.SemVersion, error) {
return &serverVersionSemVer, nil
}
func (provider *Provider) GetNamespaces() []string {
if len(config.Config.Tap.Namespaces) > 0 {
return utils.Unique(config.Config.Tap.Namespaces)
} else {
return []string{K8sAllNamespaces}
}
}
func getClientSet(config *rest.Config) (*kubernetes.Clientset, error) {
clientSet, err := kubernetes.NewForConfig(config)
if err != nil {

View File

@@ -11,6 +11,7 @@ import (
"strings"
"time"
"github.com/kubeshark/kubeshark/config"
"github.com/rs/zerolog/log"
"k8s.io/apimachinery/pkg/util/httpstream"
"k8s.io/client-go/tools/portforward"
@@ -23,6 +24,7 @@ const selfServicePort = 80
func StartProxy(kubernetesProvider *Provider, proxyHost string, srcPort uint16, selfNamespace string, selfServiceName string) (*http.Server, error) {
log.Info().
Str("proxy-host", proxyHost).
Str("namespace", selfNamespace).
Str("service", selfServiceName).
Int("src-port", int(srcPort)).
@@ -66,8 +68,12 @@ func getSelfHubProxiedHostAndPath(selfNamespace string, selfServiceName string)
return fmt.Sprintf("/api/v1/namespaces/%s/services/%s:%d/proxy", selfNamespace, selfServiceName, selfServicePort)
}
func GetLocalhostOnPort(port uint16) string {
return fmt.Sprintf("http://localhost:%d", port)
func GetProxyOnPort(port uint16) string {
return fmt.Sprintf("http://%s:%d", config.Config.Tap.Proxy.Host, port)
}
func GetHubUrl() string {
return fmt.Sprintf("%s/api", GetProxyOnPort(config.Config.Tap.Proxy.Front.Port))
}
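A small sketch of how these URL helpers compose, assuming the default proxy config (`host: 127.0.0.1`, front port `8899`); the values in the comments are what that default would produce:
```go
// Sketch only: both helpers read the proxy settings from config.Config.Tap.Proxy.
front := kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Front.Port) // e.g. "http://127.0.0.1:8899"
hub := kubernetes.GetHubUrl()                                          // e.g. "http://127.0.0.1:8899/api"
log.Info().Str("front", front).Str("hub", hub).Msg("Proxy endpoints:")
```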
func getRerouteHttpHandlerSelfAPI(proxyHandler http.Handler, selfNamespace string, selfServiceName string) http.Handler {
@@ -100,7 +106,7 @@ func getRerouteHttpHandlerSelfStatic(proxyHandler http.Handler, selfNamespace st
}
func NewPortForward(kubernetesProvider *Provider, namespace string, podRegex *regexp.Regexp, srcPort uint16, dstPort uint16, ctx context.Context) (*portforward.PortForwarder, error) {
pods, err := kubernetesProvider.ListAllRunningPodsMatchingRegex(ctx, podRegex, []string{namespace})
pods, err := kubernetesProvider.ListPodsByAppLabel(ctx, namespace, map[string]string{"app.kubeshark.co/app": "front"})
if err != nil {
return nil, err
} else if len(pods) == 0 {

View File

@@ -1,67 +0,0 @@
package kubernetes
import (
"github.com/kubeshark/base/pkg/models"
"github.com/kubeshark/kubeshark/config"
core "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func GetNodeHostToTargetedPodsMap(targetedPods []core.Pod) models.NodeToPodsMap {
nodeToTargetedPodsMap := make(models.NodeToPodsMap)
for _, pod := range targetedPods {
minimizedPod := getMinimizedPod(pod)
existingList := nodeToTargetedPodsMap[pod.Spec.NodeName]
if existingList == nil {
nodeToTargetedPodsMap[pod.Spec.NodeName] = []core.Pod{minimizedPod}
} else {
nodeToTargetedPodsMap[pod.Spec.NodeName] = append(nodeToTargetedPodsMap[pod.Spec.NodeName], minimizedPod)
}
}
return nodeToTargetedPodsMap
}
func getMinimizedPod(fullPod core.Pod) core.Pod {
return core.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: fullPod.Name,
Namespace: fullPod.Namespace,
},
Status: core.PodStatus{
PodIP: fullPod.Status.PodIP,
ContainerStatuses: getMinimizedContainerStatuses(fullPod),
},
}
}
func getMinimizedContainerStatuses(fullPod core.Pod) []core.ContainerStatus {
result := make([]core.ContainerStatus, len(fullPod.Status.ContainerStatuses))
for i, container := range fullPod.Status.ContainerStatuses {
result[i] = core.ContainerStatus{
ContainerID: container.ContainerID,
}
}
return result
}
func GetPodInfosForPods(pods []core.Pod) []*models.PodInfo {
podInfos := make([]*models.PodInfo, 0)
for _, pod := range pods {
podInfos = append(podInfos, &models.PodInfo{Name: pod.Name, Namespace: pod.Namespace, NodeName: pod.Spec.NodeName})
}
return podInfos
}
func buildWithDefaultLabels(labels map[string]string, provider *Provider) map[string]string {
labels["LabelManagedBy"] = provider.managedBy
labels["LabelCreatedBy"] = provider.createdBy
for k, v := range config.Config.ResourceLabels {
labels[k] = v
}
return labels
}

View File

@@ -1,55 +0,0 @@
package kubernetes
import (
"context"
"github.com/kubeshark/kubeshark/config/configStructs"
"github.com/kubeshark/kubeshark/docker"
"github.com/rs/zerolog/log"
core "k8s.io/api/core/v1"
)
func CreateWorkers(
kubernetesProvider *Provider,
selfServiceAccountExists bool,
ctx context.Context,
namespace string,
resources configStructs.Resources,
imagePullPolicy core.PullPolicy,
imagePullSecrets []core.LocalObjectReference,
serviceMesh bool,
tls bool,
debug bool,
) error {
image := docker.GetWorkerImage()
var serviceAccountName string
if selfServiceAccountExists {
serviceAccountName = ServiceAccountName
} else {
serviceAccountName = ""
}
log.Info().Msg("Creating the worker DaemonSet...")
if err := kubernetesProvider.ApplyWorkerDaemonSet(
ctx,
namespace,
WorkerDaemonSetName,
image,
WorkerPodName,
serviceAccountName,
resources,
imagePullPolicy,
imagePullSecrets,
serviceMesh,
tls,
debug,
); err != nil {
return err
}
log.Info().Msg("Successfully created the worker DaemonSet.")
return nil
}

34
manifests/README.md Normal file
View File

@@ -0,0 +1,34 @@
# Manifests
## Apply
Clone the repo:
```shell
git clone git@github.com:kubeshark/kubeshark.git --depth 1
cd kubeshark/manifests
```
To apply the manifests, run:
```shell
kubectl apply -f .
```
To clean up:
```shell
kubectl delete namespace kubeshark
kubectl delete clusterrolebinding kubeshark-cluster-role-binding
kubectl delete clusterrole kubeshark-cluster-role
```
## Accessing
Set up port forwarding:
```shell
kubectl port-forward service/kubeshark-front 8899:80
```
Visit [localhost:8899](http://localhost:8899)

776
manifests/complete.yaml Normal file
View File

@@ -0,0 +1,776 @@
---
# Source: kubeshark/templates/16-network-policies.yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: kubeshark-hub-network-policy
namespace: default
spec:
podSelector:
matchLabels:
app.kubeshark.co/app: hub
policyTypes:
- Ingress
- Egress
ingress:
- ports:
- protocol: TCP
port: 8080
egress:
- {}
---
# Source: kubeshark/templates/16-network-policies.yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: kubeshark-front-network-policy
namespace: default
spec:
podSelector:
matchLabels:
app.kubeshark.co/app: front
policyTypes:
- Ingress
- Egress
ingress:
- ports:
- protocol: TCP
port: 8080
egress:
- {}
---
# Source: kubeshark/templates/16-network-policies.yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: kubeshark-worker-network-policy
namespace: default
spec:
podSelector:
matchLabels:
app.kubeshark.co/app: worker
policyTypes:
- Ingress
- Egress
ingress:
- ports:
- protocol: TCP
port: 31001
- protocol: TCP
port: 49100
egress:
- {}
---
# Source: kubeshark/templates/01-service-account.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
helm.sh/chart: kubeshark-52.2.0
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.2.0"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-service-account
namespace: default
---
# Source: kubeshark/templates/13-secret.yaml
kind: Secret
apiVersion: v1
metadata:
name: kubeshark-secret
namespace: default
labels:
app.kubeshark.co/app: hub
helm.sh/chart: kubeshark-52.2.0
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.2.0"
app.kubernetes.io/managed-by: Helm
stringData:
LICENSE: ''
SCRIPTING_ENV: '{}'
---
# Source: kubeshark/templates/13-secret.yaml
kind: Secret
apiVersion: v1
metadata:
name: kubeshark-saml-x509-crt-secret
namespace: default
labels:
app.kubeshark.co/app: hub
helm.sh/chart: kubeshark-52.2.0
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.2.0"
app.kubernetes.io/managed-by: Helm
stringData:
AUTH_SAML_X509_CRT: |
---
# Source: kubeshark/templates/13-secret.yaml
kind: Secret
apiVersion: v1
metadata:
name: kubeshark-saml-x509-key-secret
namespace: default
labels:
app.kubeshark.co/app: hub
helm.sh/chart: kubeshark-52.2.0
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.2.0"
app.kubernetes.io/managed-by: Helm
stringData:
AUTH_SAML_X509_KEY: |
---
# Source: kubeshark/templates/11-nginx-config-map.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: kubeshark-nginx-config-map
namespace: default
labels:
helm.sh/chart: kubeshark-52.2.0
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.2.0"
app.kubernetes.io/managed-by: Helm
data:
default.conf: |
server {
listen 8080;
listen [::]:8080;
access_log /dev/stdout;
error_log /dev/stdout;
client_body_buffer_size 64k;
client_header_buffer_size 32k;
large_client_header_buffers 8 64k;
location /api {
rewrite ^/api(.*)$ $1 break;
proxy_pass http://kubeshark-hub;
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header Host $http_host;
proxy_set_header Upgrade websocket;
proxy_set_header Connection Upgrade;
proxy_set_header Authorization $http_authorization;
proxy_pass_header Authorization;
proxy_connect_timeout 4s;
proxy_read_timeout 120s;
proxy_send_timeout 12s;
proxy_pass_request_headers on;
}
location /saml {
rewrite ^/saml(.*)$ /saml$1 break;
proxy_pass http://kubeshark-hub;
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header Host $http_host;
proxy_connect_timeout 4s;
proxy_read_timeout 120s;
proxy_send_timeout 12s;
proxy_pass_request_headers on;
}
location / {
root /usr/share/nginx/html;
index index.html index.htm;
try_files $uri $uri/ /index.html;
expires -1;
add_header Cache-Control no-cache;
}
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root /usr/share/nginx/html;
}
}
---
# Source: kubeshark/templates/12-config-map.yaml
kind: ConfigMap
apiVersion: v1
metadata:
name: kubeshark-config-map
namespace: default
labels:
app.kubeshark.co/app: hub
helm.sh/chart: kubeshark-52.2.0
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.2.0"
app.kubernetes.io/managed-by: Helm
data:
POD_REGEX: '.*'
NAMESPACES: ''
SCRIPTING_SCRIPTS: '{}'
INGRESS_ENABLED: 'false'
INGRESS_HOST: 'ks.svc.cluster.local'
PROXY_FRONT_PORT: '8899'
AUTH_ENABLED: ''
AUTH_TYPE: 'saml'
AUTH_SAML_IDP_METADATA_URL: ''
AUTH_SAML_ROLE_ATTRIBUTE: 'role'
AUTH_SAML_ROLES: '{"admin":{"canDownloadPCAP":true,"canReplayTraffic":true,"canUpdateTargetedPods":true,"canUseScripting":true,"filter":"","showAdminConsoleLink":true}}'
TELEMETRY_DISABLED: ''
REPLAY_DISABLED: ''
SCRIPTING_DISABLED: ''
TARGETED_PODS_UPDATE_DISABLED: ''
RECORDING_DISABLED: ''
GLOBAL_FILTER: ""
TRAFFIC_SAMPLE_RATE: '100'
JSON_TTL: '5m'
PCAP_TTL: '10s'
PCAP_ERROR_TTL: '60s'
TIMEZONE: ' '
---
# Source: kubeshark/templates/02-cluster-role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
helm.sh/chart: kubeshark-52.2.0
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.2.0"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-cluster-role-default
namespace: default
rules:
- apiGroups:
- ""
- extensions
- apps
resources:
- pods
- services
- endpoints
- persistentvolumeclaims
verbs:
- list
- get
- watch
---
# Source: kubeshark/templates/03-cluster-role-binding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
helm.sh/chart: kubeshark-52.2.0
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.2.0"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-cluster-role-binding-default
namespace: default
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubeshark-cluster-role-default
subjects:
- kind: ServiceAccount
name: kubeshark-service-account
namespace: default
---
# Source: kubeshark/templates/02-cluster-role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
helm.sh/chart: kubeshark-52.2.0
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.2.0"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-self-config-role
namespace: default
rules:
- apiGroups:
- ""
- v1
resourceNames:
- kubeshark-secret
- kubeshark-config-map
resources:
- secrets
- configmaps
verbs:
- get
- watch
- update
- patch
---
# Source: kubeshark/templates/03-cluster-role-binding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
helm.sh/chart: kubeshark-52.2.0
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.2.0"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-self-config-role-binding
namespace: default
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubeshark-self-config-role
subjects:
- kind: ServiceAccount
name: kubeshark-service-account
namespace: default
---
# Source: kubeshark/templates/05-hub-service.yaml
apiVersion: v1
kind: Service
metadata:
labels:
app.kubeshark.co/app: hub
helm.sh/chart: kubeshark-52.2.0
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.2.0"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-hub
namespace: default
spec:
ports:
- name: kubeshark-hub
port: 80
targetPort: 8080
selector:
app.kubeshark.co/app: hub
type: ClusterIP
---
# Source: kubeshark/templates/07-front-service.yaml
apiVersion: v1
kind: Service
metadata:
labels:
helm.sh/chart: kubeshark-52.2.0
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.2.0"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-front
namespace: default
spec:
ports:
- name: kubeshark-front
port: 80
targetPort: 8080
selector:
app.kubeshark.co/app: front
type: ClusterIP
---
# Source: kubeshark/templates/15-worker-service-metrics.yaml
kind: Service
apiVersion: v1
metadata:
name: kubeshark-worker-metrics
namespace: default
annotations:
prometheus.io/scrape: 'true'
prometheus.io/port: '49100'
spec:
selector:
app.kubeshark.co/app: worker
helm.sh/chart: kubeshark-52.2.0
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.2.0"
app.kubernetes.io/managed-by: Helm
ports:
- name: metrics
protocol: TCP
port: 49100
targetPort: 49100
---
# Source: kubeshark/templates/09-worker-daemon-set.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
labels:
app.kubeshark.co/app: worker
sidecar.istio.io/inject: "false"
helm.sh/chart: kubeshark-52.2.0
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.2.0"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-worker-daemon-set
namespace: default
spec:
selector:
matchLabels:
app.kubeshark.co/app: worker
helm.sh/chart: kubeshark-52.2.0
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.2.0"
app.kubernetes.io/managed-by: Helm
template:
metadata:
labels:
app.kubeshark.co/app: worker
helm.sh/chart: kubeshark-52.2.0
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.2.0"
app.kubernetes.io/managed-by: Helm
name: kubeshark-worker-daemon-set
namespace: kubeshark
spec:
initContainers:
- name: load-pf-ring
image: kubeshark/pf-ring-module:all
imagePullPolicy: Always
securityContext:
capabilities:
add:
- SYS_MODULE
drop:
- ALL
volumeMounts:
- name: lib-modules
mountPath: /lib/modules
containers:
- command:
- ./worker
- -i
- any
- -port
- '31001'
- -metrics-port
- '49100'
- -unixsocket
- -servicemesh
- -procfs
- /hostproc
- -kernel-module
image: 'docker.io/kubeshark/worker:v52.2.0'
imagePullPolicy: Always
name: sniffer
ports:
- containerPort: 49100
protocol: TCP
name: metrics
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: TCP_STREAM_CHANNEL_TIMEOUT_MS
value: '10000'
- name: KUBESHARK_CLOUD_API_URL
value: 'https://api.kubeshark.co'
resources:
limits:
cpu: 750m
memory: 1Gi
requests:
cpu: 50m
memory: 50Mi
securityContext:
capabilities:
add:
- NET_RAW
- NET_ADMIN
- SYS_ADMIN
- SYS_PTRACE
- DAC_OVERRIDE
drop:
- ALL
readinessProbe:
periodSeconds: 1
failureThreshold: 3
successThreshold: 1
initialDelaySeconds: 5
tcpSocket:
port: 31001
livenessProbe:
periodSeconds: 1
failureThreshold: 3
successThreshold: 1
initialDelaySeconds: 5
tcpSocket:
port: 31001
volumeMounts:
- mountPath: /hostproc
name: proc
readOnly: true
- mountPath: /sys
name: sys
readOnly: true
- mountPath: /app/data
name: data
- command:
- ./tracer
- -procfs
- /hostproc
image: 'docker.io/kubeshark/worker:v52.2.0'
imagePullPolicy: Always
name: tracer
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
resources:
limits:
cpu: 750m
memory: 1Gi
requests:
cpu: 50m
memory: 50Mi
securityContext:
capabilities:
add:
- SYS_ADMIN
- SYS_PTRACE
- SYS_RESOURCE
- IPC_LOCK
drop:
- ALL
volumeMounts:
- mountPath: /hostproc
name: proc
readOnly: true
- mountPath: /sys
name: sys
readOnly: true
- mountPath: /app/data
name: data
dnsPolicy: ClusterFirstWithHostNet
hostNetwork: true
serviceAccountName: kubeshark-service-account
terminationGracePeriodSeconds: 0
tolerations:
- effect: NoExecute
operator: Exists
- effect: NoSchedule
operator: Exists
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- linux
volumes:
- hostPath:
path: /proc
name: proc
- hostPath:
path: /sys
name: sys
- name: lib-modules
hostPath:
path: /lib/modules
- name: data
emptyDir:
sizeLimit: 500Mi
---
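The DaemonSet above schedules one worker pod per Linux node, with host networking, a PF_RING init container, and the sniffer and tracer containers. A small client-go sketch, assuming kubeconfig access and the `default` namespace, that lists those pods via the pod-template label:

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// Select the worker pods by the label set in the DaemonSet's pod template.
	pods, err := client.CoreV1().Pods("default").List(context.TODO(), metav1.ListOptions{
		LabelSelector: "app.kubeshark.co/app=worker",
	})
	if err != nil {
		panic(err)
	}
	for _, p := range pods.Items {
		fmt.Printf("%s on node %s: %s\n", p.Name, p.Spec.NodeName, p.Status.Phase)
	}
}
```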
# Source: kubeshark/templates/04-hub-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app.kubeshark.co/app: hub
helm.sh/chart: kubeshark-52.2.0
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.2.0"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-hub
namespace: default
spec:
replicas: 1 # Set the desired number of replicas
selector:
matchLabels:
app.kubeshark.co/app: hub
helm.sh/chart: kubeshark-52.2.0
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.2.0"
app.kubernetes.io/managed-by: Helm
template:
metadata:
labels:
app.kubeshark.co/app: hub
helm.sh/chart: kubeshark-52.2.0
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.2.0"
app.kubernetes.io/managed-by: Helm
spec:
dnsPolicy: ClusterFirstWithHostNet
serviceAccountName: kubeshark-service-account
containers:
- name: kubeshark-hub
command:
- ./hub
- -port
- "8080"
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: KUBESHARK_CLOUD_API_URL
value: 'https://api.kubeshark.co'
image: 'docker.io/kubeshark/hub:v52.2.0'
imagePullPolicy: Always
readinessProbe:
periodSeconds: 1
failureThreshold: 3
successThreshold: 1
initialDelaySeconds: 3
tcpSocket:
port: 8080
livenessProbe:
periodSeconds: 1
failureThreshold: 3
successThreshold: 1
initialDelaySeconds: 3
tcpSocket:
port: 8080
resources:
limits:
cpu: 750m
memory: 1Gi
requests:
cpu: 50m
memory: 50Mi
volumeMounts:
- name: saml-x509-volume
mountPath: "/etc/saml/x509"
readOnly: true
volumes:
- name: saml-x509-volume
projected:
sources:
- secret:
name: kubeshark-saml-x509-crt-secret
items:
- key: AUTH_SAML_X509_CRT
path: kubeshark.crt
- secret:
name: kubeshark-saml-x509-key-secret
items:
- key: AUTH_SAML_X509_KEY
path: kubeshark.key
---
# Source: kubeshark/templates/06-front-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app.kubeshark.co/app: front
helm.sh/chart: kubeshark-52.2.0
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.2.0"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-front
namespace: default
spec:
replicas: 1 # Set the desired number of replicas
selector:
matchLabels:
app.kubeshark.co/app: front
helm.sh/chart: kubeshark-52.2.0
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.2.0"
app.kubernetes.io/managed-by: Helm
template:
metadata:
labels:
app.kubeshark.co/app: front
helm.sh/chart: kubeshark-52.2.0
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.2.0"
app.kubernetes.io/managed-by: Helm
spec:
containers:
- env:
- name: REACT_APP_DEFAULT_FILTER
value: ' '
- name: REACT_APP_AUTH_ENABLED
value: 'false'
- name: REACT_APP_AUTH_TYPE
value: 'saml'
- name: REACT_APP_AUTH_SAML_IDP_METADATA_URL
value: ' '
- name: REACT_APP_TIMEZONE
value: ' '
- name: REACT_APP_REPLAY_DISABLED
value: 'false'
- name: REACT_APP_SCRIPTING_DISABLED
value: 'false'
- name: REACT_APP_TARGETED_PODS_UPDATE_DISABLED
value: 'false'
- name: REACT_APP_RECORDING_DISABLED
value: 'false'
image: 'docker.io/kubeshark/front:v52.2.0'
imagePullPolicy: Always
name: kubeshark-front
livenessProbe:
periodSeconds: 1
failureThreshold: 3
successThreshold: 1
initialDelaySeconds: 3
tcpSocket:
port: 8080
readinessProbe:
periodSeconds: 1
failureThreshold: 3
successThreshold: 1
initialDelaySeconds: 3
tcpSocket:
port: 8080
timeoutSeconds: 1
resources:
limits:
cpu: 750m
memory: 1Gi
requests:
cpu: 50m
memory: 50Mi
volumeMounts:
- name: nginx-config
mountPath: /etc/nginx/conf.d/default.conf
subPath: default.conf
readOnly: true
volumes:
- name: nginx-config
configMap:
name: kubeshark-nginx-config-map
dnsPolicy: ClusterFirstWithHostNet
serviceAccountName: kubeshark-service-account
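Both Deployments above use tcpSocket readiness and liveness probes against container port 8080, while their Services publish port 80. A sketch that mirrors those probes from another in-cluster pod, assuming cluster DNS and the `default` namespace shown here:

```go
package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	// Same style of check as the tcpSocket probes, but through the ClusterIP Services.
	for _, addr := range []string{"kubeshark-hub.default.svc:80", "kubeshark-front.default.svc:80"} {
		conn, err := net.DialTimeout("tcp", addr, 2*time.Second)
		if err != nil {
			fmt.Printf("%s: not reachable: %v\n", addr, err)
			continue
		}
		conn.Close()
		fmt.Printf("%s: reachable\n", addr)
	}
}
```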


@@ -0,0 +1,25 @@
grafana:
additionalDataSources: []
prometheus:
prometheusSpec:
scrapeInterval: 10s
evaluationInterval: 30s
additionalScrapeConfigs: |
- job_name: 'kubeshark-worker-metrics'
kubernetes_sd_configs:
- role: endpoints
relabel_configs:
- source_labels: [__meta_kubernetes_pod_name]
target_label: pod
- source_labels: [__meta_kubernetes_pod_node_name]
target_label: node
- source_labels: [__meta_kubernetes_endpoint_port_name]
action: keep
regex: ^metrics$
- source_labels: [__address__, __meta_kubernetes_endpoint_port_number]
action: replace
regex: ([^:]+)(?::\d+)?
replacement: $1:49100
target_label: __address__
- action: labelmap
regex: __meta_kubernetes_service_label_(.+)
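The second-to-last relabel rule rewrites the scrape target so every discovered endpoint is scraped on port 49100. A simplified Go illustration of that regex and replacement; note that Prometheus anchors the regex and joins multiple `source_labels` with `;`, so this only demonstrates the address rewrite itself:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern as the relabel rule: capture the host part, drop any existing port.
	re := regexp.MustCompile(`([^:]+)(?::\d+)?`)
	for _, addr := range []string{"10.0.12.34:8080", "10.0.12.34"} {
		// Pin the scrape port to 49100, matching the worker metrics Service.
		fmt.Println(re.ReplaceAllString(addr, "${1}:49100"))
	}
}
```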


@@ -0,0 +1,12 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: kubeshark-tls
namespace: default
spec:
issuerRef:
name: letsencrypt-prod
kind: ClusterIssuer
secretName: cert-kubeshark
dnsNames:
- ks.svc.cluster.local


@@ -0,0 +1,14 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-prod
spec:
acme:
server: https://acme-v02.api.letsencrypt.org/directory
email: info@kubeshark.co
privateKeySecretRef:
name: letsencrypt-prod-key
solvers:
- http01:
ingress:
class: kubeshark-ingress-class

manifests/tls/run.sh Executable file

@@ -0,0 +1,15 @@
#!/bin/bash
__dir="$(cd -P -- "$(dirname -- "$0")" && pwd -P)"
helm repo add jetstack https://charts.jetstack.io
helm repo update
kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.9.1/cert-manager.crds.yaml
helm install \
cert-manager jetstack/cert-manager \
--namespace cert-manager \
--create-namespace \
--version v1.9.1
kubectl apply -f ${__dir}/cluster-issuer.yaml
kubectl apply -f ${__dir}/certificate.yaml


@@ -9,9 +9,11 @@ import (
var (
Software = "Kubeshark"
Program = "kubeshark"
Description = "The API Traffic Analyzer for Kubernetes"
Website = "https://kubeshark.co"
- Ver = "0.0"
- Branch = "develop"
+ Email = "info@kubeshark.co"
+ Ver = "0.0.0"
+ Branch = "master"
GitCommitHash = "" // this var is overridden using ldflags in makefile when building
BuildTimestamp = "" // this var is overridden using ldflags in makefile when building
RBACVersion = "v1"

misc/fsUtils/globUtils.go Normal file

@@ -0,0 +1,22 @@
package fsUtils
import (
"fmt"
"os"
"path/filepath"
)
func RemoveFilesByExtension(dirPath string, ext string) error {
files, err := filepath.Glob(filepath.Join(dirPath, fmt.Sprintf("/*.%s", ext)))
if err != nil {
return err
}
for _, f := range files {
if err := os.Remove(f); err != nil {
return err
}
}
return nil
}
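A hypothetical caller of `RemoveFilesByExtension`; the import path assumes the repository's module layout:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"

	// Assumed import path for the helper shown above.
	"github.com/kubeshark/kubeshark/misc/fsUtils"
)

func main() {
	dir, err := os.MkdirTemp("", "kubeshark-demo")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	// Create a few throwaway files, then remove only the *.pcap ones.
	for _, name := range []string{"a.pcap", "b.pcap", "keep.json"} {
		if err := os.WriteFile(filepath.Join(dir, name), []byte("x"), 0o644); err != nil {
			panic(err)
		}
	}
	if err := fsUtils.RemoveFilesByExtension(dir, "pcap"); err != nil {
		panic(err)
	}

	left, _ := filepath.Glob(filepath.Join(dir, "*"))
	fmt.Println(left) // only keep.json should remain
}
```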


@@ -14,14 +14,14 @@ import (
)
func DumpLogs(ctx context.Context, provider *kubernetes.Provider, filePath string) error {
- podExactRegex := regexp.MustCompile("^" + kubernetes.SelfResourcesPrefix)
- pods, err := provider.ListAllPodsMatchingRegex(ctx, podExactRegex, []string{config.Config.Tap.SelfNamespace})
+ podExactRegex := regexp.MustCompile("^" + kubernetes.SELF_RESOURCES_PREFIX)
+ pods, err := provider.ListAllPodsMatchingRegex(ctx, podExactRegex, []string{config.Config.Tap.Release.Namespace})
if err != nil {
return err
}
if len(pods) == 0 {
- return fmt.Errorf("No %s pods found in namespace %s", misc.Software, config.Config.Tap.SelfNamespace)
+ return fmt.Errorf("No %s pods found in namespace %s", misc.Software, config.Config.Tap.Release.Namespace)
}
newZipFile, err := os.Create(filePath)
@@ -60,17 +60,17 @@ func DumpLogs(ctx context.Context, provider *kubernetes.Provider, filePath strin
}
}
- events, err := provider.GetNamespaceEvents(ctx, config.Config.Tap.SelfNamespace)
+ events, err := provider.GetNamespaceEvents(ctx, config.Config.Tap.Release.Namespace)
if err != nil {
log.Error().Err(err).Msg("Failed to get k8b events!")
} else {
- log.Debug().Str("namespace", config.Config.Tap.SelfNamespace).Msg("Successfully read events.")
+ log.Debug().Str("namespace", config.Config.Tap.Release.Namespace).Msg("Successfully read events.")
}
- if err := AddStrToZip(zipWriter, events, fmt.Sprintf("%s_events.log", config.Config.Tap.SelfNamespace)); err != nil {
+ if err := AddStrToZip(zipWriter, events, fmt.Sprintf("%s_events.log", config.Config.Tap.Release.Namespace)); err != nil {
log.Error().Err(err).Msg("Failed write logs!")
} else {
- log.Debug().Str("namespace", config.Config.Tap.SelfNamespace).Msg("Successfully added events.")
+ log.Debug().Str("namespace", config.Config.Tap.Release.Namespace).Msg("Successfully added events.")
}
if err := AddFileToZip(zipWriter, config.ConfigFilePath); err != nil {


@@ -16,7 +16,7 @@ import (
)
func CheckNewerVersion() {
- if os.Getenv("KUBESHARK_DISABLE_VERSION_CHECK") != "" {
+ if os.Getenv(fmt.Sprintf("%s_DISABLE_VERSION_CHECK", strings.ToUpper(misc.Program))) != "" {
return
}
@@ -44,7 +44,7 @@ func CheckNewerVersion() {
} else {
downloadCommand = fmt.Sprintf("sh <(curl -Ls %s/install)", misc.Website)
}
- msg := fmt.Sprintf("There is a new release! %v -> %v run:", misc.Ver, latestVersion)
+ msg := fmt.Sprintf("There is a new release! %v -> %v Please upgrade to the latest release, as new releases are not always backward compatible. Run:", misc.Ver, latestVersion)
log.Warn().Str("command", downloadCommand).Msg(fmt.Sprintf(utils.Yellow, msg))
}
}
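The check now derives the opt-out variable from the program name instead of hard-coding it. A small sketch of that derivation; the program name is inlined here rather than imported, to keep the sketch self-contained:

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

func main() {
	// misc.Program is "kubeshark", so the derived variable is KUBESHARK_DISABLE_VERSION_CHECK.
	program := "kubeshark"
	envVar := fmt.Sprintf("%s_DISABLE_VERSION_CHECK", strings.ToUpper(program))

	// Any non-empty value makes CheckNewerVersion return early.
	os.Setenv(envVar, "1")
	fmt.Println(envVar, "=", os.Getenv(envVar))
}
```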


@@ -1,168 +0,0 @@
package resources
import (
"context"
"fmt"
"github.com/kubeshark/kubeshark/errormessage"
"github.com/kubeshark/kubeshark/kubernetes"
"github.com/kubeshark/kubeshark/misc"
"github.com/kubeshark/kubeshark/utils"
"github.com/rs/zerolog/log"
"k8s.io/apimachinery/pkg/util/wait"
)
func CleanUpSelfResources(ctx context.Context, cancel context.CancelFunc, kubernetesProvider *kubernetes.Provider, isNsRestrictedMode bool, selfResourcesNamespace string) {
log.Warn().Msg(fmt.Sprintf("Removing %s resources...", misc.Software))
var leftoverResources []string
if isNsRestrictedMode {
leftoverResources = cleanUpRestrictedMode(ctx, kubernetesProvider, selfResourcesNamespace)
} else {
leftoverResources = cleanUpNonRestrictedMode(ctx, cancel, kubernetesProvider, selfResourcesNamespace)
}
if len(leftoverResources) > 0 {
errMsg := "Failed to remove the following resources."
for _, resource := range leftoverResources {
errMsg += "\n- " + resource
}
log.Error().Msg(fmt.Sprintf(utils.Red, errMsg))
}
}
func cleanUpNonRestrictedMode(ctx context.Context, cancel context.CancelFunc, kubernetesProvider *kubernetes.Provider, selfResourcesNamespace string) []string {
leftoverResources := make([]string, 0)
if err := kubernetesProvider.RemoveNamespace(ctx, selfResourcesNamespace); err != nil {
resourceDesc := fmt.Sprintf("Namespace %s", selfResourcesNamespace)
handleDeletionError(err, resourceDesc, &leftoverResources)
} else {
defer waitUntilNamespaceDeleted(ctx, cancel, kubernetesProvider, selfResourcesNamespace)
}
if resources, err := kubernetesProvider.ListManagedClusterRoles(ctx); err != nil {
resourceDesc := "ClusterRoles"
handleDeletionError(err, resourceDesc, &leftoverResources)
} else {
for _, resource := range resources.Items {
if err := kubernetesProvider.RemoveClusterRole(ctx, resource.Name); err != nil {
resourceDesc := fmt.Sprintf("ClusterRole %s", resource.Name)
handleDeletionError(err, resourceDesc, &leftoverResources)
}
}
}
if resources, err := kubernetesProvider.ListManagedClusterRoleBindings(ctx); err != nil {
resourceDesc := "ClusterRoleBindings"
handleDeletionError(err, resourceDesc, &leftoverResources)
} else {
for _, resource := range resources.Items {
if err := kubernetesProvider.RemoveClusterRoleBinding(ctx, resource.Name); err != nil {
resourceDesc := fmt.Sprintf("ClusterRoleBinding %s", resource.Name)
handleDeletionError(err, resourceDesc, &leftoverResources)
}
}
}
return leftoverResources
}
func waitUntilNamespaceDeleted(ctx context.Context, cancel context.CancelFunc, kubernetesProvider *kubernetes.Provider, selfResourcesNamespace string) {
// Call cancel if a terminating signal was received. Allows user to skip the wait.
go func() {
utils.WaitForTermination(ctx, cancel)
}()
if err := kubernetesProvider.WaitUtilNamespaceDeleted(ctx, selfResourcesNamespace); err != nil {
switch {
case ctx.Err() == context.Canceled:
log.Printf("Do nothing. User interrupted the wait")
log.Warn().
Str("namespace", selfResourcesNamespace).
Msg("Did nothing. User interrupted the wait.")
case err == wait.ErrWaitTimeout:
log.Warn().
Str("namespace", selfResourcesNamespace).
Msg("Timed out while deleting the namespace.")
default:
log.Warn().
Err(errormessage.FormatError(err)).
Str("namespace", selfResourcesNamespace).
Msg("Unknown error while deleting the namespace.")
}
}
}
func cleanUpRestrictedMode(ctx context.Context, kubernetesProvider *kubernetes.Provider, selfResourcesNamespace string) []string {
leftoverResources := make([]string, 0)
if err := kubernetesProvider.RemoveService(ctx, selfResourcesNamespace, kubernetes.FrontServiceName); err != nil {
resourceDesc := fmt.Sprintf("Service %s in namespace %s", kubernetes.FrontServiceName, selfResourcesNamespace)
handleDeletionError(err, resourceDesc, &leftoverResources)
}
if err := kubernetesProvider.RemoveService(ctx, selfResourcesNamespace, kubernetes.HubServiceName); err != nil {
resourceDesc := fmt.Sprintf("Service %s in namespace %s", kubernetes.HubServiceName, selfResourcesNamespace)
handleDeletionError(err, resourceDesc, &leftoverResources)
}
if err := kubernetesProvider.RemoveDaemonSet(ctx, selfResourcesNamespace, kubernetes.WorkerDaemonSetName); err != nil {
resourceDesc := fmt.Sprintf("DaemonSet %s in namespace %s", kubernetes.WorkerDaemonSetName, selfResourcesNamespace)
handleDeletionError(err, resourceDesc, &leftoverResources)
}
if resources, err := kubernetesProvider.ListManagedServiceAccounts(ctx, selfResourcesNamespace); err != nil {
resourceDesc := fmt.Sprintf("ServiceAccounts in namespace %s", selfResourcesNamespace)
handleDeletionError(err, resourceDesc, &leftoverResources)
} else {
for _, resource := range resources.Items {
if err := kubernetesProvider.RemoveServiceAccount(ctx, selfResourcesNamespace, resource.Name); err != nil {
resourceDesc := fmt.Sprintf("ServiceAccount %s in namespace %s", resource.Name, selfResourcesNamespace)
handleDeletionError(err, resourceDesc, &leftoverResources)
}
}
}
if resources, err := kubernetesProvider.ListManagedRoles(ctx, selfResourcesNamespace); err != nil {
resourceDesc := fmt.Sprintf("Roles in namespace %s", selfResourcesNamespace)
handleDeletionError(err, resourceDesc, &leftoverResources)
} else {
for _, resource := range resources.Items {
if err := kubernetesProvider.RemoveRole(ctx, selfResourcesNamespace, resource.Name); err != nil {
resourceDesc := fmt.Sprintf("Role %s in namespace %s", resource.Name, selfResourcesNamespace)
handleDeletionError(err, resourceDesc, &leftoverResources)
}
}
}
if resources, err := kubernetesProvider.ListManagedRoleBindings(ctx, selfResourcesNamespace); err != nil {
resourceDesc := fmt.Sprintf("RoleBindings in namespace %s", selfResourcesNamespace)
handleDeletionError(err, resourceDesc, &leftoverResources)
} else {
for _, resource := range resources.Items {
if err := kubernetesProvider.RemoveRoleBinding(ctx, selfResourcesNamespace, resource.Name); err != nil {
resourceDesc := fmt.Sprintf("RoleBinding %s in namespace %s", resource.Name, selfResourcesNamespace)
handleDeletionError(err, resourceDesc, &leftoverResources)
}
}
}
if err := kubernetesProvider.RemovePod(ctx, selfResourcesNamespace, kubernetes.HubPodName); err != nil {
resourceDesc := fmt.Sprintf("Pod %s in namespace %s", kubernetes.HubPodName, selfResourcesNamespace)
handleDeletionError(err, resourceDesc, &leftoverResources)
}
if err := kubernetesProvider.RemovePod(ctx, selfResourcesNamespace, kubernetes.FrontPodName); err != nil {
resourceDesc := fmt.Sprintf("Pod %s in namespace %s", kubernetes.FrontPodName, selfResourcesNamespace)
handleDeletionError(err, resourceDesc, &leftoverResources)
}
return leftoverResources
}
func handleDeletionError(err error, resourceDesc string, leftoverResources *[]string) {
log.Warn().Err(errormessage.FormatError(err)).Msg(fmt.Sprintf("Error while removing %s", resourceDesc))
*leftoverResources = append(*leftoverResources, resourceDesc)
}


@@ -1,125 +0,0 @@
package resources
import (
"context"
"fmt"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/config/configStructs"
"github.com/kubeshark/kubeshark/docker"
"github.com/kubeshark/kubeshark/errormessage"
"github.com/kubeshark/kubeshark/kubernetes"
"github.com/kubeshark/kubeshark/misc"
"github.com/rs/zerolog/log"
core "k8s.io/api/core/v1"
)
func CreateHubResources(ctx context.Context, kubernetesProvider *kubernetes.Provider, isNsRestrictedMode bool, selfNamespace string, hubResources configStructs.Resources, imagePullPolicy core.PullPolicy, imagePullSecrets []core.LocalObjectReference, debug bool) (bool, error) {
if !isNsRestrictedMode {
if err := createSelfNamespace(ctx, kubernetesProvider, selfNamespace); err != nil {
log.Debug().Err(err).Send()
}
}
selfServiceAccountExists, err := createRBACIfNecessary(ctx, kubernetesProvider, isNsRestrictedMode, selfNamespace, []string{"pods", "services", "endpoints"})
if err != nil {
log.Warn().Err(errormessage.FormatError(err)).Msg(fmt.Sprintf("Failed to ensure the resources required for IP resolving. %s will not resolve target IPs to names.", misc.Software))
}
var serviceAccountName string
if selfServiceAccountExists {
serviceAccountName = kubernetes.ServiceAccountName
} else {
serviceAccountName = ""
}
opts := &kubernetes.PodOptions{
Namespace: selfNamespace,
PodName: kubernetes.HubPodName,
PodImage: docker.GetHubImage(),
ServiceAccountName: serviceAccountName,
Resources: hubResources,
ImagePullPolicy: imagePullPolicy,
ImagePullSecrets: imagePullSecrets,
Debug: debug,
}
frontOpts := &kubernetes.PodOptions{
Namespace: selfNamespace,
PodName: kubernetes.FrontPodName,
PodImage: docker.GetWorkerImage(),
ServiceAccountName: serviceAccountName,
Resources: hubResources,
ImagePullPolicy: imagePullPolicy,
ImagePullSecrets: imagePullSecrets,
Debug: debug,
}
if err := createSelfHubPod(ctx, kubernetesProvider, opts); err != nil {
return selfServiceAccountExists, err
}
if err := createFrontPod(ctx, kubernetesProvider, frontOpts); err != nil {
return selfServiceAccountExists, err
}
// TODO: Why the port values need to be 80?
_, err = kubernetesProvider.CreateService(ctx, selfNamespace, kubernetes.HubServiceName, kubernetes.HubServiceName, 80, 80)
if err != nil {
return selfServiceAccountExists, err
}
log.Info().Str("service", kubernetes.HubServiceName).Msg("Successfully created a service.")
_, err = kubernetesProvider.CreateService(ctx, selfNamespace, kubernetes.FrontServiceName, kubernetes.FrontServiceName, 80, int32(config.Config.Tap.Proxy.Front.DstPort))
if err != nil {
return selfServiceAccountExists, err
}
log.Info().Str("service", kubernetes.FrontServiceName).Msg("Successfully created a service.")
return selfServiceAccountExists, nil
}
func createSelfNamespace(ctx context.Context, kubernetesProvider *kubernetes.Provider, selfNamespace string) error {
_, err := kubernetesProvider.CreateNamespace(ctx, selfNamespace)
return err
}
func createRBACIfNecessary(ctx context.Context, kubernetesProvider *kubernetes.Provider, isNsRestrictedMode bool, selfNamespace string, resources []string) (bool, error) {
if !isNsRestrictedMode {
if err := kubernetesProvider.CreateSelfRBAC(ctx, selfNamespace, kubernetes.ServiceAccountName, kubernetes.ClusterRoleName, kubernetes.ClusterRoleBindingName, misc.RBACVersion, resources); err != nil {
return false, err
}
} else {
if err := kubernetesProvider.CreateSelfRBACNamespaceRestricted(ctx, selfNamespace, kubernetes.ServiceAccountName, kubernetes.RoleName, kubernetes.RoleBindingName, misc.RBACVersion); err != nil {
return false, err
}
}
return true, nil
}
func createSelfHubPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, opts *kubernetes.PodOptions) error {
pod, err := kubernetesProvider.BuildHubPod(opts)
if err != nil {
return err
}
if _, err = kubernetesProvider.CreatePod(ctx, opts.Namespace, pod); err != nil {
return err
}
log.Info().Str("pod", pod.Name).Msg("Successfully created a pod.")
return nil
}
func createFrontPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, opts *kubernetes.PodOptions) error {
pod, err := kubernetesProvider.BuildFrontPod(opts, config.Config.Tap.Proxy.Host, fmt.Sprintf("%d", config.Config.Tap.Proxy.Hub.SrcPort))
if err != nil {
return err
}
if _, err = kubernetesProvider.CreatePod(ctx, opts.Namespace, pod); err != nil {
return err
}
log.Info().Str("pod", pod.Name).Msg("Successfully created a pod.")
return nil
}


@@ -16,8 +16,15 @@ func Get(url string, client *http.Client) (*http.Response, error) {
// Post - When err is nil, resp always contains a non-nil resp.Body.
// Caller should close resp.Body when done reading from it.
- func Post(url, contentType string, body io.Reader, client *http.Client) (*http.Response, error) {
- return checkError(client.Post(url, contentType, body))
+ func Post(url, contentType string, body io.Reader, client *http.Client, licenseKey string) (*http.Response, error) {
+ req, err := http.NewRequest(http.MethodPost, url, body)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Content-Type", "application/json")
+ req.Header.Set("License-Key", licenseKey)
+ return checkError(client.Do(req))
}
// Do - When err is nil, resp always contains a non-nil resp.Body.
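A hypothetical caller of the new `Post` signature; the import path, endpoint, and license key below are placeholders:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
	"time"

	// Assumed import path for the helper shown above.
	"github.com/kubeshark/kubeshark/utils"
)

func main() {
	payload, err := json.Marshal(map[string]string{"node": "worker-1"})
	if err != nil {
		panic(err)
	}
	client := &http.Client{Timeout: 10 * time.Second}

	// The extra argument is sent as the License-Key request header.
	resp, err := utils.Post("https://example.com/api", "application/json", bytes.NewReader(payload), client, "YOUR_LICENSE_KEY")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```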


@@ -2,36 +2,18 @@ package utils
import (
"bytes"
"encoding/json"
"gopkg.in/yaml.v3"
"github.com/goccy/go-yaml"
)
- const (
- empty = ""
- tab = "\t"
- )
- func PrettyJson(data interface{}) (string, error) {
+ func PrettyYaml(data interface{}) (result string, err error) {
buffer := new(bytes.Buffer)
- encoder := json.NewEncoder(buffer)
- encoder.SetIndent(empty, tab)
+ encoder := yaml.NewEncoder(buffer, yaml.Indent(2))
- err := encoder.Encode(data)
+ err = encoder.Encode(data)
if err != nil {
- return empty, err
+ return
}
- return buffer.String(), nil
- }
- func PrettyYaml(data interface{}) (string, error) {
- buffer := new(bytes.Buffer)
- encoder := yaml.NewEncoder(buffer)
- encoder.SetIndent(0)
- err := encoder.Encode(data)
- if err != nil {
- return empty, err
- }
- return buffer.String(), nil
+ result = buffer.String()
+ return
}
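A hypothetical use of the reworked `PrettyYaml` helper; the import path is an assumption:

```go
package main

import (
	"fmt"

	// Assumed import path for the helper shown above.
	"github.com/kubeshark/kubeshark/utils"
)

func main() {
	cfg := map[string]interface{}{
		"tap": map[string]interface{}{
			"namespaces": []string{"default", "sock-shop"},
			"debug":      false,
		},
	}

	// PrettyYaml now renders via goccy/go-yaml with a two-space indent.
	out, err := utils.PrettyYaml(cfg)
	if err != nil {
		panic(err)
	}
	fmt.Print(out)
}
```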


@@ -1,7 +0,0 @@
package utils
import "github.com/docker/go-units"
func HumanReadableToBytes(humanReadableSize string) (int64, error) {
return units.FromHumanSize(humanReadableSize)
}