Mirror of https://github.com/kubeshark/kubeshark.git (synced 2026-02-15 10:29:55 +00:00)

# Compare commits

503 commits compared. The changed files are listed below.
### .github/static/kubeshark.rb.tmpl (new file, 46 lines, vendored)

```ruby
# typed: false
# frozen_string_literal: true

class Kubeshark < Formula
  desc ""
  homepage "https://github.com/kubeshark/kubeshark"
  version "${CLEAN_VERSION}"

  on_macos do
    if Hardware::CPU.arm?
      url "https://github.com/kubeshark/kubeshark/releases/download/${FULL_VERSION}/kubeshark_darwin_arm64"
      sha256 "${DARWIN_ARM64_SHA256}"

      def install
        bin.install "kubeshark_darwin_arm64" => "kubeshark"
      end
    end
    if Hardware::CPU.intel?
      url "https://github.com/kubeshark/kubeshark/releases/download/${FULL_VERSION}/kubeshark_darwin_amd64"
      sha256 "${DARWIN_AMD64_SHA256}"

      def install
        bin.install "kubeshark_darwin_amd64" => "kubeshark"
      end
    end
  end

  on_linux do
    if Hardware::CPU.intel?
      url "https://github.com/kubeshark/kubeshark/releases/download/${FULL_VERSION}/kubeshark_linux_amd64"
      sha256 "${LINUX_AMD64_SHA256}"

      def install
        bin.install "kubeshark_linux_amd64" => "kubeshark"
      end
    end
    if Hardware::CPU.arm? && Hardware::CPU.is_64_bit?
      url "https://github.com/kubeshark/kubeshark/releases/download/${FULL_VERSION}/kubeshark_linux_arm64"
      sha256 "${LINUX_ARM64_SHA256}"

      def install
        bin.install "kubeshark_linux_arm64" => "kubeshark"
      end
    end
  end
end
```
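The `${...}` placeholders suggest the formula is rendered at release time by substituting shell environment variables into the template. A minimal sketch of that rendering step, assuming `envsubst` is available and that the checksum files follow the `bin/kubeshark_<suffix>.sha256` naming used by the Makefile below (the actual CI wiring is not shown in this diff):

```bash
# Hypothetical rendering step: export the values the template expects,
# then substitute them into the formula. Version numbers are examples.
export FULL_VERSION="v52.3.0"
export CLEAN_VERSION="${FULL_VERSION#v}"
export DARWIN_ARM64_SHA256="$(awk '{print $1}' bin/kubeshark_darwin_arm64.sha256)"
export DARWIN_AMD64_SHA256="$(awk '{print $1}' bin/kubeshark_darwin_amd64.sha256)"
export LINUX_AMD64_SHA256="$(awk '{print $1}' bin/kubeshark_linux_amd64.sha256)"
export LINUX_ARM64_SHA256="$(awk '{print $1}' bin/kubeshark_linux_arm64.sha256)"

envsubst < .github/static/kubeshark.rb.tmpl > kubeshark.rb
```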
### .github/workflows/helm.yml (vendored)

```
@@ -1,7 +1,8 @@
on:
  push:
    # Sequence of patterns matched against refs/tags
    tags:
      - '*'
      - 'v*' # Push events to matching v*, i.e. v1.0, v20.15.10

name: Release Helm Charts
```
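The workflow fires on pushed tags, so with the `v*` pattern above a chart release is triggered simply by pushing a version tag (the tag value below is an example):

```bash
git tag v52.3.0
git push origin v52.3.0   # matches 'v*' and triggers the tag-driven release workflows
```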
### .github/workflows/mcp-publish.yml (new file, 201 lines, vendored)

```yaml
name: MCP Registry Publish

on:
  workflow_call:
    inputs:
      release_tag:
        description: 'Release tag to publish (e.g., v52.13.0)'
        type: string
        required: true
  workflow_dispatch:
    inputs:
      dry_run:
        description: 'Dry run - generate server.json but skip actual publishing'
        type: boolean
        default: true
      release_tag:
        description: 'Release tag to publish (e.g., v52.13.0)'
        type: string
        required: true

jobs:
  mcp-publish:
    name: Publish to MCP Registry
    runs-on: ubuntu-latest
    permissions:
      id-token: write # Required for OIDC authentication with MCP Registry
      contents: read  # Required for checkout
    steps:
      - name: Check out the repo
        uses: actions/checkout@v4

      - name: Determine version
        id: version
        shell: bash
        run: |
          # inputs.release_tag works for both workflow_call and workflow_dispatch
          VERSION="${{ inputs.release_tag }}"
          echo "tag=${VERSION}" >> "$GITHUB_OUTPUT"
          echo "Publishing MCP server for version: ${VERSION}"

      - name: Download SHA256 files from release
        shell: bash
        run: |
          VERSION="${{ steps.version.outputs.tag }}"
          mkdir -p bin
          echo "Downloading SHA256 checksums from release ${VERSION}..."
          for platform in darwin_arm64 darwin_amd64 linux_arm64 linux_amd64 windows_amd64; do
            url="https://github.com/kubeshark/kubeshark/releases/download/${VERSION}/kubeshark-mcp_${platform}.mcpb.sha256"
            echo "  Fetching ${platform}..."
            if ! curl -sfL "${url}" -o "bin/kubeshark-mcp_${platform}.mcpb.sha256"; then
              echo "::warning::Failed to download SHA256 for ${platform}"
            fi
          done
          echo "Downloaded checksums:"
          ls -la bin/*.sha256 2>/dev/null || echo "No SHA256 files found"

      - name: Generate server.json
        shell: bash
        run: |
          VERSION="${{ steps.version.outputs.tag }}"
          CLEAN_VERSION="${VERSION#v}"

          # Read SHA256 hashes
          get_sha256() {
            local file="bin/kubeshark-mcp_$1.mcpb.sha256"
            if [ -f "$file" ]; then
              awk '{print $1}' "$file"
            else
              echo "HASH_NOT_FOUND"
            fi
          }

          DARWIN_ARM64_SHA256=$(get_sha256 "darwin_arm64")
          DARWIN_AMD64_SHA256=$(get_sha256 "darwin_amd64")
          LINUX_ARM64_SHA256=$(get_sha256 "linux_arm64")
          LINUX_AMD64_SHA256=$(get_sha256 "linux_amd64")
          WINDOWS_AMD64_SHA256=$(get_sha256 "windows_amd64")

          echo "SHA256 hashes:"
          echo "  darwin_arm64: ${DARWIN_ARM64_SHA256}"
          echo "  darwin_amd64: ${DARWIN_AMD64_SHA256}"
          echo "  linux_arm64: ${LINUX_ARM64_SHA256}"
          echo "  linux_amd64: ${LINUX_AMD64_SHA256}"
          echo "  windows_amd64: ${WINDOWS_AMD64_SHA256}"

          # Generate server.json using jq for proper formatting
          jq -n \
            --arg version "$CLEAN_VERSION" \
            --arg full_version "$VERSION" \
            --arg darwin_arm64_sha "$DARWIN_ARM64_SHA256" \
            --arg darwin_amd64_sha "$DARWIN_AMD64_SHA256" \
            --arg linux_arm64_sha "$LINUX_ARM64_SHA256" \
            --arg linux_amd64_sha "$LINUX_AMD64_SHA256" \
            --arg windows_amd64_sha "$WINDOWS_AMD64_SHA256" \
            '{
              "$schema": "https://static.modelcontextprotocol.io/schemas/2025-12-11/server.schema.json",
              "name": "io.github.kubeshark/mcp",
              "displayName": "Kubeshark",
              "description": "Real-time Kubernetes network traffic visibility and API analysis for HTTP, gRPC, Redis, Kafka, DNS.",
              "icon": "https://raw.githubusercontent.com/kubeshark/assets/refs/heads/master/logo/ico/icon.ico",
              "repository": { "url": "https://github.com/kubeshark/kubeshark", "source": "github" },
              "homepage": "https://kubeshark.com",
              "license": "Apache-2.0",
              "version": $version,
              "authors": [{ "name": "Kubeshark", "url": "https://kubeshark.com" }],
              "categories": ["kubernetes", "networking", "observability", "debugging", "security"],
              "tags": ["kubernetes", "network", "traffic", "api", "http", "grpc", "kafka", "redis", "dns", "pcap", "wireshark", "tcpdump", "observability", "debugging", "microservices"],
              "packages": [
                { "registryType": "mcpb", "identifier": ("https://github.com/kubeshark/kubeshark/releases/download/" + $full_version + "/kubeshark-mcp_darwin_arm64.mcpb"), "fileSha256": $darwin_arm64_sha, "transport": { "type": "stdio" } },
                { "registryType": "mcpb", "identifier": ("https://github.com/kubeshark/kubeshark/releases/download/" + $full_version + "/kubeshark-mcp_darwin_amd64.mcpb"), "fileSha256": $darwin_amd64_sha, "transport": { "type": "stdio" } },
                { "registryType": "mcpb", "identifier": ("https://github.com/kubeshark/kubeshark/releases/download/" + $full_version + "/kubeshark-mcp_linux_arm64.mcpb"), "fileSha256": $linux_arm64_sha, "transport": { "type": "stdio" } },
                { "registryType": "mcpb", "identifier": ("https://github.com/kubeshark/kubeshark/releases/download/" + $full_version + "/kubeshark-mcp_linux_amd64.mcpb"), "fileSha256": $linux_amd64_sha, "transport": { "type": "stdio" } },
                { "registryType": "mcpb", "identifier": ("https://github.com/kubeshark/kubeshark/releases/download/" + $full_version + "/kubeshark-mcp_windows_amd64.mcpb"), "fileSha256": $windows_amd64_sha, "transport": { "type": "stdio" } }
              ],
              "tools": [
                { "name": "check_kubeshark_status", "description": "Check if Kubeshark is currently running in the cluster.", "mode": "proxy" },
                { "name": "start_kubeshark", "description": "Deploy Kubeshark to the Kubernetes cluster. Requires --allow-destructive flag.", "mode": "proxy", "destructive": true },
                { "name": "stop_kubeshark", "description": "Remove Kubeshark from the Kubernetes cluster. Requires --allow-destructive flag.", "mode": "proxy", "destructive": true },
                { "name": "list_workloads", "description": "List pods, services, namespaces, and nodes with observed L7 traffic.", "mode": "all" },
                { "name": "list_api_calls", "description": "Query L7 API transactions (HTTP, gRPC, Redis, Kafka, DNS) with KFL filtering.", "mode": "all" },
                { "name": "get_api_call", "description": "Get detailed information about a specific API call including headers and body.", "mode": "all" },
                { "name": "get_api_stats", "description": "Get aggregated API statistics and metrics.", "mode": "all" },
                { "name": "list_l4_flows", "description": "List L4 (TCP/UDP) network flows with traffic statistics.", "mode": "all" },
                { "name": "get_l4_flow_summary", "description": "Get L4 connectivity summary including top talkers and cross-namespace traffic.", "mode": "all" },
                { "name": "list_snapshots", "description": "List all PCAP snapshots.", "mode": "all" },
                { "name": "create_snapshot", "description": "Create a new PCAP snapshot of captured traffic.", "mode": "all" },
                { "name": "get_dissection_status", "description": "Check L7 protocol parsing status.", "mode": "all" },
                { "name": "enable_dissection", "description": "Enable L7 protocol dissection.", "mode": "all" },
                { "name": "disable_dissection", "description": "Disable L7 protocol dissection.", "mode": "all" }
              ],
              "prompts": [
                { "name": "analyze_traffic", "description": "Analyze API traffic patterns and identify issues" },
                { "name": "find_errors", "description": "Find and summarize API errors and failures" },
                { "name": "trace_request", "description": "Trace a request path through microservices" },
                { "name": "show_topology", "description": "Show service communication topology" },
                { "name": "latency_analysis", "description": "Analyze latency patterns and identify slow endpoints" },
                { "name": "security_audit", "description": "Audit traffic for security concerns" },
                { "name": "compare_traffic", "description": "Compare traffic patterns between time periods" },
                { "name": "debug_connection", "description": "Debug connectivity issues between services" }
              ],
              "configuration": {
                "properties": {
                  "url": { "type": "string", "description": "Direct URL to Kubeshark Hub (e.g., https://kubeshark.example.com). When set, connects directly without kubectl/proxy.", "examples": ["https://kubeshark.example.com", "http://localhost:8899"] },
                  "kubeconfig": { "type": "string", "description": "Path to kubeconfig file for proxy mode.", "examples": ["~/.kube/config", "/path/to/.kube/config"] },
                  "allow-destructive": { "type": "boolean", "description": "Enable destructive operations (start_kubeshark, stop_kubeshark). Default: false for safety.", "default": false }
                }
              },
              "modes": {
                "url": { "description": "Connect directly to an existing Kubeshark deployment via URL. Cluster management tools are disabled.", "args": ["mcp", "--url", "${url}"] },
                "proxy": { "description": "Connect via kubectl port-forward. Requires kubeconfig access to the cluster.", "args": ["mcp", "--kubeconfig", "${kubeconfig}"] },
                "proxy-destructive": { "description": "Proxy mode with destructive operations enabled.", "args": ["mcp", "--kubeconfig", "${kubeconfig}", "--allow-destructive"] }
              }
            }' > mcp/server.json

          echo ""
          echo "Generated server.json:"
          cat mcp/server.json

      - name: Install mcp-publisher
        shell: bash
        run: |
          echo "Installing mcp-publisher..."
          curl -sfL "https://github.com/modelcontextprotocol/registry/releases/latest/download/mcp-publisher_linux_amd64.tar.gz" | tar xz
          chmod +x mcp-publisher
          sudo mv mcp-publisher /usr/local/bin/
          echo "mcp-publisher installed successfully"

      - name: Login to MCP Registry
        if: github.event_name != 'workflow_dispatch' || github.event.inputs.dry_run != 'true'
        shell: bash
        run: mcp-publisher login github
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

      - name: Publish to MCP Registry
        if: github.event_name != 'workflow_dispatch' || github.event.inputs.dry_run != 'true'
        shell: bash
        run: |
          cd mcp
          echo "Publishing to MCP Registry..."
          if ! mcp-publisher publish; then
            echo "::error::Failed to publish to MCP Registry"
            exit 1
          fi
          echo "Successfully published to MCP Registry"
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

      - name: Dry-run summary
        if: github.event_name == 'workflow_dispatch' && github.event.inputs.dry_run == 'true'
        shell: bash
        run: |
          echo "=============================================="
          echo "DRY RUN - Would publish the following server.json"
          echo "=============================================="
          cat mcp/server.json
          echo ""
          echo "=============================================="
          echo "SHA256 checksums downloaded:"
          echo "=============================================="
          cat bin/*.sha256 2>/dev/null || echo "No SHA256 files found"
```
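Because `workflow_dispatch` defaults `dry_run` to `true`, the publish path can be exercised safely before a real release. One possible invocation with the GitHub CLI (assuming `gh` is authenticated against this repository; the tag is the example value from the workflow's own input description):

```bash
# Generate server.json and print the dry-run summary without publishing
gh workflow run mcp-publish.yml -f dry_run=true -f release_tag=v52.13.0

# Follow the run and inspect its output
gh run watch
```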
### .github/workflows/release.yml (vendored)

```
@@ -1,7 +1,8 @@
on:
  push:
    # Sequence of patterns matched against refs/tags
    tags:
      - '*'
      - 'v*' # Push events to matching v*, i.e. v1.0, v20.15.10

name: Release

@@ -13,6 +14,8 @@ jobs:
  release:
    name: Build and publish a new release
    runs-on: ubuntu-latest
    outputs:
      version: ${{ steps.version.outputs.tag }}
    steps:
      - name: Check out the repo
        uses: actions/checkout@v3
@@ -40,50 +43,35 @@ jobs:
        run: |
          echo '${{ steps.version.outputs.tag }}' >> bin/version.txt

      - name: Create MCP Registry artifacts
        shell: bash
        run: |
          cd bin
          # Create .mcpb copies for MCP Registry (URL must contain "mcp")
          for f in kubeshark_linux_amd64 kubeshark_linux_arm64 kubeshark_darwin_amd64 kubeshark_darwin_arm64; do
            if [ -f "$f" ]; then
              cp "$f" "${f/kubeshark_/kubeshark-mcp_}.mcpb"
              shasum -a 256 "${f/kubeshark_/kubeshark-mcp_}.mcpb" > "${f/kubeshark_/kubeshark-mcp_}.mcpb.sha256"
            fi
          done
          # Handle Windows executable
          if [ -f "kubeshark.exe" ]; then
            cp kubeshark.exe kubeshark-mcp_windows_amd64.mcpb
            shasum -a 256 kubeshark-mcp_windows_amd64.mcpb > kubeshark-mcp_windows_amd64.mcpb.sha256
          fi

      - name: Release
        uses: ncipollo/release-action@v1
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          artifacts: "bin/*"
          tag: ${{ steps.version.outputs.tag }}
          prerelease: true
          prerelease: false
          bodyFile: 'bin/README.md'

  brew-tap:
    name: Create Homebrew formulae
    runs-on: ubuntu-latest
  mcp-publish:
    name: Publish to MCP Registry
    needs: [release]
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: Version
        id: version
        shell: bash
        run: |
          {
            echo "tag=${GITHUB_REF#refs/*/}"
            echo "build_timestamp=$(date +%s)"
            echo "branch=${GITHUB_REF#refs/heads/}"
          } >> "$GITHUB_OUTPUT"

      - name: Fetch all tags
        run: git fetch --force --tags

      - name: Set up Go
        uses: actions/setup-go@v4
        with:
          go-version-file: 'go.mod'

      - name: Run GoReleaser
        uses: goreleaser/goreleaser-action@v4
        with:
          distribution: goreleaser
          version: ${{ env.GITHUB_REF_NAME }}
          args: release --clean
        env:
          GITHUB_TOKEN: ${{ secrets.HOMEBREW_TOKEN }}
          VER: ${{ steps.version.outputs.tag }}
          BUILD_TIMESTAMP: ${{ steps.version.outputs.build_timestamp }}
    uses: ./.github/workflows/mcp-publish.yml
    with:
      release_tag: ${{ needs.release.outputs.version }}
```
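The `${f/kubeshark_/kubeshark-mcp_}.mcpb` expressions in the release step are plain bash pattern substitution; for each binary they derive the MCP Registry artifact name and a matching checksum file. A quick illustration of the resulting names:

```bash
# Bash parameter expansion as used by the "Create MCP Registry artifacts" step above
f="kubeshark_darwin_arm64"
echo "${f/kubeshark_/kubeshark-mcp_}.mcpb"          # -> kubeshark-mcp_darwin_arm64.mcpb
echo "${f/kubeshark_/kubeshark-mcp_}.mcpb.sha256"   # -> kubeshark-mcp_darwin_arm64.mcpb.sha256
```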
### .gitignore (vendored)

```
@@ -63,4 +63,4 @@ bin
scripts/

# CWD config YAML
kubeshark.yaml
kubeshark.yaml
```
```
@@ -7,7 +7,7 @@ Please read and follow the guidelines below.

## Communication

* Before starting work on a major feature, please reach out to us via [GitHub](https://github.com/kubeshark/kubeshark), [Discord](https://discord.gg/WkvRGMUcx7), [Slack](https://join.slack.com/t/kubeshark/shared_invite/zt-1k3sybpq9-uAhFkuPJiJftKniqrGHGhg), [email](mailto:info@kubeshark.co), etc. We will make sure no one else is already working on it. A _major feature_ is defined as any change that is > 100 LOC altered (not including tests), or changes any user-facing behavior
* Before starting work on a major feature, please reach out to us via [GitHub](https://github.com/kubeshark/kubeshark), [Discord](https://discord.gg/WkvRGMUcx7), [Slack](https://join.slack.com/t/kubeshark/shared_invite/zt-1k3sybpq9-uAhFkuPJiJftKniqrGHGhg), [email](mailto:info@kubeshark.com), etc. We will make sure no one else is already working on it. A _major feature_ is defined as any change that is > 100 LOC altered (not including tests), or changes any user-facing behavior
* Small patches and bug fixes don't need prior communication.

## Contribution Requirements
```
### Makefile

```
@@ -9,12 +9,12 @@ COMMIT_HASH=$(shell git rev-parse HEAD)
GIT_BRANCH=$(shell git branch --show-current | tr '[:upper:]' '[:lower:]')
GIT_VERSION=$(shell git branch --show-current | tr '[:upper:]' '[:lower:]')
BUILD_TIMESTAMP=$(shell date +%s)
export VER?=0.0
export VER?=0.0.0

help: ## Print this help message.
    @awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)

build-debug: ## Build for debuging.
build-debug: ## Build for debugging.
    export CGO_ENABLED=1
    export GCLFAGS='-gcflags="all=-N -l"'
    ${MAKE} build-base
@@ -40,6 +40,21 @@ build-base: ## Build binary (select the platform via GOOS / GOARCH env variables
    -o bin/kubeshark_$(SUFFIX) kubeshark.go && \
    cd bin && shasum -a 256 kubeshark_${SUFFIX} > kubeshark_${SUFFIX}.sha256

build-brew: ## Build binary for brew/core CI
    go build ${GCLFAGS} -ldflags="${LDFLAGS_EXT} \
    -X 'github.com/kubeshark/kubeshark/misc.GitCommitHash=$(COMMIT_HASH)' \
    -X 'github.com/kubeshark/kubeshark/misc.Branch=$(GIT_BRANCH)' \
    -X 'github.com/kubeshark/kubeshark/misc.BuildTimestamp=$(BUILD_TIMESTAMP)' \
    -X 'github.com/kubeshark/kubeshark/misc.Platform=$(SUFFIX)' \
    -X 'github.com/kubeshark/kubeshark/misc.Ver=$(VER)'" \
    -o kubeshark kubeshark.go

build-windows-amd64:
    $(MAKE) build GOOS=windows GOARCH=amd64 && \
    mv ./bin/kubeshark_windows_amd64 ./bin/kubeshark.exe && \
    rm bin/kubeshark_windows_amd64.sha256 && \
    cd bin && shasum -a 256 kubeshark.exe > kubeshark.exe.sha256

build-all: ## Build for all supported platforms.
    export CGO_ENABLED=0
    echo "Compiling for every OS and Platform" && \
@@ -48,8 +63,7 @@ build-all: ## Build for all supported platforms.
    $(MAKE) build GOOS=linux GOARCH=arm64 && \
    $(MAKE) build GOOS=darwin GOARCH=amd64 && \
    $(MAKE) build GOOS=darwin GOARCH=arm64 && \
    $(MAKE) build GOOS=windows GOARCH=amd64 && \
    mv ./bin/kubeshark_windows_amd64 ./bin/kubeshark.exe && \
    $(MAKE) build-windows-amd64 && \
    echo "---------" && \
    find ./bin -ls

@@ -60,6 +74,69 @@ clean: ## Clean all build artifacts.
test: ## Run cli tests.
    @go test ./... -coverpkg=./... -race -coverprofile=coverage.out -covermode=atomic

test-integration: ## Run integration tests (requires Kubernetes cluster).
    @echo "Running integration tests..."
    @LOG_FILE=$$(mktemp /tmp/integration-test.XXXXXX.log); \
    go test -tags=integration -timeout $${INTEGRATION_TIMEOUT:-5m} -v ./integration/... 2>&1 | tee $$LOG_FILE; \
    status=$$?; \
    echo ""; \
    echo "========================================"; \
    echo " INTEGRATION TEST SUMMARY"; \
    echo "========================================"; \
    grep -E "^(--- PASS|--- FAIL|--- SKIP)" $$LOG_FILE || true; \
    echo "----------------------------------------"; \
    pass=$$(grep -c "^--- PASS" $$LOG_FILE 2>/dev/null || true); \
    fail=$$(grep -c "^--- FAIL" $$LOG_FILE 2>/dev/null || true); \
    skip=$$(grep -c "^--- SKIP" $$LOG_FILE 2>/dev/null || true); \
    echo "PASSED: $${pass:-0}"; \
    echo "FAILED: $${fail:-0}"; \
    echo "SKIPPED: $${skip:-0}"; \
    echo "========================================"; \
    rm -f $$LOG_FILE; \
    exit $$status

test-integration-mcp: ## Run only MCP integration tests.
    @echo "Running MCP integration tests..."
    @LOG_FILE=$$(mktemp /tmp/integration-test.XXXXXX.log); \
    go test -tags=integration -timeout $${INTEGRATION_TIMEOUT:-5m} -v ./integration/ -run "MCP" 2>&1 | tee $$LOG_FILE; \
    status=$$?; \
    echo ""; \
    echo "========================================"; \
    echo " INTEGRATION TEST SUMMARY"; \
    echo "========================================"; \
    grep -E "^(--- PASS|--- FAIL|--- SKIP)" $$LOG_FILE || true; \
    echo "----------------------------------------"; \
    pass=$$(grep -c "^--- PASS" $$LOG_FILE 2>/dev/null || true); \
    fail=$$(grep -c "^--- FAIL" $$LOG_FILE 2>/dev/null || true); \
    skip=$$(grep -c "^--- SKIP" $$LOG_FILE 2>/dev/null || true); \
    echo "PASSED: $${pass:-0}"; \
    echo "FAILED: $${fail:-0}"; \
    echo "SKIPPED: $${skip:-0}"; \
    echo "========================================"; \
    rm -f $$LOG_FILE; \
    exit $$status

test-integration-short: ## Run quick integration tests (skips long-running tests).
    @echo "Running quick integration tests..."
    @LOG_FILE=$$(mktemp /tmp/integration-test.XXXXXX.log); \
    go test -tags=integration -timeout $${INTEGRATION_TIMEOUT:-2m} -short -v ./integration/... 2>&1 | tee $$LOG_FILE; \
    status=$$?; \
    echo ""; \
    echo "========================================"; \
    echo " INTEGRATION TEST SUMMARY"; \
    echo "========================================"; \
    grep -E "^(--- PASS|--- FAIL|--- SKIP)" $$LOG_FILE || true; \
    echo "----------------------------------------"; \
    pass=$$(grep -c "^--- PASS" $$LOG_FILE 2>/dev/null || true); \
    fail=$$(grep -c "^--- FAIL" $$LOG_FILE 2>/dev/null || true); \
    skip=$$(grep -c "^--- SKIP" $$LOG_FILE 2>/dev/null || true); \
    echo "PASSED: $${pass:-0}"; \
    echo "FAILED: $${fail:-0}"; \
    echo "SKIPPED: $${skip:-0}"; \
    echo "========================================"; \
    rm -f $$LOG_FILE; \
    exit $$status

lint: ## Lint the source code.
    golangci-lint run

@@ -70,21 +147,42 @@ kubectl-view-kubeshark-resources: ## This command outputs all Kubernetes resourc
    ./kubectl.sh view-kubeshark-resources

generate-helm-values: ## Generate the Helm values from config.yaml
    ./bin/kubeshark__ config > ./helm-chart/values.yaml
    # [ -f ~/.kubeshark/config.yaml ] && mv ~/.kubeshark/config.yaml ~/.kubeshark/config.yaml.old
    bin/kubeshark__ config>helm-chart/values.yaml
    # [ -f ~/.kubeshark/config.yaml.old ] && mv ~/.kubeshark/config.yaml.old ~/.kubeshark/config.yaml
    # sed -i 's/^license:.*/license: ""/' helm-chart/values.yaml && sed -i '1i # find a detailed description here: https://github.com/kubeshark/kubeshark/blob/master/helm-chart/README.md' helm-chart/values.yaml

generate-manifests: ## Generate the manifests from the Helm chart using default configuration
    helm template kubeshark -n default ./helm-chart > ./manifests/complete.yaml

logs-worker:
logs-sniffer:
    export LOGS_POD_PREFIX=kubeshark-worker-
    export LOGS_CONTAINER='-c sniffer'
    export LOGS_FOLLOW=
    ${MAKE} logs

logs-worker-follow:
logs-sniffer-follow:
    export LOGS_POD_PREFIX=kubeshark-worker-
    export LOGS_CONTAINER='-c sniffer'
    export LOGS_FOLLOW=--follow
    ${MAKE} logs

logs-tracer:
    export LOGS_POD_PREFIX=kubeshark-worker-
    export LOGS_CONTAINER='-c tracer'
    export LOGS_FOLLOW=
    ${MAKE} logs

logs-tracer-follow:
    export LOGS_POD_PREFIX=kubeshark-worker-
    export LOGS_CONTAINER='-c tracer'
    export LOGS_FOLLOW=--follow
    ${MAKE} logs

logs-worker: logs-sniffer

logs-worker-follow: logs-sniffer-follow

logs-hub:
    export LOGS_POD_PREFIX=kubeshark-hub
    export LOGS_FOLLOW=
@@ -106,7 +204,7 @@ logs-front-follow:
    ${MAKE} logs

logs:
    kubectl logs $$(kubectl get pods | awk '$$1 ~ /^$(LOGS_POD_PREFIX)/' | awk 'END {print $$1}') $(LOGS_FOLLOW)
    kubectl logs $$(kubectl get pods | awk '$$1 ~ /^$(LOGS_POD_PREFIX)/' | awk 'END {print $$1}') $(LOGS_CONTAINER) $(LOGS_FOLLOW)

ssh-node:
    kubectl ssh node $$(kubectl get nodes | awk 'END {print $$1}')
@@ -127,22 +225,13 @@ exec:
    kubectl exec --stdin --tty $$(kubectl get pods | awk '$$1 ~ /^$(EXEC_POD_PREFIX)/' | awk 'END {print $$1}') -- /bin/sh

helm-install:
    cd helm-chart && helm install kubeshark . && cd ..

helm-install-canary:
    cd helm-chart && helm install kubeshark . --set tap.docker.tag=canary && cd ..

helm-install-dev:
    cd helm-chart && helm install kubeshark . --set tap.docker.tag=dev && cd ..
    cd helm-chart && helm install kubeshark . --set tap.docker.tag=$(TAG) && cd ..

helm-install-debug:
    cd helm-chart && helm install kubeshark . --set tap.debug=true && cd ..
    cd helm-chart && helm install kubeshark . --set tap.docker.tag=$(TAG) --set tap.debug=true && cd ..

helm-install-debug-canary:
    cd helm-chart && helm install kubeshark . --set tap.debug=true --set tap.docker.tag=canary && cd ..

helm-install-debug-dev:
    cd helm-chart && helm install kubeshark . --set tap.debug=true --set tap.docker.tag=dev && cd ..
helm-install-profile:
    cd helm-chart && helm install kubeshark . --set tap.docker.tag=$(TAG) --set tap.pprof.enabled=true && cd ..

helm-uninstall:
    helm uninstall kubeshark
@@ -150,5 +239,45 @@ helm-uninstall:
proxy:
    kubeshark proxy

port-forward-worker:
    kubectl port-forward $$(kubectl get pods | awk '$$1 ~ /^$(LOGS_POD_PREFIX)/' | awk 'END {print $$1}') $(LOGS_FOLLOW) 8897:8897
port-forward:
    kubectl port-forward $$(kubectl get pods | awk '$$1 ~ /^$(POD_PREFIX)/' | awk 'END {print $$1}') $(SRC_PORT):$(DST_PORT)

release:
    @cd ../worker && git checkout master && git pull && git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags
    @cd ../tracer && git checkout master && git pull && git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags
    @cd ../hub && git checkout master && git pull && git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags
    @cd ../front && git checkout master && git pull && git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags
    @cd ../kubeshark && git checkout master && git pull && sed -i "s/^version:.*/version: \"$(shell echo $(VERSION) | sed -E 's/^([0-9]+\.[0-9]+\.[0-9]+)\..*/\1/')\"/" helm-chart/Chart.yaml && make
    @if [ "$(shell uname)" = "Darwin" ]; then \
        codesign --sign - --force --preserve-metadata=entitlements,requirements,flags,runtime ./bin/kubeshark__; \
    fi
    @make generate-helm-values && make generate-manifests
    @git add -A . && git commit -m ":bookmark: Bump the Helm chart version to $(VERSION)" && git push
    @git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags
    @rm -rf ../kubeshark.github.io/charts/chart && mkdir ../kubeshark.github.io/charts/chart && cp -r helm-chart/ ../kubeshark.github.io/charts/chart/
    @cd ../kubeshark.github.io/ && git add -A . && git commit -m ":sparkles: Update the Helm chart" && git push
    @cd ../kubeshark

release-dry-run:
    @cd ../worker && git checkout master && git pull
    @cd ../tracer && git checkout master && git pull
    @cd ../hub && git checkout master && git pull
    @cd ../front && git checkout master && git pull
    @cd ../kubeshark && sed -i "s/^version:.*/version: \"$(shell echo $(VERSION) | sed -E 's/^([0-9]+\.[0-9]+\.[0-9]+)\..*/\1/')\"/" helm-chart/Chart.yaml && make
    @if [ "$(shell uname)" = "Darwin" ]; then \
        codesign --sign - --force --preserve-metadata=entitlements,requirements,flags,runtime ./bin/kubeshark__; \
    fi
    @make generate-helm-values && make generate-manifests
    @rm -rf ../kubeshark.github.io/charts/chart && mkdir ../kubeshark.github.io/charts/chart && cp -r helm-chart/ ../kubeshark.github.io/charts/chart/
    @cd ../kubeshark.github.io/
    @cd ../kubeshark

branch:
    @cd ../worker && git checkout master && git pull && git checkout -b $(name); git push --set-upstream origin $(name)
    @cd ../hub && git checkout master && git pull && git checkout -b $(name); git push --set-upstream origin $(name)
    @cd ../front && git checkout master && git pull && git checkout -b $(name); git push --set-upstream origin $(name)

switch-to-branch:
    @cd ../worker && git checkout $(name)
    @cd ../hub && git checkout $(name)
    @cd ../front && git checkout $(name)
```
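For reference, the targets touched above are driven in the usual way; a few invocations that follow directly from the recipes shown (the timeout and tag values are examples):

```bash
make build-all                                  # cross-compile into ./bin for linux/darwin/windows
make test-integration INTEGRATION_TIMEOUT=10m   # integration suite; needs a reachable Kubernetes cluster
make test-integration-mcp                       # only the MCP integration tests
make helm-install-dev TAG=dev                   # install the chart with a specific image tag
make logs-sniffer-follow                        # follow the sniffer container of a worker pod
```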
### README.md

````
@@ -1,79 +1,132 @@
<p align="center">
  <img src="https://raw.githubusercontent.com/kubeshark/assets/master/svg/kubeshark-logo.svg" alt="Kubeshark: Traffic analyzer for Kubernetes." height="128px"/>
  <img src="https://raw.githubusercontent.com/kubeshark/assets/master/svg/kubeshark-logo.svg" alt="Kubeshark" height="120px"/>
</p>

<p align="center">
  <a href="https://github.com/kubeshark/kubeshark/releases/latest">
    <img alt="GitHub Latest Release" src="https://img.shields.io/github/v/release/kubeshark/kubeshark?logo=GitHub&style=flat-square">
  </a>
  <a href="https://hub.docker.com/r/kubeshark/worker">
    <img alt="Docker pulls" src="https://img.shields.io/docker/pulls/kubeshark/kubeshark?color=%23099cec&logo=Docker&style=flat-square">
  </a>
  <a href="https://hub.docker.com/r/kubeshark/worker">
    <img alt="Image size" src="https://img.shields.io/docker/image-size/kubeshark/kubeshark/latest?logo=Docker&style=flat-square">
  </a>
  <a href="https://discord.gg/WkvRGMUcx7">
    <img alt="Discord" src="https://img.shields.io/discord/1042559155224973352?logo=Discord&style=flat-square&label=discord">
  </a>
  <a href="https://join.slack.com/t/kubeshark/shared_invite/zt-1m90td3n7-VHxN_~V5kVp80SfQW3SfpA">
    <img alt="Slack" src="https://img.shields.io/badge/slack-join_chat-green?logo=Slack&style=flat-square&label=slack">
  </a>
  <a href="https://github.com/kubeshark/kubeshark/releases/latest"><img alt="Release" src="https://img.shields.io/github/v/release/kubeshark/kubeshark?logo=GitHub&style=flat-square"></a>
  <a href="https://hub.docker.com/r/kubeshark/worker"><img alt="Docker pulls" src="https://img.shields.io/docker/pulls/kubeshark/worker?color=%23099cec&logo=Docker&style=flat-square"></a>
  <a href="https://discord.gg/WkvRGMUcx7"><img alt="Discord" src="https://img.shields.io/discord/1042559155224973352?logo=Discord&style=flat-square&label=discord"></a>
  <a href="https://join.slack.com/t/kubeshark/shared_invite/zt-3jdcdgxdv-1qNkhBh9c6CFoE7bSPkpBQ"><img alt="Slack" src="https://img.shields.io/badge/slack-join_chat-green?logo=Slack&style=flat-square"></a>
</p>

<p align="center"><b>Network Intelligence for Kubernetes</b></p>

<p align="center">
  <b>
    <span>NEW: </span>
    <a href="https://kubeshark.co/traffic-recording">Traffic Recording and Offline Investigation</a>, and
    <a href="https://kubeshark.co/self-hosting">Self-hosting with Ingress and Authentication</a>.
  </b>
  <a href="https://demo.kubeshark.com/">Live Demo</a> · <a href="https://docs.kubeshark.com">Docs</a>
</p>

**Kubeshark** is an API Traffic Analyzer for [**Kubernetes**](https://kubernetes.io/) providing real-time, protocol-level visibility into Kubernetes’ internal network, capturing and monitoring all traffic and payloads going in, out and across containers, pods, nodes and clusters.
---


* **Cluster-wide, real-time visibility into every packet, API call, and service interaction.**
* Replay any moment in time.
* Resolve incidents at the speed of LLMs. 100% on-premises.

Think [TCPDump](https://en.wikipedia.org/wiki/Tcpdump) and [Wireshark](https://www.wireshark.org/) re-invented for Kubernetes


## Getting Started
---

Download **Kubeshark**'s binary distribution [latest release](https://github.com/kubeshark/kubeshark/releases/latest) and run following one of these examples:
## Get Started

```shell
kubeshark tap
```bash
helm repo add kubeshark https://helm.kubeshark.com
helm install kubeshark kubeshark/kubeshark
```

```shell
kubeshark tap -n sock-shop "(catalo*|front-end*)"
```
Dashboard opens automatically. You're capturing traffic.

Running any of the :point_up: above commands will open the [Web UI](https://docs.kubeshark.co/en/ui) in your browser which streams the traffic in your Kubernetes cluster in real-time.
**With AI** — connect your assistant and debug with natural language:

### Homebrew

[Homebrew](https://brew.sh/) :beer: users can add Kubeshark formulae with:

```shell
brew tap kubeshark/kubeshark
```

and install Kubeshark CLI with:

```shell
```bash
brew install kubeshark
claude mcp add kubeshark -- kubeshark mcp
```

## Building From Source
> *"Why did checkout fail at 2:15 PM?"*
> *"Which services have error rates above 1%?"*

Clone this repository and run `make` command to build it. After the build is complete, the executable can be found at `./bin/kubeshark__`.
[MCP setup guide →](https://docs.kubeshark.com/en/mcp)

## Documentation
---

To learn more, read the [documentation](https://docs.kubeshark.co).
## Why Kubeshark

- **Instant root cause** — trace requests across services, see exact errors
- **Zero instrumentation** — no code changes, no SDKs, just deploy
- **Full payload capture** — request/response bodies, headers, timing
- **TLS decryption** — see encrypted traffic without managing keys
- **AI-ready** — query traffic with natural language via MCP

---

### Traffic Analysis and API Dissection

Capture and inspect every API call across your cluster—HTTP, gRPC, Redis, Kafka, DNS, and more. Request/response matching with full payloads, parsed according to protocol specifications. Headers, timing, and complete context. Zero instrumentation required.



[Learn more →](https://docs.kubeshark.com/en/v2/l7_api_dissection)

### L4/L7 Workload Map

Visualize how your services communicate. See dependencies, traffic flow, and identify anomalies at a glance.



[Learn more →](https://docs.kubeshark.com/en/v2/service_map)

### AI-Powered Root Cause Analysis

Resolve production issues in minutes instead of hours. Connect your AI assistant and investigate incidents using natural language. Build network-aware AI agents for forensics, monitoring, compliance, and security.

> *"Why did checkout fail at 2:15 PM?"*
> *"Which services have error rates above 1%?"*
> *"Trace request abc123 through all services"*

Works with Claude Code, Cursor, and any MCP-compatible AI.

[MCP setup guide →](https://docs.kubeshark.com/en/mcp)

### Traffic Retention

Retain every packet. Take snapshots. Export PCAP files. Replay any moment in time.



[Snapshots guide →](https://docs.kubeshark.com/en/v2/traffic_snapshots)

---

## Features

| Feature | Description |
|---------|-------------|
| [**Raw Capture**](https://docs.kubeshark.com/en/v2/raw_capture) | Continuous cluster-wide packet capture with minimal overhead |
| [**Traffic Snapshots**](https://docs.kubeshark.com/en/v2/traffic_snapshots) | Point-in-time snapshots, export as PCAP for Wireshark |
| [**L7 API Dissection**](https://docs.kubeshark.com/en/v2/l7_api_dissection) | Request/response matching with full payloads and protocol parsing |
| [**Protocol Support**](https://docs.kubeshark.com/en/protocols) | HTTP, gRPC, GraphQL, Redis, Kafka, DNS, and more |
| [**TLS Decryption**](https://docs.kubeshark.com/en/encrypted_traffic) | eBPF-based decryption without key management |
| [**AI-Powered Analysis**](https://docs.kubeshark.com/en/v2/ai_powered_analysis) | Query traffic with Claude, Cursor, or any MCP-compatible AI |
| [**Display Filters**](https://docs.kubeshark.com/en/v2/kfl2) | Wireshark-inspired display filters for precise traffic analysis |
| [**100% On-Premises**](https://docs.kubeshark.com/en/air_gapped) | Air-gapped support, no external dependencies |

---

## Install

| Method | Command |
|--------|---------|
| Helm | `helm repo add kubeshark https://helm.kubeshark.com && helm install kubeshark kubeshark/kubeshark` |
| Homebrew | `brew install kubeshark && kubeshark tap` |
| Binary | [Download](https://github.com/kubeshark/kubeshark/releases/latest) |

[Installation guide →](https://docs.kubeshark.com/en/install)

---

## Contributing

We :heart: pull requests! See [CONTRIBUTING.md](CONTRIBUTING.md) for the contribution guide.
We welcome contributions. See [CONTRIBUTING.md](CONTRIBUTING.md).

## Code of Conduct
## License

This project is for everyone. We ask that our users and contributors take a few minutes to review our [Code of Conduct](CODE_OF_CONDUCT.md).
[Apache-2.0](LICENSE)
````
````
@@ -1,5 +1,5 @@
# Kubeshark release _VER_
Kubeshark CHANGELOG is now part of [Kubeshark wiki](https://github.com/kubeshark/kubeshark/wiki/CHANGELOG)
Release notes coming soon ..

## Download Kubeshark for your platform

@@ -10,7 +10,7 @@ curl -Lo kubeshark https://github.com/kubeshark/kubeshark/releases/download/_VER

**Mac** (AArch64/Apple M1 silicon)
```
rm -f kubeshark && curl -Lo kubeshark https://github.com/kubeshark/kubeshark/releases/download/_VER_/kubeshark_darwin_arm64 && chmod 755 kubeshark
curl -Lo kubeshark https://github.com/kubeshark/kubeshark/releases/download/_VER_/kubeshark_darwin_arm64 && chmod 755 kubeshark
```

**Linux** (x86-64)
````
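Since the release job uploads a `.sha256` file next to each binary (see the `shasum -a 256` steps in the Makefile and release workflow above), a download can be verified before installing. A sketch, keeping the original artifact name so the checksum file matches; `_VER_` stands for the release tag, as in the template, and this assumes the checksum file is attached to that release:

```bash
curl -LO https://github.com/kubeshark/kubeshark/releases/download/_VER_/kubeshark_darwin_arm64
curl -LO https://github.com/kubeshark/kubeshark/releases/download/_VER_/kubeshark_darwin_arm64.sha256
shasum -a 256 -c kubeshark_darwin_arm64.sha256   # expects "kubeshark_darwin_arm64: OK"
mv kubeshark_darwin_arm64 kubeshark && chmod 755 kubeshark
```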
```
@@ -22,7 +22,7 @@ func startProxyReportErrorIfAny(kubernetesProvider *kubernetes.Provider, ctx con
    if err != nil {
        log.Error().
            Err(errormessage.FormatError(err)).
            Msg(fmt.Sprintf("Error occured while running K8s proxy. Try setting different port using --%s", proxyPortLabel))
            Msg(fmt.Sprintf("Error occurred while running K8s proxy. Try setting different port using --%s", proxyPortLabel))
        return
    }

@@ -42,7 +42,7 @@ func startProxyReportErrorIfAny(kubernetesProvider *kubernetes.Provider, ctx con
        log.Error().
            Str("pod-regex", podRegex.String()).
            Err(errormessage.FormatError(err)).
            Msg(fmt.Sprintf("Error occured while running port forward. Try setting different port using --%s", proxyPortLabel))
            Msg(fmt.Sprintf("Error occurred while running port forward. Try setting different port using --%s", proxyPortLabel))
        return
    }

@@ -111,7 +111,7 @@ func dumpLogsIfNeeded(ctx context.Context, kubernetesProvider *kubernetes.Provid
    }
    dotDir := misc.GetDotFolderPath()
    filePath := path.Join(dotDir, fmt.Sprintf("%s_logs_%s.zip", misc.Program, time.Now().Format("2006_01_02__15_04_05")))
    if err := fsUtils.DumpLogs(ctx, kubernetesProvider, filePath); err != nil {
    if err := fsUtils.DumpLogs(ctx, kubernetesProvider, filePath, config.Config.Logs.Grep); err != nil {
        log.Error().Err(err).Msg("Failed to dump logs.")
    }
    }
```
```
@@ -2,7 +2,6 @@ package cmd

import (
    "fmt"
    "path"

    "github.com/creasty/defaults"
    "github.com/kubeshark/kubeshark/config"
@@ -18,7 +17,12 @@ var configCmd = &cobra.Command{
    Short: fmt.Sprintf("Generate %s config with default values", misc.Software),
    RunE: func(cmd *cobra.Command, args []string) error {
        if config.Config.Config.Regenerate {
            if err := config.WriteConfig(&config.Config); err != nil {
            defaultConfig := config.CreateDefaultConfig()
            if err := defaults.Set(&defaultConfig); err != nil {
                log.Error().Err(err).Send()
                return nil
            }
            if err := config.WriteConfig(&defaultConfig); err != nil {
                log.Error().Err(err).Msg("Failed generating config with defaults.")
                return nil
            }
@@ -47,5 +51,5 @@ func init() {
        log.Debug().Err(err).Send()
    }

    configCmd.Flags().BoolP(configStructs.RegenerateConfigName, "r", defaultConfig.Config.Regenerate, fmt.Sprintf("Regenerate the config file with default values to path %s", path.Join(misc.GetDotFolderPath(), "config.yaml")))
    configCmd.Flags().BoolP(configStructs.RegenerateConfigName, "r", defaultConfig.Config.Regenerate, fmt.Sprintf("Regenerate the config file with default values to path %s", config.GetConfigFilePath(nil)))
}
```
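In practice this change is exercised through the `config` command and the `-r` (regenerate) flag registered above; the output path is resolved by `config.GetConfigFilePath`. For example:

```bash
kubeshark config       # print the generated default config (the Makefile pipes this into helm-chart/values.yaml)
kubeshark config -r    # rewrite the config file on disk with default values
```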
155
cmd/console.go
155
cmd/console.go
@@ -36,73 +36,80 @@ func init() {
|
||||
log.Debug().Err(err).Send()
|
||||
}
|
||||
|
||||
consoleCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.Port, "Provide a custom port for the Hub")
|
||||
consoleCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the Hub")
|
||||
consoleCmd.Flags().Uint16(configStructs.ProxyFrontPortLabel, defaultTapConfig.Proxy.Front.Port, "Provide a custom port for the Kubeshark")
|
||||
consoleCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the Kubeshark")
|
||||
consoleCmd.Flags().StringP(configStructs.ReleaseNamespaceLabel, "s", defaultTapConfig.Release.Namespace, "Release namespace of Kubeshark")
|
||||
}
|
||||
|
||||
func runConsole() {
|
||||
hubUrl := kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Hub.Port)
|
||||
response, err := http.Get(fmt.Sprintf("%s/echo", hubUrl))
|
||||
if err != nil || response.StatusCode != 200 {
|
||||
log.Info().Msg(fmt.Sprintf(utils.Yellow, "Couldn't connect to Hub. Establishing proxy..."))
|
||||
runProxy(false, true)
|
||||
}
|
||||
|
||||
interrupt := make(chan os.Signal, 1)
|
||||
signal.Notify(interrupt, os.Interrupt)
|
||||
|
||||
log.Info().Str("host", config.Config.Tap.Proxy.Host).Uint16("port", config.Config.Tap.Proxy.Hub.Port).Msg("Connecting to:")
|
||||
u := url.URL{
|
||||
Scheme: "ws",
|
||||
Host: fmt.Sprintf("%s:%d", config.Config.Tap.Proxy.Host, config.Config.Tap.Proxy.Hub.Port),
|
||||
Path: "/scripts/logs",
|
||||
}
|
||||
headers := http.Header{}
|
||||
headers.Set("License-Key", config.Config.License)
|
||||
|
||||
c, _, err := websocket.DefaultDialer.Dial(u.String(), headers)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Send()
|
||||
return
|
||||
}
|
||||
defer c.Close()
|
||||
|
||||
done := make(chan struct{})
|
||||
|
||||
go func() {
|
||||
defer close(done)
|
||||
for {
|
||||
_, message, err := c.ReadMessage()
|
||||
if err != nil {
|
||||
log.Error().Err(err).Send()
|
||||
return
|
||||
}
|
||||
|
||||
msg := string(message)
|
||||
if strings.Contains(msg, ":ERROR]") {
|
||||
msg = fmt.Sprintf(utils.Red, msg)
|
||||
fmt.Fprintln(os.Stderr, msg)
|
||||
} else {
|
||||
fmt.Fprintln(os.Stdout, msg)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
ticker := time.NewTicker(time.Second)
|
||||
defer ticker.Stop()
|
||||
|
||||
func runConsoleWithoutProxy() {
|
||||
log.Info().Msg("Starting scripting console ...")
|
||||
time.Sleep(5 * time.Second)
|
||||
hubUrl := kubernetes.GetHubUrl()
|
||||
for {
|
||||
|
||||
// Attempt to connect to the Hub every second
|
||||
response, err := http.Get(fmt.Sprintf("%s/echo", hubUrl))
|
||||
if err != nil || response.StatusCode != 200 {
|
||||
log.Info().Msg(fmt.Sprintf(utils.Yellow, "Couldn't connect to Hub."))
|
||||
time.Sleep(5 * time.Second)
|
||||
continue
|
||||
}
|
||||
|
||||
interrupt := make(chan os.Signal, 1)
|
||||
signal.Notify(interrupt, os.Interrupt)
|
||||
|
||||
log.Info().Str("host", config.Config.Tap.Proxy.Host).Str("url", hubUrl).Msg("Connecting to:")
|
||||
u := url.URL{
|
||||
Scheme: "ws",
|
||||
Host: fmt.Sprintf("%s:%d", config.Config.Tap.Proxy.Host, config.Config.Tap.Proxy.Front.Port),
|
||||
Path: "/api/scripts/logs",
|
||||
}
|
||||
headers := http.Header{}
|
||||
headers.Set(utils.X_KUBESHARK_CAPTURE_HEADER_KEY, utils.X_KUBESHARK_CAPTURE_HEADER_IGNORE_VALUE)
|
||||
headers.Set("License-Key", config.Config.License)
|
||||
|
||||
c, _, err := websocket.DefaultDialer.Dial(u.String(), headers)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("Websocket dial error, retrying in 5 seconds...")
|
||||
time.Sleep(5 * time.Second) // Delay before retrying
|
||||
continue
|
||||
}
|
||||
defer c.Close()
|
||||
|
||||
done := make(chan struct{})
|
||||
|
||||
go func() {
|
||||
defer close(done)
|
||||
for {
|
||||
_, message, err := c.ReadMessage()
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("Error reading websocket message, reconnecting...")
|
||||
break // Break to reconnect
|
||||
}
|
||||
|
||||
msg := string(message)
|
||||
if strings.Contains(msg, ":ERROR]") {
|
||||
msg = fmt.Sprintf(utils.Red, msg)
|
||||
fmt.Fprintln(os.Stderr, msg)
|
||||
} else {
|
||||
fmt.Fprintln(os.Stdout, msg)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
ticker := time.NewTicker(time.Second)
|
||||
defer ticker.Stop()
|
||||
|
||||
select {
case <-done:
log.Warn().Msg(fmt.Sprintf(utils.Yellow, "Connection closed, reconnecting..."))
time.Sleep(5 * time.Second) // Delay before reconnecting
continue // Reconnect after the read loop ends
case <-interrupt:
log.Warn().Msg(fmt.Sprintf(utils.Yellow, "Received interrupt, exiting..."))

err := c.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, ""))
if err != nil {
log.Error().Err(err).Send()
return
}
|
||||
|
||||
select {
|
||||
@@ -113,3 +120,37 @@ func runConsole() {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func runConsole() {
|
||||
go runConsoleWithoutProxy()
|
||||
|
||||
// Create interrupt channel and setup signal handling once
|
||||
interrupt := make(chan os.Signal, 1)
|
||||
signal.Notify(interrupt, os.Interrupt)
|
||||
done := make(chan struct{})
|
||||
|
||||
ticker := time.NewTicker(5 * time.Second)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-interrupt:
|
||||
// Handle interrupt and exit gracefully
|
||||
log.Warn().Msg(fmt.Sprintf(utils.Yellow, "Received interrupt, exiting..."))
|
||||
select {
|
||||
case <-done:
|
||||
case <-time.After(time.Second):
|
||||
}
|
||||
return
|
||||
|
||||
case <-ticker.C:
|
||||
// Attempt to connect to the Hub every second
|
||||
hubUrl := kubernetes.GetHubUrl()
|
||||
response, err := http.Get(fmt.Sprintf("%s/echo", hubUrl))
|
||||
if err != nil || response.StatusCode != 200 {
|
||||
log.Info().Msg(fmt.Sprintf(utils.Yellow, "Couldn't connect to Hub. Establishing proxy..."))
|
||||
runProxy(false, true)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,63 +0,0 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/creasty/defaults"
|
||||
"github.com/kubeshark/kubeshark/config"
|
||||
"github.com/kubeshark/kubeshark/config/configStructs"
|
||||
"github.com/kubeshark/kubeshark/internal/connect"
|
||||
"github.com/kubeshark/kubeshark/kubernetes"
|
||||
"github.com/kubeshark/kubeshark/utils"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var exportCmd = &cobra.Command{
|
||||
Use: "export",
|
||||
Short: "Exports the captured traffic into a TAR file that contains PCAP files",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
runExport()
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(exportCmd)
|
||||
|
||||
defaultTapConfig := configStructs.TapConfig{}
|
||||
if err := defaults.Set(&defaultTapConfig); err != nil {
|
||||
log.Debug().Err(err).Send()
|
||||
}
|
||||
|
||||
exportCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.Port, "Provide a custom port for the Hub")
|
||||
exportCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the Hub")
|
||||
exportCmd.Flags().StringP(configStructs.ReleaseNamespaceLabel, "s", defaultTapConfig.Release.Namespace, "Release namespace of Kubeshark")
|
||||
}
|
||||
|
||||
func runExport() {
|
||||
hubUrl := kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Hub.Port)
|
||||
response, err := http.Get(fmt.Sprintf("%s/echo", hubUrl))
|
||||
if err != nil || response.StatusCode != 200 {
|
||||
log.Info().Msg(fmt.Sprintf(utils.Yellow, "Couldn't connect to Hub. Establishing proxy..."))
|
||||
runProxy(false, true)
|
||||
}
|
||||
|
||||
dstPath, err := filepath.Abs(fmt.Sprintf("./%d.tar.gz", time.Now().Unix()))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
out, err := os.Create(dstPath)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer out.Close()
|
||||
|
||||
connector := connect.NewConnector(kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Hub.Port), connect.DefaultRetries, connect.DefaultTimeout)
|
||||
connector.PostPcapsMerge(out)
|
||||
}
|
||||
@@ -30,7 +30,7 @@ var logsCmd = &cobra.Command{
|
||||
|
||||
log.Debug().Str("logs-path", config.Config.Logs.FilePath()).Msg("Using this logs path...")
|
||||
|
||||
if dumpLogsErr := fsUtils.DumpLogs(ctx, kubernetesProvider, config.Config.Logs.FilePath()); dumpLogsErr != nil {
|
||||
if dumpLogsErr := fsUtils.DumpLogs(ctx, kubernetesProvider, config.Config.Logs.FilePath(), config.Config.Logs.Grep); dumpLogsErr != nil {
|
||||
log.Error().Err(dumpLogsErr).Msg("Failed to dump logs.")
|
||||
}
|
||||
|
||||
@@ -47,4 +47,5 @@ func init() {
|
||||
}
|
||||
|
||||
logsCmd.Flags().StringP(configStructs.FileLogsName, "f", defaultLogsConfig.FileStr, fmt.Sprintf("Path for zip file (default current <pwd>\\%s_logs.zip)", misc.Program))
|
||||
logsCmd.Flags().StringP(configStructs.GrepLogsName, "g", defaultLogsConfig.Grep, "Regexp to do grepping on the logs")
|
||||
}
|
||||
|
||||
cmd/mcp.go (new file, 125 lines)
@@ -0,0 +1,125 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"github.com/kubeshark/kubeshark/config"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var mcpURL string
|
||||
var mcpKubeconfig string
|
||||
var mcpListTools bool
|
||||
var mcpConfig bool
|
||||
var mcpAllowDestructive bool
|
||||
|
||||
var mcpCmd = &cobra.Command{
|
||||
Use: "mcp",
|
||||
Short: "Run MCP (Model Context Protocol) server for AI assistant integration",
|
||||
Long: `Run an MCP server over stdio that exposes Kubeshark's L7 API visibility
|
||||
to AI assistants like Claude Desktop.
|
||||
|
||||
TOOLS PROVIDED:
|
||||
|
||||
Cluster Management (work without Kubeshark running):
|
||||
- check_kubeshark_status: Check if Kubeshark is running in the cluster
|
||||
- start_kubeshark: Start Kubeshark to capture traffic
|
||||
- stop_kubeshark: Stop Kubeshark and clean up resources
|
||||
|
||||
Traffic Analysis (require Kubeshark running):
|
||||
- list_workloads: Discover pods, services, namespaces, and nodes with L7 traffic
|
||||
- list_api_calls: Query L7 API transactions (HTTP, gRPC, etc.)
|
||||
- get_api_call: Get detailed information about a specific API call
|
||||
- get_api_stats: Get aggregated API statistics
|
||||
|
||||
CONFIGURATION:
|
||||
|
||||
To use with Claude Desktop, add to your claude_desktop_config.json
|
||||
(typically at ~/Library/Application Support/Claude/claude_desktop_config.json):
|
||||
|
||||
{
|
||||
"mcpServers": {
|
||||
"kubeshark": {
|
||||
"command": "/path/to/kubeshark",
|
||||
"args": ["mcp", "--kubeconfig", "/Users/YOUR_USERNAME/.kube/config"]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
DIRECT URL MODE:
|
||||
|
||||
If Kubeshark is already running and accessible via URL (e.g., exposed via ingress),
|
||||
you can connect directly without needing kubectl/kubeconfig:
|
||||
|
||||
{
|
||||
"mcpServers": {
|
||||
"kubeshark": {
|
||||
"command": "/path/to/kubeshark",
|
||||
"args": ["mcp", "--url", "https://kubeshark.example.com"]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
In URL mode, destructive tools (start/stop) are disabled since Kubeshark is
|
||||
managed externally. The check_kubeshark_status tool remains available to confirm connectivity.
|
||||
|
||||
DESTRUCTIVE OPERATIONS:
|
||||
|
||||
By default, destructive operations (start_kubeshark, stop_kubeshark) are disabled
|
||||
to prevent accidental cluster modifications. To enable them, use --allow-destructive:
|
||||
|
||||
{
|
||||
"mcpServers": {
|
||||
"kubeshark": {
|
||||
"command": "/path/to/kubeshark",
|
||||
"args": ["mcp", "--allow-destructive", "--kubeconfig", "/path/to/.kube/config"]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
CUSTOM SETTINGS:
|
||||
|
||||
To use custom settings when starting Kubeshark, use the --set flag:
|
||||
|
||||
{
|
||||
"mcpServers": {
|
||||
"kubeshark": {
|
||||
"command": "/path/to/kubeshark",
|
||||
"args": ["mcp", "--set", "tap.docker.tag=v52.3"],
|
||||
...
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Multiple --set flags can be used for different settings.`,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
// Handle --mcp-config flag
|
||||
if mcpConfig {
|
||||
printMCPConfig(mcpURL, mcpKubeconfig)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Set kubeconfig path if provided
|
||||
if mcpKubeconfig != "" {
|
||||
config.Config.Kube.ConfigPathStr = mcpKubeconfig
|
||||
}
|
||||
|
||||
// Handle --list-tools flag
|
||||
if mcpListTools {
|
||||
listMCPTools(mcpURL)
|
||||
return nil
|
||||
}
|
||||
|
||||
setFlags, _ := cmd.Flags().GetStringSlice(config.SetCommandName)
|
||||
runMCPWithConfig(setFlags, mcpURL, mcpAllowDestructive)
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(mcpCmd)
|
||||
|
||||
mcpCmd.Flags().StringVar(&mcpURL, "url", "", "Direct URL to Kubeshark (e.g., https://kubeshark.example.com). When set, connects directly without kubectl/proxy and disables start/stop tools.")
|
||||
mcpCmd.Flags().StringVar(&mcpKubeconfig, "kubeconfig", "", "Path to kubeconfig file (e.g., /Users/me/.kube/config)")
|
||||
mcpCmd.Flags().BoolVar(&mcpListTools, "list-tools", false, "List available MCP tools and exit")
|
||||
mcpCmd.Flags().BoolVar(&mcpConfig, "mcp-config", false, "Print MCP client configuration JSON and exit")
|
||||
mcpCmd.Flags().BoolVar(&mcpAllowDestructive, "allow-destructive", false, "Enable destructive operations (start_kubeshark, stop_kubeshark). Without this flag, only read-only traffic analysis tools are available.")
|
||||
}
|
||||
cmd/mcpRunner.go (new file, 1071 lines; diff suppressed because it is too large)
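Since the cmd/mcpRunner.go diff is suppressed, here is a minimal, hypothetical sketch of the kind of stdio JSON-RPC loop that cmd/mcp_test.go below exercises: blank input lines are skipped, unparseable lines get a -32700 error, ping returns an empty result, initialized notifications produce no output, and unknown methods return -32601. It is not the actual implementation; all type and function names in the sketch are assumptions.

package main

import (
    "bufio"
    "encoding/json"
    "fmt"
    "os"
    "strings"
)

// jsonRPCIn and jsonRPCOut are assumed shapes, mirroring only the fields the tests read.
type jsonRPCIn struct {
    JSONRPC string          `json:"jsonrpc"`
    ID      any             `json:"id,omitempty"`
    Method  string          `json:"method"`
    Params  json.RawMessage `json:"params,omitempty"`
}

type jsonRPCOut struct {
    JSONRPC string `json:"jsonrpc"`
    ID      any    `json:"id,omitempty"`
    Result  any    `json:"result,omitempty"`
    Error   any    `json:"error,omitempty"`
}

func main() {
    in := bufio.NewScanner(os.Stdin)
    out := json.NewEncoder(os.Stdout)
    for in.Scan() {
        line := strings.TrimSpace(in.Text())
        if line == "" {
            continue // blank lines are ignored
        }
        var req jsonRPCIn
        if err := json.Unmarshal([]byte(line), &req); err != nil {
            // -32700 is the JSON-RPC "parse error" code
            _ = out.Encode(jsonRPCOut{JSONRPC: "2.0", Error: map[string]any{"code": -32700, "message": "parse error"}})
            continue
        }
        switch req.Method {
        case "ping":
            _ = out.Encode(jsonRPCOut{JSONRPC: "2.0", ID: req.ID, Result: map[string]any{}})
        case "initialized", "notifications/initialized":
            // notifications get no response
        default:
            _ = out.Encode(jsonRPCOut{JSONRPC: "2.0", ID: req.ID, Error: map[string]any{"code": -32601, "message": fmt.Sprintf("method %q not found", req.Method)}})
        }
    }
}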
cmd/mcp_test.go (new file, 495 lines)
@@ -0,0 +1,495 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func newTestMCPServer() *mcpServer {
|
||||
return &mcpServer{httpClient: &http.Client{}, stdin: &bytes.Buffer{}, stdout: &bytes.Buffer{}}
|
||||
}
|
||||
|
||||
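// sendRequest marshals a JSON-RPC request, hands it to the server's handler, and returns whatever was written to the buffered stdout.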
func sendRequest(s *mcpServer, method string, id any, params any) string {
|
||||
req := jsonRPCRequest{
|
||||
JSONRPC: "2.0",
|
||||
ID: id,
|
||||
Method: method,
|
||||
}
|
||||
if params != nil {
|
||||
paramsBytes, _ := json.Marshal(params)
|
||||
req.Params = paramsBytes
|
||||
}
|
||||
|
||||
s.handleRequest(&req)
|
||||
|
||||
output := s.stdout.(*bytes.Buffer).String()
|
||||
s.stdout.(*bytes.Buffer).Reset()
|
||||
return output
|
||||
}
|
||||
|
||||
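// parseResponse decodes a single JSON-RPC response line, failing the test on malformed output.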
func parseResponse(t *testing.T, output string) jsonRPCResponse {
|
||||
var resp jsonRPCResponse
|
||||
if err := json.Unmarshal([]byte(strings.TrimSpace(output)), &resp); err != nil {
|
||||
t.Fatalf("Failed to parse response: %v\nOutput: %s", err, output)
|
||||
}
|
||||
return resp
|
||||
}
|
||||
|
||||
func TestMCP_Initialize(t *testing.T) {
|
||||
s := newTestMCPServer()
|
||||
resp := parseResponse(t, sendRequest(s, "initialize", 1, nil))
|
||||
|
||||
if resp.ID != float64(1) || resp.Error != nil {
|
||||
t.Fatalf("Expected ID 1 with no error, got ID=%v, error=%v", resp.ID, resp.Error)
|
||||
}
|
||||
|
||||
result := resp.Result.(map[string]any)
|
||||
if result["protocolVersion"] != "2024-11-05" {
|
||||
t.Errorf("Expected protocolVersion 2024-11-05, got %v", result["protocolVersion"])
|
||||
}
|
||||
if result["serverInfo"].(map[string]any)["name"] != "kubeshark-mcp" {
|
||||
t.Error("Expected server name kubeshark-mcp")
|
||||
}
|
||||
if !strings.Contains(result["instructions"].(string), "check_kubeshark_status") {
|
||||
t.Error("Instructions should mention check_kubeshark_status")
|
||||
}
|
||||
if _, ok := result["capabilities"].(map[string]any)["prompts"]; !ok {
|
||||
t.Error("Expected prompts capability")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_Ping(t *testing.T) {
|
||||
resp := parseResponse(t, sendRequest(newTestMCPServer(), "ping", 42, nil))
|
||||
if resp.ID != float64(42) || resp.Error != nil || len(resp.Result.(map[string]any)) != 0 {
|
||||
t.Errorf("Expected ID 42, no error, empty result")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_InitializedNotification(t *testing.T) {
|
||||
s := newTestMCPServer()
|
||||
for _, method := range []string{"initialized", "notifications/initialized"} {
|
||||
if output := sendRequest(s, method, nil, nil); output != "" {
|
||||
t.Errorf("Expected no output for %s, got: %s", method, output)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_UnknownMethod(t *testing.T) {
|
||||
resp := parseResponse(t, sendRequest(newTestMCPServer(), "unknown/method", 1, nil))
|
||||
if resp.Error == nil || resp.Error.Code != -32601 {
|
||||
t.Fatalf("Expected error code -32601, got %v", resp.Error)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_PromptsList(t *testing.T) {
|
||||
resp := parseResponse(t, sendRequest(newTestMCPServer(), "prompts/list", 1, nil))
|
||||
if resp.Error != nil {
|
||||
t.Fatalf("Unexpected error: %v", resp.Error)
|
||||
}
|
||||
prompts := resp.Result.(map[string]any)["prompts"].([]any)
|
||||
if len(prompts) != 1 || prompts[0].(map[string]any)["name"] != "kubeshark_usage" {
|
||||
t.Error("Expected 1 prompt named 'kubeshark_usage'")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_PromptsGet(t *testing.T) {
|
||||
resp := parseResponse(t, sendRequest(newTestMCPServer(), "prompts/get", 1, map[string]any{"name": "kubeshark_usage"}))
|
||||
if resp.Error != nil {
|
||||
t.Fatalf("Unexpected error: %v", resp.Error)
|
||||
}
|
||||
messages := resp.Result.(map[string]any)["messages"].([]any)
|
||||
if len(messages) == 0 {
|
||||
t.Fatal("Expected at least one message")
|
||||
}
|
||||
text := messages[0].(map[string]any)["content"].(map[string]any)["text"].(string)
|
||||
for _, phrase := range []string{"check_kubeshark_status", "start_kubeshark", "stop_kubeshark"} {
|
||||
if !strings.Contains(text, phrase) {
|
||||
t.Errorf("Prompt should contain '%s'", phrase)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_PromptsGet_UnknownPrompt(t *testing.T) {
|
||||
resp := parseResponse(t, sendRequest(newTestMCPServer(), "prompts/get", 1, map[string]any{"name": "unknown"}))
|
||||
if resp.Error == nil || resp.Error.Code != -32602 {
|
||||
t.Fatalf("Expected error code -32602, got %v", resp.Error)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_ToolsList_CLIOnly(t *testing.T) {
|
||||
resp := parseResponse(t, sendRequest(newTestMCPServer(), "tools/list", 1, nil))
|
||||
if resp.Error != nil {
|
||||
t.Fatalf("Unexpected error: %v", resp.Error)
|
||||
}
|
||||
tools := resp.Result.(map[string]any)["tools"].([]any)
|
||||
if len(tools) != 1 || tools[0].(map[string]any)["name"] != "check_kubeshark_status" {
|
||||
t.Error("Expected only check_kubeshark_status tool")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_ToolsList_WithDestructive(t *testing.T) {
|
||||
s := &mcpServer{httpClient: &http.Client{}, stdin: &bytes.Buffer{}, stdout: &bytes.Buffer{}, allowDestructive: true}
|
||||
resp := parseResponse(t, sendRequest(s, "tools/list", 1, nil))
|
||||
if resp.Error != nil {
|
||||
t.Fatalf("Unexpected error: %v", resp.Error)
|
||||
}
|
||||
tools := resp.Result.(map[string]any)["tools"].([]any)
|
||||
toolNames := make(map[string]bool)
|
||||
for _, tool := range tools {
|
||||
toolNames[tool.(map[string]any)["name"].(string)] = true
|
||||
}
|
||||
for _, expected := range []string{"check_kubeshark_status", "start_kubeshark", "stop_kubeshark"} {
|
||||
if !toolNames[expected] {
|
||||
t.Errorf("Missing expected tool: %s", expected)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_ToolsList_WithHubBackend(t *testing.T) {
|
||||
mockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.URL.Path == "/" || r.URL.Path == "" {
|
||||
_, _ = w.Write([]byte(`{"name":"hub","tools":[{"name":"list_workloads","description":"","inputSchema":{}},{"name":"list_api_calls","description":"","inputSchema":{}}]}`))
|
||||
}
|
||||
}))
|
||||
defer mockServer.Close()
|
||||
|
||||
s := &mcpServer{httpClient: &http.Client{}, stdin: &bytes.Buffer{}, stdout: &bytes.Buffer{}, hubBaseURL: mockServer.URL, backendInitialized: true, allowDestructive: true}
|
||||
resp := parseResponse(t, sendRequest(s, "tools/list", 1, nil))
|
||||
if resp.Error != nil {
|
||||
t.Fatalf("Unexpected error: %v", resp.Error)
|
||||
}
|
||||
tools := resp.Result.(map[string]any)["tools"].([]any)
|
||||
// Should have CLI tools (3) + Hub tools (2) = 5 tools
|
||||
if len(tools) < 5 {
|
||||
t.Errorf("Expected at least 5 tools, got %d", len(tools))
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_ToolsCallUnknownTool(t *testing.T) {
|
||||
s, mockServer := newTestMCPServerWithMockBackend(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
})
|
||||
defer mockServer.Close()
|
||||
|
||||
resp := parseResponse(t, sendRequest(s, "tools/call", 1, mcpCallToolParams{Name: "unknown"}))
|
||||
if !resp.Result.(map[string]any)["isError"].(bool) {
|
||||
t.Error("Expected isError=true for unknown tool")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_ToolsCallInvalidParams(t *testing.T) {
|
||||
s := newTestMCPServer()
|
||||
req := jsonRPCRequest{JSONRPC: "2.0", ID: 1, Method: "tools/call", Params: json.RawMessage(`"invalid"`)}
|
||||
s.handleRequest(&req)
|
||||
resp := parseResponse(t, s.stdout.(*bytes.Buffer).String())
|
||||
if resp.Error == nil || resp.Error.Code != -32602 {
|
||||
t.Fatalf("Expected error code -32602")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_CheckKubesharkStatus(t *testing.T) {
|
||||
for _, tc := range []struct {
|
||||
name string
|
||||
args map[string]any
|
||||
}{
|
||||
{"no_config", map[string]any{}},
|
||||
{"with_namespace", map[string]any{"release_namespace": "custom-ns"}},
|
||||
} {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
resp := parseResponse(t, sendRequest(newTestMCPServer(), "tools/call", 1, mcpCallToolParams{Name: "check_kubeshark_status", Arguments: tc.args}))
|
||||
if resp.Error != nil {
|
||||
t.Fatalf("Unexpected error: %v", resp.Error)
|
||||
}
|
||||
content := resp.Result.(map[string]any)["content"].([]any)
|
||||
if len(content) == 0 || content[0].(map[string]any)["text"].(string) == "" {
|
||||
t.Error("Expected non-empty response")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func newTestMCPServerWithMockBackend(handler http.HandlerFunc) (*mcpServer, *httptest.Server) {
|
||||
mockServer := httptest.NewServer(handler)
|
||||
return &mcpServer{httpClient: &http.Client{}, stdin: &bytes.Buffer{}, stdout: &bytes.Buffer{}, hubBaseURL: mockServer.URL, backendInitialized: true}, mockServer
|
||||
}
|
||||
|
||||
type hubToolCallRequest struct {
|
||||
Tool string `json:"tool"`
|
||||
Arguments map[string]any `json:"arguments"`
|
||||
}
|
||||
|
||||
func newMockHubHandler(t *testing.T, handler func(req hubToolCallRequest) (string, int)) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.URL.Path != "/tools/call" || r.Method != http.MethodPost {
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
var req hubToolCallRequest
|
||||
_ = json.NewDecoder(r.Body).Decode(&req)
|
||||
resp, status := handler(req)
|
||||
w.WriteHeader(status)
|
||||
_, _ = w.Write([]byte(resp))
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_ListWorkloads(t *testing.T) {
|
||||
s, mockServer := newTestMCPServerWithMockBackend(newMockHubHandler(t, func(req hubToolCallRequest) (string, int) {
|
||||
if req.Tool != "list_workloads" {
|
||||
t.Errorf("Expected tool 'list_workloads', got %s", req.Tool)
|
||||
}
|
||||
return `{"workloads": [{"name": "test-pod"}]}`, http.StatusOK
|
||||
}))
|
||||
defer mockServer.Close()
|
||||
|
||||
resp := parseResponse(t, sendRequest(s, "tools/call", 1, mcpCallToolParams{Name: "list_workloads", Arguments: map[string]any{"type": "pod"}}))
|
||||
if resp.Error != nil {
|
||||
t.Fatalf("Unexpected error: %v", resp.Error)
|
||||
}
|
||||
text := resp.Result.(map[string]any)["content"].([]any)[0].(map[string]any)["text"].(string)
|
||||
if !strings.Contains(text, "test-pod") {
|
||||
t.Errorf("Expected 'test-pod' in response")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_ListAPICalls(t *testing.T) {
|
||||
s, mockServer := newTestMCPServerWithMockBackend(newMockHubHandler(t, func(req hubToolCallRequest) (string, int) {
|
||||
if req.Tool != "list_api_calls" {
|
||||
t.Errorf("Expected tool 'list_api_calls', got %s", req.Tool)
|
||||
}
|
||||
return `{"calls": [{"id": "123", "path": "/api/users"}]}`, http.StatusOK
|
||||
}))
|
||||
defer mockServer.Close()
|
||||
|
||||
resp := parseResponse(t, sendRequest(s, "tools/call", 1, mcpCallToolParams{Name: "list_api_calls", Arguments: map[string]any{"proto": "http"}}))
|
||||
if resp.Error != nil {
|
||||
t.Fatalf("Unexpected error: %v", resp.Error)
|
||||
}
|
||||
if !strings.Contains(resp.Result.(map[string]any)["content"].([]any)[0].(map[string]any)["text"].(string), "/api/users") {
|
||||
t.Error("Expected '/api/users' in response")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_GetAPICall(t *testing.T) {
|
||||
s, mockServer := newTestMCPServerWithMockBackend(newMockHubHandler(t, func(req hubToolCallRequest) (string, int) {
|
||||
if req.Tool != "get_api_call" || req.Arguments["id"] != "abc123" {
|
||||
t.Errorf("Expected get_api_call with id=abc123")
|
||||
}
|
||||
return `{"id": "abc123", "path": "/api/orders"}`, http.StatusOK
|
||||
}))
|
||||
defer mockServer.Close()
|
||||
|
||||
resp := parseResponse(t, sendRequest(s, "tools/call", 1, mcpCallToolParams{Name: "get_api_call", Arguments: map[string]any{"id": "abc123"}}))
|
||||
if resp.Error != nil || !strings.Contains(resp.Result.(map[string]any)["content"].([]any)[0].(map[string]any)["text"].(string), "abc123") {
|
||||
t.Error("Expected response containing 'abc123'")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_GetAPICall_MissingID(t *testing.T) {
|
||||
s, mockServer := newTestMCPServerWithMockBackend(newMockHubHandler(t, func(req hubToolCallRequest) (string, int) {
|
||||
return `{"error": "id is required"}`, http.StatusBadRequest
|
||||
}))
|
||||
defer mockServer.Close()
|
||||
|
||||
resp := parseResponse(t, sendRequest(s, "tools/call", 1, mcpCallToolParams{Name: "get_api_call", Arguments: map[string]any{}}))
|
||||
if !resp.Result.(map[string]any)["isError"].(bool) {
|
||||
t.Error("Expected isError=true")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_GetAPIStats(t *testing.T) {
|
||||
s, mockServer := newTestMCPServerWithMockBackend(newMockHubHandler(t, func(req hubToolCallRequest) (string, int) {
|
||||
if req.Tool != "get_api_stats" {
|
||||
t.Errorf("Expected get_api_stats")
|
||||
}
|
||||
return `{"stats": {"total_calls": 1000}}`, http.StatusOK
|
||||
}))
|
||||
defer mockServer.Close()
|
||||
|
||||
resp := parseResponse(t, sendRequest(s, "tools/call", 1, mcpCallToolParams{Name: "get_api_stats", Arguments: map[string]any{"ns": "prod"}}))
|
||||
if resp.Error != nil || !strings.Contains(resp.Result.(map[string]any)["content"].([]any)[0].(map[string]any)["text"].(string), "total_calls") {
|
||||
t.Error("Expected 'total_calls' in response")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_APITools_BackendError(t *testing.T) {
|
||||
s, mockServer := newTestMCPServerWithMockBackend(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
})
|
||||
defer mockServer.Close()
|
||||
|
||||
resp := parseResponse(t, sendRequest(s, "tools/call", 1, mcpCallToolParams{Name: "list_workloads"}))
|
||||
if !resp.Result.(map[string]any)["isError"].(bool) {
|
||||
t.Error("Expected isError=true for backend error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_APITools_BackendConnectionError(t *testing.T) {
|
||||
s := &mcpServer{httpClient: &http.Client{}, stdin: &bytes.Buffer{}, stdout: &bytes.Buffer{}, hubBaseURL: "http://localhost:99999", backendInitialized: true}
|
||||
resp := parseResponse(t, sendRequest(s, "tools/call", 1, mcpCallToolParams{Name: "list_workloads"}))
|
||||
if !resp.Result.(map[string]any)["isError"].(bool) {
|
||||
t.Error("Expected isError=true for connection error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_RunLoop_ParseError(t *testing.T) {
|
||||
output := &bytes.Buffer{}
|
||||
s := &mcpServer{httpClient: &http.Client{}, stdin: strings.NewReader("invalid\n"), stdout: output}
|
||||
s.run()
|
||||
if resp := parseResponse(t, output.String()); resp.Error == nil || resp.Error.Code != -32700 {
|
||||
t.Fatalf("Expected error code -32700")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_RunLoop_MultipleRequests(t *testing.T) {
|
||||
output := &bytes.Buffer{}
|
||||
s := &mcpServer{httpClient: &http.Client{}, stdin: strings.NewReader(`{"jsonrpc":"2.0","id":1,"method":"ping"}
|
||||
{"jsonrpc":"2.0","id":2,"method":"ping"}
|
||||
`), stdout: output}
|
||||
s.run()
|
||||
if lines := strings.Split(strings.TrimSpace(output.String()), "\n"); len(lines) != 2 {
|
||||
t.Fatalf("Expected 2 responses, got %d", len(lines))
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_RunLoop_EmptyLines(t *testing.T) {
|
||||
output := &bytes.Buffer{}
|
||||
s := &mcpServer{httpClient: &http.Client{}, stdin: strings.NewReader("\n\n{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"ping\"}\n"), stdout: output}
|
||||
s.run()
|
||||
if lines := strings.Split(strings.TrimSpace(output.String()), "\n"); len(lines) != 1 {
|
||||
t.Fatalf("Expected 1 response, got %d", len(lines))
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_ResponseFormat(t *testing.T) {
|
||||
s := newTestMCPServer()
|
||||
// Numeric ID
|
||||
if resp := parseResponse(t, sendRequest(s, "ping", 123, nil)); resp.ID != float64(123) || resp.JSONRPC != "2.0" {
|
||||
t.Errorf("Expected ID 123 and jsonrpc 2.0")
|
||||
}
|
||||
// String ID
|
||||
if resp := parseResponse(t, sendRequest(s, "ping", "str", nil)); resp.ID != "str" {
|
||||
t.Errorf("Expected ID 'str'")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_ToolCallResult_ContentFormat(t *testing.T) {
|
||||
s, mockServer := newTestMCPServerWithMockBackend(func(w http.ResponseWriter, r *http.Request) {
|
||||
_, _ = w.Write([]byte(`{"data": "test"}`))
|
||||
})
|
||||
defer mockServer.Close()
|
||||
|
||||
resp := parseResponse(t, sendRequest(s, "tools/call", 1, mcpCallToolParams{Name: "list_workloads"}))
|
||||
content := resp.Result.(map[string]any)["content"].([]any)
|
||||
if len(content) == 0 || content[0].(map[string]any)["type"] != "text" {
|
||||
t.Error("Expected content with type=text")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_CommandArgs(t *testing.T) {
|
||||
// Test start command args building
|
||||
for _, tc := range []struct {
|
||||
args map[string]any
|
||||
expected string
|
||||
}{
|
||||
{map[string]any{}, "tap --set headless=true"},
|
||||
{map[string]any{"pod_regex": "nginx.*"}, "tap nginx.* --set headless=true"},
|
||||
{map[string]any{"namespaces": "default"}, "tap -n default --set headless=true"},
|
||||
{map[string]any{"release_namespace": "ks"}, "tap -s ks --set headless=true"},
|
||||
} {
|
||||
cmdArgs := []string{"tap"}
|
||||
if v, _ := tc.args["pod_regex"].(string); v != "" {
|
||||
cmdArgs = append(cmdArgs, v)
|
||||
}
|
||||
if v, _ := tc.args["namespaces"].(string); v != "" {
|
||||
for _, ns := range strings.Split(v, ",") {
|
||||
cmdArgs = append(cmdArgs, "-n", strings.TrimSpace(ns))
|
||||
}
|
||||
}
|
||||
if v, _ := tc.args["release_namespace"].(string); v != "" {
|
||||
cmdArgs = append(cmdArgs, "-s", v)
|
||||
}
|
||||
cmdArgs = append(cmdArgs, "--set", "headless=true")
|
||||
if got := strings.Join(cmdArgs, " "); got != tc.expected {
|
||||
t.Errorf("Expected %q, got %q", tc.expected, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_PrettyPrintJSON(t *testing.T) {
|
||||
s, mockServer := newTestMCPServerWithMockBackend(func(w http.ResponseWriter, r *http.Request) {
|
||||
_, _ = w.Write([]byte(`{"key":"value"}`))
|
||||
})
|
||||
defer mockServer.Close()
|
||||
|
||||
resp := parseResponse(t, sendRequest(s, "tools/call", 1, mcpCallToolParams{Name: "list_workloads"}))
|
||||
text := resp.Result.(map[string]any)["content"].([]any)[0].(map[string]any)["text"].(string)
|
||||
if !strings.Contains(text, "\n") {
|
||||
t.Error("Expected pretty-printed JSON")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_SpecialCharsAndEdgeCases(t *testing.T) {
|
||||
s, mockServer := newTestMCPServerWithMockBackend(func(w http.ResponseWriter, r *http.Request) {
|
||||
_, _ = w.Write([]byte(`{}`))
|
||||
})
|
||||
defer mockServer.Close()
|
||||
|
||||
// Test special chars, empty args, nil args
|
||||
for _, args := range []map[string]any{
|
||||
{"path": "/api?id=123"},
|
||||
{"id": "abc/123"},
|
||||
{},
|
||||
nil,
|
||||
} {
|
||||
resp := parseResponse(t, sendRequest(s, "tools/call", 1, mcpCallToolParams{Name: "list_workloads", Arguments: args}))
|
||||
if resp.Error != nil {
|
||||
t.Errorf("Unexpected error with args %v: %v", args, resp.Error)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_BackendInitialization_Concurrent(t *testing.T) {
|
||||
s := newTestMCPServer()
|
||||
done := make(chan bool, 10)
|
||||
for i := 0; i < 10; i++ {
|
||||
go func() { s.ensureBackendConnection(); done <- true }()
|
||||
}
|
||||
for i := 0; i < 10; i++ {
|
||||
<-done
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_FullConversation(t *testing.T) {
|
||||
mockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.URL.Path == "/" {
|
||||
_, _ = w.Write([]byte(`{"name":"hub","tools":[{"name":"list_workloads","description":"","inputSchema":{}}]}`))
|
||||
} else if r.URL.Path == "/tools/call" {
|
||||
_, _ = w.Write([]byte(`{"data":"ok"}`))
|
||||
}
|
||||
}))
|
||||
defer mockServer.Close()
|
||||
|
||||
input := `{"jsonrpc":"2.0","id":1,"method":"initialize"}
|
||||
{"jsonrpc":"2.0","method":"notifications/initialized"}
|
||||
{"jsonrpc":"2.0","id":2,"method":"tools/list"}
|
||||
{"jsonrpc":"2.0","id":3,"method":"tools/call","params":{"name":"list_workloads","arguments":{}}}
|
||||
`
|
||||
output := &bytes.Buffer{}
|
||||
s := &mcpServer{httpClient: &http.Client{}, stdin: strings.NewReader(input), stdout: output, hubBaseURL: mockServer.URL, backendInitialized: true}
|
||||
s.run()
|
||||
|
||||
lines := strings.Split(strings.TrimSpace(output.String()), "\n")
|
||||
if len(lines) != 3 { // 3 responses (notification has no response)
|
||||
t.Errorf("Expected 3 responses, got %d", len(lines))
|
||||
}
|
||||
for i, line := range lines {
|
||||
var resp jsonRPCResponse
|
||||
if err := json.Unmarshal([]byte(line), &resp); err != nil || resp.Error != nil {
|
||||
t.Errorf("Response %d: parse error or unexpected error", i)
|
||||
}
|
||||
}
|
||||
}
|
||||
cmd/pcapDump.go (new file, 110 lines)
@@ -0,0 +1,110 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/creasty/defaults"
|
||||
"github.com/kubeshark/kubeshark/config/configStructs"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/spf13/cobra"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
"k8s.io/client-go/util/homedir"
|
||||
)
|
||||
|
||||
// pcapDumpCmd represents the consolidated pcapdump command
|
||||
var pcapDumpCmd = &cobra.Command{
|
||||
Use: "pcapdump",
|
||||
Short: "Store all captured traffic (including decrypted TLS) in a PCAP file.",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
// Retrieve the kubeconfig path from the flag
|
||||
kubeconfig, _ := cmd.Flags().GetString(configStructs.PcapKubeconfig)
|
||||
|
||||
// If kubeconfig is not provided, use the default location
|
||||
if kubeconfig == "" {
|
||||
if home := homedir.HomeDir(); home != "" {
|
||||
kubeconfig = filepath.Join(home, ".kube", "config")
|
||||
} else {
|
||||
return errors.New("kubeconfig flag not provided and no home directory available for default config location")
|
||||
}
|
||||
}
|
||||
|
||||
debugEnabled, _ := cmd.Flags().GetBool("debug")
|
||||
if debugEnabled {
|
||||
zerolog.SetGlobalLevel(zerolog.DebugLevel)
|
||||
log.Debug().Msg("Debug logging enabled")
|
||||
} else {
|
||||
zerolog.SetGlobalLevel(zerolog.InfoLevel)
|
||||
}
|
||||
|
||||
// Use the current context in kubeconfig
|
||||
config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error building kubeconfig: %w", err)
|
||||
}
|
||||
|
||||
clientset, err := kubernetes.NewForConfig(config)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error creating Kubernetes client: %w", err)
|
||||
}
|
||||
|
||||
// Parse the `--time` flag
|
||||
timeIntervalStr, _ := cmd.Flags().GetString("time")
|
||||
var cutoffTime *time.Time // Use a pointer to distinguish between provided and not provided
|
||||
if timeIntervalStr != "" {
|
||||
duration, err := time.ParseDuration(timeIntervalStr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Invalid format %w", err)
|
||||
}
|
||||
tempCutoffTime := time.Now().Add(-duration)
|
||||
cutoffTime = &tempCutoffTime
|
||||
}
|
||||
|
||||
// Test the dest dir if provided
|
||||
destDir, _ := cmd.Flags().GetString(configStructs.PcapDest)
|
||||
if destDir != "" {
|
||||
info, err := os.Stat(destDir)
|
||||
if os.IsNotExist(err) {
|
||||
return fmt.Errorf("Directory does not exist: %s", destDir)
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error checking dest directory: %w", err)
|
||||
}
|
||||
if !info.IsDir() {
|
||||
return fmt.Errorf("Dest path is not a directory: %s", destDir)
|
||||
}
|
||||
tempFile, err := os.CreateTemp(destDir, "write-test-*")
|
||||
if err != nil {
|
||||
return fmt.Errorf("Directory %s is not writable", destDir)
|
||||
}
|
||||
_ = os.Remove(tempFile.Name())
|
||||
}
|
||||
|
||||
log.Info().Msg("Copying PCAP files")
|
||||
err = copyPcapFiles(clientset, config, destDir, cutoffTime)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(pcapDumpCmd)
|
||||
|
||||
defaultPcapDumpConfig := configStructs.PcapDumpConfig{}
|
||||
if err := defaults.Set(&defaultPcapDumpConfig); err != nil {
|
||||
log.Debug().Err(err).Send()
|
||||
}
|
||||
|
||||
pcapDumpCmd.Flags().String(configStructs.PcapTime, "", "Time interval (e.g., 10m, 1h) in the past for which the pcaps are copied")
|
||||
pcapDumpCmd.Flags().String(configStructs.PcapDest, "", "Local destination path for copied PCAP files (cannot be used together with --enabled)")
|
||||
pcapDumpCmd.Flags().String(configStructs.PcapKubeconfig, "", "Path for kubeconfig (if not provided the default location will be checked)")
|
||||
pcapDumpCmd.Flags().Bool("debug", false, "Enable debug logging")
|
||||
}
|
||||
cmd/pcapDumpRunner.go (new file, 371 lines)
@@ -0,0 +1,371 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/kubeshark/gopacket/pcapgo"
|
||||
"github.com/rs/zerolog/log"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
clientk8s "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/remotecommand"
|
||||
)
|
||||
|
||||
const (
|
||||
label = "app.kubeshark.com/app=worker"
|
||||
srcDir = "pcapdump"
|
||||
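// maxSnaplen is the pcap snapshot length written to the merged file header (256 KiB).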
maxSnaplen uint32 = 262144
|
||||
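// maxTimePerFile bounds how long copying a single file may take before its context is cancelled.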
maxTimePerFile = time.Minute * 5
|
||||
)
|
||||
|
||||
// PodFileInfo represents information about a pod, its namespace, and associated files
|
||||
type PodFileInfo struct {
|
||||
Pod corev1.Pod
|
||||
SrcDir string
|
||||
Files []string
|
||||
CopiedFiles []string
|
||||
}
|
||||
|
||||
// listWorkerPods fetches all worker pods from multiple namespaces
|
||||
func listWorkerPods(ctx context.Context, clientset *clientk8s.Clientset, namespaces []string) ([]*PodFileInfo, error) {
|
||||
var podFileInfos []*PodFileInfo
|
||||
var errs []error
|
||||
labelSelector := label
|
||||
|
||||
for _, namespace := range namespaces {
|
||||
// List all pods matching the label in the current namespace
|
||||
pods, err := clientset.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{
|
||||
LabelSelector: labelSelector,
|
||||
})
|
||||
if err != nil {
|
||||
errs = append(errs, fmt.Errorf("failed to list worker pods in namespace %s: %w", namespace, err))
|
||||
continue
|
||||
}
|
||||
|
||||
for _, pod := range pods.Items {
|
||||
podFileInfos = append(podFileInfos, &PodFileInfo{
|
||||
Pod: pod,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return podFileInfos, errors.Join(errs...)
|
||||
}
|
||||
|
||||
// listFilesInPodDir lists all files in the specified directory inside the pod across multiple namespaces
|
||||
func listFilesInPodDir(ctx context.Context, clientset *clientk8s.Clientset, config *rest.Config, pod *PodFileInfo, cutoffTime *time.Time) error {
|
||||
nodeName := pod.Pod.Spec.NodeName
|
||||
srcFilePath := filepath.Join("data", nodeName, srcDir)
|
||||
|
||||
cmd := []string{"ls", srcFilePath}
|
||||
req := clientset.CoreV1().RESTClient().Post().
|
||||
Resource("pods").
|
||||
Name(pod.Pod.Name).
|
||||
Namespace(pod.Pod.Namespace).
|
||||
SubResource("exec").
|
||||
Param("container", "sniffer").
|
||||
Param("stdout", "true").
|
||||
Param("stderr", "true").
|
||||
Param("command", cmd[0]).
|
||||
Param("command", cmd[1])
|
||||
|
||||
exec, err := remotecommand.NewSPDYExecutor(config, "POST", req.URL())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var stdoutBuf bytes.Buffer
|
||||
var stderrBuf bytes.Buffer
|
||||
|
||||
// Execute the command to list files
|
||||
err = exec.StreamWithContext(ctx, remotecommand.StreamOptions{
|
||||
Stdout: &stdoutBuf,
|
||||
Stderr: &stderrBuf,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
output := strings.TrimSpace(stdoutBuf.String())
if output == "" {
// No files were found in the target dir for this pod
return nil
}
// Split the output (file names) into a list
files := strings.Split(output, "\n")
|
||||
|
||||
var filteredFiles []string
|
||||
var fileProcessingErrs []error
|
||||
// Filter files based on cutoff time if provided
|
||||
for _, file := range files {
|
||||
if cutoffTime != nil {
|
||||
parts := strings.Split(file, "-")
|
||||
if len(parts) < 2 {
|
||||
continue
|
||||
}
|
||||
|
||||
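// File names are expected to end in -YYYYMMDD-HHMMSS...; the last two dash-separated parts are combined into a YYYYMMDDHHMMSS timestamp.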
timestampStr := parts[len(parts)-2] + parts[len(parts)-1][:6] // Extract YYYYMMDDHHMMSS
|
||||
fileTime, err := time.Parse("20060102150405", timestampStr)
|
||||
if err != nil {
|
||||
fileProcessingErrs = append(fileProcessingErrs, fmt.Errorf("failed to parse file timestamp %s: %w", file, err))
|
||||
continue
|
||||
}
|
||||
|
||||
if fileTime.Before(*cutoffTime) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
// Add file to filtered list
|
||||
filteredFiles = append(filteredFiles, file)
|
||||
}
|
||||
|
||||
pod.SrcDir = srcDir
|
||||
pod.Files = filteredFiles
|
||||
|
||||
return errors.Join(fileProcessingErrs...)
|
||||
}
|
||||
|
||||
// copyFileFromPod copies a single file from a pod to a local destination
|
||||
func copyFileFromPod(ctx context.Context, clientset *kubernetes.Clientset, config *rest.Config, pod *PodFileInfo, srcFile, destFile string) error {
|
||||
// Construct the complete path using /data, the node name, srcDir, and srcFile
|
||||
nodeName := pod.Pod.Spec.NodeName
|
||||
srcFilePath := filepath.Join("data", nodeName, srcDir, srcFile)
|
||||
|
||||
// Execute the `cat` command to read the file at the srcFilePath
|
||||
cmd := []string{"cat", srcFilePath}
|
||||
req := clientset.CoreV1().RESTClient().Post().
|
||||
Resource("pods").
|
||||
Name(pod.Pod.Name).
|
||||
Namespace(pod.Pod.Namespace).
|
||||
SubResource("exec").
|
||||
Param("container", "sniffer").
|
||||
Param("stdout", "true").
|
||||
Param("stderr", "true").
|
||||
Param("command", cmd[0]).
|
||||
Param("command", cmd[1])
|
||||
|
||||
exec, err := remotecommand.NewSPDYExecutor(config, "POST", req.URL())
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to initialize executor for pod %s in namespace %s: %w", pod.Pod.Name, pod.Pod.Namespace, err)
|
||||
}
|
||||
|
||||
// Create the local file to write the content to
|
||||
outFile, err := os.Create(destFile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create destination file: %w", err)
|
||||
}
|
||||
defer outFile.Close()
|
||||
|
||||
// Capture stderr for error logging
|
||||
var stderrBuf bytes.Buffer
|
||||
|
||||
// Stream the file content from the pod to the local file
|
||||
err = exec.StreamWithContext(ctx, remotecommand.StreamOptions{
|
||||
Stdout: outFile,
|
||||
Stderr: &stderrBuf,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func mergePCAPs(outputFile string, inputFiles []string) error {
|
||||
// Create the output file
|
||||
f, err := os.Create(outputFile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create output file: %w", err)
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
bufWriter := bufio.NewWriterSize(f, 4*1024*1024)
|
||||
defer bufWriter.Flush()
|
||||
|
||||
// Create the PCAP writer
|
||||
writer := pcapgo.NewWriter(bufWriter)
|
||||
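// Link type 1 is LINKTYPE_ETHERNET, the link type assumed for the merged capture.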
err = writer.WriteFileHeader(maxSnaplen, 1)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to write PCAP file header: %w", err)
|
||||
}
|
||||
|
||||
var mergingErrs []error
|
||||
|
||||
for _, inputFile := range inputFiles {
|
||||
// Open the input file
|
||||
file, err := os.Open(inputFile)
|
||||
if err != nil {
|
||||
mergingErrs = append(mergingErrs, fmt.Errorf("failed to open %s: %w", inputFile, err))
|
||||
continue
|
||||
}
|
||||
|
||||
fileInfo, err := file.Stat()
|
||||
if err != nil {
|
||||
mergingErrs = append(mergingErrs, fmt.Errorf("failed to stat file %s: %w", inputFile, err))
|
||||
file.Close()
|
||||
continue
|
||||
}
|
||||
|
||||
if fileInfo.Size() == 0 {
|
||||
// Skip empty files
|
||||
log.Debug().Msgf("Skipped empty file: %s", inputFile)
|
||||
file.Close()
|
||||
continue
|
||||
}
|
||||
|
||||
// Create the PCAP reader for the input file
|
||||
reader, err := pcapgo.NewReader(file)
|
||||
if err != nil {
|
||||
mergingErrs = append(mergingErrs, fmt.Errorf("failed to create pcapng reader for %v: %w", file.Name(), err))
|
||||
file.Close()
|
||||
continue
|
||||
}
|
||||
|
||||
for {
|
||||
// Read packet data
|
||||
data, ci, err := reader.ReadPacketData()
|
||||
if err != nil {
|
||||
if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) {
|
||||
break
|
||||
}
|
||||
mergingErrs = append(mergingErrs, fmt.Errorf("error reading packet from file %s: %w", file.Name(), err))
|
||||
break
|
||||
}
|
||||
|
||||
// Write the packet to the output file
|
||||
err = writer.WritePacket(ci, data)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msgf("Error writing packet to output file")
|
||||
mergingErrs = append(mergingErrs, fmt.Errorf("error writing packet to output file: %w", err))
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
file.Close()
|
||||
}
|
||||
|
||||
log.Debug().Err(errors.Join(mergingErrs...)).Send()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func copyPcapFiles(clientset *kubernetes.Clientset, config *rest.Config, destDir string, cutoffTime *time.Time) error {
|
||||
// List all namespaces
|
||||
namespaceList, err := clientset.CoreV1().Namespaces().List(context.TODO(), metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var targetNamespaces []string
|
||||
for _, ns := range namespaceList.Items {
|
||||
targetNamespaces = append(targetNamespaces, ns.Name)
|
||||
}
|
||||
|
||||
// List all worker pods
|
||||
workerPods, err := listWorkerPods(context.Background(), clientset, targetNamespaces)
|
||||
if err != nil {
|
||||
if len(workerPods) == 0 {
|
||||
return err
|
||||
}
|
||||
log.Debug().Err(err).Msg("error while listing worker pods")
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
|
||||
// Launch a goroutine for each pod
|
||||
for _, pod := range workerPods {
|
||||
wg.Add(1)
|
||||
|
||||
go func(pod *PodFileInfo) {
|
||||
defer wg.Done()
|
||||
|
||||
// List files for the current pod
|
||||
err := listFilesInPodDir(context.Background(), clientset, config, pod, cutoffTime)
|
||||
if err != nil {
|
||||
log.Debug().Err(err).Msgf("error listing files in pod %s", pod.Pod.Name)
|
||||
return
|
||||
}
|
||||
|
||||
// Copy files from the pod
|
||||
for _, file := range pod.Files {
|
||||
destFile := filepath.Join(destDir, file)
|
||||
|
||||
// Add a timeout context for file copy
|
||||
ctx, cancel := context.WithTimeout(context.Background(), maxTimePerFile)
|
||||
err := copyFileFromPod(ctx, clientset, config, pod, file, destFile)
|
||||
cancel()
|
||||
if err != nil {
|
||||
log.Debug().Err(err).Msgf("error copying file %s from pod %s in namespace %s", file, pod.Pod.Name, pod.Pod.Namespace)
|
||||
continue
|
||||
}
|
||||
|
||||
log.Info().Msgf("Copied file %s from pod %s to %s", file, pod.Pod.Name, destFile)
|
||||
pod.CopiedFiles = append(pod.CopiedFiles, destFile)
|
||||
}
|
||||
}(pod)
|
||||
}
|
||||
|
||||
// Wait for all goroutines to complete
|
||||
wg.Wait()
|
||||
|
||||
var copiedFiles []string
|
||||
for _, pod := range workerPods {
|
||||
copiedFiles = append(copiedFiles, pod.CopiedFiles...)
|
||||
}
|
||||
|
||||
if len(copiedFiles) == 0 {
|
||||
log.Info().Msg("No pcaps available to copy on the workers")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Generate a temporary filename for the merged file
|
||||
tempMergedFile := copiedFiles[0] + "_temp"
|
||||
|
||||
// Merge PCAP files
|
||||
err = mergePCAPs(tempMergedFile, copiedFiles)
|
||||
if err != nil {
|
||||
os.Remove(tempMergedFile)
|
||||
return fmt.Errorf("error merging files: %w", err)
|
||||
}
|
||||
|
||||
// Remove the original files after merging
|
||||
for _, file := range copiedFiles {
|
||||
if err = os.Remove(file); err != nil {
|
||||
log.Debug().Err(err).Msgf("error removing file %s", file)
|
||||
}
|
||||
}
|
||||
|
||||
clusterID, err := getClusterID(clientset)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get cluster ID: %w", err)
|
||||
}
|
||||
timestamp := time.Now().Format("2006-01-02_15-04")
|
||||
// Rename the temp file to the final name
|
||||
finalMergedFile := filepath.Join(destDir, fmt.Sprintf("%s-%s.pcap", clusterID, timestamp))
|
||||
err = os.Rename(tempMergedFile, finalMergedFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Info().Msgf("Merged file created: %s", finalMergedFile)
|
||||
return nil
|
||||
}
|
||||
|
||||
func getClusterID(clientset *kubernetes.Clientset) (string, error) {
|
||||
namespace, err := clientset.CoreV1().Namespaces().Get(context.TODO(), "kube-system", metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to get kube-system namespace UID: %w", err)
|
||||
}
|
||||
return string(namespace.UID), nil
|
||||
}
|
||||
cmd/pprof.go (new file, 32 lines)
@@ -0,0 +1,32 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"github.com/creasty/defaults"
|
||||
"github.com/kubeshark/kubeshark/config/configStructs"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var pprofCmd = &cobra.Command{
|
||||
Use: "pprof",
|
||||
Short: "Select a Kubeshark container and open the pprof web UI in the browser",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
runPprof()
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(pprofCmd)
|
||||
|
||||
defaultTapConfig := configStructs.TapConfig{}
|
||||
if err := defaults.Set(&defaultTapConfig); err != nil {
|
||||
log.Debug().Err(err).Send()
|
||||
}
|
||||
|
||||
pprofCmd.Flags().Uint16(configStructs.ProxyFrontPortLabel, defaultTapConfig.Proxy.Front.Port, "Provide a custom port for the proxy/port-forward")
|
||||
pprofCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the proxy/port-forward")
|
||||
pprofCmd.Flags().StringP(configStructs.ReleaseNamespaceLabel, "s", defaultTapConfig.Release.Namespace, "Release namespace of Kubeshark")
|
||||
pprofCmd.Flags().Uint16(configStructs.PprofPortLabel, defaultTapConfig.Pprof.Port, "Provide a custom port for the pprof server")
|
||||
pprofCmd.Flags().String(configStructs.PprofViewLabel, defaultTapConfig.Pprof.View, "Change the default view of the pprof web interface")
|
||||
}
|
||||
cmd/pprofRunner.go (new file, 176 lines)
@@ -0,0 +1,176 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/go-cmd/cmd"
|
||||
"github.com/kubeshark/kubeshark/config"
|
||||
"github.com/kubeshark/kubeshark/kubernetes"
|
||||
"github.com/kubeshark/kubeshark/utils"
|
||||
"github.com/rivo/tview"
|
||||
"github.com/rs/zerolog/log"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
func runPprof() {
|
||||
runProxy(false, true)
|
||||
|
||||
provider, err := getKubernetesProviderForCli(false, false)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
hubPods, err := provider.ListPodsByAppLabel(ctx, config.Config.Tap.Release.Namespace, map[string]string{kubernetes.AppLabelKey: "hub"})
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Err(err).
|
||||
Msg("Failed to list hub pods!")
|
||||
cancel()
|
||||
return
|
||||
}
|
||||
|
||||
workerPods, err := provider.ListPodsByAppLabel(ctx, config.Config.Tap.Release.Namespace, map[string]string{kubernetes.AppLabelKey: "worker"})
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Err(err).
|
||||
Msg("Failed to list worker pods!")
|
||||
cancel()
|
||||
return
|
||||
}
|
||||
|
||||
fullscreen := true
|
||||
|
||||
app := tview.NewApplication()
|
||||
list := tview.NewList()
|
||||
|
||||
var currentCmd *cmd.Cmd
|
||||
|
||||
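// 48 is the rune value of '0'; the list shortcut keys start at '0' and increment from there.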
i := 48
|
||||
for _, pod := range hubPods {
|
||||
for _, container := range pod.Spec.Containers {
|
||||
log.Info().Str("pod", pod.Name).Str("container", container.Name).Send()
|
||||
homeUrl := fmt.Sprintf("%s/debug/pprof/", kubernetes.GetHubUrl())
|
||||
modal := buildNewModal(
|
||||
pod,
|
||||
container,
|
||||
homeUrl,
|
||||
app,
|
||||
list,
|
||||
fullscreen,
|
||||
currentCmd,
|
||||
)
|
||||
list.AddItem(fmt.Sprintf("pod: %s container: %s", pod.Name, container.Name), pod.Spec.NodeName, rune(i), func() {
|
||||
app.SetRoot(modal, fullscreen)
|
||||
})
|
||||
i++
|
||||
}
|
||||
}
|
||||
|
||||
for _, pod := range workerPods {
|
||||
for _, container := range pod.Spec.Containers {
|
||||
log.Info().Str("pod", pod.Name).Str("container", container.Name).Send()
|
||||
homeUrl := fmt.Sprintf("%s/pprof/%s/%s/", kubernetes.GetHubUrl(), pod.Status.HostIP, container.Name)
|
||||
modal := buildNewModal(
|
||||
pod,
|
||||
container,
|
||||
homeUrl,
|
||||
app,
|
||||
list,
|
||||
fullscreen,
|
||||
currentCmd,
|
||||
)
|
||||
list.AddItem(fmt.Sprintf("pod: %s container: %s", pod.Name, container.Name), pod.Spec.NodeName, rune(i), func() {
|
||||
app.SetRoot(modal, fullscreen)
|
||||
})
|
||||
i++
|
||||
}
|
||||
}
|
||||
|
||||
list.AddItem("Quit", "Press to exit", 'q', func() {
|
||||
if currentCmd != nil {
|
||||
err = currentCmd.Stop()
|
||||
if err != nil {
|
||||
log.Error().Err(err).Str("name", currentCmd.Name).Msg("Failed to stop process!")
|
||||
}
|
||||
}
|
||||
app.Stop()
|
||||
})
|
||||
|
||||
if err := app.SetRoot(list, fullscreen).EnableMouse(true).Run(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func buildNewModal(
|
||||
pod v1.Pod,
|
||||
container v1.Container,
|
||||
homeUrl string,
|
||||
app *tview.Application,
|
||||
list *tview.List,
|
||||
fullscreen bool,
|
||||
currentCmd *cmd.Cmd,
|
||||
) *tview.Modal {
|
||||
return tview.NewModal().
|
||||
SetText(fmt.Sprintf("pod: %s container: %s", pod.Name, container.Name)).
|
||||
AddButtons([]string{
|
||||
"Open Debug Home Page",
|
||||
"Profile: CPU",
|
||||
"Profile: Memory",
|
||||
"Profile: Goroutine",
|
||||
"Cancel",
|
||||
}).
|
||||
SetDoneFunc(func(buttonIndex int, buttonLabel string) {
|
||||
var err error
|
||||
port := fmt.Sprintf(":%d", config.Config.Tap.Pprof.Port)
|
||||
view := fmt.Sprintf("http://localhost%s/ui/%s", port, config.Config.Tap.Pprof.View)
|
||||
|
||||
switch buttonLabel {
|
||||
case "Open Debug Home Page":
|
||||
utils.OpenBrowser(homeUrl)
|
||||
case "Profile: CPU":
|
||||
if currentCmd != nil {
|
||||
err = currentCmd.Stop()
|
||||
if err != nil {
|
||||
log.Error().Err(err).Str("name", currentCmd.Name).Msg("Failed to stop process!")
|
||||
}
|
||||
}
|
||||
currentCmd = cmd.NewCmd("go", "tool", "pprof", "-http", port, "-no_browser", fmt.Sprintf("%sprofile", homeUrl))
|
||||
currentCmd.Start()
|
||||
utils.OpenBrowser(view)
|
||||
case "Profile: Memory":
|
||||
if currentCmd != nil {
|
||||
err = currentCmd.Stop()
|
||||
if err != nil {
|
||||
log.Error().Err(err).Str("name", currentCmd.Name).Msg("Failed to stop process!")
|
||||
}
|
||||
}
|
||||
currentCmd = cmd.NewCmd("go", "tool", "pprof", "-http", port, "-no_browser", fmt.Sprintf("%sheap", homeUrl))
|
||||
currentCmd.Start()
|
||||
utils.OpenBrowser(view)
|
||||
case "Profile: Goroutine":
|
||||
if currentCmd != nil {
|
||||
err = currentCmd.Stop()
|
||||
if err != nil {
|
||||
log.Error().Err(err).Str("name", currentCmd.Name).Msg("Failed to stop process!")
|
||||
}
|
||||
}
|
||||
currentCmd = cmd.NewCmd("go", "tool", "pprof", "-http", port, "-no_browser", fmt.Sprintf("%sgoroutine", homeUrl))
|
||||
currentCmd.Start()
|
||||
utils.OpenBrowser(view)
|
||||
case "Cancel":
|
||||
if currentCmd != nil {
|
||||
err = currentCmd.Stop()
|
||||
if err != nil {
|
||||
log.Error().Err(err).Str("name", currentCmd.Name).Msg("Failed to stop process!")
|
||||
}
|
||||
}
|
||||
fallthrough
|
||||
default:
|
||||
app.SetRoot(list, fullscreen)
|
||||
}
|
||||
})
|
||||
}
|
||||
cmd/pro.go (deleted, 125 lines)
@@ -1,125 +0,0 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/creasty/defaults"
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/kubeshark/kubeshark/config"
|
||||
"github.com/kubeshark/kubeshark/config/configStructs"
|
||||
"github.com/kubeshark/kubeshark/internal/connect"
|
||||
"github.com/kubeshark/kubeshark/kubernetes"
|
||||
"github.com/kubeshark/kubeshark/utils"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var proCmd = &cobra.Command{
|
||||
Use: "pro",
|
||||
Short: "Acquire a Pro license",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
acquireLicense()
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
const (
|
||||
PRO_URL = "https://console.kubeshark.co/cli"
|
||||
PRO_PORT = 5252
|
||||
)
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(proCmd)
|
||||
|
||||
defaultTapConfig := configStructs.TapConfig{}
|
||||
if err := defaults.Set(&defaultTapConfig); err != nil {
|
||||
log.Debug().Err(err).Send()
|
||||
}
|
||||
|
||||
proCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.Port, "Provide a custom port for the Hub")
|
||||
proCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the Hub")
|
||||
}
|
||||
|
||||
func acquireLicense() {
|
||||
hubUrl := kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Hub.Port)
|
||||
response, err := http.Get(fmt.Sprintf("%s/echo", hubUrl))
|
||||
if err != nil || response.StatusCode != 200 {
|
||||
log.Info().Msg(fmt.Sprintf(utils.Yellow, "Couldn't connect to Hub. Establishing proxy..."))
|
||||
runProxy(false, true)
|
||||
}
|
||||
|
||||
connector = connect.NewConnector(kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Hub.Port), connect.DefaultRetries, connect.DefaultTimeout)
|
||||
|
||||
log.Info().Str("url", PRO_URL).Msg("Opening in the browser:")
|
||||
utils.OpenBrowser(PRO_URL)
|
||||
|
||||
runLicenseRecieverServer()
|
||||
}
|
||||
|
||||
func updateLicense(licenseKey string) {
|
||||
log.Info().Str("key", licenseKey).Msg("Received license:")
|
||||
|
||||
config.Config.License = licenseKey
|
||||
err := config.WriteConfig(&config.Config)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Send()
|
||||
}
|
||||
|
||||
connector.PostLicenseSingle(config.Config.License)
|
||||
|
||||
log.Info().Msg("Updated the license. Exiting.")
|
||||
|
||||
go func() {
|
||||
time.Sleep(2 * time.Second)
|
||||
os.Exit(0)
|
||||
}()
|
||||
}
|
||||
|
||||
func runLicenseRecieverServer() {
|
||||
gin.SetMode(gin.ReleaseMode)
|
||||
ginApp := gin.New()
|
||||
ginApp.Use(func(c *gin.Context) {
|
||||
c.Writer.Header().Set("Access-Control-Allow-Origin", "*")
|
||||
c.Writer.Header().Set("Access-Control-Allow-Credentials", "true")
|
||||
c.Writer.Header().Set("Access-Control-Allow-Headers", "Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization, accept, origin, Cache-Control, X-Requested-With, x-session-token")
|
||||
c.Writer.Header().Set("Access-Control-Allow-Methods", "POST, OPTIONS, GET, PUT, DELETE")
|
||||
c.Writer.Header().Set("Access-Control-Expose-Headers", "Content-Disposition")
|
||||
|
||||
if c.Request.Method == "OPTIONS" {
|
||||
c.AbortWithStatus(http.StatusNoContent)
|
||||
return
|
||||
}
|
||||
|
||||
c.Next()
|
||||
})
|
||||
|
||||
ginApp.POST("/", func(c *gin.Context) {
|
||||
data, err := io.ReadAll(c.Request.Body)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Send()
|
||||
c.AbortWithStatus(http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
licenseKey := string(data)
|
||||
|
||||
updateLicense(licenseKey)
|
||||
})
|
||||
|
||||
go func() {
|
||||
if err := ginApp.Run(fmt.Sprintf(":%d", PRO_PORT)); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}()
|
||||
|
||||
log.Info().Msg("Alternatively enter your license key:")
|
||||
|
||||
var licenseKey string
|
||||
fmt.Scanf("%s", &licenseKey)
|
||||
|
||||
updateLicense(licenseKey)
|
||||
}
|
||||
@@ -24,8 +24,7 @@ func init() {
		log.Debug().Err(err).Send()
	}

	proxyCmd.Flags().Uint16(configStructs.ProxyFrontPortLabel, defaultTapConfig.Proxy.Front.Port, "Provide a custom port for the front-end proxy/port-forward")
	proxyCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.Port, "Provide a custom port for the Hub proxy/port-forward")
	proxyCmd.Flags().Uint16(configStructs.ProxyFrontPortLabel, defaultTapConfig.Proxy.Front.Port, "Provide a custom port for the proxy/port-forward")
	proxyCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the proxy/port-forward")
	proxyCmd.Flags().StringP(configStructs.ReleaseNamespaceLabel, "s", defaultTapConfig.Release.Namespace, "Release namespace of Kubeshark")
}

@@ -63,38 +63,8 @@ func runProxy(block bool, noBrowser bool) {
|
||||
|
||||
var establishedProxy bool
|
||||
|
||||
hubUrl := kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Hub.Port)
|
||||
response, err := http.Get(fmt.Sprintf("%s/echo", hubUrl))
|
||||
if err == nil && response.StatusCode == 200 {
|
||||
log.Info().
|
||||
Str("service", kubernetes.HubServiceName).
|
||||
Int("port", int(config.Config.Tap.Proxy.Hub.Port)).
|
||||
Msg("Found a running service.")
|
||||
|
||||
okToOpen("Hub", hubUrl, true)
|
||||
} else {
|
||||
startProxyReportErrorIfAny(
|
||||
kubernetesProvider,
|
||||
ctx,
|
||||
kubernetes.HubServiceName,
|
||||
kubernetes.HubPodName,
|
||||
configStructs.ProxyHubPortLabel,
|
||||
config.Config.Tap.Proxy.Hub.Port,
|
||||
configStructs.ContainerPort,
|
||||
"/echo",
|
||||
)
|
||||
connector := connect.NewConnector(hubUrl, connect.DefaultRetries, connect.DefaultTimeout)
|
||||
if err := connector.TestConnection("/echo"); err != nil {
|
||||
log.Error().Msg(fmt.Sprintf(utils.Red, "Couldn't connect to Hub."))
|
||||
return
|
||||
}
|
||||
|
||||
establishedProxy = true
|
||||
okToOpen("Hub", hubUrl, true)
|
||||
}
|
||||
|
||||
frontUrl := kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Front.Port)
|
||||
response, err = http.Get(fmt.Sprintf("%s/", frontUrl))
|
||||
response, err := http.Get(fmt.Sprintf("%s/", frontUrl))
|
||||
if err == nil && response.StatusCode == 200 {
|
||||
log.Info().
|
||||
Str("service", kubernetes.FrontServiceName).
|
||||
@@ -122,10 +92,10 @@ func runProxy(block bool, noBrowser bool) {
|
||||
establishedProxy = true
|
||||
okToOpen("Kubeshark", frontUrl, noBrowser)
|
||||
}
|
||||
|
||||
if establishedProxy && block {
|
||||
utils.WaitForTermination(ctx, cancel)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func okToOpen(name string, url string, noBrowser bool) {
|
||||
|
||||
@@ -33,6 +33,7 @@ func init() {

	rootCmd.PersistentFlags().StringSlice(config.SetCommandName, []string{}, fmt.Sprintf("Override values using --%s", config.SetCommandName))
	rootCmd.PersistentFlags().BoolP(config.DebugFlag, "d", false, "Enable debug mode")
	rootCmd.PersistentFlags().String(config.ConfigPathFlag, "", fmt.Sprintf("Set the config path, default: %s", config.GetConfigFilePath(nil)))
}

// Execute adds all child commands to the root command and sets flags appropriately.

288
cmd/scripts.go
@@ -2,24 +2,30 @@ package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"os"
|
||||
"os/signal"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/creasty/defaults"
|
||||
"github.com/fsnotify/fsnotify"
|
||||
"github.com/kubeshark/kubeshark/config"
|
||||
"github.com/kubeshark/kubeshark/config/configStructs"
|
||||
"github.com/kubeshark/kubeshark/internal/connect"
|
||||
"github.com/kubeshark/kubeshark/kubernetes"
|
||||
"github.com/kubeshark/kubeshark/misc"
|
||||
"github.com/kubeshark/kubeshark/utils"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/spf13/cobra"
|
||||
k8serrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
)
|
||||
|
||||
var scriptsCmd = &cobra.Command{
|
||||
Use: "scripts",
|
||||
Short: "Watch the `scripting.source` directory for changes and update the scripts",
|
||||
Short: "Watch the `scripting.source` and/or `scripting.sources` folders for changes and update the scripts",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
runScripts()
|
||||
return nil
|
||||
@@ -34,30 +40,151 @@ func init() {
|
||||
log.Debug().Err(err).Send()
|
||||
}
|
||||
|
||||
scriptsCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.Port, "Provide a custom port for the Hub")
|
||||
scriptsCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the Hub")
|
||||
scriptsCmd.Flags().Uint16(configStructs.ProxyFrontPortLabel, defaultTapConfig.Proxy.Front.Port, "Provide a custom port for the Kubeshark")
|
||||
scriptsCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the Kubeshark")
|
||||
scriptsCmd.Flags().StringP(configStructs.ReleaseNamespaceLabel, "s", defaultTapConfig.Release.Namespace, "Release namespace of Kubeshark")
|
||||
}
|
||||
|
||||
func runScripts() {
|
||||
if config.Config.Scripting.Source == "" {
|
||||
log.Error().Msg("`scripting.source` field is empty.")
|
||||
if config.Config.Scripting.Source == "" && len(config.Config.Scripting.Sources) == 0 {
|
||||
log.Error().Msg("Both `scripting.source` and `scripting.sources` fields are empty.")
|
||||
return
|
||||
}
|
||||
|
||||
hubUrl := kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Hub.Port)
|
||||
response, err := http.Get(fmt.Sprintf("%s/echo", hubUrl))
|
||||
if err != nil || response.StatusCode != 200 {
|
||||
log.Info().Msg(fmt.Sprintf(utils.Yellow, "Couldn't connect to Hub. Establishing proxy..."))
|
||||
runProxy(false, true)
|
||||
kubernetesProvider, err := getKubernetesProviderForCli(false, false)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Send()
|
||||
return
|
||||
}
|
||||
|
||||
connector = connect.NewConnector(kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Hub.Port), connect.DefaultRetries, connect.DefaultTimeout)
|
||||
var wg sync.WaitGroup
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
signalChan := make(chan os.Signal, 1)
|
||||
signal.Notify(signalChan, os.Interrupt)
|
||||
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
watchConfigMap(ctx, kubernetesProvider)
|
||||
}()
|
||||
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
watchScripts(ctx, kubernetesProvider, true)
|
||||
}()
|
||||
|
||||
go func() {
|
||||
<-signalChan
|
||||
log.Debug().Msg("Received interrupt, stopping watchers.")
|
||||
cancel()
|
||||
}()
|
||||
|
||||
wg.Wait()
|
||||
|
||||
watchScripts(true)
|
||||
}
|
||||
|
||||
func watchScripts(block bool) {
|
||||
func createScript(provider *kubernetes.Provider, script misc.ConfigMapScript) (index int64, err error) {
|
||||
const maxRetries = 5
|
||||
var scripts map[int64]misc.ConfigMapScript
|
||||
|
||||
for i := 0; i < maxRetries; i++ {
|
||||
scripts, err = kubernetes.ConfigGetScripts(provider)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
script.Active = kubernetes.IsActiveScript(provider, script.Title)
|
||||
index = 0
|
||||
if script.Title != "New Script" {
|
||||
for i, v := range scripts {
|
||||
if index <= i {
|
||||
index = i + 1
|
||||
}
|
||||
if v.Title == script.Title {
|
||||
index = int64(i)
|
||||
}
|
||||
}
|
||||
}
|
||||
scripts[index] = script
|
||||
|
||||
log.Info().Str("title", script.Title).Bool("Active", script.Active).Int64("Index", index).Msg("Creating script")
|
||||
var data []byte
|
||||
data, err = json.Marshal(scripts)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
_, err = kubernetes.SetConfig(provider, kubernetes.CONFIG_SCRIPTING_SCRIPTS, string(data))
|
||||
if err == nil {
|
||||
return index, nil
|
||||
}
|
||||
|
||||
if k8serrors.IsConflict(err) {
|
||||
log.Debug().Err(err).Msg("Conflict detected, retrying update...")
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
continue
|
||||
}
|
||||
|
||||
return 0, err
|
||||
}
|
||||
|
||||
log.Error().Msg("Max retries reached for creating script due to conflicts.")
|
||||
return 0, errors.New("max retries reached due to conflicts while creating script")
|
||||
}
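createScript above resolves write conflicts on the scripts ConfigMap by re-reading and retrying. For reference, a minimal, self-contained sketch of the same optimistic-concurrency pattern using client-go directly; the function name, clientset wiring and retry budget are illustrative assumptions, not part of this change:

package scripts

import (
	"context"
	"fmt"
	"time"

	k8serrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// updateConfigMapKeyWithRetry re-reads the ConfigMap on every attempt so that a
// conflict caused by a stale resourceVersion is resolved against fresh data,
// mirroring the IsConflict/retry loop in createScript.
func updateConfigMapKeyWithRetry(ctx context.Context, clientset kubernetes.Interface, namespace, name, key, value string, maxRetries int) error {
	for i := 0; i < maxRetries; i++ {
		cm, err := clientset.CoreV1().ConfigMaps(namespace).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		if cm.Data == nil {
			cm.Data = map[string]string{}
		}
		cm.Data[key] = value

		_, err = clientset.CoreV1().ConfigMaps(namespace).Update(ctx, cm, metav1.UpdateOptions{})
		if err == nil {
			return nil
		}
		if !k8serrors.IsConflict(err) {
			return err
		}
		time.Sleep(500 * time.Millisecond)
	}
	return fmt.Errorf("max retries reached due to conflicts while updating %s/%s", namespace, name)
}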
|
||||
|
||||
func updateScript(provider *kubernetes.Provider, index int64, script misc.ConfigMapScript) (err error) {
|
||||
var scripts map[int64]misc.ConfigMapScript
|
||||
scripts, err = kubernetes.ConfigGetScripts(provider)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
script.Active = kubernetes.IsActiveScript(provider, script.Title)
|
||||
scripts[index] = script
|
||||
|
||||
var data []byte
|
||||
data, err = json.Marshal(scripts)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
_, err = kubernetes.SetConfig(provider, kubernetes.CONFIG_SCRIPTING_SCRIPTS, string(data))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func deleteScript(provider *kubernetes.Provider, index int64) (err error) {
|
||||
var scripts map[int64]misc.ConfigMapScript
|
||||
scripts, err = kubernetes.ConfigGetScripts(provider)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = kubernetes.DeleteActiveScriptByTitle(provider, scripts[index].Title)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
delete(scripts, index)
|
||||
|
||||
var data []byte
|
||||
data, err = json.Marshal(scripts)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
_, err = kubernetes.SetConfig(provider, kubernetes.CONFIG_SCRIPTING_SCRIPTS, string(data))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func watchScripts(ctx context.Context, provider *kubernetes.Provider, block bool) {
|
||||
files := make(map[string]int64)
|
||||
|
||||
scripts, err := config.Config.Scripting.GetScripts()
|
||||
@@ -67,10 +194,10 @@ func watchScripts(block bool) {
|
||||
}
|
||||
|
||||
for _, script := range scripts {
|
||||
index, err := connector.PostScript(script)
|
||||
index, err := createScript(provider, script.ConfigMap())
|
||||
if err != nil {
|
||||
log.Error().Err(err).Send()
|
||||
return
|
||||
continue
|
||||
}
|
||||
|
||||
files[script.Path] = index
|
||||
@@ -85,11 +212,37 @@ func watchScripts(block bool) {
|
||||
defer watcher.Close()
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
signalChan := make(chan os.Signal, 1)
|
||||
signal.Notify(signalChan, os.Interrupt)
|
||||
|
||||
go func() {
|
||||
<-signalChan
|
||||
log.Debug().Msg("Received interrupt, stopping script watch.")
|
||||
cancel()
|
||||
watcher.Close()
|
||||
}()
|
||||
|
||||
if err := watcher.Add(config.Config.Scripting.Source); err != nil {
|
||||
log.Error().Err(err).Msg("Failed to add scripting source to watcher")
|
||||
return
|
||||
}
|
||||
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
log.Debug().Msg("Script watcher exiting gracefully.")
|
||||
return
|
||||
|
||||
// watch for events
|
||||
case event := <-watcher.Events:
|
||||
if !strings.HasSuffix(event.Name, "js") {
|
||||
log.Info().Str("file", event.Name).Msg("Ignoring file")
|
||||
continue
|
||||
}
|
||||
switch event.Op {
|
||||
case fsnotify.Create:
|
||||
script, err := misc.ReadScriptFile(event.Name)
|
||||
@@ -98,7 +251,7 @@ func watchScripts(block bool) {
|
||||
continue
|
||||
}
|
||||
|
||||
index, err := connector.PostScript(script)
|
||||
index, err := createScript(provider, script.ConfigMap())
|
||||
if err != nil {
|
||||
log.Error().Err(err).Send()
|
||||
continue
|
||||
@@ -114,7 +267,7 @@ func watchScripts(block bool) {
|
||||
continue
|
||||
}
|
||||
|
||||
err = connector.PutScript(script, index)
|
||||
err = updateScript(provider, index, script.ConfigMap())
|
||||
if err != nil {
|
||||
log.Error().Err(err).Send()
|
||||
continue
|
||||
@@ -122,7 +275,7 @@ func watchScripts(block bool) {
|
||||
|
||||
case fsnotify.Rename:
|
||||
index := files[event.Name]
|
||||
err := connector.DeleteScript(index)
|
||||
err := deleteScript(provider, index)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Send()
|
||||
continue
|
||||
@@ -132,9 +285,12 @@ func watchScripts(block bool) {
|
||||
// pass
|
||||
}
|
||||
|
||||
// watch for errors
|
||||
case err := <-watcher.Errors:
|
||||
log.Error().Err(err).Send()
|
||||
case err, ok := <-watcher.Errors:
|
||||
if !ok {
|
||||
log.Info().Msg("Watcher errors channel closed.")
|
||||
return
|
||||
}
|
||||
log.Error().Err(err).Msg("Watcher error encountered")
|
||||
}
|
||||
}
|
||||
}()
|
||||
@@ -143,11 +299,85 @@ func watchScripts(block bool) {
|
||||
log.Error().Err(err).Send()
|
||||
}
|
||||
|
||||
log.Info().Str("directory", config.Config.Scripting.Source).Msg("Watching scripts against changes:")
|
||||
for _, source := range config.Config.Scripting.Sources {
|
||||
if err := watcher.Add(source); err != nil {
|
||||
log.Error().Err(err).Send()
|
||||
}
|
||||
}
|
||||
|
||||
log.Info().Str("folder", config.Config.Scripting.Source).Interface("folders", config.Config.Scripting.Sources).Msg("Watching scripts against changes:")
|
||||
|
||||
if block {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
utils.WaitForTermination(ctx, cancel)
|
||||
<-ctx.Done()
|
||||
}
|
||||
}
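The watcher loop above mixes fsnotify events, error-channel handling and context cancellation. A stripped-down sketch of the same pattern, with the folder path and change handler left as placeholders (an illustration, not code from the change set):

package scripts

import (
	"context"
	"strings"

	"github.com/fsnotify/fsnotify"
	"github.com/rs/zerolog/log"
)

// watchFolder reacts to created or modified .js files in a single folder and
// stops cleanly when the context is cancelled or the watcher channels close.
func watchFolder(ctx context.Context, folder string, onChange func(path string)) error {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		return err
	}
	defer watcher.Close()

	if err := watcher.Add(folder); err != nil {
		return err
	}

	for {
		select {
		case <-ctx.Done():
			return nil
		case event, ok := <-watcher.Events:
			if !ok {
				return nil
			}
			if !strings.HasSuffix(event.Name, ".js") {
				continue // only script files are of interest
			}
			if event.Op&(fsnotify.Create|fsnotify.Write) != 0 {
				onChange(event.Name)
			}
		case err, ok := <-watcher.Errors:
			if !ok {
				return nil
			}
			log.Error().Err(err).Msg("Watcher error encountered")
		}
	}
}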
|
||||
|
||||
func watchConfigMap(ctx context.Context, provider *kubernetes.Provider) {
|
||||
clientset := provider.GetClientSet()
|
||||
configMapName := kubernetes.SELF_RESOURCES_PREFIX + kubernetes.SUFFIX_CONFIG_MAP
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
log.Info().Msg("ConfigMap watcher exiting gracefully.")
|
||||
return
|
||||
|
||||
default:
|
||||
watcher, err := clientset.CoreV1().ConfigMaps(config.Config.Tap.Release.Namespace).Watch(context.TODO(), metav1.ListOptions{
|
||||
FieldSelector: "metadata.name=" + configMapName,
|
||||
})
|
||||
if err != nil {
|
||||
log.Warn().Err(err).Msg("ConfigMap not found, retrying in 5 seconds...")
|
||||
time.Sleep(5 * time.Second)
|
||||
continue
|
||||
}
|
||||
|
||||
// Create a goroutine to process events
|
||||
watcherClosed := make(chan struct{})
|
||||
go func() {
|
||||
defer close(watcherClosed)
|
||||
for event := range watcher.ResultChan() {
|
||||
if event.Type == watch.Added {
|
||||
log.Info().Msg("ConfigMap created or modified")
|
||||
runScriptsSync(provider)
|
||||
} else if event.Type == watch.Deleted {
|
||||
log.Warn().Msg("ConfigMap deleted, waiting for recreation...")
|
||||
break
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// Wait for either context cancellation or watcher completion
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
watcher.Stop()
|
||||
log.Info().Msg("ConfigMap watcher stopping due to context cancellation")
|
||||
return
|
||||
case <-watcherClosed:
|
||||
log.Info().Msg("Watcher closed, restarting...")
|
||||
}
|
||||
|
||||
time.Sleep(5 * time.Second)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func runScriptsSync(provider *kubernetes.Provider) {
|
||||
files := make(map[string]int64)
|
||||
|
||||
scripts, err := config.Config.Scripting.GetScripts()
|
||||
if err != nil {
|
||||
log.Error().Err(err).Send()
|
||||
return
|
||||
}
|
||||
|
||||
for _, script := range scripts {
|
||||
index, err := createScript(provider, script.ConfigMap())
|
||||
if err != nil {
|
||||
log.Error().Err(err).Send()
|
||||
continue
|
||||
}
|
||||
files[script.Path] = index
|
||||
}
|
||||
log.Info().Msg("Synchronized scripts with ConfigMap.")
|
||||
}
|
||||
|
||||
12
cmd/tap.go
@@ -2,13 +2,11 @@ package cmd
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/creasty/defaults"
|
||||
"github.com/kubeshark/kubeshark/config"
|
||||
"github.com/kubeshark/kubeshark/config/configStructs"
|
||||
"github.com/kubeshark/kubeshark/errormessage"
|
||||
"github.com/kubeshark/kubeshark/misc"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
@@ -47,19 +45,21 @@ func init() {
|
||||
tapCmd.Flags().StringP(configStructs.DockerTagLabel, "t", defaultTapConfig.Docker.Tag, "The tag of the Docker images that are going to be pulled")
|
||||
tapCmd.Flags().String(configStructs.DockerImagePullPolicy, defaultTapConfig.Docker.ImagePullPolicy, "ImagePullPolicy for the Docker images")
|
||||
tapCmd.Flags().StringSlice(configStructs.DockerImagePullSecrets, defaultTapConfig.Docker.ImagePullSecrets, "ImagePullSecrets for the Docker images")
|
||||
tapCmd.Flags().Uint16(configStructs.ProxyFrontPortLabel, defaultTapConfig.Proxy.Front.Port, "Provide a custom port for the front-end proxy/port-forward")
|
||||
tapCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.Port, "Provide a custom port for the Hub proxy/port-forward")
|
||||
tapCmd.Flags().Uint16(configStructs.ProxyFrontPortLabel, defaultTapConfig.Proxy.Front.Port, "Provide a custom port for the proxy/port-forward")
|
||||
tapCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the proxy/port-forward")
|
||||
tapCmd.Flags().StringSliceP(configStructs.NamespacesLabel, "n", defaultTapConfig.Namespaces, "Namespaces selector")
|
||||
tapCmd.Flags().StringSliceP(configStructs.ExcludedNamespacesLabel, "e", defaultTapConfig.ExcludedNamespaces, "Excluded namespaces")
|
||||
tapCmd.Flags().StringP(configStructs.ReleaseNamespaceLabel, "s", defaultTapConfig.Release.Namespace, "Release namespace of Kubeshark")
|
||||
tapCmd.Flags().Bool(configStructs.PersistentStorageLabel, defaultTapConfig.PersistentStorage, "Enable persistent storage (PersistentVolumeClaim)")
|
||||
tapCmd.Flags().Bool(configStructs.PersistentStorageStaticLabel, defaultTapConfig.PersistentStorageStatic, "Persistent storage static provision")
|
||||
tapCmd.Flags().String(configStructs.EfsFileSytemIdAndPathLabel, defaultTapConfig.EfsFileSytemIdAndPath, "EFS file system ID")
|
||||
tapCmd.Flags().String(configStructs.StorageLimitLabel, defaultTapConfig.StorageLimit, "Override the default storage limit (per node)")
|
||||
tapCmd.Flags().String(configStructs.StorageClassLabel, defaultTapConfig.StorageClass, "Override the default storage class of the PersistentVolumeClaim (per node)")
|
||||
tapCmd.Flags().Bool(configStructs.DryRunLabel, defaultTapConfig.DryRun, "Preview of all pods matching the regex, without tapping them")
|
||||
tapCmd.Flags().StringP(configStructs.PcapLabel, "p", defaultTapConfig.Pcap, fmt.Sprintf("Capture from a PCAP snapshot of %s (.tar.gz) using your Docker Daemon instead of Kubernetes. TAR path from the file system or an S3 URI (object, folder or the bucket)", misc.Software))
|
||||
tapCmd.Flags().Bool(configStructs.ServiceMeshLabel, defaultTapConfig.ServiceMesh, "Capture the encrypted traffic if the cluster is configured with a service mesh and with mTLS")
|
||||
tapCmd.Flags().Bool(configStructs.TlsLabel, defaultTapConfig.Tls, "Capture the traffic that's encrypted with OpenSSL or Go crypto/tls libraries")
|
||||
tapCmd.Flags().Bool(configStructs.IgnoreTaintedLabel, defaultTapConfig.IgnoreTainted, "Ignore tainted pods while running Worker DaemonSet")
|
||||
tapCmd.Flags().Bool(configStructs.IngressEnabledLabel, defaultTapConfig.Ingress.Enabled, "Enable Ingress")
|
||||
tapCmd.Flags().Bool(configStructs.TelemetryEnabledLabel, defaultTapConfig.Telemetry.Enabled, "Enable/disable Telemetry")
|
||||
tapCmd.Flags().Bool(configStructs.ResourceGuardEnabledLabel, defaultTapConfig.ResourceGuard.Enabled, "Enable/disable resource guard")
|
||||
tapCmd.Flags().Bool(configStructs.WatchdogEnabled, defaultTapConfig.Watchdog.Enabled, "Enable/disable watchdog")
|
||||
}
|
||||
|
||||
@@ -1,539 +0,0 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bufio"
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/aws/aws-sdk-go-v2/aws"
|
||||
awsConfig "github.com/aws/aws-sdk-go-v2/config"
|
||||
"github.com/aws/aws-sdk-go-v2/feature/s3/manager"
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3"
|
||||
s3Types "github.com/aws/aws-sdk-go-v2/service/s3/types"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/docker/go-connections/nat"
|
||||
"github.com/kubeshark/kubeshark/config"
|
||||
"github.com/kubeshark/kubeshark/config/configStructs"
|
||||
"github.com/kubeshark/kubeshark/docker"
|
||||
"github.com/kubeshark/kubeshark/internal/connect"
|
||||
"github.com/kubeshark/kubeshark/kubernetes"
|
||||
"github.com/kubeshark/kubeshark/misc"
|
||||
"github.com/kubeshark/kubeshark/utils"
|
||||
"github.com/rs/zerolog/log"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
func logPullingImage(image string, reader io.ReadCloser) {
|
||||
scanner := bufio.NewScanner(reader)
|
||||
for scanner.Scan() {
|
||||
text := scanner.Text()
|
||||
var data map[string]interface{}
|
||||
if err := json.Unmarshal([]byte(text), &data); err != nil {
|
||||
log.Error().Err(err).Send()
|
||||
continue
|
||||
}
|
||||
|
||||
var id string
|
||||
if val, ok := data["id"]; ok {
|
||||
id = val.(string)
|
||||
}
|
||||
|
||||
var status string
|
||||
if val, ok := data["status"]; ok {
|
||||
status = val.(string)
|
||||
}
|
||||
|
||||
var progress string
|
||||
if val, ok := data["progress"]; ok {
|
||||
progress = val.(string)
|
||||
}
|
||||
|
||||
e := log.Info()
|
||||
if image != "" {
|
||||
e = e.Str("image", image)
|
||||
}
|
||||
|
||||
if progress != "" {
|
||||
e = e.Str("progress", progress)
|
||||
}
|
||||
|
||||
e.Msg(fmt.Sprintf("[%-12s] %-18s", id, status))
|
||||
}
|
||||
}
|
||||
|
||||
func pullImages(ctx context.Context, cli *client.Client, imageFront string, imageHub string, imageWorker string) error {
|
||||
log.Info().Msg("Pulling images...")
|
||||
readerFront, err := cli.ImagePull(ctx, imageFront, types.ImagePullOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer readerFront.Close()
|
||||
logPullingImage(imageFront, readerFront)
|
||||
|
||||
readerHub, err := cli.ImagePull(ctx, imageHub, types.ImagePullOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer readerHub.Close()
|
||||
logPullingImage(imageHub, readerHub)
|
||||
|
||||
readerWorker, err := cli.ImagePull(ctx, imageWorker, types.ImagePullOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer readerWorker.Close()
|
||||
logPullingImage(imageWorker, readerWorker)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func cleanUpOldContainers(
|
||||
ctx context.Context,
|
||||
cli *client.Client,
|
||||
nameFront string,
|
||||
nameHub string,
|
||||
nameWorker string,
|
||||
) error {
|
||||
containers, err := cli.ContainerList(ctx, types.ContainerListOptions{All: true})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, container := range containers {
|
||||
f := fmt.Sprintf("/%s", nameFront)
|
||||
h := fmt.Sprintf("/%s", nameHub)
|
||||
w := fmt.Sprintf("/%s", nameWorker)
|
||||
if utils.Contains(container.Names, f) || utils.Contains(container.Names, h) || utils.Contains(container.Names, w) {
|
||||
err = cli.ContainerRemove(ctx, container.ID, types.ContainerRemoveOptions{Force: true})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func createAndStartContainers(
|
||||
ctx context.Context,
|
||||
cli *client.Client,
|
||||
imageFront string,
|
||||
imageHub string,
|
||||
imageWorker string,
|
||||
tarReader io.Reader,
|
||||
) (
|
||||
respFront container.ContainerCreateCreatedBody,
|
||||
respHub container.ContainerCreateCreatedBody,
|
||||
respWorker container.ContainerCreateCreatedBody,
|
||||
workerIPAddr string,
|
||||
err error,
|
||||
) {
|
||||
log.Info().Msg("Creating containers...")
|
||||
|
||||
nameFront := fmt.Sprintf("%s-front", misc.Program)
|
||||
nameHub := fmt.Sprintf("%s-hub", misc.Program)
|
||||
nameWorker := fmt.Sprintf("%s-worker", misc.Program)
|
||||
|
||||
err = cleanUpOldContainers(ctx, cli, nameFront, nameHub, nameWorker)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
hostIP := "0.0.0.0"
|
||||
|
||||
hostConfigFront := &container.HostConfig{
|
||||
PortBindings: nat.PortMap{
|
||||
nat.Port(fmt.Sprintf("%d/tcp", configStructs.ContainerPort)): []nat.PortBinding{
|
||||
{
|
||||
HostIP: hostIP,
|
||||
HostPort: fmt.Sprintf("%d", config.Config.Tap.Proxy.Front.Port),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
respFront, err = cli.ContainerCreate(ctx, &container.Config{
|
||||
Image: imageFront,
|
||||
Tty: false,
|
||||
Env: []string{
|
||||
"REACT_APP_DEFAULT_FILTER= ",
|
||||
"REACT_APP_HUB_HOST= ",
|
||||
fmt.Sprintf("REACT_APP_HUB_PORT=:%d", config.Config.Tap.Proxy.Hub.Port),
|
||||
},
|
||||
}, hostConfigFront, nil, nil, nameFront)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
hostConfigHub := &container.HostConfig{
|
||||
PortBindings: nat.PortMap{
|
||||
nat.Port(fmt.Sprintf("%d/tcp", config.Config.Tap.Proxy.Hub.SrvPort)): []nat.PortBinding{
|
||||
{
|
||||
HostIP: hostIP,
|
||||
HostPort: fmt.Sprintf("%d", config.Config.Tap.Proxy.Hub.Port),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
cmdHub := []string{"-port", fmt.Sprintf("%d", config.Config.Tap.Proxy.Hub.SrvPort)}
|
||||
if config.DebugMode {
|
||||
cmdHub = append(cmdHub, fmt.Sprintf("-%s", config.DebugFlag))
|
||||
}
|
||||
|
||||
respHub, err = cli.ContainerCreate(ctx, &container.Config{
|
||||
Image: imageHub,
|
||||
Cmd: cmdHub,
|
||||
Tty: false,
|
||||
ExposedPorts: nat.PortSet{nat.Port(fmt.Sprintf("%d/tcp", config.Config.Tap.Proxy.Hub.SrvPort)): {}},
|
||||
}, hostConfigHub, nil, nil, nameHub)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
cmdWorker := []string{"-f", "./import", "-port", fmt.Sprintf("%d", config.Config.Tap.Proxy.Worker.SrvPort)}
|
||||
if config.DebugMode {
|
||||
cmdWorker = append(cmdWorker, fmt.Sprintf("-%s", config.DebugFlag))
|
||||
}
|
||||
|
||||
respWorker, err = cli.ContainerCreate(ctx, &container.Config{
|
||||
Image: imageWorker,
|
||||
Cmd: cmdWorker,
|
||||
Tty: false,
|
||||
}, nil, nil, nil, nameWorker)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if err = cli.CopyToContainer(ctx, respWorker.ID, "/app/import", tarReader, types.CopyToContainerOptions{}); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
log.Info().Msg("Starting containers...")
|
||||
|
||||
if err = cli.ContainerStart(ctx, respFront.ID, types.ContainerStartOptions{}); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if err = cli.ContainerStart(ctx, respHub.ID, types.ContainerStartOptions{}); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if err = cli.ContainerStart(ctx, respWorker.ID, types.ContainerStartOptions{}); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
var containerWorker types.ContainerJSON
|
||||
containerWorker, err = cli.ContainerInspect(ctx, respWorker.ID)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
workerIPAddr = containerWorker.NetworkSettings.IPAddress
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func stopAndRemoveContainers(
|
||||
ctx context.Context,
|
||||
cli *client.Client,
|
||||
respFront container.ContainerCreateCreatedBody,
|
||||
respHub container.ContainerCreateCreatedBody,
|
||||
respWorker container.ContainerCreateCreatedBody,
|
||||
) (err error) {
|
||||
log.Warn().Msg("Stopping containers...")
|
||||
err = cli.ContainerStop(ctx, respFront.ID, nil)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = cli.ContainerStop(ctx, respHub.ID, nil)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = cli.ContainerStop(ctx, respWorker.ID, nil)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
log.Warn().Msg("Removing containers...")
|
||||
err = cli.ContainerRemove(ctx, respFront.ID, types.ContainerRemoveOptions{})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = cli.ContainerRemove(ctx, respHub.ID, types.ContainerRemoveOptions{})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = cli.ContainerRemove(ctx, respWorker.ID, types.ContainerRemoveOptions{})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func downloadTarFromS3(s3Url string) (tarPath string, err error) {
|
||||
u, err := url.Parse(s3Url)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
bucket := u.Host
|
||||
key := u.Path[1:]
|
||||
|
||||
var cfg aws.Config
|
||||
cfg, err = awsConfig.LoadDefaultConfig(context.TODO())
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
client := s3.NewFromConfig(cfg)
|
||||
|
||||
var listObjectsOutput *s3.ListObjectsV2Output
|
||||
listObjectsOutput, err = client.ListObjectsV2(context.TODO(), &s3.ListObjectsV2Input{
|
||||
Bucket: aws.String(bucket),
|
||||
Prefix: aws.String(key),
|
||||
})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
var file *os.File
|
||||
file, err = os.CreateTemp(os.TempDir(), fmt.Sprintf("%s_*.%s", strings.TrimSuffix(filepath.Base(key), filepath.Ext(key)), filepath.Ext(key)))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
log.Info().Str("bucket", bucket).Str("key", key).Msg("Downloading from S3")
|
||||
|
||||
downloader := manager.NewDownloader(client)
|
||||
_, err = downloader.Download(context.TODO(), file, &s3.GetObjectInput{
|
||||
Bucket: aws.String(bucket),
|
||||
Key: aws.String(key),
|
||||
})
|
||||
if err != nil {
|
||||
log.Info().Err(err).Msg("S3 object is not found. Assuming URL is not a single object. Listing the objects in given folder or the bucket to download...")
|
||||
|
||||
var tempDirPath string
|
||||
tempDirPath, err = os.MkdirTemp(os.TempDir(), "kubeshark_*")
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
for _, object := range listObjectsOutput.Contents {
|
||||
wg.Add(1)
|
||||
go func(object s3Types.Object) {
|
||||
defer wg.Done()
|
||||
objectKey := *object.Key
|
||||
|
||||
fullPath := filepath.Join(tempDirPath, objectKey)
|
||||
err = os.MkdirAll(filepath.Dir(fullPath), os.ModePerm)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
var objectFile *os.File
|
||||
objectFile, err = os.Create(fullPath)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer objectFile.Close()
|
||||
|
||||
log.Info().Str("bucket", bucket).Str("key", objectKey).Msg("Downloading from S3")
|
||||
|
||||
downloader := manager.NewDownloader(client)
|
||||
_, err = downloader.Download(context.TODO(), objectFile, &s3.GetObjectInput{
|
||||
Bucket: aws.String(bucket),
|
||||
Key: aws.String(objectKey),
|
||||
})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}(object)
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
tarPath, err = tarDirectory(tempDirPath)
|
||||
return
|
||||
}
|
||||
|
||||
tarPath = file.Name()
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func tarDirectory(dirPath string) (string, error) {
|
||||
tarPath := fmt.Sprintf("%s.tar.gz", dirPath)
|
||||
|
||||
var file *os.File
|
||||
file, err := os.Create(tarPath)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
gzipWriter := gzip.NewWriter(file)
|
||||
defer gzipWriter.Close()
|
||||
|
||||
tarWriter := tar.NewWriter(gzipWriter)
|
||||
defer tarWriter.Close()
|
||||
|
||||
walker := func(path string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if info.IsDir() {
|
||||
return nil
|
||||
}
|
||||
file, err := os.Open(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
stat, err := file.Stat()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
header := &tar.Header{
|
||||
Name: path[len(dirPath)+1:],
|
||||
Size: stat.Size(),
|
||||
Mode: int64(stat.Mode()),
|
||||
ModTime: stat.ModTime(),
|
||||
}
|
||||
|
||||
err = tarWriter.WriteHeader(header)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = io.Copy(tarWriter, file)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
err = filepath.Walk(dirPath, walker)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return tarPath, nil
|
||||
}
|
||||
|
||||
func pcap(tarPath string) error {
|
||||
if strings.HasPrefix(tarPath, "s3://") {
|
||||
var err error
|
||||
tarPath, err = downloadTarFromS3(tarPath)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("Failed downloading from S3")
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
log.Info().Str("tar-path", tarPath).Msg("Openning")
|
||||
|
||||
docker.SetRegistry(config.Config.Tap.Docker.Registry)
|
||||
docker.SetTag(config.Config.Tap.Docker.Tag)
|
||||
|
||||
ctx := context.Background()
|
||||
cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
|
||||
if err != nil {
|
||||
log.Error().Err(err).Send()
|
||||
return err
|
||||
}
|
||||
defer cli.Close()
|
||||
|
||||
imageFront := docker.GetFrontImage()
|
||||
imageHub := docker.GetHubImage()
|
||||
imageWorker := docker.GetWorkerImage()
|
||||
|
||||
err = pullImages(ctx, cli, imageFront, imageHub, imageWorker)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Send()
|
||||
return err
|
||||
}
|
||||
|
||||
tarFile, err := os.Open(tarPath)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Send()
|
||||
return err
|
||||
}
|
||||
defer tarFile.Close()
|
||||
tarReader := bufio.NewReader(tarFile)
|
||||
|
||||
respFront, respHub, respWorker, workerIPAddr, err := createAndStartContainers(
|
||||
ctx,
|
||||
cli,
|
||||
imageFront,
|
||||
imageHub,
|
||||
imageWorker,
|
||||
tarReader,
|
||||
)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Send()
|
||||
return err
|
||||
}
|
||||
|
||||
workerPod := &v1.Pod{
|
||||
Spec: v1.PodSpec{
|
||||
NodeName: "docker",
|
||||
},
|
||||
Status: v1.PodStatus{
|
||||
PodIP: workerIPAddr,
|
||||
Phase: v1.PodRunning,
|
||||
ContainerStatuses: []v1.ContainerStatus{
|
||||
{
|
||||
Ready: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
connector = connect.NewConnector(kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Hub.Port), connect.DefaultRetries, connect.DefaultTimeout)
|
||||
connector.PostWorkerPodToHub(workerPod)
|
||||
|
||||
// License
|
||||
if config.Config.License != "" {
|
||||
connector.PostLicense(config.Config.License)
|
||||
}
|
||||
|
||||
log.Info().
|
||||
Str("url", kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Hub.Port)).
|
||||
Msg(fmt.Sprintf(utils.Green, "Hub is available at:"))
|
||||
|
||||
url := kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Front.Port)
|
||||
log.Info().Str("url", url).Msg(fmt.Sprintf(utils.Green, fmt.Sprintf("%s is available at:", misc.Software)))
|
||||
|
||||
if !config.Config.HeadlessMode {
|
||||
utils.OpenBrowser(url)
|
||||
}
|
||||
|
||||
ctxC, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
utils.WaitForTermination(ctxC, cancel)
|
||||
|
||||
err = stopAndRemoveContainers(ctx, cli, respFront, respHub, respWorker)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Send()
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
142
cmd/tapRunner.go
@@ -2,14 +2,14 @@ package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"regexp"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/kubeshark/kubeshark/docker"
|
||||
"github.com/kubeshark/kubeshark/internal/connect"
|
||||
"github.com/kubeshark/kubeshark/kubernetes/helm"
|
||||
"github.com/kubeshark/kubeshark/misc"
|
||||
"github.com/kubeshark/kubeshark/utils"
|
||||
@@ -31,7 +31,6 @@ type tapState struct {
|
||||
}
|
||||
|
||||
var state tapState
|
||||
var connector *connect.Connector
|
||||
|
||||
type Readiness struct {
|
||||
Hub bool
|
||||
@@ -45,30 +44,15 @@ var ready *Readiness
|
||||
func tap() {
|
||||
ready = &Readiness{}
|
||||
state.startTime = time.Now()
|
||||
docker.SetRegistry(config.Config.Tap.Docker.Registry)
|
||||
docker.SetTag(config.Config.Tap.Docker.Tag)
|
||||
log.Info().Str("registry", docker.GetRegistry()).Str("tag", docker.GetTag()).Msg("Using Docker:")
|
||||
if config.Config.Tap.Pcap != "" {
|
||||
err := pcap(config.Config.Tap.Pcap)
|
||||
if err != nil {
|
||||
os.Exit(1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if !config.Config.Tap.PersistentStorage {
|
||||
config.Config.Tap.StorageLimit = "200Mi"
|
||||
log.Warn().Msg("Storage limit cannot be modified while persistentstorage is set to false!")
|
||||
}
|
||||
log.Info().Str("registry", config.Config.Tap.Docker.Registry).Str("tag", config.Config.Tap.Docker.Tag).Msg("Using Docker:")
|
||||
|
||||
log.Info().
|
||||
Str("limit", config.Config.Tap.StorageLimit).
|
||||
Msg(fmt.Sprintf("%s will store the traffic up to a limit (per node). Oldest TCP/UDP streams will be removed once the limit is reached.", misc.Software))
|
||||
|
||||
connector = connect.NewConnector(kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Hub.Port), connect.DefaultRetries, connect.DefaultTimeout)
|
||||
|
||||
kubernetesProvider, err := getKubernetesProviderForCli(false, false)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Send()
|
||||
return
|
||||
}
|
||||
|
||||
@@ -100,18 +84,24 @@ func tap() {
|
||||
config.Config.Tap.Release.Namespace,
|
||||
).Install()
|
||||
if err != nil {
|
||||
log.Error().Err(err).Send()
|
||||
os.Exit(1)
|
||||
if err.Error() != "cannot re-use a name that is still in use" {
|
||||
log.Error().Err(err).Send()
|
||||
os.Exit(1)
|
||||
}
|
||||
log.Info().Msg("Found an existing installation, skipping Helm install...")
|
||||
|
||||
updateConfig(kubernetesProvider)
|
||||
postFrontStarted(ctx, kubernetesProvider, cancel)
|
||||
} else {
|
||||
log.Info().Msgf("Installed the Helm release: %s", rel.Name)
|
||||
|
||||
go watchHubEvents(ctx, kubernetesProvider, cancel)
|
||||
go watchHubPod(ctx, kubernetesProvider, cancel)
|
||||
go watchFrontPod(ctx, kubernetesProvider, cancel)
|
||||
}
|
||||
|
||||
defer finishTapExecution(kubernetesProvider)
|
||||
|
||||
go watchHubEvents(ctx, kubernetesProvider, cancel)
|
||||
go watchHubPod(ctx, kubernetesProvider, cancel)
|
||||
go watchFrontPod(ctx, kubernetesProvider, cancel)
|
||||
|
||||
// block until exit signal or error
|
||||
utils.WaitForTermination(ctx, cancel)
|
||||
|
||||
@@ -199,7 +189,7 @@ func watchHubPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, c
|
||||
ready.Lock()
|
||||
ready.Hub = true
|
||||
ready.Unlock()
|
||||
postHubStarted(ctx, kubernetesProvider, cancel, false)
|
||||
log.Info().Str("pod", kubernetes.HubPodName).Msg("Ready.")
|
||||
}
|
||||
|
||||
ready.Lock()
|
||||
@@ -289,6 +279,7 @@ func watchFrontPod(ctx context.Context, kubernetesProvider *kubernetes.Provider,
|
||||
ready.Lock()
|
||||
ready.Front = true
|
||||
ready.Unlock()
|
||||
log.Info().Str("pod", kubernetes.FrontPodName).Msg("Ready.")
|
||||
}
|
||||
|
||||
ready.Lock()
|
||||
@@ -405,56 +396,6 @@ func watchHubEvents(ctx context.Context, kubernetesProvider *kubernetes.Provider
|
||||
}
|
||||
}
|
||||
|
||||
func postHubStarted(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc, update bool) {
|
||||
startProxyReportErrorIfAny(
|
||||
kubernetesProvider,
|
||||
ctx,
|
||||
kubernetes.HubServiceName,
|
||||
kubernetes.HubPodName,
|
||||
configStructs.ProxyHubPortLabel,
|
||||
config.Config.Tap.Proxy.Hub.Port,
|
||||
configStructs.ContainerPort,
|
||||
"/echo",
|
||||
)
|
||||
|
||||
if update {
|
||||
// Pod regex
|
||||
connector.PostRegexToHub(config.Config.Tap.PodRegexStr, state.targetNamespaces)
|
||||
|
||||
// License
|
||||
if config.Config.License != "" {
|
||||
connector.PostLicense(config.Config.License)
|
||||
}
|
||||
|
||||
// Scripting
|
||||
connector.PostEnv(config.Config.Scripting.Env)
|
||||
|
||||
scripts, err := config.Config.Scripting.GetScripts()
|
||||
if err != nil {
|
||||
log.Error().Err(err).Send()
|
||||
}
|
||||
|
||||
for _, script := range scripts {
|
||||
_, err = connector.PostScript(script)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Send()
|
||||
}
|
||||
}
|
||||
|
||||
connector.PostScriptDone()
|
||||
}
|
||||
|
||||
if !update && !config.Config.Tap.Ingress.Enabled {
|
||||
// Hub proxy URL
|
||||
url := kubernetes.GetProxyOnPort(config.Config.Tap.Proxy.Hub.Port)
|
||||
log.Info().Str("url", url).Msg(fmt.Sprintf(utils.Green, "Hub is available at:"))
|
||||
}
|
||||
|
||||
if config.Config.Scripting.Source != "" && config.Config.Scripting.WatchScripts {
|
||||
watchScripts(false)
|
||||
}
|
||||
}
|
||||
|
||||
func postFrontStarted(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc) {
|
||||
startProxyReportErrorIfAny(
|
||||
kubernetesProvider,
|
||||
@@ -478,4 +419,51 @@ func postFrontStarted(ctx context.Context, kubernetesProvider *kubernetes.Provid
|
||||
if !config.Config.HeadlessMode {
|
||||
utils.OpenBrowser(url)
|
||||
}
|
||||
|
||||
for !ready.Hub {
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
|
||||
|
||||
if (config.Config.Scripting.Source != "" || len(config.Config.Scripting.Sources) > 0) && config.Config.Scripting.WatchScripts {
|
||||
watchScripts(ctx, kubernetesProvider, false)
|
||||
}
|
||||
|
||||
if config.Config.Scripting.Console {
|
||||
go runConsoleWithoutProxy()
|
||||
}
|
||||
}
|
||||
|
||||
func updateConfig(kubernetesProvider *kubernetes.Provider) {
|
||||
_, _ = kubernetes.SetSecret(kubernetesProvider, kubernetes.SECRET_LICENSE, config.Config.License)
|
||||
_, _ = kubernetes.SetConfig(kubernetesProvider, kubernetes.CONFIG_POD_REGEX, config.Config.Tap.PodRegexStr)
|
||||
_, _ = kubernetes.SetConfig(kubernetesProvider, kubernetes.CONFIG_NAMESPACES, strings.Join(config.Config.Tap.Namespaces, ","))
|
||||
_, _ = kubernetes.SetConfig(kubernetesProvider, kubernetes.CONFIG_EXCLUDED_NAMESPACES, strings.Join(config.Config.Tap.ExcludedNamespaces, ","))
|
||||
|
||||
data, err := json.Marshal(config.Config.Scripting.Env)
|
||||
if err != nil {
|
||||
log.Error().Str("config", kubernetes.CONFIG_SCRIPTING_ENV).Err(err).Send()
|
||||
return
|
||||
} else {
|
||||
_, _ = kubernetes.SetConfig(kubernetesProvider, kubernetes.CONFIG_SCRIPTING_ENV, string(data))
|
||||
}
|
||||
|
||||
ingressEnabled := ""
|
||||
if config.Config.Tap.Ingress.Enabled {
|
||||
ingressEnabled = "true"
|
||||
}
|
||||
|
||||
authEnabled := ""
|
||||
if config.Config.Tap.Auth.Enabled {
|
||||
authEnabled = "true"
|
||||
}
|
||||
|
||||
_, _ = kubernetes.SetConfig(kubernetesProvider, kubernetes.CONFIG_INGRESS_ENABLED, ingressEnabled)
|
||||
_, _ = kubernetes.SetConfig(kubernetesProvider, kubernetes.CONFIG_INGRESS_HOST, config.Config.Tap.Ingress.Host)
|
||||
|
||||
_, _ = kubernetes.SetConfig(kubernetesProvider, kubernetes.CONFIG_PROXY_FRONT_PORT, fmt.Sprint(config.Config.Tap.Proxy.Front.Port))
|
||||
|
||||
_, _ = kubernetes.SetConfig(kubernetesProvider, kubernetes.CONFIG_AUTH_ENABLED, authEnabled)
|
||||
_, _ = kubernetes.SetConfig(kubernetesProvider, kubernetes.CONFIG_AUTH_TYPE, config.Config.Tap.Auth.Type)
|
||||
_, _ = kubernetes.SetConfig(kubernetesProvider, kubernetes.CONFIG_AUTH_SAML_IDP_METADATA_URL, config.Config.Tap.Auth.Saml.IdpMetadataUrl)
|
||||
}
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/creasty/defaults"
|
||||
"github.com/goccy/go-yaml"
|
||||
"github.com/kubeshark/kubeshark/misc"
|
||||
"github.com/kubeshark/kubeshark/misc/version"
|
||||
"github.com/kubeshark/kubeshark/utils"
|
||||
@@ -19,7 +20,6 @@ import (
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/pflag"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -28,6 +28,7 @@ const (
|
||||
FieldNameTag = "yaml"
|
||||
ReadonlyTag = "readonly"
|
||||
DebugFlag = "debug"
|
||||
ConfigPathFlag = "config-path"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -41,7 +42,7 @@ func InitConfig(cmd *cobra.Command) error {
|
||||
var err error
|
||||
DebugMode, err = cmd.Flags().GetBool(DebugFlag)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg(fmt.Sprintf("Can't recieve '%s' flag", DebugFlag))
|
||||
log.Error().Err(err).Msg(fmt.Sprintf("Can't receive '%s' flag", DebugFlag))
|
||||
}
|
||||
|
||||
if DebugMode {
|
||||
@@ -57,12 +58,16 @@ func InitConfig(cmd *cobra.Command) error {
|
||||
"pro",
|
||||
"manifests",
|
||||
"license",
|
||||
"mcp",
|
||||
}, cmd.Use) {
|
||||
go version.CheckNewerVersion()
|
||||
}
|
||||
|
||||
Config = CreateDefaultConfig()
|
||||
Config.Tap.Debug = DebugMode
|
||||
if DebugMode {
|
||||
Config.LogLevel = "debug"
|
||||
}
|
||||
cmdName = cmd.Name()
|
||||
if utils.Contains([]string{
|
||||
"clean",
|
||||
@@ -70,6 +75,7 @@ func InitConfig(cmd *cobra.Command) error {
|
||||
"pro",
|
||||
"proxy",
|
||||
"scripts",
|
||||
"pprof",
|
||||
}, cmdName) {
|
||||
cmdName = "tap"
|
||||
}
|
||||
@@ -78,7 +84,7 @@ func InitConfig(cmd *cobra.Command) error {
|
||||
return err
|
||||
}
|
||||
|
||||
ConfigFilePath = path.Join(misc.GetDotFolderPath(), "config.yaml")
|
||||
ConfigFilePath = GetConfigFilePath(cmd)
|
||||
if err := loadConfigFile(&Config, utils.Contains([]string{
|
||||
"manifests",
|
||||
"license",
|
||||
@@ -130,22 +136,46 @@ func WriteConfig(config *ConfigStruct) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func loadConfigFile(config *ConfigStruct, silent bool) error {
|
||||
func GetConfigFilePath(cmd *cobra.Command) string {
|
||||
defaultConfigPath := path.Join(misc.GetDotFolderPath(), "config.yaml")
|
||||
|
||||
cwd, err := os.Getwd()
|
||||
if err != nil {
|
||||
return err
|
||||
return defaultConfigPath
|
||||
}
|
||||
|
||||
if cmd != nil {
|
||||
configPathOverride, err := cmd.Flags().GetString(ConfigPathFlag)
|
||||
if err == nil {
|
||||
if configPathOverride != "" {
|
||||
resolvedConfigPath, err := filepath.Abs(configPathOverride)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("--config-path flag path cannot be resolved")
|
||||
} else {
|
||||
return resolvedConfigPath
|
||||
}
|
||||
}
|
||||
} else {
|
||||
log.Error().Err(err).Msg("--config-path flag parser error")
|
||||
}
|
||||
}
|
||||
|
||||
cwdConfig := filepath.Join(cwd, fmt.Sprintf("%s.yaml", misc.Program))
|
||||
reader, err := os.Open(cwdConfig)
|
||||
if err != nil {
|
||||
reader, err = os.Open(ConfigFilePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return defaultConfigPath
|
||||
} else {
|
||||
ConfigFilePath = cwdConfig
|
||||
reader.Close()
|
||||
return cwdConfig
|
||||
}
|
||||
}
|
||||
|
||||
func loadConfigFile(config *ConfigStruct, silent bool) error {
|
||||
reader, err := os.Open(ConfigFilePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer reader.Close()
|
||||
|
||||
buf, err := io.ReadAll(reader)
|
||||
if err != nil {
|
||||
@@ -171,9 +201,14 @@ func initFlag(f *pflag.Flag) {
|
||||
|
||||
flagPath = append(flagPath, strings.Split(f.Name, "-")...)
|
||||
|
||||
flagPathJoined := strings.Join(flagPath, ".")
|
||||
if strings.HasSuffix(flagPathJoined, ".config.path") {
|
||||
return
|
||||
}
|
||||
|
||||
sliceValue, isSliceValue := f.Value.(pflag.SliceValue)
|
||||
if !isSliceValue {
|
||||
if err := mergeFlagValue(configElemValue, flagPath, strings.Join(flagPath, "."), f.Value.String()); err != nil {
|
||||
if err := mergeFlagValue(configElemValue, flagPath, flagPathJoined, f.Value.String()); err != nil {
|
||||
log.Warn().Err(err).Send()
|
||||
}
|
||||
return
|
||||
@@ -186,7 +221,7 @@ func initFlag(f *pflag.Flag) {
|
||||
return
|
||||
}
|
||||
|
||||
if err := mergeFlagValues(configElemValue, flagPath, strings.Join(flagPath, "."), sliceValue.GetSlice()); err != nil {
|
||||
if err := mergeFlagValues(configElemValue, flagPath, flagPathJoined, sliceValue.GetSlice()); err != nil {
|
||||
log.Warn().Err(err).Send()
|
||||
}
|
||||
}
|
||||
@@ -222,7 +257,7 @@ func mergeSetFlag(configElemValue reflect.Value, setValues []string) error {
|
||||
}
|
||||
|
||||
if len(setErrors) > 0 {
|
||||
return fmt.Errorf(strings.Join(setErrors, "\n"))
|
||||
return errors.New(strings.Join(setErrors, "\n"))
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
@@ -10,29 +10,162 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
KubeConfigPathConfigName = "kube-configpath"
|
||||
KubeConfigPathConfigName = "kube-configPath"
|
||||
)
|
||||
|
||||
func CreateDefaultConfig() ConfigStruct {
|
||||
return ConfigStruct{
|
||||
Tap: configStructs.TapConfig{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "kubernetes.io/os",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"linux"},
|
||||
NodeSelectorTerms: configStructs.NodeSelectorTermsConfig{
|
||||
Workers: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "kubernetes.io/os",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"linux"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Hub: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "kubernetes.io/os",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"linux"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Front: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "kubernetes.io/os",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"linux"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Dex: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "kubernetes.io/os",
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{"linux"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Tolerations: configStructs.TolerationsConfig{
|
||||
Workers: []v1.Toleration{
|
||||
{
|
||||
Effect: v1.TaintEffect("NoExecute"),
|
||||
Operator: v1.TolerationOpExists,
|
||||
},
|
||||
},
|
||||
},
|
||||
SecurityContext: configStructs.SecurityContextConfig{
|
||||
Privileged: true,
|
||||
// Capabilities used only when running in unprivileged mode
|
||||
Capabilities: configStructs.CapabilitiesConfig{
|
||||
NetworkCapture: []string{
|
||||
// NET_RAW is required to listen to the network traffic
|
||||
"NET_RAW",
|
||||
// NET_ADMIN is required to listen to the network traffic
|
||||
"NET_ADMIN",
|
||||
},
|
||||
ServiceMeshCapture: []string{
|
||||
// SYS_ADMIN is required to read /proc/PID/net/ns + to install eBPF programs (kernel < 5.8)
|
||||
"SYS_ADMIN",
|
||||
// SYS_PTRACE is required to set the netns of another process + to open libssl.so of another process
|
||||
"SYS_PTRACE",
|
||||
// DAC_OVERRIDE is required to read /proc/PID/environ
|
||||
"DAC_OVERRIDE",
|
||||
},
|
||||
EBPFCapture: []string{
|
||||
// SYS_ADMIN is required to read /proc/PID/net/ns + to install eBPF programs (kernel < 5.8)
|
||||
"SYS_ADMIN",
|
||||
// SYS_PTRACE is required to set the netns of another process + to open libssl.so of another process
|
||||
"SYS_PTRACE",
|
||||
// SYS_RESOURCE is required to change rlimits for eBPF
|
||||
"SYS_RESOURCE",
|
||||
// IPC_LOCK is required for eBPF perf buffer allocations once the buffer size exceeds a certain limit:
|
||||
// https://github.com/kubeshark/tracer/blob/13e24725ba8b98216dd0e553262e6d9c56dce5fa/main.go#L82)
|
||||
"IPC_LOCK",
|
||||
},
|
||||
},
|
||||
},
|
||||
Auth: configStructs.AuthConfig{
|
||||
Saml: configStructs.SamlConfig{
|
||||
RoleAttribute: "role",
|
||||
Roles: map[string]configStructs.Role{
|
||||
"admin": {
|
||||
Filter: "",
|
||||
CanDownloadPCAP: true,
|
||||
CanUseScripting: true,
|
||||
ScriptingPermissions: configStructs.ScriptingPermissions{
|
||||
CanSave: true,
|
||||
CanActivate: true,
|
||||
CanDelete: true,
|
||||
},
|
||||
CanUpdateTargetedPods: true,
|
||||
CanStopTrafficCapturing: true,
|
||||
CanControlDissection: true,
|
||||
ShowAdminConsoleLink: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
EnabledDissectors: []string{
|
||||
"amqp",
|
||||
"dns",
|
||||
"http",
|
||||
"icmp",
|
||||
"kafka",
|
||||
"redis",
|
||||
// "sctp",
|
||||
// "syscall",
|
||||
// "tcp",
|
||||
// "udp",
|
||||
"ws",
|
||||
// "tlsx",
|
||||
"ldap",
|
||||
"radius",
|
||||
"diameter",
|
||||
"udp-flow",
|
||||
"tcp-flow",
|
||||
"udp-conn",
|
||||
"tcp-conn",
|
||||
},
|
||||
PortMapping: configStructs.PortMapping{
|
||||
HTTP: []uint16{80, 443, 8080},
|
||||
AMQP: []uint16{5671, 5672},
|
||||
KAFKA: []uint16{9092},
|
||||
REDIS: []uint16{6379},
|
||||
LDAP: []uint16{389},
|
||||
DIAMETER: []uint16{3868},
|
||||
},
|
||||
Dashboard: configStructs.DashboardConfig{
|
||||
CompleteStreamingEnabled: true,
|
||||
},
|
||||
Capture: configStructs.CaptureConfig{
|
||||
Dissection: configStructs.DissectionConfig{
|
||||
Enabled: true,
|
||||
StopAfter: "5m",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
type KubeConfig struct {
|
||||
ConfigPathStr string `yaml:"configpath" json:"configpath"`
|
||||
ConfigPathStr string `yaml:"configPath" json:"configPath"`
|
||||
Context string `yaml:"context" json:"context"`
|
||||
}
|
||||
|
||||
@@ -41,15 +174,24 @@ type ManifestsConfig struct {
|
||||
}
|
||||
|
||||
type ConfigStruct struct {
|
||||
Tap configStructs.TapConfig `yaml:"tap" json:"tap"`
|
||||
Logs configStructs.LogsConfig `yaml:"logs" json:"logs"`
|
||||
Config configStructs.ConfigConfig `yaml:"config,omitempty" json:"config,omitempty"`
|
||||
Kube KubeConfig `yaml:"kube" json:"kube"`
|
||||
DumpLogs bool `yaml:"dumplogs" json:"dumplogs" default:"false"`
|
||||
HeadlessMode bool `yaml:"headless" json:"headless" default:"false"`
|
||||
License string `yaml:"license" json:"license" default:""`
|
||||
Scripting configStructs.ScriptingConfig `yaml:"scripting" json:"scripting"`
|
||||
Manifests ManifestsConfig `yaml:"manifests,omitempty" json:"manifests,omitempty"`
|
||||
Tap configStructs.TapConfig `yaml:"tap" json:"tap"`
|
||||
Logs configStructs.LogsConfig `yaml:"logs" json:"logs"`
|
||||
Config configStructs.ConfigConfig `yaml:"config,omitempty" json:"config,omitempty"`
|
||||
PcapDump configStructs.PcapDumpConfig `yaml:"pcapdump" json:"pcapdump"`
|
||||
Kube KubeConfig `yaml:"kube" json:"kube"`
|
||||
DumpLogs bool `yaml:"dumpLogs" json:"dumpLogs" default:"false"`
|
||||
HeadlessMode bool `yaml:"headless" json:"headless" default:"false"`
|
||||
License string `yaml:"license" json:"license" default:""`
|
||||
CloudApiUrl string `yaml:"cloudApiUrl" json:"cloudApiUrl" default:"https://api.kubeshark.com"`
|
||||
CloudLicenseEnabled bool `yaml:"cloudLicenseEnabled" json:"cloudLicenseEnabled" default:"true"`
|
||||
DemoModeEnabled bool `yaml:"demoModeEnabled" json:"demoModeEnabled" default:"false"`
|
||||
SupportChatEnabled bool `yaml:"supportChatEnabled" json:"supportChatEnabled" default:"false"`
|
||||
BetaEnabled bool `yaml:"betaEnabled" json:"betaEnabled" default:"false"`
|
||||
InternetConnectivity bool `yaml:"internetConnectivity" json:"internetConnectivity" default:"true"`
|
||||
Scripting configStructs.ScriptingConfig `yaml:"scripting" json:"scripting"`
|
||||
Manifests ManifestsConfig `yaml:"manifests,omitempty" json:"manifests,omitempty"`
|
||||
Timezone string `yaml:"timezone" json:"timezone"`
|
||||
LogLevel string `yaml:"logLevel" json:"logLevel" default:"warning"`
|
||||
}
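
The `default:` struct tags above are resolved by a defaults library; `github.com/creasty/defaults` appears in this module's `go.mod`, so a plausible minimal sketch of how such tags get applied looks like this (the `Example` struct is hypothetical, not the CLI's actual loading code):

```go
// Sketch of how `default:` struct tags are typically resolved with
// github.com/creasty/defaults. The Example struct is hypothetical.
package main

import (
	"fmt"

	"github.com/creasty/defaults"
)

type Example struct {
	LogLevel    string `yaml:"logLevel" default:"warning"`
	CloudApiUrl string `yaml:"cloudApiUrl" default:"https://api.kubeshark.com"`
	Headless    bool   `yaml:"headless" default:"false"`
}

func main() {
	var cfg Example
	if err := defaults.Set(&cfg); err != nil { // fills zero-valued fields from the tags
		panic(err)
	}
	fmt.Printf("%+v\n", cfg)
}
```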
func (config *ConfigStruct) ImagePullPolicy() v1.PullPolicy {
|
||||
|
||||
@@ -10,10 +10,12 @@ import (
|
||||
|
||||
const (
|
||||
FileLogsName = "file"
|
||||
GrepLogsName = "grep"
|
||||
)
|
||||
|
||||
type LogsConfig struct {
|
||||
FileStr string `yaml:"file" json:"file"`
|
||||
Grep string `yaml:"grep" json:"grep"`
|
||||
}
|
||||
|
||||
func (config *LogsConfig) Validate() error {
|
||||
|
||||
@@ -1,46 +1,93 @@
|
||||
package configStructs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/kubeshark/kubeshark/misc"
|
||||
"github.com/rs/zerolog/log"
|
||||
)
|
||||
|
||||
type ScriptingConfig struct {
|
||||
Env map[string]interface{} `yaml:"env" json:"env"`
|
||||
Enabled bool `yaml:"enabled" json:"enabled" default:"false"`
|
||||
Env map[string]interface{} `yaml:"env" json:"env" default:"{}"`
|
||||
Source string `yaml:"source" json:"source" default:""`
|
||||
WatchScripts bool `yaml:"watchscripts" json:"watchscripts" default:"true"`
|
||||
Sources []string `yaml:"sources" json:"sources" default:"[]"`
|
||||
WatchScripts bool `yaml:"watchScripts" json:"watchScripts" default:"true"`
|
||||
Active []string `yaml:"active" json:"active" default:"[]"`
|
||||
Console bool `yaml:"console" json:"console" default:"true"`
|
||||
}
|
||||
|
||||
func (config *ScriptingConfig) GetScripts() (scripts []*misc.Script, err error) {
|
||||
if config.Source == "" {
|
||||
return
|
||||
// Check if both Source and Sources are empty
|
||||
if config.Source == "" && len(config.Sources) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var files []fs.DirEntry
|
||||
files, err = os.ReadDir(config.Source)
|
||||
if err != nil {
|
||||
return
|
||||
var allFiles []struct {
|
||||
Source string
|
||||
File fs.DirEntry
|
||||
}
|
||||
|
||||
for _, f := range files {
|
||||
if f.IsDir() {
|
||||
// Handle single Source directory
|
||||
if config.Source != "" {
|
||||
files, err := os.ReadDir(config.Source)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read directory %s: %v", config.Source, err)
|
||||
}
|
||||
for _, file := range files {
|
||||
allFiles = append(allFiles, struct {
|
||||
Source string
|
||||
File fs.DirEntry
|
||||
}{Source: config.Source, File: file})
|
||||
}
|
||||
}
|
||||
|
||||
// Handle multiple Sources directories
|
||||
if len(config.Sources) > 0 {
|
||||
for _, source := range config.Sources {
|
||||
files, err := os.ReadDir(source)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read directory %s: %v", source, err)
|
||||
}
|
||||
for _, file := range files {
|
||||
allFiles = append(allFiles, struct {
|
||||
Source string
|
||||
File fs.DirEntry
|
||||
}{Source: source, File: file})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Iterate over all collected files
|
||||
for _, f := range allFiles {
|
||||
if f.File.IsDir() {
|
||||
continue
|
||||
}
|
||||
|
||||
// Construct the full path based on the relevant source directory
|
||||
path := filepath.Join(f.Source, f.File.Name())
|
||||
if !strings.HasSuffix(f.File.Name(), ".js") { // Use file name suffix for skipping non-JS files
|
||||
log.Info().Str("path", path).Msg("Skipping non-JS file")
|
||||
continue
|
||||
}
|
||||
|
||||
// Read the script file
|
||||
var script *misc.Script
|
||||
path := filepath.Join(config.Source, f.Name())
|
||||
script, err = misc.ReadScriptFile(path)
|
||||
if err != nil {
|
||||
return
|
||||
return nil, fmt.Errorf("failed to read script file %s: %v", path, err)
|
||||
}
|
||||
|
||||
// Append the valid script to the scripts slice
|
||||
scripts = append(scripts, script)
|
||||
|
||||
log.Debug().Str("path", path).Msg("Found script:")
|
||||
}
|
||||
|
||||
return
|
||||
// Return the collected scripts and nil error if successful
|
||||
return scripts, nil
|
||||
}
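
A minimal usage sketch for the rewritten `GetScripts`, assuming a directory of `.js` scripts on disk and the package import path `github.com/kubeshark/kubeshark/config/configStructs` used elsewhere in this repository (the directory path is a placeholder):

```go
// Sketch: load scripts from one or more directories via ScriptingConfig.
package main

import (
	"fmt"

	"github.com/kubeshark/kubeshark/config/configStructs"
)

func main() {
	cfg := configStructs.ScriptingConfig{
		Sources: []string{"/path/to/scripts"}, // non-.js files in this directory are skipped
	}
	scripts, err := cfg.GetScripts()
	if err != nil {
		panic(err)
	}
	fmt.Printf("loaded %d scripts\n", len(scripts))
}
```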
@@ -9,33 +9,52 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
DockerRegistryLabel = "docker-registry"
|
||||
DockerTagLabel = "docker-tag"
|
||||
DockerImagePullPolicy = "docker-imagepullpolicy"
|
||||
DockerImagePullSecrets = "docker-imagepullsecrets"
|
||||
ProxyFrontPortLabel = "proxy-front-port"
|
||||
ProxyHubPortLabel = "proxy-hub-port"
|
||||
ProxyHostLabel = "proxy-host"
|
||||
NamespacesLabel = "namespaces"
|
||||
ReleaseNamespaceLabel = "release-namespace"
|
||||
PersistentStorageLabel = "persistentstorage"
|
||||
StorageLimitLabel = "storagelimit"
|
||||
StorageClassLabel = "storageclass"
|
||||
DryRunLabel = "dryrun"
|
||||
PcapLabel = "pcap"
|
||||
ServiceMeshLabel = "servicemesh"
|
||||
TlsLabel = "tls"
|
||||
IgnoreTaintedLabel = "ignoretainted"
|
||||
IngressEnabledLabel = "ingress-enabled"
|
||||
TelemetryEnabledLabel = "telemetry-enabled"
|
||||
DebugLabel = "debug"
|
||||
ContainerPort = 80
|
||||
ContainerPortStr = "80"
|
||||
DockerRegistryLabel = "docker-registry"
|
||||
DockerTagLabel = "docker-tag"
|
||||
DockerImagePullPolicy = "docker-imagePullPolicy"
|
||||
DockerImagePullSecrets = "docker-imagePullSecrets"
|
||||
ProxyFrontPortLabel = "proxy-front-port"
|
||||
ProxyHubPortLabel = "proxy-hub-port"
|
||||
ProxyHostLabel = "proxy-host"
|
||||
NamespacesLabel = "namespaces"
|
||||
ExcludedNamespacesLabel = "excludedNamespaces"
|
||||
ReleaseNamespaceLabel = "release-namespace"
|
||||
PersistentStorageLabel = "persistentStorage"
|
||||
PersistentStorageStaticLabel = "persistentStorageStatic"
|
||||
EfsFileSytemIdAndPathLabel = "efsFileSytemIdAndPath"
|
||||
StorageLimitLabel = "storageLimit"
|
||||
StorageClassLabel = "storageClass"
|
||||
DryRunLabel = "dryRun"
|
||||
PcapLabel = "pcap"
|
||||
ServiceMeshLabel = "serviceMesh"
|
||||
TlsLabel = "tls"
|
||||
IgnoreTaintedLabel = "ignoreTainted"
|
||||
IngressEnabledLabel = "ingress-enabled"
|
||||
TelemetryEnabledLabel = "telemetry-enabled"
|
||||
ResourceGuardEnabledLabel = "resource-guard-enabled"
|
||||
PprofPortLabel = "pprof-port"
|
||||
PprofViewLabel = "pprof-view"
|
||||
DebugLabel = "debug"
|
||||
ContainerPort = 8080
|
||||
ContainerPortStr = "8080"
|
||||
PcapDest = "dest"
|
||||
PcapMaxSize = "maxSize"
|
||||
PcapMaxTime = "maxTime"
|
||||
PcapTimeInterval = "timeInterval"
|
||||
PcapKubeconfig = "kubeconfig"
|
||||
PcapDumpEnabled = "enabled"
|
||||
PcapTime = "time"
|
||||
WatchdogEnabled = "watchdogEnabled"
|
||||
)
|
||||
|
||||
type ResourceLimits struct {
|
||||
CPU string `yaml:"cpu" json:"cpu" default:"750m"`
|
||||
Memory string `yaml:"memory" json:"memory" default:"1Gi"`
|
||||
type ResourceLimitsHub struct {
|
||||
CPU string `yaml:"cpu" json:"cpu" default:"0"`
|
||||
Memory string `yaml:"memory" json:"memory" default:"5Gi"`
|
||||
}
|
||||
|
||||
type ResourceLimitsWorker struct {
|
||||
CPU string `yaml:"cpu" json:"cpu" default:"0"`
|
||||
Memory string `yaml:"memory" json:"memory" default:"3Gi"`
|
||||
}
|
||||
|
||||
type ResourceRequests struct {
|
||||
@@ -43,18 +62,22 @@ type ResourceRequests struct {
|
||||
Memory string `yaml:"memory" json:"memory" default:"50Mi"`
|
||||
}
|
||||
|
||||
type ResourceRequirements struct {
|
||||
Limits ResourceLimits `yaml:"limits" json:"limits"`
|
||||
Requests ResourceRequests `yaml:"requests" json:"requests"`
|
||||
type ResourceRequirementsHub struct {
|
||||
Limits ResourceLimitsHub `yaml:"limits" json:"limits"`
|
||||
Requests ResourceRequests `yaml:"requests" json:"requests"`
|
||||
}
|
||||
|
||||
type ResourceRequirementsWorker struct {
|
||||
Limits ResourceLimitsHub `yaml:"limits" json:"limits"`
|
||||
Requests ResourceRequests `yaml:"requests" json:"requests"`
|
||||
}
|
||||
|
||||
type WorkerConfig struct {
|
||||
SrvPort uint16 `yaml:"srvport" json:"srvport" default:"8897"`
|
||||
SrvPort uint16 `yaml:"srvPort" json:"srvPort" default:"48999"`
|
||||
}
|
||||
|
||||
type HubConfig struct {
|
||||
Port uint16 `yaml:"port" json:"port" default:"8898"`
|
||||
SrvPort uint16 `yaml:"srvport" json:"srvport" default:"8898"`
|
||||
SrvPort uint16 `yaml:"srvPort" json:"srvPort" default:"8898"`
|
||||
}
|
||||
|
||||
type FrontConfig struct {
|
||||
@@ -68,35 +91,124 @@ type ProxyConfig struct {
|
||||
Host string `yaml:"host" json:"host" default:"127.0.0.1"`
|
||||
}
|
||||
|
||||
type OverrideImageConfig struct {
|
||||
Worker string `yaml:"worker" json:"worker"`
|
||||
Hub string `yaml:"hub" json:"hub"`
|
||||
Front string `yaml:"front" json:"front"`
|
||||
}
|
||||
type OverrideTagConfig struct {
|
||||
Worker string `yaml:"worker" json:"worker"`
|
||||
Hub string `yaml:"hub" json:"hub"`
|
||||
Front string `yaml:"front" json:"front"`
|
||||
}
|
||||
|
||||
type DockerConfig struct {
|
||||
Registry string `yaml:"registry" json:"registry" default:"docker.io/kubeshark"`
|
||||
Tag string `yaml:"tag" json:"tag" default:"latest"`
|
||||
ImagePullPolicy string `yaml:"imagepullpolicy" json:"imagepullpolicy" default:"Always"`
|
||||
ImagePullSecrets []string `yaml:"imagepullsecrets" json:"imagepullsecrets"`
|
||||
Registry string `yaml:"registry" json:"registry" default:"docker.io/kubeshark"`
|
||||
Tag string `yaml:"tag" json:"tag" default:""`
|
||||
TagLocked bool `yaml:"tagLocked" json:"tagLocked" default:"true"`
|
||||
ImagePullPolicy string `yaml:"imagePullPolicy" json:"imagePullPolicy" default:"Always"`
|
||||
ImagePullSecrets []string `yaml:"imagePullSecrets" json:"imagePullSecrets"`
|
||||
OverrideImage OverrideImageConfig `yaml:"overrideImage" json:"overrideImage"`
|
||||
OverrideTag OverrideTagConfig `yaml:"overrideTag" json:"overrideTag"`
|
||||
}
|
||||
|
||||
type DnsConfig struct {
|
||||
Nameservers []string `yaml:"nameservers" json:"nameservers" default:"[]"`
|
||||
Searches []string `yaml:"searches" json:"searches" default:"[]"`
|
||||
Options []DnsConfigOption `yaml:"options" json:"options" default:"[]"`
|
||||
}
|
||||
|
||||
type DnsConfigOption struct {
|
||||
Name string `yaml:"name" json:"name"`
|
||||
Value string `yaml:"value" json:"value"`
|
||||
}
|
||||
|
||||
type ResourcesConfig struct {
|
||||
Worker ResourceRequirements `yaml:"worker" json:"worker"`
|
||||
Hub ResourceRequirements `yaml:"hub" json:"hub"`
|
||||
Hub ResourceRequirementsHub `yaml:"hub" json:"hub"`
|
||||
Sniffer ResourceRequirementsWorker `yaml:"sniffer" json:"sniffer"`
|
||||
Tracer ResourceRequirementsWorker `yaml:"tracer" json:"tracer"`
|
||||
}
|
||||
|
||||
type ProbesConfig struct {
|
||||
Hub ProbeConfig `yaml:"hub" json:"hub"`
|
||||
Sniffer ProbeConfig `yaml:"sniffer" json:"sniffer"`
|
||||
}
|
||||
|
||||
type NodeSelectorTermsConfig struct {
|
||||
Hub []v1.NodeSelectorTerm `yaml:"hub" json:"hub" default:"[]"`
|
||||
Workers []v1.NodeSelectorTerm `yaml:"workers" json:"workers" default:"[]"`
|
||||
Front []v1.NodeSelectorTerm `yaml:"front" json:"front" default:"[]"`
|
||||
Dex []v1.NodeSelectorTerm `yaml:"dex" json:"dex" default:"[]"`
|
||||
}
|
||||
|
||||
type TolerationsConfig struct {
|
||||
Hub []v1.Toleration `yaml:"hub" json:"hub" default:"[]"`
|
||||
Workers []v1.Toleration `yaml:"workers" json:"workers" default:"[]"`
|
||||
Front []v1.Toleration `yaml:"front" json:"front" default:"[]"`
|
||||
}
|
||||
|
||||
type ProbeConfig struct {
|
||||
InitialDelaySeconds int `yaml:"initialDelaySeconds" json:"initialDelaySeconds" default:"5"`
|
||||
PeriodSeconds int `yaml:"periodSeconds" json:"periodSeconds" default:"5"`
|
||||
SuccessThreshold int `yaml:"successThreshold" json:"successThreshold" default:"1"`
|
||||
FailureThreshold int `yaml:"failureThreshold" json:"failureThreshold" default:"3"`
|
||||
}
|
||||
|
||||
type ScriptingPermissions struct {
|
||||
CanSave bool `yaml:"canSave" json:"canSave" default:"true"`
|
||||
CanActivate bool `yaml:"canActivate" json:"canActivate" default:"true"`
|
||||
CanDelete bool `yaml:"canDelete" json:"canDelete" default:"true"`
|
||||
}
|
||||
|
||||
type Role struct {
|
||||
Filter string `yaml:"filter" json:"filter" default:""`
|
||||
CanDownloadPCAP bool `yaml:"canDownloadPCAP" json:"canDownloadPCAP" default:"false"`
|
||||
CanUseScripting bool `yaml:"canUseScripting" json:"canUseScripting" default:"false"`
|
||||
ScriptingPermissions ScriptingPermissions `yaml:"scriptingPermissions" json:"scriptingPermissions"`
|
||||
CanUpdateTargetedPods bool `yaml:"canUpdateTargetedPods" json:"canUpdateTargetedPods" default:"false"`
|
||||
CanStopTrafficCapturing bool `yaml:"canStopTrafficCapturing" json:"canStopTrafficCapturing" default:"false"`
|
||||
CanControlDissection bool `yaml:"canControlDissection" json:"canControlDissection" default:"false"`
|
||||
ShowAdminConsoleLink bool `yaml:"showAdminConsoleLink" json:"showAdminConsoleLink" default:"false"`
|
||||
}
|
||||
|
||||
type SamlConfig struct {
|
||||
IdpMetadataUrl string `yaml:"idpMetadataUrl" json:"idpMetadataUrl"`
|
||||
X509crt string `yaml:"x509crt" json:"x509crt"`
|
||||
X509key string `yaml:"x509key" json:"x509key"`
|
||||
RoleAttribute string `yaml:"roleAttribute" json:"roleAttribute"`
|
||||
Roles map[string]Role `yaml:"roles" json:"roles"`
|
||||
}
|
||||
|
||||
type AuthConfig struct {
|
||||
Enabled bool `yaml:"enabled" json:"enabled" default:"false"`
|
||||
ApprovedEmails []string `yaml:"approvedemails" json:"approvedemails" default:"[]"`
|
||||
ApprovedDomains []string `yaml:"approveddomains" json:"approveddomains" default:"[]"`
|
||||
Enabled bool `yaml:"enabled" json:"enabled" default:"false"`
|
||||
Type string `yaml:"type" json:"type" default:"saml"`
|
||||
Saml SamlConfig `yaml:"saml" json:"saml"`
|
||||
}
|
||||
|
||||
type IngressConfig struct {
|
||||
Enabled bool `yaml:"enabled" json:"enabled" default:"false"`
|
||||
ClassName string `yaml:"classname" json:"classname" default:"kubeshark-ingress-class"`
|
||||
Controller string `yaml:"controller" json:"controller" default:"k8s.io/ingress-nginx"`
|
||||
ClassName string `yaml:"className" json:"className" default:""`
|
||||
Host string `yaml:"host" json:"host" default:"ks.svc.cluster.local"`
|
||||
TLS []networking.IngressTLS `yaml:"tls" json:"tls"`
|
||||
CertManager string `yaml:"certmanager" json:"certmanager" default:"letsencrypt-prod"`
|
||||
Path string `yaml:"path" json:"path" default:"/"`
|
||||
TLS []networking.IngressTLS `yaml:"tls" json:"tls" default:"[]"`
|
||||
Annotations map[string]string `yaml:"annotations" json:"annotations" default:"{}"`
|
||||
}
|
||||
|
||||
type RoutingConfig struct {
|
||||
Front FrontRoutingConfig `yaml:"front" json:"front"`
|
||||
}
|
||||
|
||||
type DashboardConfig struct {
|
||||
StreamingType string `yaml:"streamingType" json:"streamingType" default:"connect-rpc"`
|
||||
CompleteStreamingEnabled bool `yaml:"completeStreamingEnabled" json:"completeStreamingEnabled" default:"true"`
|
||||
}
|
||||
|
||||
type FrontRoutingConfig struct {
|
||||
BasePath string `yaml:"basePath" json:"basePath" default:""`
|
||||
}
|
||||
|
||||
type ReleaseConfig struct {
|
||||
Repo string `yaml:"repo" json:"repo" default:"https://helm.kubeshark.co"`
|
||||
Repo string `yaml:"repo" json:"repo" default:"https://helm.kubeshark.com"`
|
||||
Name string `yaml:"name" json:"name" default:"kubeshark"`
|
||||
Namespace string `yaml:"namespace" json:"namespace" default:"default"`
|
||||
}
|
||||
@@ -105,30 +217,172 @@ type TelemetryConfig struct {
|
||||
Enabled bool `yaml:"enabled" json:"enabled" default:"true"`
|
||||
}
|
||||
|
||||
type ResourceGuardConfig struct {
|
||||
Enabled bool `yaml:"enabled" json:"enabled" default:"false"`
|
||||
}
|
||||
|
||||
type SentryConfig struct {
|
||||
Enabled bool `yaml:"enabled" json:"enabled" default:"false"`
|
||||
Environment string `yaml:"environment" json:"environment" default:"production"`
|
||||
}
|
||||
|
||||
type WatchdogConfig struct {
|
||||
Enabled bool `yaml:"enabled" json:"enabled" default:"false"`
|
||||
}
|
||||
|
||||
type GitopsConfig struct {
|
||||
Enabled bool `yaml:"enabled" json:"enabled" default:"false"`
|
||||
}
|
||||
|
||||
type CapabilitiesConfig struct {
|
||||
NetworkCapture []string `yaml:"networkCapture" json:"networkCapture" default:"[]"`
|
||||
ServiceMeshCapture []string `yaml:"serviceMeshCapture" json:"serviceMeshCapture" default:"[]"`
|
||||
EBPFCapture []string `yaml:"ebpfCapture" json:"ebpfCapture" default:"[]"`
|
||||
}
|
||||
|
||||
type MetricsConfig struct {
|
||||
Port uint16 `yaml:"port" json:"port" default:"49100"`
|
||||
}
|
||||
|
||||
type PprofConfig struct {
|
||||
Enabled bool `yaml:"enabled" json:"enabled" default:"false"`
|
||||
Port uint16 `yaml:"port" json:"port" default:"8000"`
|
||||
View string `yaml:"view" json:"view" default:"flamegraph"`
|
||||
}
|
||||
|
||||
type MiscConfig struct {
|
||||
JsonTTL string `yaml:"jsonTTL" json:"jsonTTL" default:"5m"`
|
||||
PcapTTL string `yaml:"pcapTTL" json:"pcapTTL" default:"0"`
|
||||
PcapErrorTTL string `yaml:"pcapErrorTTL" json:"pcapErrorTTL" default:"0"`
|
||||
TrafficSampleRate int `yaml:"trafficSampleRate" json:"trafficSampleRate" default:"100"`
|
||||
TcpStreamChannelTimeoutMs int `yaml:"tcpStreamChannelTimeoutMs" json:"tcpStreamChannelTimeoutMs" default:"10000"`
|
||||
TcpStreamChannelTimeoutShow bool `yaml:"tcpStreamChannelTimeoutShow" json:"tcpStreamChannelTimeoutShow" default:"false"`
|
||||
ResolutionStrategy string `yaml:"resolutionStrategy" json:"resolutionStrategy" default:"auto"`
|
||||
DuplicateTimeframe string `yaml:"duplicateTimeframe" json:"duplicateTimeframe" default:"200ms"`
|
||||
DetectDuplicates bool `yaml:"detectDuplicates" json:"detectDuplicates" default:"false"`
|
||||
StaleTimeoutSeconds int `yaml:"staleTimeoutSeconds" json:"staleTimeoutSeconds" default:"30"`
|
||||
}
|
||||
|
||||
type PcapDumpConfig struct {
|
||||
PcapDumpEnabled bool `yaml:"enabled" json:"enabled" default:"false"`
|
||||
PcapTimeInterval string `yaml:"timeInterval" json:"timeInterval" default:"1m"`
|
||||
PcapMaxTime string `yaml:"maxTime" json:"maxTime" default:"1h"`
|
||||
PcapMaxSize string `yaml:"maxSize" json:"maxSize" default:"500MB"`
|
||||
PcapTime string `yaml:"time" json:"time" default:"time"`
|
||||
PcapDebug bool `yaml:"debug" json:"debug" default:"false"`
|
||||
PcapDest string `yaml:"dest" json:"dest" default:""`
|
||||
}
|
||||
|
||||
type PortMapping struct {
|
||||
HTTP []uint16 `yaml:"http" json:"http"`
|
||||
AMQP []uint16 `yaml:"amqp" json:"amqp"`
|
||||
KAFKA []uint16 `yaml:"kafka" json:"kafka"`
|
||||
REDIS []uint16 `yaml:"redis" json:"redis"`
|
||||
LDAP []uint16 `yaml:"ldap" json:"ldap"`
|
||||
DIAMETER []uint16 `yaml:"diameter" json:"diameter"`
|
||||
}
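
Hypothetical illustration of how a port-to-protocol lookup over `PortMapping` could work, using the default ports shown earlier; this is not the dissector-selection code Kubeshark actually ships:

```go
// Hypothetical helper: map a destination port back to a protocol name using
// the PortMapping defaults shown above. Not Kubeshark's actual dissector logic.
package main

import "fmt"

type PortMapping struct {
	HTTP     []uint16
	AMQP     []uint16
	KAFKA    []uint16
	REDIS    []uint16
	LDAP     []uint16
	DIAMETER []uint16
}

func protocolForPort(m PortMapping, port uint16) string {
	lookup := map[string][]uint16{
		"http": m.HTTP, "amqp": m.AMQP, "kafka": m.KAFKA,
		"redis": m.REDIS, "ldap": m.LDAP, "diameter": m.DIAMETER,
	}
	for name, ports := range lookup {
		for _, p := range ports {
			if p == port {
				return name
			}
		}
	}
	return "unknown"
}

func main() {
	defaults := PortMapping{
		HTTP: []uint16{80, 443, 8080}, AMQP: []uint16{5671, 5672},
		KAFKA: []uint16{9092}, REDIS: []uint16{6379},
		LDAP: []uint16{389}, DIAMETER: []uint16{3868},
	}
	fmt.Println(protocolForPort(defaults, 9092)) // kafka
}
```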
type SecurityContextConfig struct {
|
||||
Privileged bool `yaml:"privileged" json:"privileged" default:"true"`
|
||||
AppArmorProfile AppArmorProfileConfig `yaml:"appArmorProfile" json:"appArmorProfile"`
|
||||
SeLinuxOptions SeLinuxOptionsConfig `yaml:"seLinuxOptions" json:"seLinuxOptions"`
|
||||
Capabilities CapabilitiesConfig `yaml:"capabilities" json:"capabilities"`
|
||||
}
|
||||
|
||||
type AppArmorProfileConfig struct {
|
||||
Type string `yaml:"type" json:"type"`
|
||||
LocalhostProfile string `yaml:"localhostProfile" json:"localhostProfile"`
|
||||
}
|
||||
|
||||
type SeLinuxOptionsConfig struct {
|
||||
Level string `yaml:"level" json:"level"`
|
||||
Role string `yaml:"role" json:"role"`
|
||||
Type string `yaml:"type" json:"type"`
|
||||
User string `yaml:"user" json:"user"`
|
||||
}
|
||||
|
||||
type RawCaptureConfig struct {
|
||||
Enabled bool `yaml:"enabled" json:"enabled" default:"true"`
|
||||
StorageSize string `yaml:"storageSize" json:"storageSize" default:"1Gi"`
|
||||
}
|
||||
|
||||
type SnapshotsConfig struct {
|
||||
StorageClass string `yaml:"storageClass" json:"storageClass" default:""`
|
||||
StorageSize string `yaml:"storageSize" json:"storageSize" default:"20Gi"`
|
||||
}
|
||||
|
||||
type DelayedDissectionConfig struct {
|
||||
Image string `yaml:"image" json:"image" default:"kubeshark/worker:master"`
|
||||
CPU string `yaml:"cpu" json:"cpu" default:"1"`
|
||||
Memory string `yaml:"memory" json:"memory" default:"4Gi"`
|
||||
}
|
||||
|
||||
type DissectionConfig struct {
|
||||
Enabled bool `yaml:"enabled" json:"enabled" default:"true"`
|
||||
StopAfter string `yaml:"stopAfter" json:"stopAfter" default:"5m"`
|
||||
}
|
||||
|
||||
type CaptureConfig struct {
|
||||
Dissection DissectionConfig `yaml:"dissection" json:"dissection"`
|
||||
CaptureSelf bool `yaml:"captureSelf" json:"captureSelf" default:"false"`
|
||||
Raw RawCaptureConfig `yaml:"raw" json:"raw"`
|
||||
DbMaxSize string `yaml:"dbMaxSize" json:"dbMaxSize" default:"500Mi"`
|
||||
}
|
||||
|
||||
type TapConfig struct {
|
||||
Docker DockerConfig `yaml:"docker" json:"docker"`
|
||||
Proxy ProxyConfig `yaml:"proxy" json:"proxy"`
|
||||
PodRegexStr string `yaml:"regex" json:"regex" default:".*"`
|
||||
Namespaces []string `yaml:"namespaces" json:"namespaces" default:"[]"`
|
||||
Release ReleaseConfig `yaml:"release" json:"release"`
|
||||
PersistentStorage bool `yaml:"persistentstorage" json:"persistentstorage" default:"false"`
|
||||
StorageLimit string `yaml:"storagelimit" json:"storagelimit" default:"200Mi"`
|
||||
StorageClass string `yaml:"storageclass" json:"storageclass" default:"standard"`
|
||||
DryRun bool `yaml:"dryrun" json:"dryrun" default:"false"`
|
||||
Pcap string `yaml:"pcap" json:"pcap" default:""`
|
||||
Resources ResourcesConfig `yaml:"resources" json:"resources"`
|
||||
ServiceMesh bool `yaml:"servicemesh" json:"servicemesh" default:"true"`
|
||||
Tls bool `yaml:"tls" json:"tls" default:"true"`
|
||||
PacketCapture string `yaml:"packetcapture" json:"packetcapture" default:"libpcap"`
|
||||
IgnoreTainted bool `yaml:"ignoretainted" json:"ignoretainted" default:"false"`
|
||||
Labels map[string]string `yaml:"labels" json:"labels" default:"{}"`
|
||||
Annotations map[string]string `yaml:"annotations" json:"annotations" default:"{}"`
|
||||
NodeSelectorTerms []v1.NodeSelectorTerm `yaml:"nodeselectorterms" json:"nodeselectorterms" default:"[]"`
|
||||
Auth AuthConfig `yaml:"auth" json:"auth"`
|
||||
Ingress IngressConfig `yaml:"ingress" json:"ingress"`
|
||||
IPv6 bool `yaml:"ipv6" json:"ipv6" default:"true"`
|
||||
Debug bool `yaml:"debug" json:"debug" default:"false"`
|
||||
Telemetry TelemetryConfig `yaml:"telemetry" json:"telemetry"`
|
||||
Docker DockerConfig `yaml:"docker" json:"docker"`
|
||||
Proxy ProxyConfig `yaml:"proxy" json:"proxy"`
|
||||
PodRegexStr string `yaml:"regex" json:"regex" default:".*"`
|
||||
Namespaces []string `yaml:"namespaces" json:"namespaces" default:"[]"`
|
||||
ExcludedNamespaces []string `yaml:"excludedNamespaces" json:"excludedNamespaces" default:"[]"`
|
||||
BpfOverride string `yaml:"bpfOverride" json:"bpfOverride" default:""`
|
||||
Capture CaptureConfig `yaml:"capture" json:"capture"`
|
||||
DelayedDissection DelayedDissectionConfig `yaml:"delayedDissection" json:"delayedDissection"`
|
||||
Snapshots SnapshotsConfig `yaml:"snapshots" json:"snapshots"`
|
||||
Release ReleaseConfig `yaml:"release" json:"release"`
|
||||
PersistentStorage bool `yaml:"persistentStorage" json:"persistentStorage" default:"false"`
|
||||
PersistentStorageStatic bool `yaml:"persistentStorageStatic" json:"persistentStorageStatic" default:"false"`
|
||||
PersistentStoragePvcVolumeMode string `yaml:"persistentStoragePvcVolumeMode" json:"persistentStoragePvcVolumeMode" default:"FileSystem"`
|
||||
EfsFileSytemIdAndPath string `yaml:"efsFileSytemIdAndPath" json:"efsFileSytemIdAndPath" default:""`
|
||||
Secrets []string `yaml:"secrets" json:"secrets" default:"[]"`
|
||||
StorageLimit string `yaml:"storageLimit" json:"storageLimit" default:"10Gi"`
|
||||
StorageClass string `yaml:"storageClass" json:"storageClass" default:"standard"`
|
||||
DryRun bool `yaml:"dryRun" json:"dryRun" default:"false"`
|
||||
DnsConfig DnsConfig `yaml:"dns" json:"dns"`
|
||||
Resources ResourcesConfig `yaml:"resources" json:"resources"`
|
||||
Probes ProbesConfig `yaml:"probes" json:"probes"`
|
||||
ServiceMesh bool `yaml:"serviceMesh" json:"serviceMesh" default:"true"`
|
||||
Tls bool `yaml:"tls" json:"tls" default:"true"`
|
||||
DisableTlsLog bool `yaml:"disableTlsLog" json:"disableTlsLog" default:"true"`
|
||||
PacketCapture string `yaml:"packetCapture" json:"packetCapture" default:"best"`
|
||||
Labels map[string]string `yaml:"labels" json:"labels" default:"{}"`
|
||||
Annotations map[string]string `yaml:"annotations" json:"annotations" default:"{}"`
|
||||
NodeSelectorTerms NodeSelectorTermsConfig `yaml:"nodeSelectorTerms" json:"nodeSelectorTerms" default:"{}"`
|
||||
Tolerations TolerationsConfig `yaml:"tolerations" json:"tolerations" default:"{}"`
|
||||
Auth AuthConfig `yaml:"auth" json:"auth"`
|
||||
Ingress IngressConfig `yaml:"ingress" json:"ingress"`
|
||||
PriorityClass string `yaml:"priorityClass" json:"priorityClass" default:""`
|
||||
Routing RoutingConfig `yaml:"routing" json:"routing"`
|
||||
IPv6 bool `yaml:"ipv6" json:"ipv6" default:"true"`
|
||||
Debug bool `yaml:"debug" json:"debug" default:"false"`
|
||||
Dashboard DashboardConfig `yaml:"dashboard" json:"dashboard"`
|
||||
Telemetry TelemetryConfig `yaml:"telemetry" json:"telemetry"`
|
||||
ResourceGuard ResourceGuardConfig `yaml:"resourceGuard" json:"resourceGuard"`
|
||||
Watchdog WatchdogConfig `yaml:"watchdog" json:"watchdog"`
|
||||
Gitops GitopsConfig `yaml:"gitops" json:"gitops"`
|
||||
Sentry SentryConfig `yaml:"sentry" json:"sentry"`
|
||||
DefaultFilter string `yaml:"defaultFilter" json:"defaultFilter" default:""`
|
||||
LiveConfigMapChangesDisabled bool `yaml:"liveConfigMapChangesDisabled" json:"liveConfigMapChangesDisabled" default:"false"`
|
||||
GlobalFilter string `yaml:"globalFilter" json:"globalFilter" default:""`
|
||||
EnabledDissectors []string `yaml:"enabledDissectors" json:"enabledDissectors"`
|
||||
PortMapping PortMapping `yaml:"portMapping" json:"portMapping"`
|
||||
CustomMacros map[string]string `yaml:"customMacros" json:"customMacros" default:"{\"https\":\"tls and (http or http2)\"}"`
|
||||
Metrics MetricsConfig `yaml:"metrics" json:"metrics"`
|
||||
Pprof PprofConfig `yaml:"pprof" json:"pprof"`
|
||||
Misc MiscConfig `yaml:"misc" json:"misc"`
|
||||
SecurityContext SecurityContextConfig `yaml:"securityContext" json:"securityContext"`
|
||||
MountBpf bool `yaml:"mountBpf" json:"mountBpf" default:"true"`
|
||||
HostNetwork bool `yaml:"hostNetwork" json:"hostNetwork" default:"true"`
|
||||
}
|
||||
|
||||
func (config *TapConfig) PodRegex() *regexp.Regexp {
|
||||
|
||||
@@ -1,53 +0,0 @@
|
||||
package docker
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
hub = "hub"
|
||||
worker = "worker"
|
||||
front = "front"
|
||||
)
|
||||
|
||||
var (
|
||||
registry = "docker.io/kubeshark/"
|
||||
tag = "latest"
|
||||
)
|
||||
|
||||
func GetRegistry() string {
|
||||
return registry
|
||||
}
|
||||
|
||||
func SetRegistry(value string) {
|
||||
if strings.HasPrefix(value, "docker.io/kubeshark") {
|
||||
registry = "docker.io/kubeshark/"
|
||||
} else {
|
||||
registry = value
|
||||
}
|
||||
}
|
||||
|
||||
func GetTag() string {
|
||||
return tag
|
||||
}
|
||||
|
||||
func SetTag(value string) {
|
||||
tag = value
|
||||
}
|
||||
|
||||
func getImage(image string) string {
|
||||
return fmt.Sprintf("%s%s:%s", registry, image, tag)
|
||||
}
|
||||
|
||||
func GetHubImage() string {
|
||||
return getImage(hub)
|
||||
}
|
||||
|
||||
func GetWorkerImage() string {
|
||||
return getImage(worker)
|
||||
}
|
||||
|
||||
func GetFrontImage() string {
|
||||
return getImage(front)
|
||||
}
|
||||
@@ -12,14 +12,14 @@ import (
|
||||
k8serrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
)
|
||||
|
||||
// formatError wraps error with a detailed message that is meant for the user.
|
||||
// FormatError wraps error with a detailed message that is meant for the user.
|
||||
// While the errors are meant to be displayed, they are not meant to be exported as classes outside of the CLI.
|
||||
func FormatError(err error) error {
|
||||
var errorNew error
|
||||
if k8serrors.IsForbidden(err) {
|
||||
errorNew = fmt.Errorf("insufficient permissions: %w. "+
|
||||
"supply the required permission or control %s's access to namespaces by setting %s "+
|
||||
"in the config file or setting the targeted namespace with --%s %s=<NAMEPSACE>",
|
||||
"in the config file or setting the targeted namespace with --%s %s=<NAMESPACE>",
|
||||
err,
|
||||
misc.Software,
|
||||
configStructs.ReleaseNamespaceLabel,
|
||||
|
||||
go.mod
@@ -1,185 +1,143 @@
|
||||
module github.com/kubeshark/kubeshark
|
||||
|
||||
go 1.19
|
||||
go 1.24.0
|
||||
|
||||
toolchain go1.24.5
|
||||
|
||||
require (
|
||||
github.com/aws/aws-sdk-go-v2 v1.18.1
|
||||
github.com/aws/aws-sdk-go-v2/config v1.18.27
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.70
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.35.0
|
||||
github.com/creasty/defaults v1.5.2
|
||||
github.com/docker/docker v20.10.24+incompatible
|
||||
github.com/docker/go-connections v0.4.0
|
||||
github.com/fsnotify/fsnotify v1.6.0
|
||||
github.com/gin-gonic/gin v1.9.1
|
||||
github.com/fsnotify/fsnotify v1.7.0
|
||||
github.com/go-cmd/cmd v1.4.3
|
||||
github.com/goccy/go-yaml v1.11.2
|
||||
github.com/google/go-github/v37 v37.0.0
|
||||
github.com/gorilla/websocket v1.4.2
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674
|
||||
github.com/kubeshark/gopacket v1.1.39
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/rivo/tview v0.0.0-20240818110301-fd649dbf1223
|
||||
github.com/robertkrimen/otto v0.2.1
|
||||
github.com/rs/zerolog v1.28.0
|
||||
github.com/spf13/cobra v1.6.1
|
||||
github.com/spf13/pflag v1.0.5
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
helm.sh/helm/v3 v3.12.0
|
||||
k8s.io/api v0.27.1
|
||||
k8s.io/apimachinery v0.27.1
|
||||
k8s.io/client-go v0.27.1
|
||||
k8s.io/kubectl v0.27.1
|
||||
github.com/spf13/cobra v1.9.1
|
||||
github.com/spf13/pflag v1.0.6
|
||||
github.com/tanqiangyes/grep-go v0.0.0-20220515134556-b36bff9c3d8e
|
||||
helm.sh/helm/v3 v3.18.4
|
||||
k8s.io/api v0.33.2
|
||||
k8s.io/apimachinery v0.33.2
|
||||
k8s.io/client-go v0.33.2
|
||||
k8s.io/kubectl v0.33.2
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1 // indirect
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
|
||||
github.com/BurntSushi/toml v1.2.1 // indirect
|
||||
dario.cat/mergo v1.0.1 // indirect
|
||||
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
|
||||
github.com/BurntSushi/toml v1.5.0 // indirect
|
||||
github.com/MakeNowJust/heredoc v1.0.0 // indirect
|
||||
github.com/Masterminds/goutils v1.1.1 // indirect
|
||||
github.com/Masterminds/semver/v3 v3.2.0 // indirect
|
||||
github.com/Masterminds/sprig/v3 v3.2.3 // indirect
|
||||
github.com/Masterminds/squirrel v1.5.3 // indirect
|
||||
github.com/Microsoft/go-winio v0.6.0 // indirect
|
||||
github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.13.26 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.34 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.28 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.35 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.26 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.29 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.28 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.12.12 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.12 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.19.2 // indirect
|
||||
github.com/aws/smithy-go v1.13.5 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/bytedance/sonic v1.9.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||
github.com/Masterminds/semver/v3 v3.3.0 // indirect
|
||||
github.com/Masterminds/sprig/v3 v3.3.0 // indirect
|
||||
github.com/Masterminds/squirrel v1.5.4 // indirect
|
||||
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
|
||||
github.com/blang/semver/v4 v4.0.0 // indirect
|
||||
github.com/chai2010/gettext-go v1.0.2 // indirect
|
||||
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect
|
||||
github.com/containerd/containerd v1.7.0 // indirect
|
||||
github.com/cyphar/filepath-securejoin v0.2.3 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/docker/cli v20.10.21+incompatible // indirect
|
||||
github.com/docker/distribution v2.8.2+incompatible // indirect
|
||||
github.com/docker/docker-credential-helpers v0.7.0 // indirect
|
||||
github.com/docker/go-metrics v0.0.1 // indirect
|
||||
github.com/docker/go-units v0.5.0 // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.10.1 // indirect
|
||||
github.com/evanphx/json-patch v5.6.0+incompatible // indirect
|
||||
github.com/containerd/containerd v1.7.27 // indirect
|
||||
github.com/containerd/errdefs v0.3.0 // indirect
|
||||
github.com/containerd/log v0.1.0 // indirect
|
||||
github.com/containerd/platforms v0.2.1 // indirect
|
||||
github.com/cyphar/filepath-securejoin v0.4.1 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
|
||||
github.com/evanphx/json-patch v5.9.11+incompatible // indirect
|
||||
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
|
||||
github.com/fatih/color v1.13.0 // indirect
|
||||
github.com/gabriel-vasile/mimetype v1.4.2 // indirect
|
||||
github.com/gin-contrib/sse v0.1.0 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
|
||||
github.com/gdamore/encoding v1.0.0 // indirect
|
||||
github.com/gdamore/tcell/v2 v2.7.1 // indirect
|
||||
github.com/go-errors/errors v1.4.2 // indirect
|
||||
github.com/go-gorp/gorp/v3 v3.0.5 // indirect
|
||||
github.com/go-logr/logr v1.2.3 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.19.6 // indirect
|
||||
github.com/go-openapi/jsonreference v0.20.1 // indirect
|
||||
github.com/go-openapi/swag v0.22.3 // indirect
|
||||
github.com/go-playground/locales v0.14.1 // indirect
|
||||
github.com/go-playground/universal-translator v0.18.1 // indirect
|
||||
github.com/go-gorp/gorp/v3 v3.1.0 // indirect
|
||||
github.com/go-logr/logr v1.4.2 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.21.0 // indirect
|
||||
github.com/go-openapi/jsonreference v0.20.2 // indirect
|
||||
github.com/go-openapi/swag v0.23.0 // indirect
|
||||
github.com/go-playground/validator/v10 v10.14.0 // indirect
|
||||
github.com/gobwas/glob v0.2.3 // indirect
|
||||
github.com/goccy/go-json v0.10.2 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/protobuf v1.5.3 // indirect
|
||||
github.com/google/btree v1.0.1 // indirect
|
||||
github.com/google/gnostic v0.5.7-v3refs // indirect
|
||||
github.com/google/go-cmp v0.5.9 // indirect
|
||||
github.com/google/btree v1.1.3 // indirect
|
||||
github.com/google/gnostic-models v0.6.9 // indirect
|
||||
github.com/google/go-cmp v0.7.0 // indirect
|
||||
github.com/google/go-querystring v1.1.0 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
|
||||
github.com/google/uuid v1.3.0 // indirect
|
||||
github.com/gorilla/mux v1.8.0 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/gosuri/uitable v0.0.4 // indirect
|
||||
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
|
||||
github.com/hashicorp/errwrap v1.1.0 // indirect
|
||||
github.com/hashicorp/go-multierror v1.1.1 // indirect
|
||||
github.com/huandu/xstrings v1.4.0 // indirect
|
||||
github.com/imdario/mergo v0.3.13 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.0.1 // indirect
|
||||
github.com/jmespath/go-jmespath v0.4.0 // indirect
|
||||
github.com/jmoiron/sqlx v1.3.5 // indirect
|
||||
github.com/huandu/xstrings v1.5.0 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/jmoiron/sqlx v1.4.0 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/klauspost/compress v1.16.0 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.2.4 // indirect
|
||||
github.com/klauspost/compress v1.18.0 // indirect
|
||||
github.com/kubeshark/tracerproto v1.0.0 // indirect
|
||||
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
|
||||
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
|
||||
github.com/leodido/go-urn v1.2.4 // indirect
|
||||
github.com/lib/pq v1.10.7 // indirect
|
||||
github.com/lib/pq v1.10.9 // indirect
|
||||
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
|
||||
github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/mattn/go-colorable v0.1.13 // indirect
|
||||
github.com/mattn/go-isatty v0.0.19 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.9 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.15 // indirect
|
||||
github.com/mitchellh/copystructure v1.2.0 // indirect
|
||||
github.com/mitchellh/go-wordwrap v1.0.1 // indirect
|
||||
github.com/mitchellh/reflectwalk v1.0.2 // indirect
|
||||
github.com/moby/locker v1.0.1 // indirect
|
||||
github.com/moby/spdystream v0.2.0 // indirect
|
||||
github.com/moby/term v0.0.0-20221205130635-1aeaba878587 // indirect
|
||||
github.com/moby/spdystream v0.5.0 // indirect
|
||||
github.com/moby/term v0.5.2 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
|
||||
github.com/morikuni/aec v1.0.0 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
|
||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||
github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.0.8 // indirect
|
||||
github.com/opencontainers/image-spec v1.1.1 // indirect
|
||||
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
|
||||
github.com/prometheus/client_golang v1.14.0 // indirect
|
||||
github.com/prometheus/client_model v0.3.0 // indirect
|
||||
github.com/prometheus/common v0.37.0 // indirect
|
||||
github.com/prometheus/procfs v0.8.0 // indirect
|
||||
github.com/rubenv/sql-migrate v1.3.1 // indirect
|
||||
github.com/rivo/uniseg v0.4.7 // indirect
|
||||
github.com/rubenv/sql-migrate v1.8.0 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/shopspring/decimal v1.3.1 // indirect
|
||||
github.com/sirupsen/logrus v1.9.0 // indirect
|
||||
github.com/spf13/cast v1.5.0 // indirect
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
|
||||
github.com/ugorji/go/codec v1.2.11 // indirect
|
||||
github.com/shopspring/decimal v1.4.0 // indirect
|
||||
github.com/sirupsen/logrus v1.9.3 // indirect
|
||||
github.com/spf13/cast v1.7.0 // indirect
|
||||
github.com/x448/float16 v0.8.4 // indirect
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
|
||||
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
|
||||
github.com/xlab/treeprint v1.1.0 // indirect
|
||||
go.opentelemetry.io/otel v1.14.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.14.0 // indirect
|
||||
go.starlark.net v0.0.0-20220203230714-bb14e151c28f // indirect
|
||||
golang.org/x/arch v0.3.0 // indirect
|
||||
golang.org/x/crypto v0.9.0 // indirect
|
||||
golang.org/x/mod v0.9.0 // indirect
|
||||
golang.org/x/net v0.10.0 // indirect
|
||||
golang.org/x/oauth2 v0.4.0 // indirect
|
||||
golang.org/x/sync v0.1.0 // indirect
|
||||
golang.org/x/sys v0.8.0 // indirect
|
||||
golang.org/x/term v0.8.0 // indirect
|
||||
golang.org/x/text v0.9.0 // indirect
|
||||
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
|
||||
golang.org/x/tools v0.7.0 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect
|
||||
google.golang.org/grpc v1.53.0 // indirect
|
||||
google.golang.org/protobuf v1.30.0 // indirect
|
||||
github.com/xlab/treeprint v1.2.0 // indirect
|
||||
golang.org/x/crypto v0.39.0 // indirect
|
||||
golang.org/x/net v0.40.0 // indirect
|
||||
golang.org/x/oauth2 v0.28.0 // indirect
|
||||
golang.org/x/sync v0.15.0 // indirect
|
||||
golang.org/x/sys v0.33.0 // indirect
|
||||
golang.org/x/term v0.32.0 // indirect
|
||||
golang.org/x/text v0.26.0 // indirect
|
||||
golang.org/x/time v0.9.0 // indirect
|
||||
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 // indirect
|
||||
google.golang.org/grpc v1.68.1 // indirect
|
||||
google.golang.org/protobuf v1.36.5 // indirect
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/sourcemap.v1 v1.0.5 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
k8s.io/apiextensions-apiserver v0.27.1 // indirect
|
||||
k8s.io/apiserver v0.27.1 // indirect
|
||||
k8s.io/cli-runtime v0.27.1 // indirect
|
||||
k8s.io/component-base v0.27.1 // indirect
|
||||
k8s.io/klog/v2 v2.90.1 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20230308215209-15aac26d736a // indirect
|
||||
k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5 // indirect
|
||||
oras.land/oras-go v1.2.2 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
|
||||
sigs.k8s.io/kustomize/api v0.13.2 // indirect
|
||||
sigs.k8s.io/kustomize/kyaml v0.14.1 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
|
||||
sigs.k8s.io/yaml v1.3.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
k8s.io/apiextensions-apiserver v0.33.2 // indirect
|
||||
k8s.io/apiserver v0.33.2 // indirect
|
||||
k8s.io/cli-runtime v0.33.2 // indirect
|
||||
k8s.io/component-base v0.33.2 // indirect
|
||||
k8s.io/klog/v2 v2.130.1 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect
|
||||
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect
|
||||
oras.land/oras-go/v2 v2.6.0 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
|
||||
sigs.k8s.io/kustomize/api v0.19.0 // indirect
|
||||
sigs.k8s.io/kustomize/kyaml v0.19.0 // indirect
|
||||
sigs.k8s.io/randfill v1.0.0 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect
|
||||
sigs.k8s.io/yaml v1.4.0 // indirect
|
||||
)
|
||||
|
||||
@@ -1,7 +1,8 @@
|
||||
apiVersion: v2
|
||||
appVersion: "50.2"
|
||||
name: kubeshark
|
||||
version: "52.12.0"
|
||||
description: The API Traffic Analyzer for Kubernetes
|
||||
home: https://kubeshark.co
|
||||
home: https://kubeshark.com
|
||||
keywords:
|
||||
- kubeshark
|
||||
- packet capture
|
||||
@@ -15,12 +16,10 @@ keywords:
|
||||
- api
|
||||
kubeVersion: '>= 1.16.0-0'
|
||||
maintainers:
|
||||
- email: info@kubeshark.co
|
||||
- email: support@kubeshark.com
|
||||
name: Kubeshark
|
||||
url: https://kubeshark.co
|
||||
name: kubeshark
|
||||
url: https://kubeshark.com
|
||||
sources:
|
||||
- https://github.com/kubeshark/kubeshark/tree/master/helm-chart
|
||||
type: application
|
||||
version: "50.2"
|
||||
icon: https://raw.githubusercontent.com/kubeshark/assets/master/logo/vector/logo.svg
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
# Helm Chart of Kubeshark
|
||||
|
||||
## Officially
|
||||
## Official
|
||||
|
||||
Add the Helm repo for Kubeshark:
|
||||
|
||||
```shell
|
||||
helm repo add kubeshark https://helm.kubeshark.co
|
||||
helm repo add kubeshark https://helm.kubeshark.com
|
||||
```
|
||||
|
||||
then install Kubeshark:
|
||||
@@ -14,7 +14,7 @@ then install Kubeshark:
|
||||
helm install kubeshark kubeshark/kubeshark
|
||||
```
|
||||
|
||||
## Locally
|
||||
## Local
|
||||
|
||||
Clone the repo:
|
||||
|
||||
@@ -23,6 +23,14 @@ git clone git@github.com:kubeshark/kubeshark.git --depth 1
|
||||
cd kubeshark/helm-chart
|
||||
```
|
||||
|
||||
In case you want to clone a specific tag of the repo (e.g. `v52.3.59`):
|
||||
|
||||
```shell
|
||||
git clone git@github.com:kubeshark/kubeshark.git --depth 1 --branch <tag>
|
||||
cd kubeshark/helm-chart
|
||||
```
|
||||
> See the list of available tags here: https://github.com/kubeshark/kubeshark/tags
|
||||
|
||||
Render the templates:
|
||||
|
||||
```shell
|
||||
@@ -41,38 +49,47 @@ Uninstall Kubeshark:
|
||||
helm uninstall kubeshark
|
||||
```
|
||||
|
||||
## Accesing
|
||||
## Port-forward
|
||||
|
||||
Do the port forwarding:
|
||||
|
||||
```shell
|
||||
kubectl port-forward -n kubeshark service/kubeshark-hub 8898:80 & \
|
||||
kubectl port-forward -n kubeshark service/kubeshark-front 8899:80
|
||||
kubectl port-forward service/kubeshark-front 8899:80
|
||||
```
|
||||
|
||||
Visit [localhost:8899](http://localhost:8899)
|
||||
|
||||
## Installing with Ingress Enabled
|
||||
You can also use `kubeshark proxy` for a more stable port-forward connection.
|
||||
|
||||
## Add a License Key
|
||||
|
||||
When a license key is necessary, you can use:
|
||||
|
||||
```shell
|
||||
helm install kubeshark kubeshark/kubeshark \
|
||||
--set tap.ingress.enabled=true \
|
||||
--set tap.ingress.host=ks.svc.cluster.local \
|
||||
--set "tap.ingress.approveddomains={gmail.com}" \
|
||||
--set license=LICENSE_GOES_HERE
|
||||
--set license=YOUR_LICENSE_GOES_HERE
|
||||
```
|
||||
|
||||
You can get your license [here](https://console.kubeshark.co/).
|
||||
Get your license from Kubeshark's [Admin Console](https://console.kubeshark.com/).
|
||||
|
||||
## Installing with Persistent Storage Enabled
|
||||
## Installing with Ingress (EKS) enabled
|
||||
|
||||
```shell
|
||||
helm install kubeshark kubeshark/kubeshark \
|
||||
--set tap.persistentstorage=true \
|
||||
--set license=LICENSE_GOES_HERE
|
||||
helm install kubeshark kubeshark/kubeshark -f values.yaml
|
||||
```
|
||||
|
||||
You can get your license [here](https://console.kubeshark.co/).
|
||||
Set this `values.yaml`:
```yaml
|
||||
tap:
|
||||
ingress:
|
||||
enabled: true
|
||||
className: "alb"
|
||||
host: ks.example.com
|
||||
tls: []
|
||||
annotations:
|
||||
alb.ingress.kubernetes.io/certificate-arn: arn:aws:acm:us-east-1:7..8:certificate/b...65c
|
||||
alb.ingress.kubernetes.io/target-type: ip
|
||||
alb.ingress.kubernetes.io/scheme: internet-facing
|
||||
```
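
For orientation, the same ingress settings expressed against the `IngressConfig` struct introduced in this change (an illustrative Go literal, not something the chart consumes directly; the certificate ARN is a placeholder):

```go
// Illustrative: the EKS/ALB ingress values above, expressed as the CLI's
// IngressConfig struct from this change. The ARN is a placeholder.
package main

import (
	"fmt"

	"github.com/kubeshark/kubeshark/config/configStructs"
)

func main() {
	ing := configStructs.IngressConfig{
		Enabled:   true,
		ClassName: "alb",
		Host:      "ks.example.com",
		Annotations: map[string]string{
			"alb.ingress.kubernetes.io/certificate-arn": "arn:aws:acm:us-east-1:ACCOUNT:certificate/ID", // placeholder
			"alb.ingress.kubernetes.io/target-type":     "ip",
			"alb.ingress.kubernetes.io/scheme":          "internet-facing",
		},
	}
	fmt.Printf("%+v\n", ing)
}
```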
## Disabling IPV6
|
||||
|
||||
@@ -82,3 +99,446 @@ Not all have IPV6 enabled, hence this has to be disabled as follows:
|
||||
helm install kubeshark kubeshark/kubeshark \
|
||||
--set tap.ipv6=false
|
||||
```
|
||||
|
||||
## Prometheus Metrics
|
||||
|
||||
Please refer to [metrics](./metrics.md) documentation for details.
|
||||
|
||||
## Override Tag, Tags, Images
|
||||
|
||||
In addition to using a private registry, you can override the global image tag, individual image tags, and individual image names.
|
||||
|
||||
Example for overriding image names:
|
||||
|
||||
```yaml
|
||||
docker:
|
||||
overrideImage:
|
||||
worker: docker.io/kubeshark/worker:v52.3.87
|
||||
front: docker.io/kubeshark/front:v52.3.87
|
||||
hub: docker.io/kubeshark/hub:v52.3.87
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
| Parameter | Description | Default |
|
||||
|-------------------------------------------|-----------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| `tap.docker.registry` | Docker registry to pull from | `docker.io/kubeshark` |
|
||||
| `tap.docker.tag` | Tag of the Docker images | `latest` |
|
||||
| `tap.docker.tagLocked` | Lock the Docker image tag to prevent automatic upgrades to the latest branch image version. If `false`, the latest minor tag is used. | `true` |
|
||||
| `tap.docker.imagePullPolicy` | Kubernetes image pull policy | `Always` |
|
||||
| `tap.docker.imagePullSecrets` | Kubernetes secrets to pull the images | `[]` |
|
||||
| `tap.docker.overrideImage` | Can be used to directly override image names | `""` |
|
||||
| `tap.docker.overrideTag` | Can be used to override image tags | `""` |
|
||||
| `tap.proxy.hub.srvPort` | Hub server port. Change if already occupied. | `8898` |
|
||||
| `tap.proxy.worker.srvPort` | Worker server port. Change if already occupied.| `48999` |
|
||||
| `tap.proxy.front.port` | Front service port. Change if already occupied.| `8899` |
|
||||
| `tap.proxy.host` | Change to 0.0.0.0 to open up to the world. | `127.0.0.1` |
|
||||
| `tap.regex` | Target (process traffic from) pods that match regex | `.*` |
|
||||
| `tap.namespaces` | Target pods in namespaces | `[]` |
|
||||
| `tap.excludedNamespaces` | Exclude pods in namespaces | `[]` |
|
||||
| `tap.bpfOverride` | When using AF_PACKET as a traffic capture backend, override any existing pod targeting rules and set explicit BPF expression (e.g. `net 0.0.0.0/0`). | `[]` |
|
||||
| `tap.capture.dissection.enabled` | Set to `true` to have L7 protocol dissection start automatically. When set to `false`, dissection is disabled by default. This property can be dynamically controlled via the dashboard. | `true` |
|
||||
| `tap.capture.dissection.stopAfter` | Set to a duration (e.g. `30s`) to have L7 dissection stop after no activity. | `5m` |
|
||||
| `tap.capture.raw.enabled` | Enable raw capture of packets and syscalls to disk for offline analysis | `true` |
|
||||
| `tap.capture.raw.storageSize` | Maximum storage size for raw capture files (supports K8s quantity format: `1Gi`, `500Mi`, etc.) | `1Gi` |
|
||||
| `tap.capture.dbMaxSize` | Maximum size for capture database (e.g., `4Gi`, `2000Mi`). When empty, automatically uses 80% of allocated storage (`tap.storageLimit`). | `""` |
|
||||
| `tap.snapshots.storageClass` | Storage class for snapshots volume. When empty, uses `emptyDir`. When set, creates a PVC with this storage class | `""` |
|
||||
| `tap.snapshots.storageSize` | Storage size for snapshots volume (supports K8s quantity format: `1Gi`, `500Mi`, etc.) | `10Gi` |
|
||||
| `tap.release.repo` | URL of the Helm chart repository | `https://helm.kubeshark.com` |
|
||||
| `tap.release.name` | Helm release name | `kubeshark` |
|
||||
| `tap.release.namespace` | Helm release namespace | `default` |
|
||||
| `tap.persistentStorage` | Use `persistentVolumeClaim` instead of `emptyDir` | `false` |
|
||||
| `tap.persistentStorageStatic` | Use static persistent volume provisioning (explicitly defined `PersistentVolume` ) | `false` |
|
||||
| `tap.persistentStoragePvcVolumeMode` | Set the pvc volume mode (Filesystem\|Block) | `Filesystem` |
|
||||
| `tap.efsFileSytemIdAndPath` | [EFS file system ID and, optionally, subpath and/or access point](https://github.com/kubernetes-sigs/aws-efs-csi-driver/blob/master/examples/kubernetes/access_points/README.md) `<FileSystemId>:<Path>:<AccessPointId>` | "" |
|
||||
| `tap.storageLimit` | Limit of either the `emptyDir` or `persistentVolumeClaim` | `5Gi` |
|
||||
| `tap.storageClass` | Storage class of the `PersistentVolumeClaim` | `standard` |
|
||||
| `tap.dryRun` | Preview of all pods matching the regex, without tapping them | `false` |
|
||||
| `tap.dnsConfig.nameservers` | Nameservers to use for DNS resolution | `[]` |
|
||||
| `tap.dnsConfig.searches` | Search domains to use for DNS resolution | `[]` |
|
||||
| `tap.dnsConfig.options` | DNS options to use for DNS resolution | `[]` |
|
||||
| `tap.resources.hub.limits.cpu` | CPU limit for hub | `""` (no limit) |
|
||||
| `tap.resources.hub.limits.memory` | Memory limit for hub | `5Gi` |
|
||||
| `tap.resources.hub.requests.cpu` | CPU request for hub | `50m` |
|
||||
| `tap.resources.hub.requests.memory` | Memory request for hub | `50Mi` |
|
||||
| `tap.resources.sniffer.limits.cpu` | CPU limit for sniffer | `""` (no limit) |
|
||||
| `tap.resources.sniffer.limits.memory` | Memory limit for sniffer | `3Gi` |
|
||||
| `tap.resources.sniffer.requests.cpu` | CPU request for sniffer | `50m` |
|
||||
| `tap.resources.sniffer.requests.memory` | Memory request for sniffer | `50Mi` |
|
||||
| `tap.resources.tracer.limits.cpu` | CPU limit for tracer | `""` (no limit) |
|
||||
| `tap.resources.tracer.limits.memory` | Memory limit for tracer | `3Gi` |
|
||||
| `tap.resources.tracer.requests.cpu` | CPU request for tracer | `50m` |
|
||||
| `tap.resources.tracer.requests.memory` | Memory request for tracer | `50Mi` |
|
||||
| `tap.probes.hub.initialDelaySeconds` | Initial delay before probing the hub | `15` |
|
||||
| `tap.probes.hub.periodSeconds` | Period between probes for the hub | `10` |
|
||||
| `tap.probes.hub.successThreshold` | Number of successful probes before considering the hub healthy | `1` |
|
||||
| `tap.probes.hub.failureThreshold` | Number of failed probes before considering the hub unhealthy | `3` |
|
||||
| `tap.probes.sniffer.initialDelaySeconds` | Initial delay before probing the sniffer | `15` |
|
||||
| `tap.probes.sniffer.periodSeconds` | Period between probes for the sniffer | `10` |
|
||||
| `tap.probes.sniffer.successThreshold` | Number of successful probes before considering the sniffer healthy | `1` |
|
||||
| `tap.probes.sniffer.failureThreshold` | Number of failed probes before considering the sniffer unhealthy | `3` |
|
||||
| `tap.serviceMesh` | Capture traffic from service meshes like Istio, Linkerd, Consul, etc. | `true` |
|
||||
| `tap.tls` | Capture the encrypted/TLS traffic from cryptography libraries like OpenSSL | `true` |
|
||||
| `tap.disableTlsLog` | Suppress logging for TLS/eBPF | `true` |
|
||||
| `tap.labels` | Kubernetes labels to apply to all Kubeshark resources | `{}` |
|
||||
| `tap.annotations` | Kubernetes annotations to apply to all Kubeshark resources | `{}` |
|
||||
| `tap.nodeSelectorTerms.workers` | Node selector terms for workers components | `[{"matchExpressions":[{"key":"kubernetes.io/os","operator":"In","values":["linux"]}]}]` |
|
||||
| `tap.nodeSelectorTerms.hub` | Node selector terms for hub component | `[{"matchExpressions":[{"key":"kubernetes.io/os","operator":"In","values":["linux"]}]}]` |
|
||||
| `tap.nodeSelectorTerms.front` | Node selector terms for front-end component | `[{"matchExpressions":[{"key":"kubernetes.io/os","operator":"In","values":["linux"]}]}]` |
|
||||
| `tap.priorityClass` | Priority class name for Kubeshark components | `""` |
|
||||
| `tap.tolerations.workers` | Tolerations for workers components | `[{"operator": "Exists", "effect": "NoExecute"}]` |
|
||||
| `tap.tolerations.hub` | Tolerations for hub component | `[]` |
|
||||
| `tap.tolerations.front` | Tolerations for front-end component | `[]` |
|
||||
| `tap.auth.enabled` | Enable authentication | `false` |
|
||||
| `tap.auth.type` | Authentication type (1 option available: `saml`) | `saml` |
|
||||
| `tap.auth.approvedEmails` | List of approved email addresses for authentication | `[]` |
|
||||
| `tap.auth.approvedDomains` | List of approved email domains for authentication | `[]` |
|
||||
| `tap.auth.saml.idpMetadataUrl` | SAML IDP metadata URL <br/>(effective, if `tap.auth.type = saml`) | `` |
|
||||
| `tap.auth.saml.x509crt` | A self-signed X.509 `.cert` contents <br/>(effective, if `tap.auth.type = saml`) | `` |
|
||||
| `tap.auth.saml.x509key` | A self-signed X.509 `.key` contents <br/>(effective, if `tap.auth.type = saml`) | `` |
|
||||
| `tap.auth.saml.roleAttribute` | A SAML attribute name corresponding to user's authorization role <br/>(effective, if `tap.auth.type = saml`) | `role` |
|
||||
| `tap.auth.saml.roles` | A list of SAML authorization roles and their permissions <br/>(effective, if `tap.auth.type = saml`) | `{"admin":{"canDownloadPCAP":true,"canUpdateTargetedPods":true,"canUseScripting":true, "scriptingPermissions":{"canSave":true, "canActivate":true, "canDelete":true}, "canStopTrafficCapturing":true, "canControlDissection":true, "filter":"","showAdminConsoleLink":true}}` |
|
||||
| `tap.ingress.enabled` | Enable `Ingress` | `false` |
|
||||
| `tap.ingress.className` | Ingress class name | `""` |
|
||||
| `tap.ingress.host` | Host of the `Ingress` | `ks.svc.cluster.local` |
|
||||
| `tap.ingress.tls` | `Ingress` TLS configuration | `[]` |
|
||||
| `tap.ingress.annotations` | `Ingress` annotations | `{}` |
|
||||
| `tap.routing.front.basePath` | Set this value to serve `front` under specific base path. Example: `/custompath` (forward slash must be present) | `""` |
|
||||
| `tap.ipv6` | Enable IPv6 support for the front-end | `true` |
|
||||
| `tap.debug` | Enable debug mode | `false` |
|
||||
| `tap.telemetry.enabled` | Enable anonymous usage statistics collection | `true` |
|
||||
| `tap.resourceGuard.enabled` | Enable resource guard worker process, which watches RAM/disk usage and enables/disables traffic capture based on available resources | `false` |
|
||||
| `tap.secrets` | List of secrets to be used as source for environment variables (e.g. `kubeshark-license`) | `[]` |
|
||||
| `tap.sentry.enabled` | Enable sending of error logs to Sentry | `true` (only for qualified users) |
|
||||
| `tap.sentry.environment` | Sentry environment to label error logs with | `production` |
|
||||
| `tap.defaultFilter` | Sets the default dashboard KFL filter (e.g. `http`). By default, this value is set to filter out noisy protocols such as DNS, UDP, ICMP and TCP. The user can easily change this, **temporarily**, in the Dashboard. For a permanent change, you should change this value in the `values.yaml` or `config.yaml` file. | `""` |
|
||||
| `tap.liveConfigMapChangesDisabled` | If set to `true`, all user functionality (scripting, targeting settings, global & default KFL modification, traffic recording, traffic capturing on/off, protocol dissectors) involving dynamic ConfigMap changes from UI will be disabled | `false` |
|
||||
| `tap.globalFilter` | Prepends to any KFL filter and can be used to limit what is visible in the dashboard. For example, `redact("request.headers.Authorization")` will redact the appropriate field. Another example `!dns` will not show any DNS traffic. | `""` |
|
||||
| `tap.metrics.port` | Pod port used to expose Prometheus metrics | `49100` |
|
||||
| `tap.enabledDissectors` | This is an array of strings representing the list of supported protocols. Remove or comment out redundant protocols (e.g., dns).| The default list excludes: `udp` and `tcp` |
|
||||
| `tap.mountBpf` | BPF filesystem needs to be mounted for eBPF to work properly. This helm value determines whether Kubeshark will attempt to mount the filesystem. This option is not required if filesystem is already mounts. │ `true`|
|
||||
| `tap.hostNetwork` | Enable host network mode for worker DaemonSet pods. When enabled, worker pods use the host's network namespace for direct network access. | `true` |
|
||||
| `tap.gitops.enabled` | Enable GitOps functionality. This will allow you to use GitOps to manage your Kubeshark configuration. | `false` |
|
||||
| `logs.file` | Logs dump path | `""` |
|
||||
| `pcapdump.enabled` | Enable recording of all traffic captured according to other parameters. Whatever Kubeshark captures, considering pod targeting rules, will be stored in pcap files ready to be viewed by tools | `false` |
|
||||
| `pcapdump.maxTime` | The time window into the past that will be stored. Older traffic will be discarded. | `2h` |
|
||||
| `pcapdump.maxSize` | The maximum storage size the PCAP files will consume. Old files that cause to surpass storage consumption will get discarded. | `500MB` |
|
||||
| `kube.configPath` | Path to the `kubeconfig` file (`$HOME/.kube/config`) | `""` |
|
||||
| `kube.context` | Kubernetes context to use for the deployment | `""` |
|
||||
| `dumpLogs` | Enable dumping of logs | `false` |
|
||||
| `headless` | Enable running in headless mode | `false` |
|
||||
| `license` | License key for the Pro/Enterprise edition | `""` |
|
||||
| `scripting.enabled` | Enables scripting | `false` |
|
||||
| `scripting.env` | Environment variables for the scripting | `{}` |
|
||||
| `scripting.source` | Source directory of the scripts | `""` |
|
||||
| `scripting.watchScripts` | Enable watch mode for the scripts in source directory | `true` |
|
||||
| `timezone` | IANA time zone applied to time shown in the front-end | `""` (local time zone applies) |
|
||||
| `supportChatEnabled` | Enable real-time support chat channel based on Intercom | `false` |
|
||||
| `internetConnectivity` | Turns off API requests that are dependent on Internet connectivity such as `telemetry` and `online-support`. | `true` |
|
||||
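For reference, a minimal `values.yaml` overriding a handful of the settings above might look like the following (a sketch only; pick the keys relevant to your environment):

```yaml
# values.yaml (example overrides; adjust to your needs)
tap:
  resources:
    sniffer:
      limits:
        memory: 1Gi
  serviceMesh: true
  tls: true
  defaultFilter: "http"
  telemetry:
    enabled: false
license: ""
```

Apply it with `helm install kubeshark kubeshark/kubeshark -f values.yaml` (or `helm upgrade --install` for an existing release).
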
# Installing with SAML enabled

### Prerequisites:

##### 1. Generate X.509 certificate & key (TL;DR: https://ubuntu.com/server/docs/security-certificates)

**Example:**
```
openssl genrsa -out mykey.key 2048
openssl req -new -key mykey.key -out mycsr.csr
openssl x509 -signkey mykey.key -in mycsr.csr -req -days 365 -out mycert.crt
```

**What you get:**
- `mycert.crt` - use it for `tap.auth.saml.x509crt`
- `mykey.key` - use it for `tap.auth.saml.x509key`

##### 2. Prepare your SAML IDP

You should set up the required SAML IDP (Google, Auth0, your custom IDP, etc.)

During setup, an IDP provider will typically ask you to enter:
- Metadata URL
- ACS URL (Assertion Consumer Service URL, aka Callback URL)
- SLO URL (Single Logout URL)

Correspondingly, you will enter these (if you run the default Kubeshark setup):
- [http://localhost:8899/saml/metadata](http://localhost:8899/saml/metadata)
- [http://localhost:8899/saml/acs](http://localhost:8899/saml/acs)
- [http://localhost:8899/saml/slo](http://localhost:8899/saml/slo)

Otherwise, if you have `tap.ingress.enabled: true`, change the protocol & domain accordingly (example domain shown):
- [https://kubeshark.example.com/saml/metadata](https://kubeshark.example.com/saml/metadata)
- [https://kubeshark.example.com/saml/acs](https://kubeshark.example.com/saml/acs)
- [https://kubeshark.example.com/saml/slo](https://kubeshark.example.com/saml/slo)

Install with:

```shell
helm install kubeshark kubeshark/kubeshark -f values.yaml
```

using this `values.yaml`:
```yaml
tap:
  auth:
    enabled: true
    type: saml
    saml:
      idpMetadataUrl: "https://ti..th0.com/samlp/metadata/MpWiDCM..qdnDG"
      x509crt: |
        -----BEGIN CERTIFICATE-----
        MIIDlTCCAn0CFFRUzMh+dZvp+FvWd4gRaiBVN8EvMA0GCSqGSIb3DQEBCwUAMIGG
        MSQwIgYJKoZIhvcNAQkBFhV3ZWJtYXN0ZXJAZXhhbXBsZS5jb20wHhcNMjMxMjI4
        ........<redacted: please, generate your own X.509 cert>........
        ZMzM7YscqZwoVhTOhrD4/5nIfOD/hTWG/MBe2Um1V1IYF8aVEllotTKTgsF6ZblA
        miCOgl6lIlZy
        -----END CERTIFICATE-----
      x509key: |
        -----BEGIN PRIVATE KEY-----
        MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDlgDFKsRHj+mok
        euOF0IpwToOEpQGtafB75ytv3psD/tQAzEIug+rkDriVvsfcvafj0qcaTeYvnCoz
        ........<redacted: please, generate your own X.509 key>.........
        sUpBCu0E3nRJM/QB2ui5KhNR7uvPSL+kSsaEq19/mXqsL+mRi9aqy2wMEvUSU/kt
        UaV5sbRtTzYLxpOSQyi8CEFA+A==
        -----END PRIVATE KEY-----
```
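
Alternatively, instead of pasting the PEM contents inline, the certificate and key files can be passed with Helm's `--set-file` flag. This is a sketch under the assumption that the files from step 1 are in the current directory:

```shell
helm install kubeshark kubeshark/kubeshark \
  --set tap.auth.enabled=true \
  --set tap.auth.type=saml \
  --set tap.auth.saml.idpMetadataUrl="https://<your-idp>/metadata" \
  --set-file tap.auth.saml.x509crt=./mycert.crt \
  --set-file tap.auth.saml.x509key=./mykey.key
```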
# Installing with Dex OIDC authentication

[**Click here to see full docs**](https://docs.kubeshark.com/en/saml#installing-with-oidc-enabled-dex-idp).

Choose this option if **you already have a running instance** of Dex in your cluster &
you want to set up Dex OIDC authentication for Kubeshark users.

Kubeshark supports authentication using [Dex - A Federated OpenID Connect Provider](https://dexidp.io/).
Dex is an abstraction layer designed for integrating a wide variety of Identity Providers.

**Requirement:**
Your Dex IdP must have a publicly accessible URL.

### Prerequisites:

**1. If you configured Ingress for Kubeshark:**

(see section: "Installing with Ingress (EKS) enabled")

The OAuth2 callback URL is: <br/>
`https://<kubeshark-ingress-hostname>/api/oauth2/callback`

**2. If you did not configure Ingress for Kubeshark:**

The OAuth2 callback URL is: <br/>
`http://0.0.0.0:8899/api/oauth2/callback`

Use the chosen OAuth2 callback URL to replace `<your-kubeshark-host>` in Step 3.

**3. Add this static client to your Dex IdP configuration (`config.yaml`):**
```yaml
staticClients:
  - id: kubeshark
    secret: create your own client password
    name: Kubeshark
    redirectURIs:
      - https://<your-kubeshark-host>/api/oauth2/callback
```

**Final step:**

Add these helm values to set up OIDC authentication powered by your Dex IdP:

```yaml
# values.yaml

tap:
  auth:
    enabled: true
    type: dex
    dexOidc:
      issuer: <put Dex IdP issuer URL here>
      clientId: kubeshark
      clientSecret: create your own client password
      refreshTokenLifetime: "3960h" # 165 days
      oauth2StateParamExpiry: "10m"
      bypassSslCaCheck: false
```
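
Before installing, you may want to verify that the URL you put in `tap.auth.dexOidc.issuer` is reachable and serves OIDC discovery metadata. A quick sanity check (the hostname is a placeholder; Dex exposes the standard discovery document under the issuer path):

```shell
curl -s https://<your-dex-issuer>/.well-known/openid-configuration | head
```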

---

**Note:**<br/>
Set `tap.auth.dexOidc.bypassSslCaCheck: true`
to allow Kubeshark to communicate with a Dex IdP whose certificate is signed by an unknown SSL Certificate Authority.

This setting prevents SSL CA-related errors such as:<br/>
`tls: failed to verify certificate: x509: certificate signed by unknown authority`

---

Once you run `helm install kubeshark kubeshark/kubeshark -f ./values.yaml`, Kubeshark will be installed with (Dex) OIDC authentication enabled.

---

# Installing your own Dex IdP along with Kubeshark

Choose this option if **you need to deploy an instance of Dex IdP** along with Kubeshark &
set up Dex OIDC authentication for Kubeshark users.

Depending on whether Ingress is enabled or disabled, your Dex configuration might differ.

**Requirement:**
Please configure Ingress using `tap.ingress` for your Kubeshark installation. For example:

```yaml
tap:
  ingress:
    enabled: true
    className: "alb"
    host: ks.example.com
    tls: []
    annotations:
      alb.ingress.kubernetes.io/certificate-arn: arn:aws:acm:us-east-1:7..8:certificate/b...65c
      alb.ingress.kubernetes.io/target-type: ip
      alb.ingress.kubernetes.io/scheme: internet-facing
```

The following Dex settings will have these values:

| Setting | Value |
|-------------------------------------------------------|----------------------------------------------|
| `tap.auth.dexOidc.issuer` | `https://ks.example.com/dex` |
| `tap.auth.dexConfig.issuer` | `https://ks.example.com/dex` |
| `tap.auth.dexConfig.staticClients -> redirectURIs` | `https://ks.example.com/api/oauth2/callback` |
| `tap.auth.dexConfig.connectors -> config.redirectURI` | `https://ks.example.com/dex/callback` |

---

### Before proceeding with Dex IdP installation:

Please make sure to prepare the following things first.

1. Choose **[Connectors](https://dexidp.io/docs/connectors/)** to enable in Dex IdP.
   - i.e. how many kinds of "Log in with ..." options you'd like to offer your users
   - You will need to specify connectors in `tap.auth.dexConfig.connectors`
2. Choose the type of **[Storage](https://dexidp.io/docs/configuration/storage/)** to use in Dex IdP.
   - You will need to specify storage settings in `tap.auth.dexConfig.storage`
   - default: `memory`
3. Decide on the OAuth2 `?state=` param expiration time:
   - field: `tap.auth.dexOidc.oauth2StateParamExpiry`
   - default: `10m` (10 minutes)
   - valid time units are `s`, `m`, `h`
4. Decide on the refresh token expiration:
   - field 1: `tap.auth.dexOidc.refreshTokenLifetime`
   - field 2: `tap.auth.dexConfig.expiry.refreshTokens.absoluteLifetime`
   - default: `3960h` (165 days)
   - valid time units are `s`, `m`, `h`
5. Create a unique & secure password to set in these fields (see the sketch after this list):
   - field 1: `tap.auth.dexOidc.clientSecret`
   - field 2: `tap.auth.dexConfig.staticClients -> secret`
   - the password must be the same for these 2 fields
6. Discover more possibilities of **[Dex Configuration](https://dexidp.io/docs/configuration/)**
   - if you decide to include more configuration options, make sure to add them into `tap.auth.dexConfig`
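
For item 5, any sufficiently random string works as the shared client secret; one possible way to generate it (assuming `openssl` is available) is:

```shell
# Generate a random 32-byte hex string to use as the Dex client secret
openssl rand -hex 32
```

Put the resulting value into both `tap.auth.dexOidc.clientSecret` and the `secret` field of the `kubeshark` entry under `tap.auth.dexConfig.staticClients`.
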
---

### Once you are ready with all the points described above:

Use these helm `values.yaml` fields to:
- Deploy your own instance of Dex IdP along with Kubeshark
- Enable OIDC authentication for Kubeshark users

Make sure to:
- Replace `<your-ingress-hostname>` with the correct Kubeshark Ingress host (`tap.ingress.host`).
- Refer to the section **Installing with Ingress (EKS) enabled** to find out how to configure the Ingress host.

Helm `values.yaml`:
```yaml
tap:
  auth:
    enabled: true
    type: dex
    dexOidc:
      issuer: https://<your-ingress-hostname>/dex

      # Client ID/secret must be taken from `tap.auth.dexConfig.staticClients -> id/secret`
      clientId: kubeshark
      clientSecret: create your own client password

      refreshTokenLifetime: "3960h" # 165 days
      oauth2StateParamExpiry: "10m"
      bypassSslCaCheck: false
    dexConfig:
      # This field is REQUIRED!
      #
      # The base path of Dex and the external name of the OpenID Connect service.
      # This is the canonical URL that all clients MUST use to refer to Dex. If a
      # path is provided, Dex's HTTP service will listen at a non-root URL.
      issuer: https://<your-ingress-hostname>/dex

      # Expiration configuration for tokens, signing keys, etc.
      expiry:
        refreshTokens:
          validIfNotUsedFor: "2160h" # 90 days
          absoluteLifetime: "3960h" # 165 days

      # This field is REQUIRED!
      #
      # The storage configuration determines where Dex stores its state.
      # See the documentation (https://dexidp.io/docs/storage/) for further information.
      storage:
        type: memory

      # This field is REQUIRED!
      #
      # Attention:
      # Do not change this field and its values.
      # This field is required for internal Kubeshark-to-Dex communication.
      #
      # HTTP service configuration
      web:
        http: 0.0.0.0:5556

      # This field is REQUIRED!
      #
      # Attention:
      # Do not change this field and its values.
      # This field is required for internal Kubeshark-to-Dex communication.
      #
      # Telemetry configuration
      telemetry:
        http: 0.0.0.0:5558

      # This field is REQUIRED!
      #
      # Static clients registered in Dex by default.
      staticClients:
        - id: kubeshark
          secret: create your own client password
          name: Kubeshark
          redirectURIs:
            - https://<your-ingress-hostname>/api/oauth2/callback

      # Enable the password database.
      # It's a "virtual" connector (identity provider) that stores
      # login credentials in Dex's store.
      enablePasswordDB: true

      # Connectors are used to authenticate users against upstream identity providers.
      # See the documentation (https://dexidp.io/docs/connectors/) for further information.
      #
      # Attention:
      # When you define a new connector, `config.redirectURI` must be:
      # https://<your-ingress-hostname>/dex/callback
      #
      # Example with Google connector:
      # connectors:
      #   - type: google
      #     id: google
      #     name: Google
      #     config:
      #       clientID: your Google Cloud Auth app client ID
      #       clientSecret: your Google Cloud Auth app client secret
      #       redirectURI: https://<your-ingress-hostname>/dex/callback
      connectors: []
```

helm-chart/metrics.md (new file)
@@ -0,0 +1,56 @@

# Metrics

Kubeshark provides metrics from the `worker` components.
They can be useful for monitoring and debugging purposes.

## Configuration

By default, Kubeshark uses port `49100` to expose metrics via the `kubeshark-worker-metrics` service.

If you use the [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack) community Helm chart, an additional scrape configuration for the Kubeshark worker metrics endpoint can be configured with these values:

```yaml
prometheus:
  enabled: true
  prometheusSpec:
    additionalScrapeConfigs: |
      - job_name: 'kubeshark-worker-metrics'
        kubernetes_sd_configs:
          - role: endpoints
        relabel_configs:
          - source_labels: [__meta_kubernetes_pod_name]
            target_label: pod
          - source_labels: [__meta_kubernetes_pod_node_name]
            target_label: node
          - source_labels: [__meta_kubernetes_endpoint_port_name]
            action: keep
            regex: ^metrics$
          - source_labels: [__address__, __meta_kubernetes_endpoint_port_number]
            action: replace
            regex: ([^:]+)(?::\d+)?
            replacement: $1:49100
            target_label: __address__
          - action: labelmap
            regex: __meta_kubernetes_service_label_(.+)
```

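To spot-check the endpoint without Prometheus, you can port-forward the metrics service and query it directly. A sketch, assuming Kubeshark is installed in the `kubeshark` namespace and the metrics are served on the conventional `/metrics` path:

```shell
kubectl -n kubeshark port-forward svc/kubeshark-worker-metrics 49100:49100 &
curl -s http://localhost:49100/metrics | grep '^kubeshark_'
```
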
## Available metrics

| Name | Type | Description |
| --- | --- | --- |
| kubeshark_received_packets_total | Counter | Total number of packets received |
| kubeshark_dropped_packets_total | Counter | Total number of packets dropped |
| kubeshark_dropped_chunks_total | Counter | Total number of dropped packet chunks |
| kubeshark_processed_bytes_total | Counter | Total number of bytes processed |
| kubeshark_tcp_packets_total | Counter | Total number of TCP packets |
| kubeshark_dns_packets_total | Counter | Total number of DNS packets |
| kubeshark_icmp_packets_total | Counter | Total number of ICMP packets |
| kubeshark_reassembled_tcp_payloads_total | Counter | Total number of reassembled TCP payloads |
| kubeshark_matched_pairs_total | Counter | Total number of matched pairs |
| kubeshark_dropped_tcp_streams_total | Counter | Total number of dropped TCP streams |
| kubeshark_live_tcp_streams | Gauge | Number of live TCP streams |
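
For example, a PromQL expression along these lines (a sketch built only from the counters listed above; the `node` label comes from the scrape configuration shown earlier) can approximate the per-node packet drop ratio over the last five minutes:

```
sum by (node) (rate(kubeshark_dropped_packets_total[5m]))
/
sum by (node) (rate(kubeshark_received_packets_total[5m]))
```
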
## Ready-to-use Dashboard

You can import a ready-to-use dashboard from [Grafana's Dashboards Portal](https://grafana.com/grafana/dashboards/21332-kubeshark-dashboard-v3-4/).

@@ -4,9 +4,15 @@ kind: ServiceAccount
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
annotations:
|
||||
{{- if .Values.tap.annotations }}
|
||||
annotations:
|
||||
{{- toYaml .Values.tap.annotations | nindent 4 }}
|
||||
{{- end }}
|
||||
name: {{ include "kubeshark.serviceAccountName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
{{- if .Values.tap.docker.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
{{- range .Values.tap.docker.imagePullSecrets }}
|
||||
- name: {{ . }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
@@ -4,11 +4,11 @@ kind: ClusterRole
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
annotations:
|
||||
{{- if .Values.tap.annotations }}
|
||||
annotations:
|
||||
{{- toYaml .Values.tap.annotations | nindent 4 }}
|
||||
{{- end }}
|
||||
name: kubeshark-cluster-role
|
||||
name: kubeshark-cluster-role-{{ .Release.Namespace }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
rules:
|
||||
- apiGroups:
|
||||
@@ -16,6 +16,7 @@ rules:
|
||||
- extensions
|
||||
- apps
|
||||
resources:
|
||||
- nodes
|
||||
- pods
|
||||
- services
|
||||
- endpoints
|
||||
@@ -24,3 +25,70 @@ rules:
|
||||
- list
|
||||
- get
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- namespaces
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- networking.k8s.io
|
||||
resources:
|
||||
- networkpolicies
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- create
|
||||
- update
|
||||
- delete
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
annotations:
|
||||
{{- if .Values.tap.annotations }}
|
||||
{{- toYaml .Values.tap.annotations | nindent 4 }}
|
||||
{{- end }}
|
||||
name: kubeshark-self-config-role
|
||||
namespace: {{ .Release.Namespace }}
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
- v1
|
||||
resourceNames:
|
||||
- kubeshark-secret
|
||||
- kubeshark-config-map
|
||||
- kubeshark-secret-default
|
||||
- kubeshark-config-map-default
|
||||
resources:
|
||||
- secrets
|
||||
- configmaps
|
||||
verbs:
|
||||
- create
|
||||
- get
|
||||
- watch
|
||||
- list
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- ""
|
||||
- v1
|
||||
resources:
|
||||
- secrets
|
||||
- configmaps
|
||||
- pods/log
|
||||
verbs:
|
||||
- create
|
||||
- get
|
||||
- apiGroups:
|
||||
- batch
|
||||
resources:
|
||||
- jobs
|
||||
verbs:
|
||||
- "*"
|
||||
@@ -1,6 +1,26 @@
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
{{- if .Values.tap.annotations }}
|
||||
annotations:
|
||||
{{- toYaml .Values.tap.annotations | nindent 4 }}
|
||||
{{- end }}
|
||||
name: kubeshark-cluster-role-binding-{{ .Release.Namespace }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: kubeshark-cluster-role-{{ .Release.Namespace }}
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ include "kubeshark.serviceAccountName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
@@ -8,12 +28,12 @@ metadata:
|
||||
{{- if .Values.tap.annotations }}
|
||||
{{- toYaml .Values.tap.annotations | nindent 4 }}
|
||||
{{- end }}
|
||||
name: kubeshark-cluster-role-binding
|
||||
name: kubeshark-self-config-role-binding
|
||||
namespace: {{ .Release.Namespace }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: kubeshark-cluster-role
|
||||
kind: Role
|
||||
name: kubeshark-self-config-role
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ include "kubeshark.serviceAccountName" . }}
|
||||
|
||||
@@ -1,58 +1,193 @@
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ include "kubeshark.fullname" . }}-hub
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
app.kubeshark.co/app: hub
|
||||
app.kubeshark.com/app: hub
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
annotations:
|
||||
{{- if .Values.tap.annotations }}
|
||||
annotations:
|
||||
{{- toYaml .Values.tap.annotations | nindent 4 }}
|
||||
{{- end }}
|
||||
name: {{ include "kubeshark.name" . }}-hub
|
||||
namespace: {{ .Release.Namespace }}
|
||||
spec:
|
||||
replicas: 1 # Set the desired number of replicas
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubeshark.co/app: hub
|
||||
app.kubeshark.com/app: hub
|
||||
{{- include "kubeshark.selectorLabels" . | nindent 6 }}
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app.kubeshark.co/app: hub
|
||||
sidecar.istio.io/inject: "false"
|
||||
app.kubeshark.com/app: hub
|
||||
{{- include "kubeshark.labels" . | nindent 8 }}
|
||||
spec:
|
||||
dnsPolicy: ClusterFirstWithHostNet
|
||||
serviceAccountName: {{ include "kubeshark.serviceAccountName" . }}
|
||||
{{- if .Values.tap.priorityClass }}
|
||||
priorityClassName: {{ .Values.tap.priorityClass | quote }}
|
||||
{{- end }}
|
||||
containers:
|
||||
- name: kubeshark-hub
|
||||
- name: hub
|
||||
command:
|
||||
- ./hub
|
||||
{{ .Values.tap.debug | ternary "- -debug" "" }}
|
||||
- -port
|
||||
- "8080"
|
||||
- -loglevel
|
||||
- '{{ .Values.logLevel | default "warning" }}'
|
||||
- -capture-stop-after
|
||||
- "{{ if hasKey .Values.tap.capture.dissection "stopAfter" }}{{ .Values.tap.capture.dissection.stopAfter }}{{ else }}5m{{ end }}"
|
||||
- -snapshot-size-limit
|
||||
- '{{ .Values.tap.snapshots.storageSize }}'
|
||||
{{- if .Values.tap.delayedDissection.image }}
|
||||
- -dissector-image
|
||||
- '{{ .Values.tap.delayedDissection.image }}'
|
||||
{{- end }}
|
||||
{{- if .Values.tap.delayedDissection.cpu }}
|
||||
- -dissector-cpu
|
||||
- '{{ .Values.tap.delayedDissection.cpu }}'
|
||||
{{- end }}
|
||||
{{- if .Values.tap.delayedDissection.memory }}
|
||||
- -dissector-memory
|
||||
- '{{ .Values.tap.delayedDissection.memory }}'
|
||||
{{- end }}
|
||||
{{- if .Values.tap.gitops.enabled }}
|
||||
- -gitops
|
||||
{{- end }}
|
||||
- -cloud-api-url
|
||||
- '{{ .Values.cloudApiUrl }}'
|
||||
{{- if .Values.tap.secrets }}
|
||||
envFrom:
|
||||
- configMapRef:
|
||||
name: kubeshark-config-map
|
||||
{{- range .Values.tap.secrets }}
|
||||
- secretRef:
|
||||
name: kubeshark-secret
|
||||
image: '{{ .Values.tap.docker.registry }}/hub:{{ .Values.tap.docker.tag }}'
|
||||
imagePullPolicy: {{ .Values.tap.docker.imagepullpolicy }}
|
||||
name: {{ . }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
env:
|
||||
- name: POD_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.name
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
- name: SENTRY_ENABLED
|
||||
value: '{{ (include "sentry.enabled" .) }}'
|
||||
- name: SENTRY_ENVIRONMENT
|
||||
value: '{{ .Values.tap.sentry.environment }}'
|
||||
- name: PROFILING_ENABLED
|
||||
value: '{{ .Values.tap.pprof.enabled }}'
|
||||
{{- if .Values.tap.docker.overrideImage.hub }}
|
||||
image: '{{ .Values.tap.docker.overrideImage.hub }}'
|
||||
{{- else if .Values.tap.docker.overrideTag.hub }}
|
||||
image: '{{ .Values.tap.docker.registry }}/hub:{{ .Values.tap.docker.overrideTag.hub }}'
|
||||
{{ else }}
|
||||
image: '{{ .Values.tap.docker.registry }}/hub:{{ not (eq .Values.tap.docker.tag "") | ternary .Values.tap.docker.tag (include "kubeshark.defaultVersion" .) }}'
|
||||
{{- end }}
|
||||
imagePullPolicy: {{ .Values.tap.docker.imagePullPolicy }}
|
||||
readinessProbe:
|
||||
periodSeconds: 1
|
||||
failureThreshold: 3
|
||||
successThreshold: 1
|
||||
initialDelaySeconds: 3
|
||||
periodSeconds: {{ .Values.tap.probes.hub.periodSeconds }}
|
||||
failureThreshold: {{ .Values.tap.probes.hub.failureThreshold }}
|
||||
successThreshold: {{ .Values.tap.probes.hub.successThreshold }}
|
||||
initialDelaySeconds: {{ .Values.tap.probes.hub.initialDelaySeconds }}
|
||||
tcpSocket:
|
||||
port: 80
|
||||
port: 8080
|
||||
livenessProbe:
|
||||
periodSeconds: 1
|
||||
failureThreshold: 3
|
||||
successThreshold: 1
|
||||
initialDelaySeconds: 3
|
||||
periodSeconds: {{ .Values.tap.probes.hub.periodSeconds }}
|
||||
failureThreshold: {{ .Values.tap.probes.hub.failureThreshold }}
|
||||
successThreshold: {{ .Values.tap.probes.hub.successThreshold }}
|
||||
initialDelaySeconds: {{ .Values.tap.probes.hub.initialDelaySeconds }}
|
||||
tcpSocket:
|
||||
port: 80
|
||||
port: 8080
|
||||
resources:
|
||||
limits:
|
||||
{{ if ne (toString .Values.tap.resources.hub.limits.cpu) "0" }}
|
||||
cpu: {{ .Values.tap.resources.hub.limits.cpu }}
|
||||
{{ end }}
|
||||
{{ if ne (toString .Values.tap.resources.hub.limits.memory) "0" }}
|
||||
memory: {{ .Values.tap.resources.hub.limits.memory }}
|
||||
{{ end }}
|
||||
requests:
|
||||
{{ if ne (toString .Values.tap.resources.hub.requests.cpu) "0" }}
|
||||
cpu: {{ .Values.tap.resources.hub.requests.cpu }}
|
||||
{{ end }}
|
||||
{{ if ne (toString .Values.tap.resources.hub.requests.memory) "0" }}
|
||||
memory: {{ .Values.tap.resources.hub.requests.memory }}
|
||||
{{ end }}
|
||||
volumeMounts:
|
||||
- name: saml-x509-volume
|
||||
mountPath: "/etc/saml/x509"
|
||||
readOnly: true
|
||||
- name: snapshots-volume
|
||||
mountPath: "/app/data/snapshots"
|
||||
{{- if gt (len .Values.tap.nodeSelectorTerms.hub) 0}}
|
||||
affinity:
|
||||
nodeAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
nodeSelectorTerms:
|
||||
{{- toYaml .Values.tap.nodeSelectorTerms.hub | nindent 12 }}
|
||||
{{- end }}
|
||||
{{- if or .Values.tap.dns.nameservers .Values.tap.dns.searches .Values.tap.dns.options }}
|
||||
dnsConfig:
|
||||
{{- if .Values.tap.dns.nameservers }}
|
||||
nameservers:
|
||||
{{- range .Values.tap.dns.nameservers }}
|
||||
- {{ . | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if .Values.tap.dns.searches }}
|
||||
searches:
|
||||
{{- range .Values.tap.dns.searches }}
|
||||
- {{ . | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if .Values.tap.dns.options }}
|
||||
options:
|
||||
{{- range .Values.tap.dns.options }}
|
||||
- name: {{ .name | quote }}
|
||||
{{- if .value }}
|
||||
value: {{ .value | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if .Values.tap.tolerations.hub }}
|
||||
tolerations:
|
||||
{{- range .Values.tap.tolerations.hub }}
|
||||
- key: {{ .key | quote }}
|
||||
operator: {{ .operator | quote }}
|
||||
{{- if .value }}
|
||||
value: {{ .value | quote }}
|
||||
{{- end }}
|
||||
{{- if .effect }}
|
||||
effect: {{ .effect | quote }}
|
||||
{{- end }}
|
||||
{{- if .tolerationSeconds }}
|
||||
tolerationSeconds: {{ .tolerationSeconds }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
volumes:
|
||||
- name: saml-x509-volume
|
||||
projected:
|
||||
sources:
|
||||
- secret:
|
||||
name: kubeshark-saml-x509-crt-secret
|
||||
items:
|
||||
- key: AUTH_SAML_X509_CRT
|
||||
path: kubeshark.crt
|
||||
- secret:
|
||||
name: kubeshark-saml-x509-key-secret
|
||||
items:
|
||||
- key: AUTH_SAML_X509_KEY
|
||||
path: kubeshark.key
|
||||
- name: snapshots-volume
|
||||
{{- if .Values.tap.snapshots.storageClass }}
|
||||
persistentVolumeClaim:
|
||||
claimName: {{ include "kubeshark.name" . }}-snapshots-pvc
|
||||
{{- else }}
|
||||
emptyDir:
|
||||
sizeLimit: {{ .Values.tap.snapshots.storageSize }}
|
||||
{{- end }}
|
||||
|
||||
@@ -3,10 +3,10 @@ apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels:
|
||||
app.kubeshark.co/app: hub
|
||||
app.kubeshark.com/app: hub
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
annotations:
|
||||
{{- if .Values.tap.annotations }}
|
||||
annotations:
|
||||
{{- toYaml .Values.tap.annotations | nindent 4 }}
|
||||
{{- end }}
|
||||
name: kubeshark-hub
|
||||
@@ -15,9 +15,7 @@ spec:
|
||||
ports:
|
||||
- name: kubeshark-hub
|
||||
port: 80
|
||||
targetPort: 80
|
||||
targetPort: 8080
|
||||
selector:
|
||||
app.kubeshark.co/app: hub
|
||||
app.kubeshark.com/app: hub
|
||||
type: ClusterIP
|
||||
status:
|
||||
loadBalancer: {}
|
||||
|
||||
@@ -1,35 +1,111 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ include "kubeshark.fullname" . }}-front
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
app.kubeshark.co/app: front
|
||||
app.kubeshark.com/app: front
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
annotations:
|
||||
{{- if .Values.tap.annotations }}
|
||||
annotations:
|
||||
{{- toYaml .Values.tap.annotations | nindent 4 }}
|
||||
{{- end }}
|
||||
name: {{ include "kubeshark.name" . }}-front
|
||||
namespace: {{ .Release.Namespace }}
|
||||
spec:
|
||||
replicas: 1 # Set the desired number of replicas
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubeshark.co/app: front
|
||||
app.kubeshark.com/app: front
|
||||
{{- include "kubeshark.selectorLabels" . | nindent 6 }}
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app.kubeshark.co/app: front
|
||||
app.kubeshark.com/app: front
|
||||
{{- include "kubeshark.labels" . | nindent 8 }}
|
||||
spec:
|
||||
containers:
|
||||
- env:
|
||||
- name: REACT_APP_DEFAULT_FILTER
|
||||
value: ' '
|
||||
- name: REACT_APP_HUB_HOST
|
||||
value: ' '
|
||||
- name: REACT_APP_HUB_PORT
|
||||
value: '{{ .Values.tap.ingress.enabled | ternary "/api" (print ":" .Values.tap.proxy.hub.port) }}'
|
||||
image: '{{ .Values.tap.docker.registry }}/front:{{ .Values.tap.docker.tag }}'
|
||||
imagePullPolicy: {{ .Values.tap.docker.imagepullpolicy }}
|
||||
- name: REACT_APP_AUTH_ENABLED
|
||||
value: '{{- if or (and .Values.cloudLicenseEnabled (not (empty .Values.license))) (not .Values.internetConnectivity) -}}
|
||||
{{ (and .Values.tap.auth.enabled (eq .Values.tap.auth.type "dex")) | ternary true false }}
|
||||
{{- else -}}
|
||||
{{ .Values.cloudLicenseEnabled | ternary "true" .Values.tap.auth.enabled }}
|
||||
{{- end }}'
|
||||
- name: REACT_APP_AUTH_TYPE
|
||||
value: '{{- if and .Values.cloudLicenseEnabled (not (eq .Values.tap.auth.type "dex")) -}}
|
||||
default
|
||||
{{- else -}}
|
||||
{{ .Values.tap.auth.type }}
|
||||
{{- end }}'
|
||||
- name: REACT_APP_COMPLETE_STREAMING_ENABLED
|
||||
value: '{{- if and (hasKey .Values.tap "dashboard") (hasKey .Values.tap.dashboard "completeStreamingEnabled") -}}
|
||||
{{ eq .Values.tap.dashboard.completeStreamingEnabled true | ternary "true" "false" }}
|
||||
{{- else -}}
|
||||
true
|
||||
{{- end }}'
|
||||
- name: REACT_APP_STREAMING_TYPE
|
||||
value: '{{ default "" (((.Values).tap).dashboard).streamingType }}'
|
||||
- name: REACT_APP_AUTH_SAML_IDP_METADATA_URL
|
||||
value: '{{ not (eq .Values.tap.auth.saml.idpMetadataUrl "") | ternary .Values.tap.auth.saml.idpMetadataUrl " " }}'
|
||||
- name: REACT_APP_TIMEZONE
|
||||
value: '{{ not (eq .Values.timezone "") | ternary .Values.timezone " " }}'
|
||||
- name: REACT_APP_SCRIPTING_HIDDEN
|
||||
value: '{{- if and .Values.scripting (eq (.Values.scripting.enabled | toString) "false") -}}
|
||||
true
|
||||
{{- else -}}
|
||||
false
|
||||
{{- end }}'
|
||||
- name: REACT_APP_SCRIPTING_DISABLED
|
||||
value: '{{- if .Values.tap.liveConfigMapChangesDisabled -}}
|
||||
{{- if .Values.demoModeEnabled -}}
|
||||
{{ .Values.demoModeEnabled | ternary false true }}
|
||||
{{- else -}}
|
||||
true
|
||||
{{- end }}
|
||||
{{- else -}}
|
||||
false
|
||||
{{- end }}'
|
||||
- name: REACT_APP_TARGETED_PODS_UPDATE_DISABLED
|
||||
value: '{{ .Values.tap.liveConfigMapChangesDisabled }}'
|
||||
- name: REACT_APP_PRESET_FILTERS_CHANGING_ENABLED
|
||||
value: '{{ .Values.tap.liveConfigMapChangesDisabled | ternary "false" "true" }}'
|
||||
- name: REACT_APP_BPF_OVERRIDE_DISABLED
|
||||
value: '{{ eq .Values.tap.packetCapture "af_packet" | ternary "false" "true" }}'
|
||||
- name: REACT_APP_RECORDING_DISABLED
|
||||
value: '{{ .Values.tap.liveConfigMapChangesDisabled }}'
|
||||
- name: REACT_APP_DISSECTION_ENABLED
|
||||
value: '{{ .Values.tap.capture.dissection.enabled | ternary "true" "false" }}'
|
||||
- name: REACT_APP_DISSECTION_CONTROL_ENABLED
|
||||
value: '{{- if and .Values.tap.liveConfigMapChangesDisabled (not .Values.tap.capture.dissection.enabled) -}}
|
||||
true
|
||||
{{- else -}}
|
||||
{{ not .Values.tap.liveConfigMapChangesDisabled | ternary "true" "false" }}
|
||||
{{- end -}}'
|
||||
- name: 'REACT_APP_CLOUD_LICENSE_ENABLED'
|
||||
value: '{{- if or (and .Values.cloudLicenseEnabled (not (empty .Values.license))) (not .Values.internetConnectivity) -}}
|
||||
"false"
|
||||
{{- else -}}
|
||||
{{ .Values.cloudLicenseEnabled }}
|
||||
{{- end }}'
|
||||
- name: REACT_APP_SUPPORT_CHAT_ENABLED
|
||||
value: '{{ and .Values.supportChatEnabled .Values.internetConnectivity | ternary "true" "false" }}'
|
||||
- name: REACT_APP_BETA_ENABLED
|
||||
value: '{{ default false .Values.betaEnabled | ternary "true" "false" }}'
|
||||
- name: REACT_APP_DISSECTORS_UPDATING_ENABLED
|
||||
value: '{{ .Values.tap.liveConfigMapChangesDisabled | ternary "false" "true" }}'
|
||||
- name: REACT_APP_RAW_CAPTURE_ENABLED
|
||||
value: '{{ .Values.tap.capture.raw.enabled | ternary "true" "false" }}'
|
||||
- name: REACT_APP_SENTRY_ENABLED
|
||||
value: '{{ (include "sentry.enabled" .) }}'
|
||||
- name: REACT_APP_SENTRY_ENVIRONMENT
|
||||
value: '{{ .Values.tap.sentry.environment }}'
|
||||
{{- if .Values.tap.docker.overrideImage.front }}
|
||||
image: '{{ .Values.tap.docker.overrideImage.front }}'
|
||||
{{- else if .Values.tap.docker.overrideTag.front }}
|
||||
image: '{{ .Values.tap.docker.registry }}/front:{{ .Values.tap.docker.overrideTag.front }}'
|
||||
{{ else }}
|
||||
image: '{{ .Values.tap.docker.registry }}/front:{{ not (eq .Values.tap.docker.tag "") | ternary .Values.tap.docker.tag (include "kubeshark.defaultVersion" .) }}'
|
||||
{{- end }}
|
||||
imagePullPolicy: {{ .Values.tap.docker.imagePullPolicy }}
|
||||
name: kubeshark-front
|
||||
livenessProbe:
|
||||
periodSeconds: 1
|
||||
@@ -37,14 +113,14 @@ spec:
|
||||
successThreshold: 1
|
||||
initialDelaySeconds: 3
|
||||
tcpSocket:
|
||||
port: 80
|
||||
port: 8080
|
||||
readinessProbe:
|
||||
periodSeconds: 1
|
||||
failureThreshold: 3
|
||||
successThreshold: 1
|
||||
initialDelaySeconds: 3
|
||||
tcpSocket:
|
||||
port: 80
|
||||
port: 8080
|
||||
timeoutSeconds: 1
|
||||
resources:
|
||||
limits:
|
||||
@@ -58,9 +134,59 @@ spec:
|
||||
mountPath: /etc/nginx/conf.d/default.conf
|
||||
subPath: default.conf
|
||||
readOnly: true
|
||||
{{- if gt (len .Values.tap.nodeSelectorTerms.front) 0}}
|
||||
affinity:
|
||||
nodeAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
nodeSelectorTerms:
|
||||
{{- toYaml .Values.tap.nodeSelectorTerms.front | nindent 12 }}
|
||||
{{- end }}
|
||||
{{- if or .Values.tap.dns.nameservers .Values.tap.dns.searches .Values.tap.dns.options }}
|
||||
dnsConfig:
|
||||
{{- if .Values.tap.dns.nameservers }}
|
||||
nameservers:
|
||||
{{- range .Values.tap.dns.nameservers }}
|
||||
- {{ . | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if .Values.tap.dns.searches }}
|
||||
searches:
|
||||
{{- range .Values.tap.dns.searches }}
|
||||
- {{ . | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if .Values.tap.dns.options }}
|
||||
options:
|
||||
{{- range .Values.tap.dns.options }}
|
||||
- name: {{ .name | quote }}
|
||||
{{- if .value }}
|
||||
value: {{ .value | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if .Values.tap.tolerations.front }}
|
||||
tolerations:
|
||||
{{- range .Values.tap.tolerations.front }}
|
||||
- key: {{ .key | quote }}
|
||||
operator: {{ .operator | quote }}
|
||||
{{- if .value }}
|
||||
value: {{ .value | quote }}
|
||||
{{- end }}
|
||||
{{- if .effect }}
|
||||
effect: {{ .effect | quote }}
|
||||
{{- end }}
|
||||
{{- if .tolerationSeconds }}
|
||||
tolerationSeconds: {{ .tolerationSeconds }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
volumes:
|
||||
- name: nginx-config
|
||||
configMap:
|
||||
name: kubeshark-nginx-config-map
|
||||
dnsPolicy: ClusterFirstWithHostNet
|
||||
serviceAccountName: {{ include "kubeshark.serviceAccountName" . }}
|
||||
{{- if .Values.tap.priorityClass }}
|
||||
priorityClassName: {{ .Values.tap.priorityClass | quote }}
|
||||
{{- end }}
|
||||
|
||||
@@ -4,8 +4,8 @@ kind: Service
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
annotations:
|
||||
{{- if .Values.tap.annotations }}
|
||||
annotations:
|
||||
{{- toYaml .Values.tap.annotations | nindent 4 }}
|
||||
{{- end }}
|
||||
name: kubeshark-front
|
||||
@@ -14,9 +14,7 @@ spec:
|
||||
ports:
|
||||
- name: kubeshark-front
|
||||
port: 80
|
||||
targetPort: 80
|
||||
targetPort: 8080
|
||||
selector:
|
||||
app.kubeshark.co/app: front
|
||||
app.kubeshark.com/app: front
|
||||
type: ClusterIP
|
||||
status:
|
||||
loadBalancer: {}
|
||||
|
||||
@@ -1,22 +1,44 @@
|
||||
---
|
||||
{{- if .Values.tap.persistentstorage }}
|
||||
{{- if .Values.tap.persistentStorageStatic }}
|
||||
apiVersion: v1
|
||||
kind: PersistentVolume
|
||||
metadata:
|
||||
name: kubeshark-persistent-volume
|
||||
namespace: {{ .Release.Namespace }}
|
||||
spec:
|
||||
capacity:
|
||||
storage: {{ .Values.tap.storageLimit }}
|
||||
volumeMode: Filesystem
|
||||
accessModes:
|
||||
- ReadWriteMany
|
||||
persistentVolumeReclaimPolicy: Retain
|
||||
storageClassName: {{ .Values.tap.storageClass }}
|
||||
{{- if .Values.tap.efsFileSytemIdAndPath }}
|
||||
csi:
|
||||
driver: efs.csi.aws.com
|
||||
volumeHandle: {{ .Values.tap.efsFileSytemIdAndPath }}
|
||||
{{ end }}
|
||||
---
|
||||
{{ end }}
|
||||
{{- if .Values.tap.persistentStorage }}
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
annotations:
|
||||
{{- if .Values.tap.annotations }}
|
||||
annotations:
|
||||
{{- toYaml .Values.tap.annotations | nindent 4 }}
|
||||
{{- end }}
|
||||
name: kubeshark-persistent-volume-claim
|
||||
namespace: {{ .Release.Namespace }}
|
||||
spec:
|
||||
volumeMode: {{ .Values.tap.persistentStoragePvcVolumeMode }}
|
||||
accessModes:
|
||||
- ReadWriteMany
|
||||
resources:
|
||||
requests:
|
||||
storage: {{ .Values.tap.storagelimit }}
|
||||
storageClassName: {{ .Values.tap.storageclass }}
|
||||
storage: {{ .Values.tap.storageLimit }}
|
||||
storageClassName: {{ .Values.tap.storageClass }}
|
||||
status: {}
|
||||
{{- end }}
|
||||
|
||||
22
helm-chart/templates/09-snapshots-pvc.yaml
Normal file
22
helm-chart/templates/09-snapshots-pvc.yaml
Normal file
@@ -0,0 +1,22 @@
|
||||
---
|
||||
{{- if .Values.tap.snapshots.storageClass }}
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
{{- if .Values.tap.annotations }}
|
||||
annotations:
|
||||
{{- toYaml .Values.tap.annotations | nindent 4 }}
|
||||
{{- end }}
|
||||
name: {{ include "kubeshark.name" . }}-snapshots-pvc
|
||||
namespace: {{ .Release.Namespace }}
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: {{ .Values.tap.snapshots.storageSize }}
|
||||
storageClassName: {{ .Values.tap.snapshots.storageClass }}
|
||||
status: {}
|
||||
{{- end }}
|
||||
@@ -3,11 +3,11 @@ apiVersion: apps/v1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
labels:
|
||||
app.kubeshark.co/app: worker
|
||||
app.kubeshark.com/app: worker
|
||||
sidecar.istio.io/inject: "false"
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
annotations:
|
||||
{{- if .Values.tap.annotations }}
|
||||
annotations:
|
||||
{{- toYaml .Values.tap.annotations | nindent 4 }}
|
||||
{{- end }}
|
||||
name: kubeshark-worker-daemon-set
|
||||
@@ -15,76 +15,204 @@ metadata:
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubeshark.co/app: worker
|
||||
{{- include "kubeshark.labels" . | nindent 6 }}
|
||||
app.kubeshark.com/app: worker
|
||||
{{- include "kubeshark.selectorLabels" . | nindent 6 }}
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app.kubeshark.co/app: worker
|
||||
app.kubeshark.com/app: worker
|
||||
{{- include "kubeshark.labels" . | nindent 8 }}
|
||||
name: kubeshark-worker-daemon-set
|
||||
namespace: kubeshark
|
||||
spec:
|
||||
{{- if or .Values.tap.mountBpf .Values.tap.persistentStorage}}
|
||||
initContainers:
|
||||
{{- end }}
|
||||
{{- if .Values.tap.mountBpf }}
|
||||
- command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- mkdir -p /sys/fs/bpf && mount | grep -q '/sys/fs/bpf' || mount -t bpf bpf /sys/fs/bpf
|
||||
{{- if .Values.tap.docker.overrideTag.worker }}
|
||||
image: '{{ .Values.tap.docker.registry }}/worker:{{ .Values.tap.docker.overrideTag.worker }}{{ include "kubeshark.dockerTagDebugVersion" . }}'
|
||||
{{ else }}
|
||||
image: '{{ .Values.tap.docker.registry }}/worker:{{ not (eq .Values.tap.docker.tag "") | ternary .Values.tap.docker.tag (include "kubeshark.defaultVersion" .) }}{{ include "kubeshark.dockerTagDebugVersion" . }}'
|
||||
{{- end }}
|
||||
imagePullPolicy: {{ .Values.tap.docker.imagePullPolicy }}
|
||||
name: mount-bpf
|
||||
securityContext:
|
||||
privileged: true
|
||||
volumeMounts:
|
||||
- mountPath: /sys
|
||||
name: sys
|
||||
mountPropagation: Bidirectional
|
||||
{{- end }}
|
||||
{{- if .Values.tap.persistentStorage }}
|
||||
- command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- mkdir -p /app/data/$NODE_NAME && rm -rf /app/data/$NODE_NAME/tracer_*
|
||||
{{- if .Values.tap.docker.overrideTag.worker }}
|
||||
image: '{{ .Values.tap.docker.registry }}/worker:{{ .Values.tap.docker.overrideTag.worker }}{{ include "kubeshark.dockerTagDebugVersion" . }}'
|
||||
{{ else }}
|
||||
image: '{{ .Values.tap.docker.registry }}/worker:{{ not (eq .Values.tap.docker.tag "") | ternary .Values.tap.docker.tag (include "kubeshark.defaultVersion" .) }}{{ include "kubeshark.dockerTagDebugVersion" . }}'
|
||||
{{- end }}
|
||||
imagePullPolicy: {{ .Values.tap.docker.imagePullPolicy }}
|
||||
name: cleanup-data-dir
|
||||
env:
|
||||
- name: NODE_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
volumeMounts:
|
||||
- mountPath: /app/data
|
||||
name: data
|
||||
{{- end }}
|
||||
containers:
|
||||
- command:
|
||||
- ./worker
|
||||
- -i
|
||||
- any
|
||||
- -port
|
||||
- '{{ .Values.tap.proxy.worker.srvport }}'
|
||||
- '{{ .Values.tap.proxy.worker.srvPort }}'
|
||||
- -metrics-port
|
||||
- '{{ .Values.tap.metrics.port }}'
|
||||
- -packet-capture
|
||||
- '{{ .Values.tap.packetCapture }}'
|
||||
- -loglevel
|
||||
- '{{ .Values.logLevel | default "warning" }}'
|
||||
{{- if not .Values.tap.tls }}
|
||||
- -disable-tracer
|
||||
{{- end }}
|
||||
{{- if .Values.tap.serviceMesh }}
|
||||
- -servicemesh
|
||||
{{ .Values.tap.tls | ternary "- -tls" "" }}
|
||||
{{- end }}
|
||||
- -procfs
|
||||
- /hostproc
|
||||
{{ .Values.tap.debug | ternary "- -debug" "" }}
|
||||
image: '{{ .Values.tap.docker.registry }}/worker:{{ .Values.tap.docker.tag }}'
|
||||
imagePullPolicy: {{ .Values.tap.docker.imagepullpolicy }}
|
||||
name: kubeshark-worker-daemon-set
|
||||
envFrom:
|
||||
- secretRef:
|
||||
name: kubeshark-secret
|
||||
{{- if .Values.tap.debug }}
|
||||
env:
|
||||
- name: PROFILING_ENABLED
|
||||
value: "true"
|
||||
- name: PROFILING_DUMP_PATH
|
||||
value: "pprof"
|
||||
- name: PROFILING_INTERVAL_SECONDS
|
||||
value: "60"
|
||||
{{- if .Values.tap.resourceGuard.enabled }}
|
||||
- -enable-resource-guard
|
||||
{{- end }}
|
||||
{{- if .Values.tap.watchdog.enabled }}
|
||||
- -enable-watchdog
|
||||
{{- end }}
|
||||
- -resolution-strategy
|
||||
- '{{ .Values.tap.misc.resolutionStrategy }}'
|
||||
- -staletimeout
|
||||
- '{{ .Values.tap.misc.staleTimeoutSeconds }}'
|
||||
- -storage-size
|
||||
- '{{ .Values.tap.storageLimit }}'
|
||||
- -capture-db-max-size
|
||||
- '{{ .Values.tap.capture.dbMaxSize }}'
|
||||
- -cloud-api-url
|
||||
- '{{ .Values.cloudApiUrl }}'
|
||||
{{- if .Values.tap.docker.overrideImage.worker }}
|
||||
image: '{{ .Values.tap.docker.overrideImage.worker }}'
|
||||
{{- else if .Values.tap.docker.overrideTag.worker }}
|
||||
image: '{{ .Values.tap.docker.registry }}/worker:{{ .Values.tap.docker.overrideTag.worker }}{{ include "kubeshark.dockerTagDebugVersion" . }}'
|
||||
{{ else }}
|
||||
image: '{{ .Values.tap.docker.registry }}/worker:{{ not (eq .Values.tap.docker.tag "") | ternary .Values.tap.docker.tag (include "kubeshark.defaultVersion" .) }}{{ include "kubeshark.dockerTagDebugVersion" . }}'
|
||||
{{- end }}
|
||||
imagePullPolicy: {{ .Values.tap.docker.imagePullPolicy }}
|
||||
name: sniffer
|
||||
ports:
|
||||
- containerPort: {{ .Values.tap.metrics.port }}
|
||||
protocol: TCP
|
||||
name: metrics
|
||||
env:
|
||||
- name: POD_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.name
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
- name: TCP_STREAM_CHANNEL_TIMEOUT_MS
|
||||
value: '{{ .Values.tap.misc.tcpStreamChannelTimeoutMs }}'
|
||||
- name: TCP_STREAM_CHANNEL_TIMEOUT_SHOW
|
||||
value: '{{ .Values.tap.misc.tcpStreamChannelTimeoutShow }}'
|
||||
- name: PROFILING_ENABLED
|
||||
value: '{{ .Values.tap.pprof.enabled }}'
|
||||
- name: SENTRY_ENABLED
|
||||
value: '{{ (include "sentry.enabled" .) }}'
|
||||
- name: SENTRY_ENVIRONMENT
|
||||
value: '{{ .Values.tap.sentry.environment }}'
|
||||
resources:
|
||||
limits:
|
||||
cpu: {{ .Values.tap.resources.worker.limits.cpu }}
|
||||
memory: {{ .Values.tap.resources.worker.limits.memory }}
|
||||
{{ if ne (toString .Values.tap.resources.sniffer.limits.cpu) "0" }}
|
||||
cpu: {{ .Values.tap.resources.sniffer.limits.cpu }}
|
||||
{{ end }}
|
||||
{{ if ne (toString .Values.tap.resources.sniffer.limits.memory) "0" }}
|
||||
memory: {{ .Values.tap.resources.sniffer.limits.memory }}
|
||||
{{ end }}
|
||||
requests:
|
||||
cpu: {{ .Values.tap.resources.worker.requests.cpu }}
|
||||
memory: {{ .Values.tap.resources.worker.requests.memory }}
|
||||
{{ if ne (toString .Values.tap.resources.sniffer.requests.cpu) "0" }}
|
||||
cpu: {{ .Values.tap.resources.sniffer.requests.cpu }}
|
||||
{{ end }}
|
||||
{{ if ne (toString .Values.tap.resources.sniffer.requests.memory) "0" }}
|
||||
memory: {{ .Values.tap.resources.sniffer.requests.memory }}
|
||||
{{ end }}
|
||||
securityContext:
|
||||
privileged: {{ .Values.tap.securityContext.privileged }}
|
||||
{{- if not .Values.tap.securityContext.privileged }}
|
||||
{{- $aaProfile := .Values.tap.securityContext.appArmorProfile }}
|
||||
{{- $selinuxOpts := .Values.tap.securityContext.seLinuxOptions }}
|
||||
{{- if or (ne $aaProfile.type "") (ne $aaProfile.localhostProfile "") }}
|
||||
appArmorProfile:
|
||||
{{- if ne $aaProfile.type "" }}
|
||||
type: {{ $aaProfile.type }}
|
||||
{{- end }}
|
||||
{{- if ne $aaProfile.localhostProfile "" }}
|
||||
localhostProfile: {{ $aaProfile.localhostProfile }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if or (ne $selinuxOpts.level "") (ne $selinuxOpts.role "") (ne $selinuxOpts.type "") (ne $selinuxOpts.user "") }}
|
||||
seLinuxOptions:
|
||||
{{- if ne $selinuxOpts.level "" }}
|
||||
level: {{ $selinuxOpts.level }}
|
||||
{{- end }}
|
||||
{{- if ne $selinuxOpts.role "" }}
|
||||
role: {{ $selinuxOpts.role }}
|
||||
{{- end }}
|
||||
{{- if ne $selinuxOpts.type "" }}
|
||||
type: {{ $selinuxOpts.type }}
|
||||
{{- end }}
|
||||
{{- if ne $selinuxOpts.user "" }}
|
||||
user: {{ $selinuxOpts.user }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
capabilities:
|
||||
add:
|
||||
- NET_RAW
|
||||
- NET_ADMIN
|
||||
- SYS_ADMIN
|
||||
- SYS_PTRACE
|
||||
- DAC_OVERRIDE
|
||||
- SYS_RESOURCE
|
||||
- SYS_MODULE
|
||||
{{- range .Values.tap.securityContext.capabilities.networkCapture }}
|
||||
{{ print "- " . }}
|
||||
{{- end }}
|
||||
{{- if .Values.tap.serviceMesh }}
|
||||
{{- range .Values.tap.securityContext.capabilities.serviceMeshCapture }}
|
||||
{{ print "- " . }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if .Values.tap.securityContext.capabilities.ebpfCapture }}
|
||||
{{- range .Values.tap.securityContext.capabilities.ebpfCapture }}
|
||||
{{ print "- " . }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
drop:
|
||||
- ALL
|
||||
{{- end }}
|
||||
readinessProbe:
|
||||
periodSeconds: 1
|
||||
failureThreshold: 3
|
||||
successThreshold: 1
|
||||
initialDelaySeconds: 5
|
||||
periodSeconds: {{ .Values.tap.probes.sniffer.periodSeconds }}
|
||||
failureThreshold: {{ .Values.tap.probes.sniffer.failureThreshold }}
|
||||
successThreshold: {{ .Values.tap.probes.sniffer.successThreshold }}
|
||||
initialDelaySeconds: {{ .Values.tap.probes.sniffer.initialDelaySeconds }}
|
||||
tcpSocket:
|
||||
port: {{ .Values.tap.proxy.worker.srvport }}
|
||||
port: {{ .Values.tap.proxy.worker.srvPort }}
|
||||
livenessProbe:
|
||||
periodSeconds: 1
|
||||
failureThreshold: 3
|
||||
successThreshold: 1
|
||||
initialDelaySeconds: 5
|
||||
periodSeconds: {{ .Values.tap.probes.sniffer.periodSeconds }}
|
||||
failureThreshold: {{ .Values.tap.probes.sniffer.failureThreshold }}
|
||||
successThreshold: {{ .Values.tap.probes.sniffer.successThreshold }}
|
||||
initialDelaySeconds: {{ .Values.tap.probes.sniffer.initialDelaySeconds }}
|
||||
tcpSocket:
|
||||
port: {{ .Values.tap.proxy.worker.srvport }}
|
||||
port: {{ .Values.tap.proxy.worker.srvPort }}
|
||||
volumeMounts:
|
||||
- mountPath: /hostproc
|
||||
name: proc
|
||||
@@ -92,28 +220,171 @@ spec:
|
||||
- mountPath: /sys
|
||||
name: sys
|
||||
readOnly: true
|
||||
{{- if .Values.tap.persistentstorage }}
|
||||
mountPropagation: HostToContainer
|
||||
- mountPath: /app/data
|
||||
name: kubeshark-persistent-volume
|
||||
{{- end }}
|
||||
name: data
|
||||
{{- if .Values.tap.tls }}
|
||||
- command:
|
||||
- ./tracer
|
||||
- -procfs
|
||||
- /hostproc
|
||||
{{- if .Values.tap.disableTlsLog }}
|
||||
- -disable-tls-log
|
||||
{{- end }}
|
||||
{{- if .Values.tap.pprof.enabled }}
|
||||
- -port
|
||||
- '{{ add .Values.tap.proxy.worker.srvPort 1 }}'
|
||||
{{- end }}
|
||||
- -loglevel
|
||||
- '{{ .Values.logLevel | default "warning" }}'
|
||||
{{- if .Values.tap.docker.overrideTag.worker }}
|
||||
image: '{{ .Values.tap.docker.registry }}/worker:{{ .Values.tap.docker.overrideTag.worker }}{{ include "kubeshark.dockerTagDebugVersion" . }}'
|
||||
{{ else }}
|
||||
image: '{{ .Values.tap.docker.registry }}/worker:{{ not (eq .Values.tap.docker.tag "") | ternary .Values.tap.docker.tag (include "kubeshark.defaultVersion" .) }}{{ include "kubeshark.dockerTagDebugVersion" . }}'
|
||||
{{- end }}
|
||||
imagePullPolicy: {{ .Values.tap.docker.imagePullPolicy }}
|
||||
name: tracer
|
||||
env:
|
||||
- name: POD_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.name
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
- name: PROFILING_ENABLED
|
||||
value: '{{ .Values.tap.pprof.enabled }}'
|
||||
- name: SENTRY_ENABLED
|
||||
value: '{{ (include "sentry.enabled" .) }}'
|
||||
- name: SENTRY_ENVIRONMENT
|
||||
value: '{{ .Values.tap.sentry.environment }}'
|
||||
resources:
|
||||
limits:
|
||||
{{ if ne (toString .Values.tap.resources.tracer.limits.cpu) "0" }}
|
||||
cpu: {{ .Values.tap.resources.tracer.limits.cpu }}
|
||||
{{ end }}
|
||||
{{ if ne (toString .Values.tap.resources.tracer.limits.memory) "0" }}
|
||||
memory: {{ .Values.tap.resources.tracer.limits.memory }}
|
||||
{{ end }}
|
||||
requests:
|
||||
{{ if ne (toString .Values.tap.resources.tracer.requests.cpu) "0" }}
|
||||
cpu: {{ .Values.tap.resources.tracer.requests.cpu }}
|
||||
{{ end }}
|
||||
{{ if ne (toString .Values.tap.resources.tracer.requests.memory) "0" }}
|
||||
memory: {{ .Values.tap.resources.tracer.requests.memory }}
|
||||
{{ end }}
|
||||
securityContext:
|
||||
privileged: {{ .Values.tap.securityContext.privileged }}
|
||||
{{- if not .Values.tap.securityContext.privileged }}
|
||||
{{- $aaProfile := .Values.tap.securityContext.appArmorProfile }}
|
||||
{{- $selinuxOpts := .Values.tap.securityContext.seLinuxOptions }}
|
||||
{{- if or (ne $aaProfile.type "") (ne $aaProfile.localhostProfile "") }}
|
||||
appArmorProfile:
|
||||
{{- if ne $aaProfile.type "" }}
|
||||
type: {{ $aaProfile.type }}
|
||||
{{- end }}
|
||||
{{- if ne $aaProfile.localhostProfile "" }}
|
||||
localhostProfile: {{ $aaProfile.localhostProfile }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if or (ne $selinuxOpts.level "") (ne $selinuxOpts.role "") (ne $selinuxOpts.type "") (ne $selinuxOpts.user "") }}
|
||||
seLinuxOptions:
|
||||
{{- if ne $selinuxOpts.level "" }}
|
||||
level: {{ $selinuxOpts.level }}
|
||||
{{- end }}
|
||||
{{- if ne $selinuxOpts.role "" }}
|
||||
role: {{ $selinuxOpts.role }}
|
||||
{{- end }}
|
||||
{{- if ne $selinuxOpts.type "" }}
|
||||
type: {{ $selinuxOpts.type }}
|
||||
{{- end }}
|
||||
{{- if ne $selinuxOpts.user "" }}
|
||||
user: {{ $selinuxOpts.user }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
capabilities:
|
||||
add:
|
||||
{{- range .Values.tap.securityContext.capabilities.ebpfCapture }}
|
||||
{{ print "- " . }}
|
||||
{{- end }}
|
||||
{{- range .Values.tap.securityContext.capabilities.networkCapture }}
|
||||
{{ print "- " . }}
|
||||
{{- end }}
|
||||
drop:
|
||||
- ALL
|
||||
{{- end }}
|
||||
volumeMounts:
|
||||
- mountPath: /hostproc
|
||||
name: proc
|
||||
readOnly: true
|
||||
- mountPath: /sys
|
||||
name: sys
|
||||
readOnly: true
|
||||
mountPropagation: HostToContainer
|
||||
- mountPath: /app/data
|
||||
name: data
|
||||
- mountPath: /etc/os-release
|
||||
name: os-release
|
||||
readOnly: true
|
||||
- mountPath: /hostroot
|
||||
mountPropagation: HostToContainer
|
||||
name: root
|
||||
readOnly: true
|
||||
{{- end }}
|
||||
dnsPolicy: ClusterFirstWithHostNet
|
||||
hostNetwork: true
|
||||
hostNetwork: {{ .Values.tap.hostNetwork }}
|
||||
serviceAccountName: {{ include "kubeshark.serviceAccountName" . }}
|
||||
terminationGracePeriodSeconds: 0
|
||||
{{- if .Values.tap.priorityClass }}
|
||||
priorityClassName: {{ .Values.tap.priorityClass | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.tap.tolerations.workers }}
|
||||
tolerations:
|
||||
- effect: NoExecute
|
||||
operator: Exists
|
||||
{{- if not .Values.tap.ignoretainted }}
|
||||
- effect: NoSchedule
|
||||
operator: Exists
|
||||
{{- end }}
|
||||
{{- if gt (len .Values.tap.nodeselectorterms) 0}}
|
||||
{{- range .Values.tap.tolerations.workers }}
|
||||
- key: {{ .key | quote }}
|
||||
operator: {{ .operator | quote }}
|
||||
{{- if .value }}
|
||||
value: {{ .value | quote }}
|
||||
{{- end }}
|
||||
{{- if .effect }}
|
||||
effect: {{ .effect | quote }}
|
||||
{{- end }}
|
||||
{{- if .tolerationSeconds }}
|
||||
tolerationSeconds: {{ .tolerationSeconds }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if gt (len .Values.tap.nodeSelectorTerms.workers) 0}}
|
||||
affinity:
|
||||
nodeAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
nodeSelectorTerms:
|
||||
{{- toYaml .Values.tap.nodeselectorterms | nindent 12 }}
|
||||
{{- toYaml .Values.tap.nodeSelectorTerms.workers | nindent 12 }}
|
||||
{{- end }}
|
||||
{{- if or .Values.tap.dns.nameservers .Values.tap.dns.searches .Values.tap.dns.options }}
|
||||
dnsConfig:
|
||||
{{- if .Values.tap.dns.nameservers }}
|
||||
nameservers:
|
||||
{{- range .Values.tap.dns.nameservers }}
|
||||
- {{ . | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if .Values.tap.dns.searches }}
|
||||
searches:
|
||||
{{- range .Values.tap.dns.searches }}
|
||||
- {{ . | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if .Values.tap.dns.options }}
|
||||
options:
|
||||
{{- range .Values.tap.dns.options }}
|
||||
- name: {{ .name | quote }}
|
||||
{{- if .value }}
|
||||
value: {{ .value | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
volumes:
|
||||
- hostPath:
|
||||
path: /proc
|
||||
@@ -121,8 +392,22 @@ spec:
|
||||
- hostPath:
|
||||
path: /sys
|
||||
name: sys
|
||||
{{- if .Values.tap.persistentstorage }}
|
||||
- name: kubeshark-persistent-volume
|
||||
- name: lib-modules
|
||||
hostPath:
|
||||
path: /lib/modules
|
||||
- hostPath:
|
||||
path: /etc/os-release
|
||||
name: os-release
|
||||
{{- if .Values.tap.tls }}
|
||||
- hostPath:
|
||||
path: /
|
||||
name: root
|
||||
{{- end }}
|
||||
- name: data
|
||||
{{- if .Values.tap.persistentStorage }}
|
||||
persistentVolumeClaim:
|
||||
claimName: kubeshark-persistent-volume-claim
|
||||
{{- else }}
|
||||
emptyDir:
|
||||
sizeLimit: {{ .Values.tap.storageLimit }}
|
||||
{{- end }}
|
||||
|
||||
@@ -1,16 +0,0 @@
|
||||
---
|
||||
{{- if .Values.tap.ingress.enabled }}
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: IngressClass
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
annotations:
|
||||
{{- if .Values.tap.annotations }}
|
||||
{{- toYaml .Values.tap.annotations | nindent 4 }}
|
||||
{{- end }}
|
||||
name: kubeshark-ingress-class
|
||||
namespace: {{ .Release.Namespace }}
|
||||
spec:
|
||||
controller: {{ .Values.tap.ingress.controller }}
|
||||
{{- end }}
|
||||
@@ -4,37 +4,34 @@ apiVersion: networking.k8s.io/v1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
annotations:
|
||||
certmanager.k8s.io/cluster-issuer: {{ .Values.tap.ingress.certmanager }}
|
||||
nginx.ingress.kubernetes.io/rewrite-target: /$2
|
||||
{{- if .Values.tap.annotations }}
|
||||
nginx.org/websocket-services: "kubeshark-front"
|
||||
{{- if .Values.tap.annotations }}
|
||||
{{- toYaml .Values.tap.annotations | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.tap.ingress.annotations }}
|
||||
{{- toYaml .Values.tap.ingress.annotations | nindent 4 }}
|
||||
{{- end }}
|
||||
labels:
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
name: kubeshark-ingress
|
||||
namespace: {{ .Release.Namespace }}
|
||||
spec:
|
||||
ingressClassName: {{ .Values.tap.ingress.classname }}
|
||||
{{- if .Values.tap.ingress.className }}
|
||||
ingressClassName: {{ .Values.tap.ingress.className }}
|
||||
{{- end }}
|
||||
rules:
|
||||
- host: {{ .Values.tap.ingress.host }}
|
||||
http:
|
||||
paths:
|
||||
- backend:
|
||||
service:
|
||||
name: kubeshark-hub
|
||||
port:
|
||||
number: 80
|
||||
path: /api(/|$)(.*)
|
||||
pathType: Prefix
|
||||
- backend:
|
||||
service:
|
||||
name: kubeshark-front
|
||||
port:
|
||||
number: 80
|
||||
path: /()(.*)
|
||||
path: {{ default "/" (((.Values).tap).ingress).path }}
|
||||
pathType: Prefix
|
||||
{{- if .Values.tap.ingress.tls }}
|
||||
tls:
|
||||
{{- if gt (len .Values.tap.ingress.tls) 0}}
|
||||
{{- toYaml .Values.tap.ingress.tls | nindent 2 }}
|
||||
{{- end }}
|
||||
status:
|
||||
89
helm-chart/templates/11-nginx-config-map.yaml
Normal file
@@ -0,0 +1,89 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: kubeshark-nginx-config-map
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
data:
|
||||
default.conf: |
|
||||
server {
|
||||
listen 8080;
|
||||
{{- if .Values.tap.ipv6 }}
|
||||
listen [::]:8080;
|
||||
{{- end }}
|
||||
access_log /dev/stdout;
|
||||
error_log /dev/stdout;
|
||||
|
||||
client_body_buffer_size 64k;
|
||||
client_header_buffer_size 32k;
|
||||
large_client_header_buffers 8 64k;
|
||||
|
||||
location {{ default "" (((.Values.tap).routing).front).basePath }}/api {
|
||||
rewrite ^{{ default "" (((.Values.tap).routing).front).basePath }}/api(.*)$ $1 break;
|
||||
proxy_pass http://kubeshark-hub;
|
||||
proxy_set_header X-Forwarded-For $remote_addr;
|
||||
proxy_set_header Host $http_host;
|
||||
proxy_set_header Upgrade websocket;
|
||||
proxy_set_header Connection Upgrade;
|
||||
proxy_set_header Authorization $http_authorization;
|
||||
proxy_pass_header Authorization;
|
||||
proxy_connect_timeout 4s;
|
||||
proxy_read_timeout 120s;
|
||||
proxy_send_timeout 12s;
|
||||
proxy_pass_request_headers on;
|
||||
}
|
||||
|
||||
location {{ default "" (((.Values.tap).routing).front).basePath }}/saml {
|
||||
rewrite ^{{ default "" (((.Values.tap).routing).front).basePath }}/saml(.*)$ /saml$1 break;
|
||||
proxy_pass http://kubeshark-hub;
|
||||
proxy_set_header X-Forwarded-For $remote_addr;
|
||||
proxy_set_header Host $http_host;
|
||||
proxy_connect_timeout 4s;
|
||||
proxy_read_timeout 120s;
|
||||
proxy_send_timeout 12s;
|
||||
proxy_pass_request_headers on;
|
||||
}
|
||||
|
||||
{{- if .Values.tap.auth.dexConfig }}
|
||||
location /dex {
|
||||
rewrite ^{{ default "" (((.Values.tap).routing).front).basePath }}/dex(.*)$ /dex$1 break;
|
||||
proxy_pass http://kubeshark-dex;
|
||||
proxy_set_header X-Forwarded-For $remote_addr;
|
||||
proxy_set_header Host $http_host;
|
||||
proxy_set_header Upgrade websocket;
|
||||
proxy_set_header Connection Upgrade;
|
||||
proxy_set_header Authorization $http_authorization;
|
||||
proxy_pass_header Authorization;
|
||||
proxy_connect_timeout 4s;
|
||||
proxy_read_timeout 120s;
|
||||
proxy_send_timeout 12s;
|
||||
proxy_pass_request_headers on;
|
||||
}
|
||||
{{- end }}
|
||||
|
||||
{{- if (((.Values.tap).routing).front).basePath }}
|
||||
location {{ .Values.tap.routing.front.basePath }} {
|
||||
rewrite ^{{ .Values.tap.routing.front.basePath }}(.*)$ $1 break;
|
||||
root /usr/share/nginx/html;
|
||||
index index.html index.htm;
|
||||
try_files $uri $uri/ /index.html;
|
||||
expires -1;
|
||||
add_header Cache-Control no-cache;
|
||||
}
|
||||
{{- end }}
|
||||
|
||||
location / {
|
||||
root /usr/share/nginx/html;
|
||||
index index.html index.htm;
|
||||
try_files $uri $uri/ /index.html;
|
||||
expires -1;
|
||||
add_header Cache-Control no-cache;
|
||||
}
|
||||
error_page 500 502 503 504 /50x.html;
|
||||
location = /50x.html {
|
||||
root /usr/share/nginx/html;
|
||||
}
|
||||
}
|
||||
|
||||
87
helm-chart/templates/12-config-map.yaml
Normal file
@@ -0,0 +1,87 @@
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: {{ include "kubeshark.configmapName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
app.kubeshark.com/app: hub
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
data:
|
||||
POD_REGEX: '{{ .Values.tap.regex }}'
|
||||
NAMESPACES: '{{ gt (len .Values.tap.namespaces) 0 | ternary (join "," .Values.tap.namespaces) "" }}'
|
||||
EXCLUDED_NAMESPACES: '{{ gt (len .Values.tap.excludedNamespaces) 0 | ternary (join "," .Values.tap.excludedNamespaces) "" }}'
|
||||
BPF_OVERRIDE: '{{ .Values.tap.bpfOverride }}'
|
||||
DISSECTION_ENABLED: '{{ .Values.tap.capture.dissection.enabled | ternary "true" "false" }}'
|
||||
CAPTURE_SELF: '{{ .Values.tap.capture.captureSelf | ternary "true" "false" }}'
|
||||
SCRIPTING_SCRIPTS: '{}'
|
||||
SCRIPTING_ACTIVE_SCRIPTS: '{{ gt (len .Values.scripting.active) 0 | ternary (join "," .Values.scripting.active) "" }}'
|
||||
INGRESS_ENABLED: '{{ .Values.tap.ingress.enabled }}'
|
||||
INGRESS_HOST: '{{ .Values.tap.ingress.host }}'
|
||||
PROXY_FRONT_PORT: '{{ .Values.tap.proxy.front.port }}'
|
||||
AUTH_ENABLED: '{{- if and .Values.cloudLicenseEnabled (not (empty .Values.license)) -}}
|
||||
{{ and .Values.tap.auth.enabled (eq .Values.tap.auth.type "dex") | ternary true false }}
|
||||
{{- else -}}
|
||||
{{ .Values.cloudLicenseEnabled | ternary "true" (.Values.tap.auth.enabled | ternary "true" "") }}
|
||||
{{- end }}'
|
||||
AUTH_TYPE: '{{- if and .Values.cloudLicenseEnabled (not (eq .Values.tap.auth.type "dex")) -}}
|
||||
default
|
||||
{{- else -}}
|
||||
{{ .Values.tap.auth.type }}
|
||||
{{- end }}'
|
||||
AUTH_SAML_IDP_METADATA_URL: '{{ .Values.tap.auth.saml.idpMetadataUrl }}'
|
||||
AUTH_SAML_ROLE_ATTRIBUTE: '{{ .Values.tap.auth.saml.roleAttribute }}'
|
||||
AUTH_SAML_ROLES: '{{ .Values.tap.auth.saml.roles | toJson }}'
|
||||
AUTH_OIDC_ISSUER: '{{ default "not set" (((.Values.tap).auth).dexOidc).issuer }}'
|
||||
AUTH_OIDC_REFRESH_TOKEN_LIFETIME: '{{ default "3960h" (((.Values.tap).auth).dexOidc).refreshTokenLifetime }}'
|
||||
AUTH_OIDC_STATE_PARAM_EXPIRY: '{{ default "10m" (((.Values.tap).auth).dexOidc).oauth2StateParamExpiry }}'
|
||||
AUTH_OIDC_BYPASS_SSL_CA_CHECK: '{{- if and
|
||||
(hasKey .Values.tap "auth")
|
||||
(hasKey .Values.tap.auth "dexOidc")
|
||||
(hasKey .Values.tap.auth.dexOidc "bypassSslCaCheck")
|
||||
-}}
|
||||
{{ eq .Values.tap.auth.dexOidc.bypassSslCaCheck true | ternary "true" "false" }}
|
||||
{{- else -}}
|
||||
false
|
||||
{{- end }}'
|
||||
TELEMETRY_DISABLED: '{{ not .Values.internetConnectivity | ternary "true" (not .Values.tap.telemetry.enabled | ternary "true" "false") }}'
|
||||
SCRIPTING_DISABLED: '{{- if .Values.tap.liveConfigMapChangesDisabled -}}
|
||||
{{- if .Values.demoModeEnabled -}}
|
||||
{{ .Values.demoModeEnabled | ternary false true }}
|
||||
{{- else -}}
|
||||
true
|
||||
{{- end }}
|
||||
{{- else -}}
|
||||
false
|
||||
{{- end }}'
|
||||
TARGETED_PODS_UPDATE_DISABLED: '{{ .Values.tap.liveConfigMapChangesDisabled | ternary "true" "" }}'
|
||||
PRESET_FILTERS_CHANGING_ENABLED: '{{ .Values.tap.liveConfigMapChangesDisabled | ternary "false" "true" }}'
|
||||
RECORDING_DISABLED: '{{ .Values.tap.liveConfigMapChangesDisabled | ternary "true" "" }}'
|
||||
DISSECTION_CONTROL_ENABLED: '{{- if and .Values.tap.liveConfigMapChangesDisabled (not .Values.tap.capture.dissection.enabled) -}}
|
||||
true
|
||||
{{- else -}}
|
||||
{{ not .Values.tap.liveConfigMapChangesDisabled | ternary "true" "false" }}
|
||||
{{- end }}'
|
||||
GLOBAL_FILTER: {{ include "kubeshark.escapeDoubleQuotes" .Values.tap.globalFilter | quote }}
|
||||
DEFAULT_FILTER: {{ include "kubeshark.escapeDoubleQuotes" .Values.tap.defaultFilter | quote }}
|
||||
TRAFFIC_SAMPLE_RATE: '{{ .Values.tap.misc.trafficSampleRate }}'
|
||||
JSON_TTL: '{{ .Values.tap.misc.jsonTTL }}'
|
||||
PCAP_TTL: '{{ .Values.tap.misc.pcapTTL }}'
|
||||
PCAP_ERROR_TTL: '{{ .Values.tap.misc.pcapErrorTTL }}'
|
||||
TIMEZONE: '{{ not (eq .Values.timezone "") | ternary .Values.timezone " " }}'
|
||||
CLOUD_LICENSE_ENABLED: '{{- if and .Values.cloudLicenseEnabled (not (empty .Values.license)) -}}
|
||||
false
|
||||
{{- else -}}
|
||||
{{ .Values.cloudLicenseEnabled }}
|
||||
{{- end }}'
|
||||
DUPLICATE_TIMEFRAME: '{{ .Values.tap.misc.duplicateTimeframe }}'
|
||||
ENABLED_DISSECTORS: '{{ gt (len .Values.tap.enabledDissectors) 0 | ternary (join "," .Values.tap.enabledDissectors) "" }}'
|
||||
CUSTOM_MACROS: '{{ toJson .Values.tap.customMacros }}'
|
||||
DISSECTORS_UPDATING_ENABLED: '{{ .Values.tap.liveConfigMapChangesDisabled | ternary "false" "true" }}'
|
||||
DETECT_DUPLICATES: '{{ .Values.tap.misc.detectDuplicates | ternary "true" "false" }}'
|
||||
PCAP_DUMP_ENABLE: '{{ .Values.pcapdump.enabled }}'
|
||||
PCAP_TIME_INTERVAL: '{{ .Values.pcapdump.timeInterval }}'
|
||||
PCAP_MAX_TIME: '{{ .Values.pcapdump.maxTime }}'
|
||||
PCAP_MAX_SIZE: '{{ .Values.pcapdump.maxSize }}'
|
||||
PORT_MAPPING: '{{ toJson .Values.tap.portMapping }}'
|
||||
RAW_CAPTURE_ENABLED: '{{ .Values.tap.capture.raw.enabled | ternary "true" "false" }}'
|
||||
RAW_CAPTURE_STORAGE_SIZE: '{{ .Values.tap.capture.raw.storageSize }}'
|
||||
@@ -1,28 +0,0 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: kubeshark-nginx-config-map
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
data:
|
||||
default.conf: |
|
||||
server {
|
||||
listen 80;
|
||||
{{- if .Values.tap.ipv6 }}
|
||||
listen [::]:80;
|
||||
{{- end }}
|
||||
add_header Cache-Control no-cache;
|
||||
location / {
|
||||
root /usr/share/nginx/html;
|
||||
index index.html index.htm;
|
||||
try_files $uri $uri/ /index.html;
|
||||
expires -1;
|
||||
}
|
||||
error_page 500 502 503 504 /50x.html;
|
||||
location = /50x.html {
|
||||
root /usr/share/nginx/html;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,17 +0,0 @@
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: kubeshark-config-map
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
app.kubeshark.co/app: hub
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
data:
|
||||
POD_REGEX: '{{ .Values.tap.regex }}'
|
||||
NAMESPACES: '{{ gt (len .Values.tap.namespaces) 0 | ternary (join "," .Values.tap.namespaces) "" }}'
|
||||
SCRIPTING_ENV: '{{ .Values.scripting.env | toJson }}'
|
||||
SCRIPTING_SCRIPTS: '[]'
|
||||
AUTH_ENABLED: '{{ .Values.tap.auth.enabled | ternary "true" "" }}'
|
||||
AUTH_APPROVED_EMAILS: '{{ gt (len .Values.tap.auth.approvedemails) 0 | ternary (join "," .Values.tap.auth.approvedemails) "" }}'
|
||||
AUTH_APPROVED_DOMAINS: '{{ gt (len .Values.tap.auth.approveddomains) 0 | ternary (join "," .Values.tap.auth.approveddomains) "" }}'
|
||||
TELEMETRY_DISABLED: '{{ not .Values.tap.telemetry.enabled | ternary "true" "" }}'
|
||||
43
helm-chart/templates/13-secret.yaml
Normal file
@@ -0,0 +1,43 @@
|
||||
kind: Secret
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: {{ include "kubeshark.secretName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
app.kubeshark.com/app: hub
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
stringData:
|
||||
LICENSE: '{{ .Values.license }}'
|
||||
SCRIPTING_ENV: '{{ .Values.scripting.env | toJson }}'
|
||||
OIDC_CLIENT_ID: '{{ default "not set" (((.Values.tap).auth).dexOidc).clientId }}'
|
||||
OIDC_CLIENT_SECRET: '{{ default "not set" (((.Values.tap).auth).dexOidc).clientSecret }}'
|
||||
|
||||
---
|
||||
|
||||
kind: Secret
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: kubeshark-saml-x509-crt-secret
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
app.kubeshark.com/app: hub
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
stringData:
|
||||
AUTH_SAML_X509_CRT: |
|
||||
{{ .Values.tap.auth.saml.x509crt | nindent 4 }}
|
||||
|
||||
---
|
||||
|
||||
kind: Secret
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: kubeshark-saml-x509-key-secret
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
app.kubeshark.com/app: hub
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
stringData:
|
||||
AUTH_SAML_X509_KEY: |
|
||||
{{ .Values.tap.auth.saml.x509key | nindent 4 }}
|
||||
|
||||
---
|
||||
@@ -0,0 +1,53 @@
|
||||
{{- if .Capabilities.APIVersions.Has "security.openshift.io/v1/SecurityContextConstraints" }}
|
||||
apiVersion: security.openshift.io/v1
|
||||
kind: SecurityContextConstraints
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
annotations:
|
||||
"helm.sh/hook": pre-install
|
||||
{{- if .Values.tap.annotations }}
|
||||
{{- toYaml .Values.tap.annotations | nindent 4 }}
|
||||
{{- end }}
|
||||
name: kubeshark-scc
|
||||
priority: 10
|
||||
allowPrivilegedContainer: true
|
||||
allowHostDirVolumePlugin: true
|
||||
allowHostNetwork: true
|
||||
allowHostPorts: true
|
||||
allowHostPID: true
|
||||
allowHostIPC: true
|
||||
readOnlyRootFilesystem: false
|
||||
requiredDropCapabilities:
|
||||
- MKNOD
|
||||
allowedCapabilities:
|
||||
- NET_RAW
|
||||
- NET_ADMIN
|
||||
- SYS_ADMIN
|
||||
- SYS_PTRACE
|
||||
- DAC_OVERRIDE
|
||||
- SYS_RESOURCE
|
||||
- SYS_MODULE
|
||||
- IPC_LOCK
|
||||
runAsUser:
|
||||
type: RunAsAny
|
||||
fsGroup:
|
||||
type: MustRunAs
|
||||
seLinuxContext:
|
||||
type: RunAsAny
|
||||
supplementalGroups:
|
||||
type: RunAsAny
|
||||
seccompProfiles:
|
||||
- '*'
|
||||
volumes:
|
||||
- configMap
|
||||
- downwardAPI
|
||||
- emptyDir
|
||||
- persistentVolumeClaim
|
||||
- secret
|
||||
- hostPath
|
||||
- projected
|
||||
- ephemeral
|
||||
users:
|
||||
- system:serviceaccount:{{ .Release.Namespace }}:kubeshark-service-account
|
||||
{{- end }}
|
||||
@@ -1,10 +0,0 @@
|
||||
kind: Secret
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: kubeshark-secret
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
app.kubeshark.co/app: hub
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
stringData:
|
||||
LICENSE: '{{ .Values.license }}'
|
||||
23
helm-chart/templates/15-worker-service-metrics.yaml
Normal file
@@ -0,0 +1,23 @@
|
||||
---
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
annotations:
|
||||
prometheus.io/scrape: 'true'
|
||||
prometheus.io/port: '{{ .Values.tap.metrics.port }}'
|
||||
{{- if .Values.tap.annotations }}
|
||||
{{- toYaml .Values.tap.annotations | nindent 4 }}
|
||||
{{- end }}
|
||||
name: kubeshark-worker-metrics
|
||||
namespace: {{ .Release.Namespace }}
|
||||
spec:
|
||||
selector:
|
||||
app.kubeshark.com/app: worker
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
ports:
|
||||
- name: metrics
|
||||
protocol: TCP
|
||||
port: {{ .Values.tap.metrics.port }}
|
||||
targetPort: {{ .Values.tap.metrics.port }}
|
||||
23
helm-chart/templates/16-hub-service-metrics.yaml
Normal file
@@ -0,0 +1,23 @@
|
||||
---
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
annotations:
|
||||
prometheus.io/scrape: 'true'
|
||||
prometheus.io/port: '9100'
|
||||
{{- if .Values.tap.annotations }}
|
||||
{{- toYaml .Values.tap.annotations | nindent 4 }}
|
||||
{{- end }}
|
||||
name: kubeshark-hub-metrics
|
||||
namespace: {{ .Release.Namespace }}
|
||||
spec:
|
||||
selector:
|
||||
app.kubeshark.com/app: hub
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
ports:
|
||||
- name: metrics
|
||||
protocol: TCP
|
||||
port: 9100
|
||||
targetPort: 9100
|
||||
104
helm-chart/templates/17-network-policies.yaml
Normal file
@@ -0,0 +1,104 @@
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: NetworkPolicy
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
{{- if .Values.tap.annotations }}
|
||||
annotations:
|
||||
{{- toYaml .Values.tap.annotations | nindent 4 }}
|
||||
{{- end }}
|
||||
name: kubeshark-hub-network-policy
|
||||
namespace: {{ .Release.Namespace }}
|
||||
spec:
|
||||
podSelector:
|
||||
matchLabels:
|
||||
app.kubeshark.com/app: hub
|
||||
policyTypes:
|
||||
- Ingress
|
||||
- Egress
|
||||
ingress:
|
||||
- ports:
|
||||
- protocol: TCP
|
||||
port: 8080
|
||||
- ports:
|
||||
- protocol: TCP
|
||||
port: 9100
|
||||
egress:
|
||||
- {}
|
||||
---
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: NetworkPolicy
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
annotations:
|
||||
{{- if .Values.tap.annotations }}
|
||||
{{- toYaml .Values.tap.annotations | nindent 4 }}
|
||||
{{- end }}
|
||||
name: kubeshark-front-network-policy
|
||||
namespace: {{ .Release.Namespace }}
|
||||
spec:
|
||||
podSelector:
|
||||
matchLabels:
|
||||
app.kubeshark.com/app: front
|
||||
policyTypes:
|
||||
- Ingress
|
||||
- Egress
|
||||
ingress:
|
||||
- ports:
|
||||
- protocol: TCP
|
||||
port: 8080
|
||||
egress:
|
||||
- {}
|
||||
---
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: NetworkPolicy
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
annotations:
|
||||
{{- if .Values.tap.annotations }}
|
||||
{{- toYaml .Values.tap.annotations | nindent 4 }}
|
||||
{{- end }}
|
||||
name: kubeshark-dex-network-policy
|
||||
namespace: {{ .Release.Namespace }}
|
||||
spec:
|
||||
podSelector:
|
||||
matchLabels:
|
||||
app.kubeshark.com/app: dex
|
||||
policyTypes:
|
||||
- Ingress
|
||||
- Egress
|
||||
ingress:
|
||||
- ports:
|
||||
- protocol: TCP
|
||||
port: 5556
|
||||
egress:
|
||||
- {}
|
||||
---
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: NetworkPolicy
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
annotations:
|
||||
{{- if .Values.tap.annotations }}
|
||||
{{- toYaml .Values.tap.annotations | nindent 4 }}
|
||||
{{- end }}
|
||||
name: kubeshark-worker-network-policy
|
||||
namespace: {{ .Release.Namespace }}
|
||||
spec:
|
||||
podSelector:
|
||||
matchLabels:
|
||||
app.kubeshark.com/app: worker
|
||||
policyTypes:
|
||||
- Ingress
|
||||
- Egress
|
||||
ingress:
|
||||
- ports:
|
||||
- protocol: TCP
|
||||
port: {{ .Values.tap.proxy.worker.srvPort }}
|
||||
- protocol: TCP
|
||||
port: {{ .Values.tap.metrics.port }}
|
||||
egress:
|
||||
- {}
|
||||
27
helm-chart/templates/18-cleanup-job.yaml
Normal file
@@ -0,0 +1,27 @@
|
||||
{{ if .Values.tap.gitops.enabled -}}
|
||||
apiVersion: batch/v1
|
||||
kind: Job
|
||||
metadata:
|
||||
name: kubeshark-cleanup-job
|
||||
annotations:
|
||||
"helm.sh/hook": pre-delete
|
||||
"helm.sh/hook-delete-policy": hook-succeeded
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
serviceAccountName: {{ include "kubeshark.serviceAccountName" . }}
|
||||
{{- if .Values.tap.priorityClass }}
|
||||
priorityClassName: {{ .Values.tap.priorityClass | quote }}
|
||||
{{- end }}
|
||||
restartPolicy: Never
|
||||
containers:
|
||||
- name: cleanup
|
||||
{{- if .Values.tap.docker.overrideImage.hub }}
|
||||
image: '{{ .Values.tap.docker.overrideImage.hub }}'
|
||||
{{- else if .Values.tap.docker.overrideTag.hub }}
|
||||
image: '{{ .Values.tap.docker.registry }}/hub:{{ .Values.tap.docker.overrideTag.hub }}'
|
||||
{{ else }}
|
||||
image: '{{ .Values.tap.docker.registry }}/hub:{{ not (eq .Values.tap.docker.tag "") | ternary .Values.tap.docker.tag (include "kubeshark.defaultVersion" .) }}'
|
||||
{{- end }}
|
||||
command: ["/app/cleanup"]
|
||||
{{ end -}}
|
||||
112
helm-chart/templates/18-dex-deployment.yaml
Normal file
@@ -0,0 +1,112 @@
|
||||
{{- if .Values.tap.auth.dexConfig }}
|
||||
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
app.kubeshark.com/app: dex
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
{{- if .Values.tap.annotations }}
|
||||
annotations:
|
||||
{{- toYaml .Values.tap.annotations | nindent 4 }}
|
||||
{{- end }}
|
||||
name: {{ include "kubeshark.name" . }}-dex
|
||||
namespace: {{ .Release.Namespace }}
|
||||
spec:
|
||||
replicas: 1 # Set the desired number of replicas
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubeshark.com/app: dex
|
||||
{{- include "kubeshark.selectorLabels" . | nindent 6 }}
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app.kubeshark.com/app: dex
|
||||
{{- include "kubeshark.labels" . | nindent 8 }}
|
||||
spec:
|
||||
containers:
|
||||
- name: kubeshark-dex
|
||||
image: 'dexidp/dex:v2.42.0-alpine'
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 5556
|
||||
protocol: TCP
|
||||
- name: telemetry
|
||||
containerPort: 5558
|
||||
protocol: TCP
|
||||
args:
|
||||
- dex
|
||||
- serve
|
||||
- /etc/dex/dex-config.yaml
|
||||
imagePullPolicy: {{ .Values.tap.docker.imagePullPolicy }}
|
||||
volumeMounts:
|
||||
- name: dex-secret-conf-volume
|
||||
mountPath: /etc/dex/dex-config.yaml
|
||||
subPath: dex-config.yaml
|
||||
readOnly: true
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /healthz/live
|
||||
port: 5558
|
||||
periodSeconds: 1
|
||||
failureThreshold: 3
|
||||
successThreshold: 1
|
||||
initialDelaySeconds: 3
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /healthz/ready
|
||||
port: 5558
|
||||
periodSeconds: 1
|
||||
failureThreshold: 3
|
||||
successThreshold: 1
|
||||
initialDelaySeconds: 3
|
||||
timeoutSeconds: 1
|
||||
resources:
|
||||
limits:
|
||||
cpu: 750m
|
||||
memory: 1Gi
|
||||
requests:
|
||||
cpu: 50m
|
||||
memory: 50Mi
|
||||
{{- if gt (len .Values.tap.nodeSelectorTerms.dex) 0}}
|
||||
affinity:
|
||||
nodeAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
nodeSelectorTerms:
|
||||
{{- toYaml .Values.tap.nodeSelectorTerms.dex | nindent 12 }}
|
||||
{{- end }}
|
||||
{{- if or .Values.tap.dns.nameservers .Values.tap.dns.searches .Values.tap.dns.options }}
|
||||
dnsConfig:
|
||||
{{- if .Values.tap.dns.nameservers }}
|
||||
nameservers:
|
||||
{{- range .Values.tap.dns.nameservers }}
|
||||
- {{ . | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if .Values.tap.dns.searches }}
|
||||
searches:
|
||||
{{- range .Values.tap.dns.searches }}
|
||||
- {{ . | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if .Values.tap.dns.options }}
|
||||
options:
|
||||
{{- range .Values.tap.dns.options }}
|
||||
- name: {{ .name | quote }}
|
||||
{{- if .value }}
|
||||
value: {{ .value | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
volumes:
|
||||
- name: dex-secret-conf-volume
|
||||
secret:
|
||||
secretName: kubeshark-dex-conf-secret
|
||||
dnsPolicy: ClusterFirstWithHostNet
|
||||
serviceAccountName: {{ include "kubeshark.serviceAccountName" . }}
|
||||
{{- if .Values.tap.priorityClass }}
|
||||
priorityClassName: {{ .Values.tap.priorityClass | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
25
helm-chart/templates/19-dex-service.yaml
Normal file
@@ -0,0 +1,25 @@
|
||||
{{- if .Values.tap.auth.dexConfig }}
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels:
|
||||
app.kubeshark.com/app: dex
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
{{- if .Values.tap.annotations }}
|
||||
annotations:
|
||||
{{- toYaml .Values.tap.annotations | nindent 4 }}
|
||||
{{- end }}
|
||||
name: kubeshark-dex
|
||||
namespace: {{ .Release.Namespace }}
|
||||
spec:
|
||||
ports:
|
||||
- name: kubeshark-dex
|
||||
port: 80
|
||||
targetPort: 5556
|
||||
selector:
|
||||
app.kubeshark.com/app: dex
|
||||
type: ClusterIP
|
||||
|
||||
{{- end }}
|
||||
14
helm-chart/templates/20-dex-secret.yaml
Normal file
@@ -0,0 +1,14 @@
|
||||
{{- if .Values.tap.auth.dexConfig }}
|
||||
|
||||
kind: Secret
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: kubeshark-dex-conf-secret
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
app.kubeshark.com/app: hub
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
data:
|
||||
dex-config.yaml: {{ .Values.tap.auth.dexConfig | toYaml | b64enc | quote }}
|
||||
|
||||
{{- end }}
|
||||
@@ -1,31 +1,53 @@
|
||||
Thank you for installing {{ title .Chart.Name }}.
|
||||
|
||||
Your deployment has been successful. The release is named {{ .Release.Name }} and it has been deployed in the {{ .Release.Namespace }} namespace.
|
||||
Registry: {{ .Values.tap.docker.registry }}
|
||||
Tag: {{ not (eq .Values.tap.docker.tag "") | ternary .Values.tap.docker.tag (printf "v%s" .Chart.Version) }}
|
||||
{{- if .Values.tap.docker.overrideTag.worker }}
|
||||
Overridden worker tag: {{ .Values.tap.docker.overrideTag.worker }}
|
||||
{{- end }}
|
||||
{{- if .Values.tap.docker.overrideTag.hub }}
|
||||
Overridden hub tag: {{ .Values.tap.docker.overrideTag.hub }}
|
||||
{{- end }}
|
||||
{{- if .Values.tap.docker.overrideTag.front }}
|
||||
Overridden front tag: {{ .Values.tap.docker.overrideTag.front }}
|
||||
{{- end }}
|
||||
{{- if .Values.tap.docker.overrideImage.worker }}
|
||||
Overridden worker image: {{ .Values.tap.docker.overrideImage.worker }}
|
||||
{{- end }}
|
||||
{{- if .Values.tap.docker.overrideImage.hub }}
|
||||
Overridden hub image: {{ .Values.tap.docker.overrideImage.hub }}
|
||||
{{- end }}
|
||||
{{- if .Values.tap.docker.overrideImage.front }}
|
||||
Overridden front image: {{ .Values.tap.docker.overrideImage.front }}
|
||||
{{- end }}
|
||||
|
||||
{{- if .Values.tap.telemetry.enabled }}
|
||||
Notice: Telemetry is enabled. Kubeshark will collect usage statistics.
|
||||
{{ end }}
|
||||
Your deployment has been successful. The release is named `{{ .Release.Name }}` and it has been deployed in the `{{ .Release.Namespace }}` namespace.
|
||||
|
||||
{{- if .Values.tap.ingress.enabled }}
|
||||
Notices:
|
||||
{{- if .Values.supportChatEnabled}}
|
||||
- Support chat using Intercom is enabled. It can be disabled using `--set supportChatEnabled=false`
|
||||
{{- end }}
|
||||
{{- if eq .Values.license ""}}
|
||||
- No license key was detected.
|
||||
- Authenticate through the dashboard to activate a complementary COMMUNITY license.
|
||||
- If you have an Enterprise license, download the license key from https://console.kubeshark.com/
|
||||
- An Enterprise license-key can be added as 'license: <license>' in helm values or as `--set license=<license>` or as `LICENSE` via mounted secret (`tap.secrets`).
|
||||
- Contact us to get an Enterprise license: https://kubeshark.com/contact-us.
|
||||
{{- end }}
|
||||
{{ if .Values.tap.ingress.enabled }}
|
||||
|
||||
{{ if not .Values.license -}}
|
||||
warning:
|
||||
> Ingress option enabled but license not set. The application should not work as expected.
|
||||
> Get a license at https://console.kubeshark.co/
|
||||
{{- else }}
|
||||
You can now access the application through the following URL:
|
||||
http{{ if .Values.tap.ingress.tls }}s{{ end }}://{{ .Values.tap.ingress.host }}
|
||||
{{- end -}}
|
||||
http{{ if .Values.tap.ingress.tls }}s{{ end }}://{{ .Values.tap.ingress.host }}{{ default "" (((.Values.tap).routing).front).basePath }}/
|
||||
|
||||
{{- else }}
|
||||
To access the application, follow these steps:
|
||||
|
||||
1. Perform port forwarding with the following commands:
|
||||
|
||||
kubectl port-forward -n {{ .Release.Namespace }} service/kubeshark-hub 8898:80 & \
|
||||
kubectl port-forward -n {{ .Release.Namespace }} service/kubeshark-front 8899:80
|
||||
you could also run: `kubeshark proxy` (which simply manages the port-forward connection)
|
||||
|
||||
2. Once port forwarding is done, you can access the application by visiting the following URL in your web browser:
|
||||
http://0.0.0.0:8899
|
||||
http://127.0.0.1:8899{{ default "" (((.Values.tap).routing).front).basePath }}/
|
||||
|
||||
{{ end }}
|
||||
{{- end }}
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
Expand the name of the chart.
|
||||
*/}}
|
||||
{{- define "kubeshark.name" -}}
|
||||
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
|
||||
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
@@ -11,16 +11,7 @@ We truncate at 63 chars because some Kubernetes name fields are limited to this
|
||||
If release name contains chart name it will be used as a full name.
|
||||
*/}}
|
||||
{{- define "kubeshark.fullname" -}}
|
||||
{{- if .Values.fullnameOverride }}
|
||||
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
|
||||
{{- else }}
|
||||
{{- $name := default .Chart.Name .Values.nameOverride }}
|
||||
{{- if contains $name .Release.Name }}
|
||||
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
|
||||
{{- else }}
|
||||
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- printf "%s-%s" .Release.Name .Chart.Name | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
@@ -36,13 +27,8 @@ Common labels
|
||||
{{- define "kubeshark.labels" -}}
|
||||
helm.sh/chart: {{ include "kubeshark.chart" . }}
|
||||
{{ include "kubeshark.selectorLabels" . }}
|
||||
{{- if .Chart.AppVersion }}
|
||||
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
|
||||
{{- end }}
|
||||
app.kubernetes.io/version: {{ .Chart.Version | quote }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
{{- with .Values.additionalLabels }}
|
||||
{{ toYaml . }}
|
||||
{{- end }}
|
||||
{{- if .Values.tap.labels }}
|
||||
{{ toYaml .Values.tap.labels }}
|
||||
{{- end }}
|
||||
@@ -60,9 +46,67 @@ app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
Create the name of the service account to use
|
||||
*/}}
|
||||
{{- define "kubeshark.serviceAccountName" -}}
|
||||
{{- if and .Values.serviceAccount .Values.serviceAccount.create }}
|
||||
{{- default (include "kubeshark.fullname" .) .Values.serviceAccount.name }}
|
||||
{{- else }}
|
||||
{{- printf "%s-service-account" .Release.Name }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Set configmap and secret names based on gitops.enabled
|
||||
*/}}
|
||||
{{- define "kubeshark.configmapName" -}}
|
||||
kubeshark-config-map{{ if .Values.tap.gitops.enabled }}-default{{ end }}
|
||||
{{- end -}}
|
||||
|
||||
{{- define "kubeshark.secretName" -}}
|
||||
kubeshark-secret{{ if .Values.tap.gitops.enabled }}-default{{ end }}
|
||||
{{- end -}}
|
||||
|
||||
|
||||
{{/*
|
||||
Escape double quotes in a string
|
||||
*/}}
|
||||
{{- define "kubeshark.escapeDoubleQuotes" -}}
|
||||
{{- regexReplaceAll "\"" . "\"" -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Define debug docker tag suffix
|
||||
*/}}
|
||||
{{- define "kubeshark.dockerTagDebugVersion" -}}
|
||||
{{- .Values.tap.pprof.enabled | ternary "-debug" "" }}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create docker tag default version
|
||||
*/}}
|
||||
{{- define "kubeshark.defaultVersion" -}}
|
||||
{{- $defaultVersion := (printf "v%s" .Chart.Version) -}}
|
||||
{{- if .Values.tap.docker.tagLocked }}
|
||||
{{- $defaultVersion = regexReplaceAll "^([^.]+\\.[^.]+).*" $defaultVersion "$1" -}}
|
||||
{{- end }}
|
||||
{{- $defaultVersion }}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Set sentry based on internet connectivity and telemetry
|
||||
*/}}
|
||||
{{- define "sentry.enabled" -}}
|
||||
{{- $sentryEnabledVal := .Values.tap.sentry.enabled -}}
|
||||
{{- if not .Values.internetConnectivity -}}
|
||||
{{- $sentryEnabledVal = false -}}
|
||||
{{- else if not .Values.tap.telemetry.enabled -}}
|
||||
{{- $sentryEnabledVal = false -}}
|
||||
{{- end -}}
|
||||
{{- $sentryEnabledVal -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Dex IdP: retrieve a secret for static client with a specific ID
|
||||
*/}}
|
||||
{{- define "getDexKubesharkStaticClientSecret" -}}
|
||||
{{- $clientId := .clientId -}}
|
||||
{{- range .clients }}
|
||||
{{- if eq .id $clientId }}
|
||||
{{- .secret }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
@@ -1,83 +1,288 @@
|
||||
config: {}
|
||||
dumplogs: false
|
||||
headless: false
|
||||
kube:
|
||||
configpath: ""
|
||||
context: ""
|
||||
license: ""
|
||||
logs:
|
||||
file: ""
|
||||
manifests:
|
||||
dump: false
|
||||
scripting:
|
||||
env: null
|
||||
source: ""
|
||||
watchscripts: true
|
||||
tap:
|
||||
annotations: {}
|
||||
auth:
|
||||
approveddomains: []
|
||||
approvedemails: []
|
||||
enabled: false
|
||||
debug: false
|
||||
docker:
|
||||
imagepullpolicy: Always
|
||||
imagepullsecrets: null
|
||||
registry: docker.io/kubeshark
|
||||
tag: latest
|
||||
dryrun: false
|
||||
ignoretainted: false
|
||||
ingress:
|
||||
certmanager: letsencrypt-prod
|
||||
classname: kubeshark-ingress-class
|
||||
controller: k8s.io/ingress-nginx
|
||||
enabled: false
|
||||
host: ks.svc.cluster.local
|
||||
tls: null
|
||||
ipv6: true
|
||||
labels: {}
|
||||
namespaces: []
|
||||
nodeselectorterms:
|
||||
- matchExpressions:
|
||||
- key: kubernetes.io/os
|
||||
operator: In
|
||||
values:
|
||||
- linux
|
||||
packetcapture: libpcap
|
||||
pcap: ""
|
||||
persistentstorage: false
|
||||
tag: ""
|
||||
tagLocked: true
|
||||
imagePullPolicy: Always
|
||||
imagePullSecrets: []
|
||||
overrideImage:
|
||||
worker: ""
|
||||
hub: ""
|
||||
front: ""
|
||||
overrideTag:
|
||||
worker: ""
|
||||
hub: ""
|
||||
front: ""
|
||||
proxy:
|
||||
worker:
|
||||
srvPort: 48999
|
||||
hub:
|
||||
srvPort: 8898
|
||||
front:
|
||||
port: 8899
|
||||
host: 127.0.0.1
|
||||
hub:
|
||||
port: 8898
|
||||
srvport: 8898
|
||||
worker:
|
||||
srvport: 8897
|
||||
regex: .*
|
||||
namespaces: []
|
||||
excludedNamespaces: []
|
||||
bpfOverride: ""
|
||||
capture:
|
||||
dissection:
|
||||
enabled: true
|
||||
stopAfter: 5m
|
||||
captureSelf: false
|
||||
raw:
|
||||
enabled: true
|
||||
storageSize: 1Gi
|
||||
dbMaxSize: 500Mi
|
||||
delayedDissection:
|
||||
image: kubeshark/worker:master
|
||||
cpu: "1"
|
||||
memory: 4Gi
|
||||
snapshots:
|
||||
storageClass: ""
|
||||
storageSize: 20Gi
|
||||
release:
|
||||
repo: https://helm.kubeshark.com
|
||||
name: kubeshark
|
||||
namespace: default
|
||||
repo: https://helm.kubeshark.co
|
||||
persistentStorage: false
|
||||
persistentStorageStatic: false
|
||||
persistentStoragePvcVolumeMode: FileSystem
|
||||
efsFileSytemIdAndPath: ""
|
||||
secrets: []
|
||||
storageLimit: 10Gi
|
||||
storageClass: standard
|
||||
dryRun: false
|
||||
dns:
|
||||
nameservers: []
|
||||
searches: []
|
||||
options: []
|
||||
resources:
|
||||
hub:
|
||||
limits:
|
||||
cpu: 750m
|
||||
memory: 1Gi
|
||||
cpu: "0"
|
||||
memory: 5Gi
|
||||
requests:
|
||||
cpu: 50m
|
||||
memory: 50Mi
|
||||
worker:
|
||||
sniffer:
|
||||
limits:
|
||||
cpu: 750m
|
||||
memory: 1Gi
|
||||
cpu: "0"
|
||||
memory: 5Gi
|
||||
requests:
|
||||
cpu: 50m
|
||||
memory: 50Mi
|
||||
servicemesh: true
|
||||
storageclass: standard
|
||||
storagelimit: 200Mi
|
||||
tracer:
|
||||
limits:
|
||||
cpu: "0"
|
||||
memory: 5Gi
|
||||
requests:
|
||||
cpu: 50m
|
||||
memory: 50Mi
|
||||
probes:
|
||||
hub:
|
||||
initialDelaySeconds: 5
|
||||
periodSeconds: 5
|
||||
successThreshold: 1
|
||||
failureThreshold: 3
|
||||
sniffer:
|
||||
initialDelaySeconds: 5
|
||||
periodSeconds: 5
|
||||
successThreshold: 1
|
||||
failureThreshold: 3
|
||||
serviceMesh: true
|
||||
tls: true
|
||||
disableTlsLog: true
|
||||
packetCapture: best
|
||||
labels: {}
|
||||
annotations: {}
|
||||
nodeSelectorTerms:
|
||||
hub:
|
||||
- matchExpressions:
|
||||
- key: kubernetes.io/os
|
||||
operator: In
|
||||
values:
|
||||
- linux
|
||||
workers:
|
||||
- matchExpressions:
|
||||
- key: kubernetes.io/os
|
||||
operator: In
|
||||
values:
|
||||
- linux
|
||||
front:
|
||||
- matchExpressions:
|
||||
- key: kubernetes.io/os
|
||||
operator: In
|
||||
values:
|
||||
- linux
|
||||
dex:
|
||||
- matchExpressions:
|
||||
- key: kubernetes.io/os
|
||||
operator: In
|
||||
values:
|
||||
- linux
|
||||
tolerations:
|
||||
hub: []
|
||||
workers:
|
||||
- operator: Exists
|
||||
effect: NoExecute
|
||||
front: []
|
||||
auth:
|
||||
enabled: false
|
||||
type: saml
|
||||
saml:
|
||||
idpMetadataUrl: ""
|
||||
x509crt: ""
|
||||
x509key: ""
|
||||
roleAttribute: role
|
||||
roles:
|
||||
admin:
|
||||
filter: ""
|
||||
canDownloadPCAP: true
|
||||
canUseScripting: true
|
||||
scriptingPermissions:
|
||||
canSave: true
|
||||
canActivate: true
|
||||
canDelete: true
|
||||
canUpdateTargetedPods: true
|
||||
canStopTrafficCapturing: true
|
||||
canControlDissection: true
|
||||
showAdminConsoleLink: true
|
||||
ingress:
|
||||
enabled: false
|
||||
className: ""
|
||||
host: ks.svc.cluster.local
|
||||
path: /
|
||||
tls: []
|
||||
annotations: {}
|
||||
priorityClass: ""
|
||||
routing:
|
||||
front:
|
||||
basePath: ""
|
||||
ipv6: true
|
||||
debug: false
|
||||
dashboard:
|
||||
streamingType: connect-rpc
|
||||
completeStreamingEnabled: true
|
||||
telemetry:
|
||||
enabled: true
|
||||
tls: true
|
||||
resourceGuard:
|
||||
enabled: false
|
||||
watchdog:
|
||||
enabled: false
|
||||
gitops:
|
||||
enabled: false
|
||||
sentry:
|
||||
enabled: false
|
||||
environment: production
|
||||
defaultFilter: ""
|
||||
liveConfigMapChangesDisabled: false
|
||||
globalFilter: ""
|
||||
enabledDissectors:
|
||||
- amqp
|
||||
- dns
|
||||
- http
|
||||
- icmp
|
||||
- kafka
|
||||
- redis
|
||||
- ws
|
||||
- ldap
|
||||
- radius
|
||||
- diameter
|
||||
- udp-flow
|
||||
- tcp-flow
|
||||
- tcp-conn
|
||||
- udp-conn
|
||||
portMapping:
|
||||
http:
|
||||
- 80
|
||||
- 443
|
||||
- 8080
|
||||
amqp:
|
||||
- 5671
|
||||
- 5672
|
||||
kafka:
|
||||
- 9092
|
||||
redis:
|
||||
- 6379
|
||||
ldap:
|
||||
- 389
|
||||
diameter:
|
||||
- 3868
|
||||
customMacros:
|
||||
https: tls and (http or http2)
|
||||
metrics:
|
||||
port: 49100
|
||||
pprof:
|
||||
enabled: false
|
||||
port: 8000
|
||||
view: flamegraph
|
||||
misc:
|
||||
jsonTTL: 5m
|
||||
pcapTTL: "0"
|
||||
pcapErrorTTL: "0"
|
||||
trafficSampleRate: 100
|
||||
tcpStreamChannelTimeoutMs: 10000
|
||||
tcpStreamChannelTimeoutShow: false
|
||||
resolutionStrategy: auto
|
||||
duplicateTimeframe: 200ms
|
||||
detectDuplicates: false
|
||||
staleTimeoutSeconds: 30
|
||||
securityContext:
|
||||
privileged: true
|
||||
appArmorProfile:
|
||||
type: ""
|
||||
localhostProfile: ""
|
||||
seLinuxOptions:
|
||||
level: ""
|
||||
role: ""
|
||||
type: ""
|
||||
user: ""
|
||||
capabilities:
|
||||
networkCapture:
|
||||
- NET_RAW
|
||||
- NET_ADMIN
|
||||
serviceMeshCapture:
|
||||
- SYS_ADMIN
|
||||
- SYS_PTRACE
|
||||
- DAC_OVERRIDE
|
||||
ebpfCapture:
|
||||
- SYS_ADMIN
|
||||
- SYS_PTRACE
|
||||
- SYS_RESOURCE
|
||||
- IPC_LOCK
|
||||
mountBpf: true
|
||||
hostNetwork: true
|
||||
logs:
|
||||
file: ""
|
||||
grep: ""
|
||||
pcapdump:
|
||||
enabled: false
|
||||
timeInterval: 1m
|
||||
maxTime: 1h
|
||||
maxSize: 500MB
|
||||
time: time
|
||||
debug: false
|
||||
dest: ""
|
||||
kube:
|
||||
configPath: ""
|
||||
context: ""
|
||||
dumpLogs: false
|
||||
headless: false
|
||||
license: ""
|
||||
cloudApiUrl: "https://api.kubeshark.com"
|
||||
cloudLicenseEnabled: true
|
||||
demoModeEnabled: false
|
||||
supportChatEnabled: false
|
||||
betaEnabled: false
|
||||
internetConnectivity: true
|
||||
scripting:
|
||||
enabled: false
|
||||
env: {}
|
||||
source: ""
|
||||
sources: []
|
||||
watchScripts: true
|
||||
active: []
|
||||
console: true
|
||||
timezone: ""
|
||||
logLevel: warning
|
||||
|
||||
94
install.sh
Normal file
@@ -0,0 +1,94 @@
|
||||
#!/bin/sh
|
||||
|
||||
EXE_NAME=kubeshark
|
||||
ALIAS_NAME=ks
|
||||
PROG_NAME=Kubeshark
|
||||
INSTALL_PATH=/usr/local/bin/$EXE_NAME
|
||||
ALIAS_PATH=/usr/local/bin/$ALIAS_NAME
|
||||
REPO=https://github.com/kubeshark/kubeshark
|
||||
OS=$(echo $(uname -s) | tr '[:upper:]' '[:lower:]')
|
||||
ARCH=$(echo $(uname -m) | tr '[:upper:]' '[:lower:]')
|
||||
SUPPORTED_PAIRS="linux_amd64 linux_arm64 darwin_amd64 darwin_arm64"
|
||||
|
||||
ESC="\033["
|
||||
F_DEFAULT=39
|
||||
F_RED=31
|
||||
F_GREEN=32
|
||||
F_YELLOW=33
|
||||
B_DEFAULT=49
|
||||
B_RED=41
|
||||
B_BLUE=44
|
||||
B_LIGHT_BLUE=104
|
||||
|
||||
if [ "$ARCH" = "x86_64" ]; then
|
||||
ARCH="amd64"
|
||||
fi
|
||||
|
||||
if [ "$ARCH" = "aarch64" ]; then
|
||||
ARCH="arm64"
|
||||
fi
|
||||
|
||||
echo $SUPPORTED_PAIRS | grep -w -q "${OS}_${ARCH}"
|
||||
|
||||
if [ $? != 0 ] ; then
|
||||
echo "\n${ESC}${F_RED}m🛑 Unsupported OS \"$OS\" or architecture \"$ARCH\". Failed to install $PROG_NAME.${ESC}${F_DEFAULT}m"
|
||||
echo "${ESC}${B_RED}mPlease report 🐛 to $REPO/issues${ESC}${F_DEFAULT}m"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check for Homebrew and kubeshark installation
|
||||
if command -v brew >/dev/null; then
|
||||
if brew list kubeshark &>/dev/null; then
|
||||
echo "📦 Found $PROG_NAME instance installed with Homebrew"
|
||||
echo "${ESC}${F_GREEN}m⬇️ Removing before installation with script${ESC}${F_DEFAULT}m"
|
||||
brew uninstall kubeshark
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "\n🦈 ${ESC}${F_DEFAULT};${B_BLUE}m Started to download $PROG_NAME ${ESC}${B_DEFAULT};${F_DEFAULT}m"
|
||||
|
||||
if curl -# --fail -Lo $EXE_NAME ${REPO}/releases/latest/download/${EXE_NAME}_${OS}_${ARCH} ; then
|
||||
chmod +x $PWD/$EXE_NAME
|
||||
echo "\n${ESC}${F_GREEN}m⬇️ $PROG_NAME is downloaded into $PWD/$EXE_NAME${ESC}${F_DEFAULT}m"
|
||||
else
|
||||
echo "\n${ESC}${F_RED}m🛑 Couldn't download ${REPO}/releases/latest/download/${EXE_NAME}_${OS}_${ARCH}\n\
|
||||
⚠️ Check your internet connection.\n\
|
||||
⚠️ Make sure 'curl' command is available.\n\
|
||||
⚠️ Make sure there is no directory named '${EXE_NAME}' in ${PWD}\n\
|
||||
${ESC}${F_DEFAULT}m"
|
||||
echo "${ESC}${B_RED}mPlease report 🐛 to $REPO/issues${ESC}${F_DEFAULT}m"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
use_cmd=$EXE_NAME
|
||||
printf "Do you want to install system-wide? Requires sudo 😇 (y/N)? "
|
||||
old_stty_cfg=$(stty -g)
|
||||
stty raw -echo ; answer=$(head -c 1) ; stty $old_stty_cfg
|
||||
if echo "$answer" | grep -iq "^y" ;then
|
||||
echo "$answer"
|
||||
sudo mv ./$EXE_NAME $INSTALL_PATH || exit 1
|
||||
echo "${ESC}${F_GREEN}m$PROG_NAME is installed into $INSTALL_PATH${ESC}${F_DEFAULT}m\n"
|
||||
|
||||
ls $ALIAS_PATH >> /dev/null 2>&1
|
||||
if [ $? != 0 ] ; then
|
||||
printf "Do you want to add 'ks' alias for Kubeshark? (y/N)? "
|
||||
old_stty_cfg=$(stty -g)
|
||||
stty raw -echo ; answer=$(head -c 1) ; stty $old_stty_cfg
|
||||
if echo "$answer" | grep -iq "^y" ; then
|
||||
echo "$answer"
|
||||
sudo ln -s $INSTALL_PATH $ALIAS_PATH
|
||||
|
||||
use_cmd=$ALIAS_NAME
|
||||
else
|
||||
echo "$answer"
|
||||
fi
|
||||
else
|
||||
use_cmd=$ALIAS_NAME
|
||||
fi
|
||||
else
|
||||
echo "$answer"
|
||||
use_cmd="./$EXE_NAME"
|
||||
fi
|
||||
|
||||
echo "${ESC}${F_GREEN}m✅ You can use the ${ESC}${F_DEFAULT};${B_LIGHT_BLUE}m $use_cmd ${ESC}${B_DEFAULT};${F_GREEN}m command now.${ESC}${F_DEFAULT}m"
|
||||
echo "\n${ESC}${F_YELLOW}mPlease give us a star 🌟 on ${ESC}${F_DEFAULT}m$REPO${ESC}${F_YELLOW}m if you ❤️ $PROG_NAME!${ESC}${F_DEFAULT}m"
|
||||
57
integration/README.md
Normal file
@@ -0,0 +1,57 @@
|
||||
# Integration Tests
|
||||
|
||||
This directory contains integration tests that run against a real Kubernetes cluster.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
1. **Kubernetes cluster** - A running cluster accessible via `kubectl`
|
||||
2. **kubectl** - Configured with appropriate context
|
||||
3. **Go 1.21+** - For running tests
|
||||
|
||||
## Running Tests
|
||||
|
||||
```bash
|
||||
# Run all integration tests
|
||||
make test-integration
|
||||
|
||||
# Run specific command tests
|
||||
make test-integration-mcp
|
||||
|
||||
# Run with verbose output
|
||||
make test-integration-verbose
|
||||
|
||||
# Run with custom timeout (default: 5m)
|
||||
INTEGRATION_TIMEOUT=10m make test-integration
|
||||
```
|
||||
|
||||
## Environment Variables
|
||||
|
||||
| Variable | Default | Description |
|
||||
|----------|---------|-------------|
|
||||
| `KUBESHARK_BINARY` | Auto-built | Path to pre-built kubeshark binary |
|
||||
| `INTEGRATION_TIMEOUT` | `5m` | Test timeout duration |
|
||||
| `KUBECONFIG` | `~/.kube/config` | Kubernetes config file |
|
||||
| `INTEGRATION_SKIP_CLEANUP` | `false` | Skip cleanup after tests (for debugging) |
|
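
The `KUBESHARK_BINARY` and `INTEGRATION_SKIP_CLEANUP` variables are read directly by the helpers in `common_test.go`. How `INTEGRATION_TIMEOUT` is consumed is not shown here; as a rough sketch, assuming it is meant to override the package's fixed `defaultTimeout` rather than only being forwarded to `go test -timeout` by the Makefile, a helper could look like this:

```go
//go:build integration

package integration

import (
	"os"
	"time"
)

// integrationTimeout is a hypothetical helper: it returns the value of
// INTEGRATION_TIMEOUT when it parses as a duration, otherwise it falls back
// to the fixed defaultTimeout declared in common_test.go.
func integrationTimeout() time.Duration {
	if v := os.Getenv("INTEGRATION_TIMEOUT"); v != "" {
		if d, err := time.ParseDuration(v); err == nil {
			return d
		}
	}
	return defaultTimeout
}
```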
||||
|
||||
## Test Structure
|
||||
|
||||
```
|
||||
integration/
|
||||
├── README.md # This file
|
||||
├── common_test.go # Shared test helpers
|
||||
├── mcp_test.go # MCP command integration tests
|
||||
├── tap_test.go # Tap command tests (future)
|
||||
└── ... # Additional command tests
|
||||
```
|
||||
|
||||
## Writing New Tests
|
||||
|
||||
1. Create `<command>_test.go` with build tag `//go:build integration`
|
||||
2. Use helpers from `common_test.go`: `requireKubernetesCluster(t)`, `getKubesharkBinary(t)`, `cleanupKubeshark(t, binary)` (a minimal sketch follows below)
|
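
A minimal sketch of such a test, using the helpers listed above plus `runKubeshark` from the same file; the `version` subcommand is just an illustrative choice:

```go
//go:build integration

package integration

import "testing"

// TestVersionCommand is an illustrative example of the pattern: guard on a
// reachable cluster, obtain the binary, clean up leftovers, run a subcommand.
func TestVersionCommand(t *testing.T) {
	requireKubernetesCluster(t)     // skips when kubectl cannot reach a cluster
	binary := getKubesharkBinary(t) // honors KUBESHARK_BINARY, otherwise builds once
	cleanupKubeshark(t, binary)     // best-effort removal of a previous deployment

	output, err := runKubeshark(t, binary, "version")
	if err != nil {
		t.Fatalf("version command failed: %v\n%s", err, output)
	}
	if output == "" {
		t.Fatal("expected non-empty version output")
	}
}
```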
||||
|
||||
## CI/CD Integration
|
||||
|
||||
```bash
|
||||
# JSON output for CI parsing
|
||||
go test -tags=integration -json ./integration/...
|
||||
```
|
||||
217
integration/common_test.go
Normal file
@@ -0,0 +1,217 @@
|
||||
//go:build integration
|
||||
|
||||
// Package integration contains integration tests that run against a real Kubernetes cluster.
|
||||
// Run with: go test -tags=integration ./integration/...
|
||||
package integration
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
binaryName = "kubeshark"
|
||||
defaultTimeout = 2 * time.Minute
|
||||
startupTimeout = 3 * time.Minute
|
||||
)
|
||||
|
||||
var (
|
||||
// binaryPath caches the built binary path
|
||||
binaryPath string
|
||||
buildOnce sync.Once
|
||||
buildErr error
|
||||
)
|
||||
|
||||
// requireKubernetesCluster skips the test if no Kubernetes cluster is available.
|
||||
func requireKubernetesCluster(t *testing.T) {
|
||||
t.Helper()
|
||||
if !hasKubernetesCluster() {
|
||||
t.Skip("Skipping: no Kubernetes cluster available")
|
||||
}
|
||||
}
|
||||
|
||||
// hasKubernetesCluster returns true if a Kubernetes cluster is accessible.
|
||||
func hasKubernetesCluster() bool {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
return exec.CommandContext(ctx, "kubectl", "cluster-info").Run() == nil
|
||||
}
|
||||
|
||||
// getKubesharkBinary returns the path to the kubeshark binary, building it if necessary.
|
||||
func getKubesharkBinary(t *testing.T) string {
|
||||
t.Helper()
|
||||
|
||||
// Check if binary path is provided via environment
|
||||
if envBinary := os.Getenv("KUBESHARK_BINARY"); envBinary != "" {
|
||||
if _, err := os.Stat(envBinary); err == nil {
|
||||
return envBinary
|
||||
}
|
||||
t.Fatalf("KUBESHARK_BINARY set but file not found: %s", envBinary)
|
||||
}
|
||||
|
||||
// Build once per test run
|
||||
buildOnce.Do(func() {
|
||||
binaryPath, buildErr = buildBinary(t)
|
||||
})
|
||||
|
||||
if buildErr != nil {
|
||||
t.Fatalf("Failed to build binary: %v", buildErr)
|
||||
}
|
||||
|
||||
return binaryPath
|
||||
}
|
||||
|
||||
// buildBinary compiles the binary and returns its path.
|
||||
func buildBinary(t *testing.T) (string, error) {
|
||||
t.Helper()
|
||||
|
||||
// Find project root (directory containing go.mod)
|
||||
projectRoot, err := findProjectRoot()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("finding project root: %w", err)
|
||||
}
|
||||
|
||||
outputPath := filepath.Join(projectRoot, "bin", binaryName+"_integration_test")
|
||||
|
||||
t.Logf("Building %s binary at %s", binaryName, outputPath)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
|
||||
defer cancel()
|
||||
|
||||
cmd := exec.CommandContext(ctx, "go", "build",
|
||||
"-o", outputPath,
|
||||
filepath.Join(projectRoot, binaryName+".go"),
|
||||
)
|
||||
cmd.Dir = projectRoot
|
||||
|
||||
output, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("build failed: %w\nOutput: %s", err, output)
|
||||
}
|
||||
|
||||
return outputPath, nil
|
||||
}
|
||||
|
||||
// findProjectRoot locates the project root by finding go.mod
|
||||
func findProjectRoot() (string, error) {
|
||||
dir, err := os.Getwd()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
for {
|
||||
if _, err := os.Stat(filepath.Join(dir, "go.mod")); err == nil {
|
||||
return dir, nil
|
||||
}
|
||||
|
||||
parent := filepath.Dir(dir)
|
||||
if parent == dir {
|
||||
return "", fmt.Errorf("could not find go.mod in any parent directory")
|
||||
}
|
||||
dir = parent
|
||||
}
|
||||
}
|
||||
|
||||
// runKubeshark executes the kubeshark binary with the given arguments.
|
||||
// Returns combined stdout/stderr and any error.
|
||||
func runKubeshark(t *testing.T, binary string, args ...string) (string, error) {
|
||||
t.Helper()
|
||||
return runKubesharkWithTimeout(t, binary, defaultTimeout, args...)
|
||||
}
|
||||
|
||||
// runKubesharkWithTimeout executes the kubeshark binary with a custom timeout.
|
||||
func runKubesharkWithTimeout(t *testing.T, binary string, timeout time.Duration, args ...string) (string, error) {
|
||||
t.Helper()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), timeout)
|
||||
defer cancel()
|
||||
|
||||
t.Logf("Running: %s %s", binary, strings.Join(args, " "))
|
||||
|
||||
cmd := exec.CommandContext(ctx, binary, args...)
|
||||
|
||||
var stdout, stderr bytes.Buffer
|
||||
cmd.Stdout = &stdout
|
||||
cmd.Stderr = &stderr
|
||||
|
||||
err := cmd.Run()
|
||||
|
||||
output := stdout.String()
|
||||
if stderr.Len() > 0 {
|
||||
output += "\n[stderr]\n" + stderr.String()
|
||||
}
|
||||
|
||||
if ctx.Err() == context.DeadlineExceeded {
|
||||
return output, fmt.Errorf("command timed out after %v", timeout)
|
||||
}
|
||||
|
||||
return output, err
|
||||
}
|
||||
|
||||
// cleanupKubeshark ensures Kubeshark is not running in the cluster.
|
||||
func cleanupKubeshark(t *testing.T, binary string) {
|
||||
t.Helper()
|
||||
|
||||
if os.Getenv("INTEGRATION_SKIP_CLEANUP") == "true" {
|
||||
t.Log("Skipping cleanup (INTEGRATION_SKIP_CLEANUP=true)")
|
||||
return
|
||||
}
|
||||
|
||||
t.Log("Cleaning up any existing Kubeshark installation...")
|
||||
|
||||
// Run clean command, ignore errors (might not be installed)
|
||||
_, _ = runKubeshark(t, binary, "clean")
|
||||
|
||||
// Wait a moment for resources to be deleted
|
||||
time.Sleep(2 * time.Second)
|
||||
}
|
||||
|
||||
// waitForKubesharkReady waits for Kubeshark to be ready after starting.
|
||||
func waitForKubesharkReady(t *testing.T, binary string, timeout time.Duration) error {
|
||||
t.Helper()
|
||||
|
||||
t.Log("Waiting for Kubeshark to be ready...")
|
||||
|
||||
deadline := time.Now().Add(timeout)
|
||||
|
||||
for time.Now().Before(deadline) {
|
||||
// Check if pods are running
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
cmd := exec.CommandContext(ctx, "kubectl", "get", "pods", "-l", "app.kubernetes.io/name=kubeshark", "-o", "jsonpath={.items[*].status.phase}")
|
||||
output, err := cmd.Output()
|
||||
cancel()
|
||||
|
||||
if err == nil && strings.Contains(string(output), "Running") {
|
||||
t.Log("Kubeshark is ready")
|
||||
return nil
|
||||
}
|
||||
|
||||
time.Sleep(5 * time.Second)
|
||||
}
|
||||
|
||||
return fmt.Errorf("timeout waiting for Kubeshark to be ready")
|
||||
}
|
||||
|
||||
// isKubesharkRunning checks if Kubeshark is currently running in the cluster.
|
||||
func isKubesharkRunning(t *testing.T) bool {
|
||||
t.Helper()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
cmd := exec.CommandContext(ctx, "kubectl", "get", "pods", "-l", "app.kubernetes.io/name=kubeshark", "-o", "name")
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return strings.TrimSpace(string(output)) != ""
|
||||
}
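
// Illustrative sketch, not part of the original file: how the helpers above
// typically compose. It assumes buildBinary succeeds and that kubectl can
// reach a cluster; it is not one of the real integration tests.
func sketchClusterHelpers(t *testing.T) {
	binary, err := buildBinary(t)
	if err != nil {
		t.Fatalf("building binary: %v", err)
	}
	// Start from a clean cluster, then confirm no Kubeshark pods remain.
	cleanupKubeshark(t, binary)
	if isKubesharkRunning(t) {
		t.Fatal("expected no Kubeshark pods after clean")
	}
}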
|
||||
529
integration/mcp_test.go
Normal file
@@ -0,0 +1,529 @@
|
||||
//go:build integration
|
||||
|
||||
package integration
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// MCPRequest represents a JSON-RPC request
|
||||
type MCPRequest struct {
|
||||
JSONRPC string `json:"jsonrpc"`
|
||||
ID int `json:"id"`
|
||||
Method string `json:"method"`
|
||||
Params interface{} `json:"params,omitempty"`
|
||||
}
|
||||
|
||||
// MCPResponse represents a JSON-RPC response
|
||||
type MCPResponse struct {
|
||||
JSONRPC string `json:"jsonrpc"`
|
||||
ID int `json:"id"`
|
||||
Result json.RawMessage `json:"result,omitempty"`
|
||||
Error *MCPError `json:"error,omitempty"`
|
||||
}
|
||||
|
||||
// MCPError represents a JSON-RPC error
|
||||
type MCPError struct {
|
||||
Code int `json:"code"`
|
||||
Message string `json:"message"`
|
||||
}
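
// Framing note (illustrative, based on the helpers below): each request is
// marshaled to a single JSON object and written to the server's stdin followed
// by a newline, and each response is read back as one newline-terminated JSON
// line, for example:
//
//	{"jsonrpc":"2.0","id":2,"method":"tools/list"}
//	{"jsonrpc":"2.0","id":2,"result":{"tools":[...]}}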
|
||||
|
||||
// mcpSession represents a running MCP server session
|
||||
type mcpSession struct {
|
||||
cmd *exec.Cmd
|
||||
stdin io.WriteCloser
|
||||
stdout *bufio.Reader
|
||||
stderr *bytes.Buffer // Captured stderr for debugging
|
||||
cancel context.CancelFunc
|
||||
}
|
||||
|
||||
// startMCPSession starts an MCP server and returns a session for sending requests.
|
||||
// By default, starts in read-only mode (no --allow-destructive).
|
||||
func startMCPSession(t *testing.T, binary string, args ...string) *mcpSession {
|
||||
t.Helper()
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
||||
cmdArgs := append([]string{"mcp"}, args...)
|
||||
cmd := exec.CommandContext(ctx, binary, cmdArgs...)
|
||||
|
||||
stdin, err := cmd.StdinPipe()
|
||||
if err != nil {
|
||||
cancel()
|
||||
t.Fatalf("Failed to create stdin pipe: %v", err)
|
||||
}
|
||||
|
||||
stdout, err := cmd.StdoutPipe()
|
||||
if err != nil {
|
||||
cancel()
|
||||
t.Fatalf("Failed to create stdout pipe: %v", err)
|
||||
}
|
||||
|
||||
// Capture stderr for debugging
|
||||
var stderrBuf bytes.Buffer
|
||||
cmd.Stderr = &stderrBuf
|
||||
|
||||
if err := cmd.Start(); err != nil {
|
||||
cancel()
|
||||
t.Fatalf("Failed to start MCP server: %v", err)
|
||||
}
|
||||
|
||||
return &mcpSession{
|
||||
cmd: cmd,
|
||||
stdin: stdin,
|
||||
stdout: bufio.NewReader(stdout),
|
||||
stderr: &stderrBuf,
|
||||
cancel: cancel,
|
||||
}
|
||||
}
|
||||
|
||||
// startMCPSessionWithDestructive starts an MCP server with --allow-destructive flag.
|
||||
func startMCPSessionWithDestructive(t *testing.T, binary string, args ...string) *mcpSession {
|
||||
t.Helper()
|
||||
allArgs := append([]string{"--allow-destructive"}, args...)
|
||||
return startMCPSession(t, binary, allArgs...)
|
||||
}
|
||||
|
||||
// sendRequest sends a JSON-RPC request and returns the response (30s timeout).
|
||||
func (s *mcpSession) sendRequest(t *testing.T, req MCPRequest) MCPResponse {
|
||||
t.Helper()
|
||||
return s.sendRequestWithTimeout(t, req, 30*time.Second)
|
||||
}
|
||||
|
||||
// sendRequestWithTimeout sends a JSON-RPC request with a custom timeout.
|
||||
func (s *mcpSession) sendRequestWithTimeout(t *testing.T, req MCPRequest, timeout time.Duration) MCPResponse {
|
||||
t.Helper()
|
||||
|
||||
reqBytes, err := json.Marshal(req)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to marshal request: %v", err)
|
||||
}
|
||||
|
||||
t.Logf("Sending: %s", string(reqBytes))
|
||||
|
||||
if _, err := s.stdin.Write(append(reqBytes, '\n')); err != nil {
|
||||
t.Fatalf("Failed to write request: %v", err)
|
||||
}
|
||||
|
||||
// Read response with timeout
|
||||
responseChan := make(chan string, 1)
|
||||
errChan := make(chan error, 1)
|
||||
|
||||
go func() {
|
||||
line, err := s.stdout.ReadString('\n')
|
||||
if err != nil {
|
||||
errChan <- err
|
||||
return
|
||||
}
|
||||
responseChan <- line
|
||||
}()
|
||||
|
||||
select {
|
||||
case line := <-responseChan:
|
||||
t.Logf("Received: %s", strings.TrimSpace(line))
|
||||
var resp MCPResponse
|
||||
if err := json.Unmarshal([]byte(line), &resp); err != nil {
|
||||
t.Fatalf("Failed to unmarshal response: %v\nResponse: %s", err, line)
|
||||
}
|
||||
return resp
|
||||
case err := <-errChan:
|
||||
t.Fatalf("Failed to read response: %v", err)
|
||||
return MCPResponse{}
|
||||
case <-time.After(timeout):
|
||||
t.Fatalf("Timeout waiting for MCP response after %v", timeout)
|
||||
return MCPResponse{}
|
||||
}
|
||||
}
|
||||
|
||||
// callTool invokes an MCP tool and returns the response (30s timeout).
|
||||
func (s *mcpSession) callTool(t *testing.T, id int, toolName string, args map[string]interface{}) MCPResponse {
|
||||
t.Helper()
|
||||
return s.callToolWithTimeout(t, id, toolName, args, 30*time.Second)
|
||||
}
|
||||
|
||||
// callToolWithTimeout invokes an MCP tool with a custom timeout.
|
||||
func (s *mcpSession) callToolWithTimeout(t *testing.T, id int, toolName string, args map[string]interface{}, timeout time.Duration) MCPResponse {
|
||||
t.Helper()
|
||||
|
||||
return s.sendRequestWithTimeout(t, MCPRequest{
|
||||
JSONRPC: "2.0",
|
||||
ID: id,
|
||||
Method: "tools/call",
|
||||
Params: map[string]interface{}{
|
||||
"name": toolName,
|
||||
"arguments": args,
|
||||
},
|
||||
}, timeout)
|
||||
}
|
||||
|
||||
// close terminates the MCP session.
|
||||
func (s *mcpSession) close() {
|
||||
s.cancel()
|
||||
_ = s.cmd.Wait()
|
||||
}
|
||||
|
||||
// getStderr returns any captured stderr output (useful for debugging failures).
|
||||
func (s *mcpSession) getStderr() string {
|
||||
if s.stderr == nil {
|
||||
return ""
|
||||
}
|
||||
return s.stderr.String()
|
||||
}
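
// Illustrative usage note: on a test failure the captured stderr can be dumped
// for debugging, e.g. t.Logf("mcp stderr:\n%s", session.getStderr()).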
|
||||
|
||||
// initialize sends the MCP initialize request and returns the response.
|
||||
func (s *mcpSession) initialize(t *testing.T, id int) MCPResponse {
|
||||
t.Helper()
|
||||
return s.sendRequest(t, MCPRequest{
|
||||
JSONRPC: "2.0",
|
||||
ID: id,
|
||||
Method: "initialize",
|
||||
Params: map[string]interface{}{
|
||||
"protocolVersion": "2024-11-05",
|
||||
"capabilities": map[string]interface{}{},
|
||||
"clientInfo": map[string]interface{}{"name": "test", "version": "1.0"},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// TestMCP_Initialize tests the MCP initialization handshake.
|
||||
func TestMCP_Initialize(t *testing.T) {
|
||||
requireKubernetesCluster(t)
|
||||
session := startMCPSession(t, getKubesharkBinary(t))
|
||||
defer session.close()
|
||||
|
||||
resp := session.initialize(t, 1)
|
||||
if resp.Error != nil {
|
||||
t.Fatalf("Initialize failed: %s", resp.Error.Message)
|
||||
}
|
||||
|
||||
var result map[string]interface{}
|
||||
if err := json.Unmarshal(resp.Result, &result); err != nil {
|
||||
t.Fatalf("Failed to parse result: %v", err)
|
||||
}
|
||||
|
||||
if _, ok := result["capabilities"]; !ok {
|
||||
t.Error("Response missing capabilities")
|
||||
}
|
||||
if _, ok := result["serverInfo"]; !ok {
|
||||
t.Error("Response missing serverInfo")
|
||||
}
|
||||
}
|
||||
|
||||
// TestMCP_ToolsList_ReadOnly tests that tools/list returns only safe tools in read-only mode.
|
||||
func TestMCP_ToolsList_ReadOnly(t *testing.T) {
|
||||
requireKubernetesCluster(t)
|
||||
session := startMCPSession(t, getKubesharkBinary(t))
|
||||
defer session.close()
|
||||
|
||||
session.initialize(t, 1)
|
||||
resp := session.sendRequest(t, MCPRequest{JSONRPC: "2.0", ID: 2, Method: "tools/list"})
|
||||
if resp.Error != nil {
|
||||
t.Fatalf("tools/list failed: %s", resp.Error.Message)
|
||||
}
|
||||
|
||||
var result struct {
|
||||
Tools []struct{ Name string `json:"name"` } `json:"tools"`
|
||||
}
|
||||
if err := json.Unmarshal(resp.Result, &result); err != nil {
|
||||
t.Fatalf("Failed to parse result: %v", err)
|
||||
}
|
||||
|
||||
toolNames := make(map[string]bool)
|
||||
for _, tool := range result.Tools {
|
||||
toolNames[tool.Name] = true
|
||||
}
|
||||
|
||||
if !toolNames["check_kubeshark_status"] {
|
||||
t.Error("Missing expected tool: check_kubeshark_status")
|
||||
}
|
||||
if toolNames["start_kubeshark"] || toolNames["stop_kubeshark"] {
|
||||
t.Error("Destructive tools should not be available in read-only mode")
|
||||
}
|
||||
}
|
||||
|
||||
// TestMCP_ToolsList_WithDestructive tests that tools/list includes destructive tools when flag is set.
|
||||
func TestMCP_ToolsList_WithDestructive(t *testing.T) {
|
||||
requireKubernetesCluster(t)
|
||||
session := startMCPSessionWithDestructive(t, getKubesharkBinary(t))
|
||||
defer session.close()
|
||||
|
||||
session.initialize(t, 1)
|
||||
resp := session.sendRequest(t, MCPRequest{JSONRPC: "2.0", ID: 2, Method: "tools/list"})
|
||||
if resp.Error != nil {
|
||||
t.Fatalf("tools/list failed: %s", resp.Error.Message)
|
||||
}
|
||||
|
||||
var result struct {
|
||||
Tools []struct{ Name string `json:"name"` } `json:"tools"`
|
||||
}
|
||||
if err := json.Unmarshal(resp.Result, &result); err != nil {
|
||||
t.Fatalf("Failed to parse result: %v", err)
|
||||
}
|
||||
|
||||
toolNames := make(map[string]bool)
|
||||
for _, tool := range result.Tools {
|
||||
toolNames[tool.Name] = true
|
||||
}
|
||||
|
||||
for _, expected := range []string{"check_kubeshark_status", "start_kubeshark", "stop_kubeshark"} {
|
||||
if !toolNames[expected] {
|
||||
t.Errorf("Missing expected tool: %s", expected)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestMCP_CheckKubesharkStatus_NotRunning tests check_kubeshark_status when Kubeshark is not running.
|
||||
func TestMCP_CheckKubesharkStatus_NotRunning(t *testing.T) {
|
||||
requireKubernetesCluster(t)
|
||||
binary := getKubesharkBinary(t)
|
||||
cleanupKubeshark(t, binary)
|
||||
|
||||
session := startMCPSession(t, binary)
|
||||
defer session.close()
|
||||
|
||||
session.initialize(t, 1)
|
||||
resp := session.callTool(t, 2, "check_kubeshark_status", nil)
|
||||
if resp.Error != nil {
|
||||
t.Fatalf("check_kubeshark_status failed: %s", resp.Error.Message)
|
||||
}
|
||||
|
||||
var result struct {
|
||||
Content []struct{ Text string `json:"text"` } `json:"content"`
|
||||
}
|
||||
if err := json.Unmarshal(resp.Result, &result); err != nil {
|
||||
t.Fatalf("Failed to parse result: %v", err)
|
||||
}
|
||||
|
||||
if len(result.Content) == 0 || (!strings.Contains(result.Content[0].Text, "not running") && !strings.Contains(result.Content[0].Text, "NOT")) {
|
||||
t.Errorf("Expected 'not running' status")
|
||||
}
|
||||
}
|
||||
|
||||
// TestMCP_StartKubeshark tests the start_kubeshark tool.
|
||||
func TestMCP_StartKubeshark(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Skipping in short mode")
|
||||
}
|
||||
requireKubernetesCluster(t)
|
||||
binary := getKubesharkBinary(t)
|
||||
cleanupKubeshark(t, binary)
|
||||
t.Cleanup(func() { cleanupKubeshark(t, binary) })
|
||||
|
||||
session := startMCPSessionWithDestructive(t, binary)
|
||||
defer session.close()
|
||||
|
||||
session.initialize(t, 1)
|
||||
resp := session.callToolWithTimeout(t, 2, "start_kubeshark", nil, 3*time.Minute)
|
||||
if resp.Error != nil {
|
||||
t.Fatalf("start_kubeshark failed: %s", resp.Error.Message)
|
||||
}
|
||||
|
||||
if !isKubesharkRunning(t) {
|
||||
t.Error("Kubeshark should be running after start_kubeshark")
|
||||
}
|
||||
}
|
||||
|
||||
// TestMCP_StartKubeshark_WithoutFlag tests that start_kubeshark fails without --allow-destructive.
|
||||
func TestMCP_StartKubeshark_WithoutFlag(t *testing.T) {
|
||||
requireKubernetesCluster(t)
|
||||
session := startMCPSession(t, getKubesharkBinary(t))
|
||||
defer session.close()
|
||||
|
||||
session.initialize(t, 1)
|
||||
resp := session.callTool(t, 2, "start_kubeshark", nil)
|
||||
|
||||
var result struct {
|
||||
Content []struct{ Text string `json:"text"` } `json:"content"`
|
||||
IsError bool `json:"isError"`
|
||||
}
|
||||
if err := json.Unmarshal(resp.Result, &result); err != nil {
|
||||
t.Fatalf("Failed to parse result: %v", err)
|
||||
}
|
||||
|
||||
if !result.IsError {
|
||||
t.Error("Expected isError=true without --allow-destructive")
|
||||
}
|
||||
}
|
||||
|
||||
// TestMCP_StopKubeshark tests the stop_kubeshark tool.
|
||||
func TestMCP_StopKubeshark(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Skipping in short mode")
|
||||
}
|
||||
requireKubernetesCluster(t)
|
||||
binary := getKubesharkBinary(t)
|
||||
|
||||
session := startMCPSessionWithDestructive(t, binary)
|
||||
defer session.close()
|
||||
|
||||
session.initialize(t, 0)
|
||||
|
||||
// Start Kubeshark if not running
|
||||
if !isKubesharkRunning(t) {
|
||||
resp := session.callToolWithTimeout(t, 1, "start_kubeshark", nil, 2*time.Minute)
|
||||
if resp.Error != nil {
|
||||
t.Skipf("Could not start Kubeshark: %v", resp.Error.Message)
|
||||
}
|
||||
}
|
||||
|
||||
resp := session.callToolWithTimeout(t, 2, "stop_kubeshark", nil, 2*time.Minute)
|
||||
if resp.Error != nil {
|
||||
t.Fatalf("stop_kubeshark failed: %s", resp.Error.Message)
|
||||
}
|
||||
|
||||
time.Sleep(5 * time.Second)
|
||||
if isKubesharkRunning(t) {
|
||||
t.Error("Kubeshark should not be running after stop_kubeshark")
|
||||
}
|
||||
}
|
||||
|
||||
// TestMCP_StopKubeshark_WithoutFlag tests that stop_kubeshark fails without --allow-destructive.
|
||||
func TestMCP_StopKubeshark_WithoutFlag(t *testing.T) {
|
||||
requireKubernetesCluster(t)
|
||||
session := startMCPSession(t, getKubesharkBinary(t))
|
||||
defer session.close()
|
||||
|
||||
session.initialize(t, 1)
|
||||
resp := session.callTool(t, 2, "stop_kubeshark", nil)
|
||||
|
||||
var result struct {
|
||||
IsError bool `json:"isError"`
|
||||
}
|
||||
if err := json.Unmarshal(resp.Result, &result); err != nil {
|
||||
t.Fatalf("Failed to parse result: %v", err)
|
||||
}
|
||||
|
||||
if !result.IsError {
|
||||
t.Error("Expected isError=true without --allow-destructive")
|
||||
}
|
||||
}
|
||||
|
||||
// TestMCP_FullLifecycle tests the complete lifecycle: check -> start -> check -> stop -> check
|
||||
func TestMCP_FullLifecycle(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Skipping in short mode")
|
||||
}
|
||||
requireKubernetesCluster(t)
|
||||
binary := getKubesharkBinary(t)
|
||||
cleanupKubeshark(t, binary)
|
||||
|
||||
session := startMCPSessionWithDestructive(t, binary)
|
||||
defer session.close()
|
||||
|
||||
session.initialize(t, 1)
|
||||
|
||||
// Check -> Start -> Check -> Stop -> Check
|
||||
if resp := session.callTool(t, 2, "check_kubeshark_status", nil); resp.Error != nil {
|
||||
t.Fatalf("Initial status check failed: %s", resp.Error.Message)
|
||||
}
|
||||
|
||||
if resp := session.callToolWithTimeout(t, 3, "start_kubeshark", nil, 3*time.Minute); resp.Error != nil {
|
||||
t.Fatalf("Start failed: %s", resp.Error.Message)
|
||||
}
|
||||
if err := waitForKubesharkReady(t, binary, startupTimeout); err != nil {
|
||||
t.Fatalf("Kubeshark did not become ready: %v", err)
|
||||
}
|
||||
|
||||
if resp := session.callTool(t, 4, "check_kubeshark_status", nil); resp.Error != nil {
|
||||
t.Fatalf("Status check after start failed: %s", resp.Error.Message)
|
||||
}
|
||||
|
||||
if resp := session.callToolWithTimeout(t, 5, "stop_kubeshark", nil, 2*time.Minute); resp.Error != nil {
|
||||
t.Fatalf("Stop failed: %s", resp.Error.Message)
|
||||
}
|
||||
time.Sleep(5 * time.Second)
|
||||
|
||||
if resp := session.callTool(t, 6, "check_kubeshark_status", nil); resp.Error != nil {
|
||||
t.Fatalf("Final status check failed: %s", resp.Error.Message)
|
||||
}
|
||||
}
|
||||
|
||||
// TestMCP_APIToolsRequireKubeshark tests that API tools return helpful errors when Kubeshark isn't running.
|
||||
func TestMCP_APIToolsRequireKubeshark(t *testing.T) {
|
||||
requireKubernetesCluster(t)
|
||||
binary := getKubesharkBinary(t)
|
||||
cleanupKubeshark(t, binary)
|
||||
|
||||
session := startMCPSession(t, binary)
|
||||
defer session.close()
|
||||
|
||||
session.initialize(t, 1)
|
||||
|
||||
for i, tool := range []string{"list_workloads", "list_api_calls", "get_api_stats"} {
|
||||
resp := session.callTool(t, i+2, tool, nil)
|
||||
// Either error or helpful message is acceptable
|
||||
if resp.Error != nil {
|
||||
t.Logf("%s returned error (expected): %s", tool, resp.Error.Message)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestMCP_SetFlags tests that --set flags are passed correctly.
|
||||
func TestMCP_SetFlags(t *testing.T) {
|
||||
requireKubernetesCluster(t)
|
||||
session := startMCPSession(t, getKubesharkBinary(t), "--set", "tap.namespaces={default}")
|
||||
defer session.close()
|
||||
|
||||
session.initialize(t, 1)
|
||||
resp := session.sendRequest(t, MCPRequest{JSONRPC: "2.0", ID: 2, Method: "tools/list"})
|
||||
if resp.Error != nil {
|
||||
t.Fatalf("tools/list failed with --set flags: %s", resp.Error.Message)
|
||||
}
|
||||
}
|
||||
|
||||
// BenchmarkMCP_CheckStatus benchmarks the check_kubeshark_status tool.
|
||||
func BenchmarkMCP_CheckStatus(b *testing.B) {
|
||||
if testing.Short() {
|
||||
b.Skip("Skipping benchmark in short mode")
|
||||
}
|
||||
if !hasKubernetesCluster() {
|
||||
b.Skip("Skipping: no Kubernetes cluster available")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
cmd := exec.CommandContext(ctx, getKubesharkBinary(b), "mcp")
|
||||
stdin, _ := cmd.StdinPipe()
|
||||
stdout, _ := cmd.StdoutPipe()
|
||||
reader := bufio.NewReader(stdout)
|
||||
|
||||
if err := cmd.Start(); err != nil {
|
||||
b.Fatalf("Failed to start MCP: %v", err)
|
||||
}
|
||||
defer func() { cancel(); _ = cmd.Wait() }()
|
||||
|
||||
// Initialize
|
||||
initReq, _ := json.Marshal(MCPRequest{
|
||||
JSONRPC: "2.0", ID: 0, Method: "initialize",
|
||||
Params: map[string]interface{}{
|
||||
"protocolVersion": "2024-11-05",
|
||||
"capabilities": map[string]interface{}{},
|
||||
"clientInfo": map[string]interface{}{"name": "bench", "version": "1.0"},
|
||||
},
|
||||
})
|
||||
_, _ = stdin.Write(append(initReq, '\n'))
|
||||
_, _ = reader.ReadString('\n')
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
req, _ := json.Marshal(MCPRequest{
|
||||
JSONRPC: "2.0", ID: i + 1, Method: "tools/call",
|
||||
Params: map[string]interface{}{"name": "check_kubeshark_status", "arguments": map[string]interface{}{}},
|
||||
})
|
||||
if _, err := stdin.Write(append(req, '\n')); err != nil {
|
||||
b.Fatalf("Write failed: %v", err)
|
||||
}
|
||||
if _, err := reader.ReadString('\n'); err != nil {
|
||||
b.Fatalf("Read failed: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -3,7 +3,6 @@ package connect
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
@@ -12,7 +11,6 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/kubeshark/kubeshark/config"
|
||||
"github.com/kubeshark/kubeshark/misc"
|
||||
"github.com/kubeshark/kubeshark/utils"
|
||||
|
||||
"github.com/rs/zerolog/log"
|
||||
@@ -90,39 +88,6 @@ func (connector *Connector) PostWorkerPodToHub(pod *v1.Pod) {
|
||||
}
|
||||
}
|
||||
|
||||
type postRegexRequest struct {
|
||||
Regex string `json:"regex"`
|
||||
Namespaces []string `json:"namespaces"`
|
||||
}
|
||||
|
||||
func (connector *Connector) PostRegexToHub(regex string, namespaces []string) {
|
||||
postRegexUrl := fmt.Sprintf("%s/pods/regex", connector.url)
|
||||
|
||||
payload := postRegexRequest{
|
||||
Regex: regex,
|
||||
Namespaces: namespaces,
|
||||
}
|
||||
|
||||
if payloadMarshalled, err := json.Marshal(payload); err != nil {
|
||||
log.Error().Err(err).Msg("Failed to marshal the pod regex:")
|
||||
} else {
|
||||
ok := false
|
||||
for !ok {
|
||||
var resp *http.Response
|
||||
if resp, err = utils.Post(postRegexUrl, "application/json", bytes.NewBuffer(payloadMarshalled), connector.client, config.Config.License); err != nil || resp.StatusCode != http.StatusOK {
|
||||
if _, ok := err.(*url.Error); ok {
|
||||
break
|
||||
}
|
||||
log.Warn().Err(err).Msg("Failed sending the pod regex to Hub. Retrying...")
|
||||
} else {
|
||||
log.Debug().Str("regex", regex).Strs("namespaces", namespaces).Msg("Reported pod regex to Hub:")
|
||||
return
|
||||
}
|
||||
time.Sleep(DefaultSleep)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type postLicenseRequest struct {
|
||||
License string `json:"license"`
|
||||
}
|
||||
@@ -154,195 +119,6 @@ func (connector *Connector) PostLicense(license string) {
|
||||
}
|
||||
}
|
||||
|
||||
func (connector *Connector) PostLicenseSingle(license string) {
|
||||
postLicenseUrl := fmt.Sprintf("%s/license", connector.url)
|
||||
|
||||
payload := postLicenseRequest{
|
||||
License: license,
|
||||
}
|
||||
|
||||
if payloadMarshalled, err := json.Marshal(payload); err != nil {
|
||||
log.Error().Err(err).Msg("Failed to marshal the payload:")
|
||||
} else {
|
||||
var resp *http.Response
|
||||
if resp, err = utils.Post(postLicenseUrl, "application/json", bytes.NewBuffer(payloadMarshalled), connector.client, config.Config.License); err != nil || resp.StatusCode != http.StatusOK {
|
||||
log.Warn().Err(err).Msg("Failed sending the license to Hub.")
|
||||
} else {
|
||||
log.Debug().Str("license", license).Msg("Reported license to Hub:")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (connector *Connector) PostEnv(env map[string]interface{}) {
|
||||
if len(env) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
postEnvUrl := fmt.Sprintf("%s/scripts/env", connector.url)
|
||||
|
||||
if envMarshalled, err := json.Marshal(env); err != nil {
|
||||
log.Error().Err(err).Msg("Failed to marshal the env:")
|
||||
} else {
|
||||
ok := false
|
||||
for !ok {
|
||||
var resp *http.Response
|
||||
if resp, err = utils.Post(postEnvUrl, "application/json", bytes.NewBuffer(envMarshalled), connector.client, config.Config.License); err != nil || resp.StatusCode != http.StatusOK {
|
||||
if _, ok := err.(*url.Error); ok {
|
||||
break
|
||||
}
|
||||
log.Warn().Err(err).Msg("Failed sending the scripting environment variables to Hub. Retrying...")
|
||||
} else {
|
||||
log.Debug().Interface("env", env).Msg("Reported scripting environment variables to Hub:")
|
||||
return
|
||||
}
|
||||
time.Sleep(DefaultSleep)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (connector *Connector) PostScript(script *misc.Script) (index int64, err error) {
|
||||
postScriptUrl := fmt.Sprintf("%s/scripts", connector.url)
|
||||
|
||||
var scriptMarshalled []byte
|
||||
if scriptMarshalled, err = json.Marshal(script); err != nil {
|
||||
log.Error().Err(err).Msg("Failed to marshal the script:")
|
||||
} else {
|
||||
ok := false
|
||||
for !ok {
|
||||
var resp *http.Response
|
||||
if resp, err = utils.Post(postScriptUrl, "application/json", bytes.NewBuffer(scriptMarshalled), connector.client, config.Config.License); err != nil || resp.StatusCode != http.StatusOK {
|
||||
if _, ok := err.(*url.Error); ok {
|
||||
break
|
||||
}
|
||||
log.Warn().Err(err).Msg("Failed creating script Hub:")
|
||||
} else {
|
||||
|
||||
var j map[string]interface{}
|
||||
err = json.NewDecoder(resp.Body).Decode(&j)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
val, ok := j["index"]
|
||||
if !ok {
|
||||
err = errors.New("Response does not contain `key` field!")
|
||||
return
|
||||
}
|
||||
|
||||
index = int64(val.(float64))
|
||||
|
||||
log.Debug().Int("index", int(index)).Interface("script", script).Msg("Created script on Hub:")
|
||||
return
|
||||
}
|
||||
time.Sleep(DefaultSleep)
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (connector *Connector) PutScript(script *misc.Script, index int64) (err error) {
|
||||
putScriptUrl := fmt.Sprintf("%s/scripts/%d", connector.url, index)
|
||||
|
||||
var scriptMarshalled []byte
|
||||
if scriptMarshalled, err = json.Marshal(script); err != nil {
|
||||
log.Error().Err(err).Msg("Failed to marshal the script:")
|
||||
} else {
|
||||
ok := false
|
||||
for !ok {
|
||||
client := &http.Client{}
|
||||
|
||||
var req *http.Request
|
||||
req, err = http.NewRequest(http.MethodPut, putScriptUrl, bytes.NewBuffer(scriptMarshalled))
|
||||
if err != nil {
|
||||
log.Error().Err(err).Send()
|
||||
return
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
req.Header.Set("License-Key", config.Config.License)
|
||||
|
||||
var resp *http.Response
|
||||
resp, err = client.Do(req)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Send()
|
||||
return
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
if _, ok := err.(*url.Error); ok {
|
||||
break
|
||||
}
|
||||
log.Warn().Err(err).Msg("Failed updating script on Hub:")
|
||||
} else {
|
||||
log.Debug().Int("index", int(index)).Interface("script", script).Msg("Updated script on Hub:")
|
||||
return
|
||||
}
|
||||
time.Sleep(DefaultSleep)
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (connector *Connector) DeleteScript(index int64) (err error) {
|
||||
deleteScriptUrl := fmt.Sprintf("%s/scripts/%d", connector.url, index)
|
||||
|
||||
ok := false
|
||||
for !ok {
|
||||
client := &http.Client{}
|
||||
|
||||
var req *http.Request
|
||||
req, err = http.NewRequest(http.MethodDelete, deleteScriptUrl, nil)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Send()
|
||||
return
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
req.Header.Set("License-Key", config.Config.License)
|
||||
|
||||
var resp *http.Response
|
||||
resp, err = client.Do(req)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Send()
|
||||
return
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
if _, ok := err.(*url.Error); ok {
|
||||
break
|
||||
}
|
||||
log.Warn().Err(err).Msg("Failed deleting script on Hub:")
|
||||
} else {
|
||||
log.Debug().Int("index", int(index)).Msg("Deleted script on Hub:")
|
||||
return
|
||||
}
|
||||
time.Sleep(DefaultSleep)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (connector *Connector) PostScriptDone() {
|
||||
postScripDonetUrl := fmt.Sprintf("%s/scripts/done", connector.url)
|
||||
|
||||
ok := false
|
||||
var err error
|
||||
for !ok {
|
||||
var resp *http.Response
|
||||
if resp, err = utils.Post(postScripDonetUrl, "application/json", nil, connector.client, config.Config.License); err != nil || resp.StatusCode != http.StatusOK {
|
||||
if _, ok := err.(*url.Error); ok {
|
||||
break
|
||||
}
|
||||
log.Warn().Err(err).Msg("Failed sending the POST scripts done to Hub. Retrying...")
|
||||
} else {
|
||||
log.Debug().Msg("Reported POST scripts done to Hub.")
|
||||
return
|
||||
}
|
||||
time.Sleep(DefaultSleep)
|
||||
}
|
||||
}
|
||||
|
||||
func (connector *Connector) PostPcapsMerge(out *os.File) {
|
||||
postEnvUrl := fmt.Sprintf("%s/pcaps/merge", connector.url)
|
||||
|
||||
|
||||
139
kubernetes/config.go
Normal file
@@ -0,0 +1,139 @@
|
||||
package kubernetes
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"github.com/kubeshark/kubeshark/config"
|
||||
"github.com/kubeshark/kubeshark/misc"
|
||||
"github.com/rs/zerolog/log"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
const (
|
||||
SUFFIX_SECRET = "secret"
|
||||
SUFFIX_CONFIG_MAP = "config-map"
|
||||
SECRET_LICENSE = "LICENSE"
|
||||
CONFIG_POD_REGEX = "POD_REGEX"
|
||||
CONFIG_NAMESPACES = "NAMESPACES"
|
||||
CONFIG_EXCLUDED_NAMESPACES = "EXCLUDED_NAMESPACES"
|
||||
CONFIG_SCRIPTING_ENV = "SCRIPTING_ENV"
|
||||
CONFIG_INGRESS_ENABLED = "INGRESS_ENABLED"
|
||||
CONFIG_INGRESS_HOST = "INGRESS_HOST"
|
||||
CONFIG_PROXY_FRONT_PORT = "PROXY_FRONT_PORT"
|
||||
CONFIG_AUTH_ENABLED = "AUTH_ENABLED"
|
||||
CONFIG_AUTH_TYPE = "AUTH_TYPE"
|
||||
CONFIG_AUTH_SAML_IDP_METADATA_URL = "AUTH_SAML_IDP_METADATA_URL"
|
||||
CONFIG_SCRIPTING_SCRIPTS = "SCRIPTING_SCRIPTS"
|
||||
CONFIG_SCRIPTING_ACTIVE_SCRIPTS = "SCRIPTING_ACTIVE_SCRIPTS"
|
||||
CONFIG_PCAP_DUMP_ENABLE = "PCAP_DUMP_ENABLE"
|
||||
CONFIG_TIME_INTERVAL = "TIME_INTERVAL"
|
||||
CONFIG_MAX_TIME = "MAX_TIME"
|
||||
CONFIG_MAX_SIZE = "MAX_SIZE"
|
||||
)
|
||||
|
||||
func SetSecret(provider *Provider, key string, value string) (updated bool, err error) {
|
||||
var secret *v1.Secret
|
||||
secret, err = provider.clientSet.CoreV1().Secrets(config.Config.Tap.Release.Namespace).Get(context.TODO(), SELF_RESOURCES_PREFIX+SUFFIX_SECRET, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if secret.StringData[key] != value {
|
||||
updated = true
|
||||
}
|
||||
secret.Data[key] = []byte(value)
|
||||
|
||||
_, err = provider.clientSet.CoreV1().Secrets(config.Config.Tap.Release.Namespace).Update(context.TODO(), secret, metav1.UpdateOptions{})
|
||||
if err == nil {
|
||||
if updated {
|
||||
log.Info().Str("secret", key).Str("value", value).Msg("Updated:")
|
||||
}
|
||||
} else {
|
||||
log.Error().Str("secret", key).Err(err).Send()
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func GetConfig(provider *Provider, key string) (value string, err error) {
|
||||
var configMap *v1.ConfigMap
|
||||
configMap, err = provider.clientSet.CoreV1().ConfigMaps(config.Config.Tap.Release.Namespace).Get(context.TODO(), SELF_RESOURCES_PREFIX+SUFFIX_CONFIG_MAP, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
value = configMap.Data[key]
|
||||
return
|
||||
}
|
||||
|
||||
func SetConfig(provider *Provider, key string, value string) (updated bool, err error) {
|
||||
var configMap *v1.ConfigMap
|
||||
configMap, err = provider.clientSet.CoreV1().ConfigMaps(config.Config.Tap.Release.Namespace).Get(context.TODO(), SELF_RESOURCES_PREFIX+SUFFIX_CONFIG_MAP, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if configMap.Data[key] != value {
|
||||
updated = true
|
||||
}
|
||||
configMap.Data[key] = value
|
||||
|
||||
_, err = provider.clientSet.CoreV1().ConfigMaps(config.Config.Tap.Release.Namespace).Update(context.TODO(), configMap, metav1.UpdateOptions{})
|
||||
if err == nil {
|
||||
if updated {
|
||||
log.Info().
|
||||
Str("config", key).
|
||||
Str("value", func() string {
|
||||
if len(value) > 10 {
|
||||
return value[:10]
|
||||
}
|
||||
return value
|
||||
}()).
|
||||
Int("length", len(value)).
|
||||
Msg("Updated. Printing only 10 first characters of value:")
|
||||
}
|
||||
} else {
|
||||
log.Error().Str("config", key).Err(err).Send()
|
||||
}
|
||||
return
|
||||
}
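
// Illustrative sketch, not part of the original file: a hypothetical caller
// that applies a pod regex and a license using the helpers in this file.
func applySettingsSketch(provider *Provider, podRegex string, license string) error {
	// Update the ConfigMap entry first; both helpers report whether the value changed.
	if _, err := SetConfig(provider, CONFIG_POD_REGEX, podRegex); err != nil {
		return err
	}
	_, err := SetSecret(provider, SECRET_LICENSE, license)
	return err
}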
|
||||
|
||||
func ConfigGetScripts(provider *Provider) (scripts map[int64]misc.ConfigMapScript, err error) {
|
||||
var data string
|
||||
data, err = GetConfig(provider, CONFIG_SCRIPTING_SCRIPTS)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = json.Unmarshal([]byte(data), &scripts)
|
||||
return
|
||||
}
|
||||
|
||||
func IsActiveScript(provider *Provider, title string) bool {
|
||||
configActiveScripts, err := GetConfig(provider, CONFIG_SCRIPTING_ACTIVE_SCRIPTS)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return strings.Contains(configActiveScripts, title)
|
||||
}
|
||||
|
||||
func DeleteActiveScriptByTitle(provider *Provider, title string) (err error) {
|
||||
configActiveScripts, err := GetConfig(provider, CONFIG_SCRIPTING_ACTIVE_SCRIPTS)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
activeScripts := strings.Split(configActiveScripts, ",")
|
||||
|
||||
idx := slices.Index(activeScripts, title)
|
||||
if idx != -1 {
|
||||
activeScripts = slices.Delete(activeScripts, idx, idx+1)
|
||||
_, err = SetConfig(provider, CONFIG_SCRIPTING_ACTIVE_SCRIPTS, strings.Join(activeScripts, ","))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -1,11 +1,12 @@
package kubernetes

const (
	SelfResourcesPrefix = "kubeshark-"
	FrontPodName = SelfResourcesPrefix + "front"
	SELF_RESOURCES_PREFIX = "kubeshark-"
	FrontPodName = SELF_RESOURCES_PREFIX + "front"
	FrontServiceName = FrontPodName
	HubPodName = SelfResourcesPrefix + "hub"
	HubPodName = SELF_RESOURCES_PREFIX + "hub"
	HubServiceName = HubPodName
	K8sAllNamespaces = ""
	MinKubernetesServerVersion = "1.16.0"
	AppLabelKey = "app.kubeshark.com/app"
)

192
kubernetes/cp.go
Normal file
@@ -0,0 +1,192 @@
|
||||
package kubernetes
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bufio"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/rs/zerolog/log"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
"k8s.io/client-go/tools/remotecommand"
|
||||
)
|
||||
|
||||
func CopyFromPod(ctx context.Context, provider *Provider, pod v1.Pod, srcPath string, dstPath string) error {
|
||||
const containerName = "sniffer"
|
||||
cmdArr := []string{"tar", "cf", "-", srcPath}
|
||||
req := provider.clientSet.CoreV1().RESTClient().
|
||||
Post().
|
||||
Namespace(pod.Namespace).
|
||||
Resource("pods").
|
||||
Name(pod.Name).
|
||||
SubResource("exec").
|
||||
VersionedParams(&v1.PodExecOptions{
|
||||
Container: containerName,
|
||||
Command: cmdArr,
|
||||
Stdin: true,
|
||||
Stdout: true,
|
||||
Stderr: true,
|
||||
TTY: false,
|
||||
}, scheme.ParameterCodec)
|
||||
|
||||
exec, err := remotecommand.NewSPDYExecutor(&provider.clientConfig, "POST", req.URL())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
reader, outStream := io.Pipe()
|
||||
errReader, errStream := io.Pipe()
|
||||
go logErrors(errReader, pod)
|
||||
go func() {
|
||||
defer outStream.Close()
|
||||
err = exec.StreamWithContext(ctx, remotecommand.StreamOptions{
|
||||
Stdin: os.Stdin,
|
||||
Stdout: outStream,
|
||||
Stderr: errStream,
|
||||
Tty: false,
|
||||
})
|
||||
if err != nil {
|
||||
log.Error().Err(err).Str("pod", pod.Name).Msg("SPDYExecutor:")
|
||||
}
|
||||
}()
|
||||
|
||||
prefix := getPrefix(srcPath)
|
||||
prefix = path.Clean(prefix)
|
||||
prefix = stripPathShortcuts(prefix)
|
||||
dstPath = path.Join(dstPath, path.Base(prefix))
|
||||
err = untarAll(reader, dstPath, prefix)
|
||||
// fo(reader)
|
||||
return err
|
||||
}
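
// Illustrative sketch, not part of the original file: a hypothetical caller
// that copies a directory out of a pod to a local destination; the paths are
// examples only.
func copyFromPodSketch(ctx context.Context, provider *Provider, pod v1.Pod) error {
	// CopyFromPod streams a tar archive from the "sniffer" container over exec
	// and unpacks it under the destination directory.
	return CopyFromPod(ctx, provider, pod, "/app/data", "./exported")
}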
|
||||
|
||||
// func fo(fi io.Reader) {
|
||||
// fo, err := os.Create("output.tar")
|
||||
// if err != nil {
|
||||
// panic(err)
|
||||
// }
|
||||
|
||||
// // make a buffer to keep chunks that are read
|
||||
// buf := make([]byte, 1024)
|
||||
// for {
|
||||
// // read a chunk
|
||||
// n, err := fi.Read(buf)
|
||||
// if err != nil && err != io.EOF {
|
||||
// panic(err)
|
||||
// }
|
||||
// if n == 0 {
|
||||
// break
|
||||
// }
|
||||
|
||||
// // write a chunk
|
||||
// if _, err := fo.Write(buf[:n]); err != nil {
|
||||
// panic(err)
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
|
||||
func logErrors(reader io.Reader, pod v1.Pod) {
|
||||
r := bufio.NewReader(reader)
|
||||
for {
|
||||
msg, _, err := r.ReadLine()
|
||||
log.Warn().Str("pod", pod.Name).Str("msg", string(msg)).Msg("SPDYExecutor:")
|
||||
if err != nil {
|
||||
if err != io.EOF {
|
||||
log.Error().Err(err).Send()
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func untarAll(reader io.Reader, destDir, prefix string) error {
|
||||
tarReader := tar.NewReader(reader)
|
||||
for {
|
||||
header, err := tarReader.Next()
|
||||
if err != nil {
|
||||
if err != io.EOF {
|
||||
return err
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
if !strings.HasPrefix(header.Name, prefix) {
|
||||
return fmt.Errorf("tar contents corrupted")
|
||||
}
|
||||
|
||||
mode := header.FileInfo().Mode()
|
||||
destFileName := filepath.Join(destDir, header.Name[len(prefix):])
|
||||
|
||||
baseName := filepath.Dir(destFileName)
|
||||
if err := os.MkdirAll(baseName, 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
if header.FileInfo().IsDir() {
|
||||
if err := os.MkdirAll(destFileName, 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
evaledPath, err := filepath.EvalSymlinks(baseName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if mode&os.ModeSymlink != 0 {
|
||||
linkname := header.Linkname
|
||||
|
||||
if !filepath.IsAbs(linkname) {
|
||||
_ = filepath.Join(evaledPath, linkname)
|
||||
}
|
||||
|
||||
if err := os.Symlink(linkname, destFileName); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
outFile, err := os.Create(destFileName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer outFile.Close()
|
||||
if _, err := io.Copy(outFile, tarReader); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := outFile.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getPrefix(file string) string {
|
||||
return strings.TrimLeft(file, "/")
|
||||
}
|
||||
|
||||
func stripPathShortcuts(p string) string {
|
||||
newPath := p
|
||||
trimmed := strings.TrimPrefix(newPath, "../")
|
||||
|
||||
for trimmed != newPath {
|
||||
newPath = trimmed
|
||||
trimmed = strings.TrimPrefix(newPath, "../")
|
||||
}
|
||||
|
||||
// trim leftover {".", ".."}
|
||||
if newPath == "." || newPath == ".." {
|
||||
newPath = ""
|
||||
}
|
||||
|
||||
if len(newPath) > 0 && string(newPath[0]) == "/" {
|
||||
return newPath[1:]
|
||||
}
|
||||
|
||||
return newPath
|
||||
}
|
||||
@@ -1,6 +1,7 @@
|
||||
package kubernetes
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
@@ -8,12 +9,14 @@ import (
|
||||
"net/url"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/kubeshark/kubeshark/config"
|
||||
"github.com/kubeshark/kubeshark/misc"
|
||||
"github.com/kubeshark/kubeshark/semver"
|
||||
"github.com/kubeshark/kubeshark/utils"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/tanqiangyes/grep-go/reader"
|
||||
core "k8s.io/api/core/v1"
|
||||
k8serrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
@@ -142,7 +145,7 @@ func (provider *Provider) ListPodsByAppLabel(ctx context.Context, namespaces str
|
||||
return pods.Items, err
|
||||
}
|
||||
|
||||
func (provider *Provider) GetPodLogs(ctx context.Context, namespace string, podName string, containerName string) (string, error) {
|
||||
func (provider *Provider) GetPodLogs(ctx context.Context, namespace string, podName string, containerName string, grep string) (string, error) {
|
||||
podLogOpts := core.PodLogOptions{Container: containerName}
|
||||
req := provider.clientSet.CoreV1().Pods(namespace).GetLogs(podName, &podLogOpts)
|
||||
podLogs, err := req.Stream(ctx)
|
||||
@@ -154,8 +157,26 @@ func (provider *Provider) GetPodLogs(ctx context.Context, namespace string, podN
|
||||
if _, err = io.Copy(buf, podLogs); err != nil {
|
||||
return "", fmt.Errorf("error copy information from podLogs to buf, ns: %s, pod: %s, %w", namespace, podName, err)
|
||||
}
|
||||
str := buf.String()
|
||||
return str, nil
|
||||
|
||||
if grep != "" {
|
||||
finder, err := reader.NewFinder(grep, true, true)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
read, err := reader.NewStdReader(bufio.NewReader(buf), []reader.Finder{finder})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
read.Run()
|
||||
result := read.Result()[0]
|
||||
|
||||
log.Info().Str("namespace", namespace).Str("pod", podName).Str("container", containerName).Int("lines", len(result.Lines)).Str("grep", grep).Send()
|
||||
return strings.Join(result.MatchString, "\n"), nil
|
||||
} else {
|
||||
log.Info().Str("namespace", namespace).Str("pod", podName).Str("container", containerName).Send()
|
||||
return buf.String(), nil
|
||||
}
|
||||
}
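
// Illustrative sketch, not part of the original diff: fetching only the log
// lines of a container that match a pattern via the new grep parameter; the
// namespace, pod, and pattern values are examples only.
func grepPodLogsSketch(ctx context.Context, provider *Provider, namespace string, podName string) (string, error) {
	// An empty grep returns the full log; a non-empty grep keeps only matching lines.
	return provider.GetPodLogs(ctx, namespace, podName, "sniffer", "error")
}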
|
||||
|
||||
func (provider *Provider) GetNamespaceEvents(ctx context.Context, namespace string) (string, error) {
|
||||
@@ -206,12 +227,28 @@ func (provider *Provider) GetKubernetesVersion() (*semver.SemVersion, error) {
|
||||
return &serverVersionSemVer, nil
|
||||
}
|
||||
|
||||
func (provider *Provider) GetNamespaces() []string {
|
||||
func (provider *Provider) GetNamespaces() (namespaces []string) {
|
||||
if len(config.Config.Tap.Namespaces) > 0 {
|
||||
return utils.Unique(config.Config.Tap.Namespaces)
|
||||
namespaces = utils.Unique(config.Config.Tap.Namespaces)
|
||||
} else {
|
||||
return []string{K8sAllNamespaces}
|
||||
namespaceList, err := provider.clientSet.CoreV1().Namespaces().List(context.TODO(), metav1.ListOptions{})
|
||||
if err != nil {
|
||||
log.Error().Err(err).Send()
|
||||
return
|
||||
}
|
||||
|
||||
for _, ns := range namespaceList.Items {
|
||||
namespaces = append(namespaces, ns.Name)
|
||||
}
|
||||
}
|
||||
|
||||
namespaces = utils.Diff(namespaces, config.Config.Tap.ExcludedNamespaces)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (provider *Provider) GetClientSet() *kubernetes.Clientset {
|
||||
return provider.clientSet
|
||||
}
|
||||
|
||||
func getClientSet(config *rest.Config) (*kubernetes.Clientset, error) {
|
||||
|
||||
@@ -72,6 +72,10 @@ func GetProxyOnPort(port uint16) string {
|
||||
return fmt.Sprintf("http://%s:%d", config.Config.Tap.Proxy.Host, port)
|
||||
}
|
||||
|
||||
func GetHubUrl() string {
|
||||
return fmt.Sprintf("%s/api", GetProxyOnPort(config.Config.Tap.Proxy.Front.Port))
|
||||
}
|
||||
|
||||
func getRerouteHttpHandlerSelfAPI(proxyHandler http.Handler, selfNamespace string, selfServiceName string) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Access-Control-Allow-Origin", "*")
|
||||
@@ -102,7 +106,7 @@ func getRerouteHttpHandlerSelfStatic(proxyHandler http.Handler, selfNamespace st
|
||||
}
|
||||
|
||||
func NewPortForward(kubernetesProvider *Provider, namespace string, podRegex *regexp.Regexp, srcPort uint16, dstPort uint16, ctx context.Context) (*portforward.PortForwarder, error) {
|
||||
pods, err := kubernetesProvider.ListPodsByAppLabel(ctx, namespace, map[string]string{"app.kubeshark.co/app": "hub"})
|
||||
pods, err := kubernetesProvider.ListPodsByAppLabel(ctx, namespace, map[string]string{AppLabelKey: "front"})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
} else if len(pods) == 0 {
|
||||
@@ -134,6 +138,7 @@ func NewPortForward(kubernetesProvider *Provider, namespace string, podRegex *re
|
||||
go func() {
|
||||
if err = forwarder.ForwardPorts(); err != nil {
|
||||
log.Error().Err(err).Msg("While Kubernetes port-forwarding!")
|
||||
log.Info().Str("command", fmt.Sprintf("kubectl port-forward -n %s service/kubeshark-front 8899:80", config.Config.Tap.Release.Namespace)).Msg("Please try running:")
|
||||
return
|
||||
}
|
||||
}()
|
||||
|
||||
@@ -23,13 +23,12 @@ kubectl delete clusterrolebinding kubeshark-cluster-role-binding
|
||||
kubectl delete clusterrole kubeshark-cluster-role
|
||||
```
|
||||
|
||||
## Accesing
|
||||
## Accessing
|
||||
|
||||
Do the port forwarding:
|
||||
|
||||
```shell
|
||||
kubectl port-forward -n kubeshark service/kubeshark-hub 8898:80 & \
|
||||
kubectl port-forward -n kubeshark service/kubeshark-front 8899:80
|
||||
kubectl port-forward service/kubeshark-front 8899:80
|
||||
```
|
||||
|
||||
Visit [localhost:8899](http://localhost:8899)
|
||||
|
||||
File diff suppressed because it is too large
25
manifests/prometheus/kube_prometheus_stack.yaml
Normal file
@@ -0,0 +1,25 @@
|
||||
grafana:
|
||||
additionalDataSources: []
|
||||
prometheus:
|
||||
prometheusSpec:
|
||||
scrapeInterval: 10s
|
||||
evaluationInterval: 30s
|
||||
additionalScrapeConfigs: |
|
||||
- job_name: 'kubeshark-worker-metrics'
|
||||
kubernetes_sd_configs:
|
||||
- role: endpoints
|
||||
relabel_configs:
|
||||
- source_labels: [__meta_kubernetes_pod_name]
|
||||
target_label: pod
|
||||
- source_labels: [__meta_kubernetes_pod_node_name]
|
||||
target_label: node
|
||||
- source_labels: [__meta_kubernetes_endpoint_port_name]
|
||||
action: keep
|
||||
regex: ^metrics$
|
||||
- source_labels: [__address__, __meta_kubernetes_endpoint_port_number]
|
||||
action: replace
|
||||
regex: ([^:]+)(?::\d+)?
|
||||
replacement: $1:49100
|
||||
target_label: __address__
|
||||
- action: labelmap
|
||||
regex: __meta_kubernetes_service_label_(.+)
|
||||
@@ -5,7 +5,7 @@ metadata:
spec:
  acme:
    server: https://acme-v02.api.letsencrypt.org/directory
    email: info@kubeshark.co
    email: info@kubeshark.com
    privateKeySecretRef:
      name: letsencrypt-prod-key
    solvers:

204
mcp/README.md
Normal file
@@ -0,0 +1,204 @@
|
||||
# Kubeshark MCP Server
|
||||
|
||||
[Kubeshark](https://kubeshark.com) MCP (Model Context Protocol) server enables AI assistants like Claude Desktop, Cursor, and other MCP-compatible clients to query real-time Kubernetes network traffic.
|
||||
|
||||
## Features
|
||||
|
||||
- **L7 API Traffic Analysis**: Query HTTP, gRPC, Redis, Kafka, DNS transactions
|
||||
- **L4 Network Flows**: View TCP/UDP flows with traffic statistics
|
||||
- **Cluster Management**: Start/stop Kubeshark deployments (with safety controls)
|
||||
- **PCAP Snapshots**: Create and export network captures
|
||||
- **Built-in Prompts**: Pre-configured prompts for common analysis tasks
|
||||
|
||||
## Installation
|
||||
|
||||
### 1. Install Kubeshark CLI
|
||||
|
||||
```bash
|
||||
# macOS
|
||||
brew install kubeshark
|
||||
|
||||
# Linux
|
||||
sh <(curl -Ls https://kubeshark.com/install)
|
||||
|
||||
# Windows (PowerShell)
|
||||
choco install kubeshark
|
||||
```
|
||||
|
||||
Or download from [GitHub Releases](https://github.com/kubeshark/kubeshark/releases).
|
||||
|
||||
### 2. Configure Claude Desktop
|
||||
|
||||
Add to your Claude Desktop configuration:
|
||||
|
||||
**macOS**: `~/Library/Application Support/Claude/claude_desktop_config.json`
|
||||
**Windows**: `%APPDATA%\Claude\claude_desktop_config.json`
|
||||
|
||||
#### URL Mode (Recommended for existing deployments)
|
||||
|
||||
```json
|
||||
{
|
||||
"mcpServers": {
|
||||
"kubeshark": {
|
||||
"command": "kubeshark",
|
||||
"args": ["mcp", "--url", "https://kubeshark.example.com"]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### Proxy Mode (Requires kubectl access)
|
||||
|
||||
```json
|
||||
{
|
||||
"mcpServers": {
|
||||
"kubeshark": {
|
||||
"command": "kubeshark",
|
||||
"args": ["mcp", "--kubeconfig", "/path/to/.kube/config"]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
or:
|
||||
|
||||
```json
|
||||
{
|
||||
"mcpServers": {
|
||||
"kubeshark": {
|
||||
"command": "kubeshark",
|
||||
"args": ["mcp"]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### With Destructive Operations
|
||||
|
||||
```json
|
||||
{
|
||||
"mcpServers": {
|
||||
"kubeshark": {
|
||||
"command": "kubeshark",
|
||||
"args": ["mcp", "--allow-destructive", "--kubeconfig", "/path/to/.kube/config"]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### 3. Generate Configuration
|
||||
|
||||
Use the CLI to generate configuration:
|
||||
|
||||
```bash
|
||||
kubeshark mcp --mcp-config --url https://kubeshark.example.com
|
||||
```
|
||||
|
||||
## Available Tools

### Traffic Analysis (All Modes)

| Tool | Description |
|------|-------------|
| `list_workloads` | List pods, services, namespaces with observed traffic |
| `list_api_calls` | Query L7 API transactions with KFL filtering |
| `get_api_call` | Get detailed info about a specific API call |
| `get_api_stats` | Get aggregated API statistics |
| `list_l4_flows` | List L4 (TCP/UDP) network flows |
| `get_l4_flow_summary` | Get L4 connectivity summary |
| `list_snapshots` | List all PCAP snapshots |
| `create_snapshot` | Create a new PCAP snapshot |
| `get_dissection_status` | Check L7 protocol parsing status |
| `enable_dissection` | Enable L7 protocol dissection |
| `disable_dissection` | Disable L7 protocol dissection |

### Cluster Management (Proxy Mode Only)

| Tool | Description | Requires |
|------|-------------|----------|
| `check_kubeshark_status` | Check if Kubeshark is running | - |
| `start_kubeshark` | Deploy Kubeshark to cluster | `--allow-destructive` |
| `stop_kubeshark` | Remove Kubeshark from cluster | `--allow-destructive` |

## Available Prompts

| Prompt | Description |
|--------|-------------|
| `analyze_traffic` | Analyze API traffic patterns and identify issues |
| `find_errors` | Find and summarize API errors and failures |
| `trace_request` | Trace a request path through microservices |
| `show_topology` | Show service communication topology |
| `latency_analysis` | Analyze latency patterns and identify slow endpoints |
| `security_audit` | Audit traffic for security concerns |
| `compare_traffic` | Compare traffic patterns between time periods |
| `debug_connection` | Debug connectivity issues between services |

## Example Conversations

```
User: Show me all HTTP 500 errors in the last hour

Claude: I'll query the API traffic for 500 errors.
[Calling list_api_calls with kfl="http and response.status == 500"]

Found 12 HTTP 500 errors:
1. POST /api/checkout -> payment-service (500)
   Time: 10:23:45 | Latency: 2340ms
...
```

```
User: What services are communicating with the database?

Claude: Let me check the L4 flows to the database.
[Calling list_l4_flows with dst_filter="postgres"]

Found 5 services connecting to postgres:5432:
- orders-service: 456KB transferred
- users-service: 123KB transferred
...
```

## CLI Options

| Option | Description |
|--------|-------------|
| `--url` | Direct URL to Kubeshark Hub |
| `--kubeconfig` | Path to kubeconfig file |
| `--allow-destructive` | Enable start/stop operations |
| `--list-tools` | List available tools and exit |
| `--mcp-config` | Print Claude Desktop config JSON |

## KFL (Kubeshark Filter Language)

Query traffic using KFL syntax:

```
# HTTP requests to a specific path
http and request.path == "/api/users"

# Errors only
response.status >= 400

# Specific source pod
src.pod.name == "frontend-.*"

# Multiple conditions
http and src.namespace == "default" and response.status == 500
```

## MCP Registry

Kubeshark is published to the [MCP Registry](https://registry.modelcontextprotocol.io/) automatically on each release.

The `server.json` in this directory is a reference file. The actual registry metadata (version, SHA256 hashes) is auto-generated during the release workflow. See [`.github/workflows/release.yml`](../.github/workflows/release.yml) for details.

## Links

- [Documentation](https://docs.kubeshark.com/en/mcp)
- [GitHub](https://github.com/kubeshark/kubeshark)
- [Website](https://kubeshark.com)
- [MCP Registry](https://registry.modelcontextprotocol.io/)

## License

Apache-2.0

178
mcp/server.json
Normal file
@@ -0,0 +1,178 @@
|
||||
{
|
||||
"$schema": "https://static.modelcontextprotocol.io/schemas/2025-12-11/server.schema.json",
|
||||
"name": "io.github.kubeshark/mcp",
|
||||
"displayName": "Kubeshark",
|
||||
"description": "Real-time Kubernetes network traffic visibility and API analysis for HTTP, gRPC, Redis, Kafka, DNS.",
|
||||
"icon": "https://raw.githubusercontent.com/kubeshark/assets/refs/heads/master/logo/ico/icon.ico",
|
||||
"repository": {
|
||||
"url": "https://github.com/kubeshark/kubeshark",
|
||||
"source": "github"
|
||||
},
|
||||
"homepage": "https://kubeshark.com",
|
||||
"license": "Apache-2.0",
|
||||
"version": "AUTO_GENERATED_AT_RELEASE",
|
||||
"authors": [
|
||||
{
|
||||
"name": "Kubeshark",
|
||||
"url": "https://kubeshark.com"
|
||||
}
|
||||
],
|
||||
"categories": [
|
||||
"kubernetes",
|
||||
"networking",
|
||||
"observability",
|
||||
"debugging",
|
||||
"security"
|
||||
],
|
||||
"_note": "version and packages.fileSha256 are auto-generated at release time by .github/workflows/release.yml",
|
||||
"tags": ["kubernetes", "network", "traffic", "api", "http", "grpc", "kafka", "redis", "dns", "pcap", "wireshark", "tcpdump", "observability", "debugging", "microservices"],
|
||||
"packages": [
|
||||
{
|
||||
"registryType": "mcpb",
|
||||
"identifier": "https://github.com/kubeshark/kubeshark/releases/download/vX.Y.Z/kubeshark-mcp_darwin_arm64.mcpb",
|
||||
"fileSha256": "AUTO_GENERATED",
|
||||
"transport": { "type": "stdio" }
|
||||
},
|
||||
{
|
||||
"registryType": "mcpb",
|
||||
"identifier": "https://github.com/kubeshark/kubeshark/releases/download/vX.Y.Z/kubeshark-mcp_darwin_amd64.mcpb",
|
||||
"fileSha256": "AUTO_GENERATED",
|
||||
"transport": { "type": "stdio" }
|
||||
},
|
||||
{
|
||||
"registryType": "mcpb",
|
||||
"identifier": "https://github.com/kubeshark/kubeshark/releases/download/vX.Y.Z/kubeshark-mcp_linux_arm64.mcpb",
|
||||
"fileSha256": "AUTO_GENERATED",
|
||||
"transport": { "type": "stdio" }
|
||||
},
|
||||
{
|
||||
"registryType": "mcpb",
|
||||
"identifier": "https://github.com/kubeshark/kubeshark/releases/download/vX.Y.Z/kubeshark-mcp_linux_amd64.mcpb",
|
||||
"fileSha256": "AUTO_GENERATED",
|
||||
"transport": { "type": "stdio" }
|
||||
},
|
||||
{
|
||||
"registryType": "mcpb",
|
||||
"identifier": "https://github.com/kubeshark/kubeshark/releases/download/vX.Y.Z/kubeshark-mcp_windows_amd64.mcpb",
|
||||
"fileSha256": "AUTO_GENERATED",
|
||||
"transport": { "type": "stdio" }
|
||||
}
|
||||
],
|
||||
"tools": [
|
||||
{
|
||||
"name": "check_kubeshark_status",
|
||||
"description": "Check if Kubeshark is currently running in the cluster. Read-only operation.",
|
||||
"mode": "proxy"
|
||||
},
|
||||
{
|
||||
"name": "start_kubeshark",
|
||||
"description": "Deploy Kubeshark to the Kubernetes cluster. Requires --allow-destructive flag.",
|
||||
"mode": "proxy",
|
||||
"destructive": true
|
||||
},
|
||||
{
|
||||
"name": "stop_kubeshark",
|
||||
"description": "Remove Kubeshark from the Kubernetes cluster. Requires --allow-destructive flag.",
|
||||
"mode": "proxy",
|
||||
"destructive": true
|
||||
},
|
||||
{
|
||||
"name": "list_workloads",
|
||||
"description": "List pods, services, namespaces, and nodes with observed L7 traffic.",
|
||||
"mode": "all"
|
||||
},
|
||||
{
|
||||
"name": "list_api_calls",
|
||||
"description": "Query L7 API transactions (HTTP, gRPC, Redis, Kafka, DNS) with KFL filtering.",
|
||||
"mode": "all"
|
||||
},
|
||||
{
|
||||
"name": "get_api_call",
|
||||
"description": "Get detailed information about a specific API call including headers and body.",
|
||||
"mode": "all"
|
||||
},
|
||||
{
|
||||
"name": "get_api_stats",
|
||||
"description": "Get aggregated API statistics and metrics.",
|
||||
"mode": "all"
|
||||
},
|
||||
{
|
||||
"name": "list_l4_flows",
|
||||
"description": "List L4 (TCP/UDP) network flows with traffic statistics.",
|
||||
"mode": "all"
|
||||
},
|
||||
{
|
||||
"name": "get_l4_flow_summary",
|
||||
"description": "Get L4 connectivity summary including top talkers and cross-namespace traffic.",
|
||||
"mode": "all"
|
||||
},
|
||||
{
|
||||
"name": "list_snapshots",
|
||||
"description": "List all PCAP snapshots.",
|
||||
"mode": "all"
|
||||
},
|
||||
{
|
||||
"name": "create_snapshot",
|
||||
"description": "Create a new PCAP snapshot of captured traffic.",
|
||||
"mode": "all"
|
||||
},
|
||||
{
|
||||
"name": "get_dissection_status",
|
||||
"description": "Check L7 protocol parsing status.",
|
||||
"mode": "all"
|
||||
},
|
||||
{
|
||||
"name": "enable_dissection",
|
||||
"description": "Enable L7 protocol dissection.",
|
||||
"mode": "all"
|
||||
},
|
||||
{
|
||||
"name": "disable_dissection",
|
||||
"description": "Disable L7 protocol dissection.",
|
||||
"mode": "all"
|
||||
}
|
||||
],
|
||||
"prompts": [
|
||||
{ "name": "analyze_traffic", "description": "Analyze API traffic patterns and identify issues" },
|
||||
{ "name": "find_errors", "description": "Find and summarize API errors and failures" },
|
||||
{ "name": "trace_request", "description": "Trace a request path through microservices" },
|
||||
{ "name": "show_topology", "description": "Show service communication topology" },
|
||||
{ "name": "latency_analysis", "description": "Analyze latency patterns and identify slow endpoints" },
|
||||
{ "name": "security_audit", "description": "Audit traffic for security concerns" },
|
||||
{ "name": "compare_traffic", "description": "Compare traffic patterns between time periods" },
|
||||
{ "name": "debug_connection", "description": "Debug connectivity issues between services" }
|
||||
],
|
||||
"configuration": {
|
||||
"properties": {
|
||||
"url": {
|
||||
"type": "string",
|
||||
"description": "Direct URL to Kubeshark Hub (e.g., https://kubeshark.example.com). When set, connects directly without kubectl/proxy.",
|
||||
"examples": ["https://kubeshark.example.com", "http://localhost:8899"]
|
||||
},
|
||||
"kubeconfig": {
|
||||
"type": "string",
|
||||
"description": "Path to kubeconfig file for proxy mode.",
|
||||
"examples": ["~/.kube/config", "/path/to/.kube/config"]
|
||||
},
|
||||
"allow-destructive": {
|
||||
"type": "boolean",
|
||||
"description": "Enable destructive operations (start_kubeshark, stop_kubeshark). Default: false for safety.",
|
||||
"default": false
|
||||
}
|
||||
}
|
||||
},
|
||||
"modes": {
|
||||
"url": {
|
||||
"description": "Connect directly to an existing Kubeshark deployment via URL. Cluster management tools are disabled.",
|
||||
"args": ["mcp", "--url", "${url}"]
|
||||
},
|
||||
"proxy": {
|
||||
"description": "Connect via kubectl port-forward. Requires kubeconfig access to the cluster.",
|
||||
"args": ["mcp", "--kubeconfig", "${kubeconfig}"]
|
||||
},
|
||||
"proxy-destructive": {
|
||||
"description": "Proxy mode with destructive operations enabled.",
|
||||
"args": ["mcp", "--kubeconfig", "${kubeconfig}", "--allow-destructive"]
|
||||
}
|
||||
}
|
||||
}
|
||||
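To make the "modes" section above concrete: each mode's "args" template is expanded by substituting the configured property values for the ${...} placeholders before the kubeshark binary is launched. A minimal Go sketch of that substitution, assuming a simple map-based configuration (the expandArgs helper is an illustration, not part of the manifest spec):

package main

import (
	"fmt"
	"strings"
)

// expandArgs is a hypothetical helper: it replaces ${key} placeholders in a
// mode's "args" template with values from the user's configuration.
func expandArgs(template []string, config map[string]string) []string {
	out := make([]string, 0, len(template))
	for _, arg := range template {
		for key, value := range config {
			arg = strings.ReplaceAll(arg, "${"+key+"}", value)
		}
		out = append(out, arg)
	}
	return out
}

func main() {
	// "proxy" mode template from the manifest above.
	proxyArgs := []string{"mcp", "--kubeconfig", "${kubeconfig}"}
	args := expandArgs(proxyArgs, map[string]string{"kubeconfig": "~/.kube/config"})
	fmt.Println(args) // [mcp --kubeconfig ~/.kube/config]
}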
@@ -10,10 +10,10 @@ var (
 	Software = "Kubeshark"
 	Program = "kubeshark"
 	Description = "The API Traffic Analyzer for Kubernetes"
-	Website = "https://kubeshark.co"
-	Email = "info@kubeshark.co"
-	Ver = "0.0"
-	Branch = "develop"
+	Website = "https://kubeshark.com"
+	Email = "support@kubeshark.com"
+	Ver = "0.0.0"
+	Branch = "master"
 	GitCommitHash = "" // this var is overridden using ldflags in makefile when building
 	BuildTimestamp = "" // this var is overridden using ldflags in makefile when building
 	RBACVersion = "v1"
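The "overridden using ldflags" comments above refer to Go's link-time variable injection: the Makefile passes -ldflags "-X <import path>.<Var>=<value>" at build time so the compiled binary carries the real commit hash and timestamp without editing source. A hedged, standalone sketch of that mechanism (the package path and build command are illustrative assumptions, not the project's actual Makefile wiring):

// version_demo.go (illustrative standalone example, not the project's file)
package main

import "fmt"

// Defaults compiled into the binary; a release build overrides them, e.g.:
//   go build -ldflags "-X main.GitCommitHash=$(git rev-parse HEAD) -X main.BuildTimestamp=$(date -u +%Y-%m-%dT%H:%M:%SZ)"
var (
	GitCommitHash  = ""
	BuildTimestamp = ""
)

func main() {
	fmt.Printf("commit=%q built=%q\n", GitCommitHash, BuildTimestamp)
}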
@@ -13,8 +13,8 @@ import (
 	"github.com/rs/zerolog/log"
 )

-func DumpLogs(ctx context.Context, provider *kubernetes.Provider, filePath string) error {
-	podExactRegex := regexp.MustCompile("^" + kubernetes.SelfResourcesPrefix)
+func DumpLogs(ctx context.Context, provider *kubernetes.Provider, filePath string, grep string) error {
+	podExactRegex := regexp.MustCompile("^" + kubernetes.SELF_RESOURCES_PREFIX)
 	pods, err := provider.ListAllPodsMatchingRegex(ctx, podExactRegex, []string{config.Config.Tap.Release.Namespace})
 	if err != nil {
 		return err
@@ -34,7 +34,7 @@ func DumpLogs(ctx context.Context, provider *kubernetes.Provider, filePath strin

 	for _, pod := range pods {
 		for _, container := range pod.Spec.Containers {
-			logs, err := provider.GetPodLogs(ctx, pod.Namespace, pod.Name, container.Name)
+			logs, err := provider.GetPodLogs(ctx, pod.Namespace, pod.Name, container.Name, grep)
 			if err != nil {
 				log.Error().Err(err).Msg("Failed to get logs!")
 				continue
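The new grep parameter threaded through DumpLogs into GetPodLogs suggests filtering each container's log stream by a substring before it lands in the dump. A minimal sketch of that idea under that assumption (filterLogLines is a hypothetical helper, not the provider's actual implementation):

package main

import (
	"fmt"
	"strings"
)

// filterLogLines keeps only the lines of a log blob that contain the grep
// substring; an empty grep keeps everything.
func filterLogLines(logs string, grep string) string {
	if grep == "" {
		return logs
	}
	var kept []string
	for _, line := range strings.Split(logs, "\n") {
		if strings.Contains(line, grep) {
			kept = append(kept, line)
		}
	}
	return strings.Join(kept, "\n")
}

func main() {
	logs := "starting worker\nerror: connection refused\nworker ready\n"
	fmt.Println(filterLogLines(logs, "error")) // error: connection refused
}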
@@ -10,9 +10,24 @@ import (
 )

 type Script struct {
-	Path  string `json:"path"`
-	Title string `json:"title"`
-	Code  string `json:"code"`
+	Path   string `json:"path"`
+	Title  string `json:"title"`
+	Code   string `json:"code"`
+	Active bool   `json:"active"`
 }

+type ConfigMapScript struct {
+	Title  string `json:"title"`
+	Code   string `json:"code"`
+	Active bool   `json:"active"`
+}
+
+func (s *Script) ConfigMap() ConfigMapScript {
+	return ConfigMapScript{
+		Title:  s.Title,
+		Code:   s.Code,
+		Active: s.Active,
+	}
+}
+
 func ReadScriptFile(path string) (script *Script, err error) {
@@ -46,9 +61,10 @@ func ReadScriptFile(path string) (script *Script, err error) {
 	}

 	script = &Script{
-		Path:  path,
-		Title: title,
-		Code:  code,
+		Path:   path,
+		Title:  title,
+		Code:   code,
+		Active: false,
 	}

 	return
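The new Active flag and the ConfigMap() conversion above drop a script's on-disk Path when it is persisted, keeping only title, code, and active state. A small self-contained sketch of that round trip (the sample script values are made up for illustration):

package main

import (
	"encoding/json"
	"fmt"
)

type Script struct {
	Path   string `json:"path"`
	Title  string `json:"title"`
	Code   string `json:"code"`
	Active bool   `json:"active"`
}

type ConfigMapScript struct {
	Title  string `json:"title"`
	Code   string `json:"code"`
	Active bool   `json:"active"`
}

// ConfigMap strips the local file path before the script is stored.
func (s *Script) ConfigMap() ConfigMapScript {
	return ConfigMapScript{Title: s.Title, Code: s.Code, Active: s.Active}
}

func main() {
	s := &Script{Path: "/tmp/example.js", Title: "example", Code: "console.log(1)", Active: true}
	b, _ := json.Marshal(s.ConfigMap())
	fmt.Println(string(b)) // {"title":"example","code":"console.log(1)","active":true}
}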
@@ -8,10 +8,21 @@ import (
 	"strings"
 )

+const (
+	X_KUBESHARK_CAPTURE_HEADER_KEY          = "X-Kubeshark-Capture"
+	X_KUBESHARK_CAPTURE_HEADER_IGNORE_VALUE = "ignore"
+)
+
 // Get - When err is nil, resp always contains a non-nil resp.Body.
 // Caller should close resp.Body when done reading from it.
 func Get(url string, client *http.Client) (*http.Response, error) {
-	return checkError(client.Get(url))
+	req, err := http.NewRequest(http.MethodGet, url, nil)
+	if err != nil {
+		return nil, err
+	}
+	AddIgnoreCaptureHeader(req)
+
+	return checkError(client.Do(req))
 }

 // Post - When err is nil, resp always contains a non-nil resp.Body.
@@ -21,6 +32,7 @@ func Post(url, contentType string, body io.Reader, client *http.Client, licenseK
 	if err != nil {
 		return nil, err
 	}
+	AddIgnoreCaptureHeader(req)
 	req.Header.Set("Content-Type", "application/json")
 	req.Header.Set("License-Key", licenseKey)

@@ -51,3 +63,7 @@ func checkError(response *http.Response, errInOperation error) (*http.Response,

 	return response, nil
 }
+
+func AddIgnoreCaptureHeader(req *http.Request) {
+	req.Header.Set(X_KUBESHARK_CAPTURE_HEADER_KEY, X_KUBESHARK_CAPTURE_HEADER_IGNORE_VALUE)
+}
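The X-Kubeshark-Capture: ignore header added above lets the CLI tag its own HTTP requests so the capture pipeline can skip them. A minimal sketch of tagging an arbitrary request the same way with plain net/http (the target URL is a placeholder, and the constants simply mirror the values from the diff):

package main

import (
	"fmt"
	"net/http"
)

const (
	captureHeaderKey   = "X-Kubeshark-Capture" // header key from the diff above
	captureIgnoreValue = "ignore"
)

func main() {
	// Placeholder URL; any request tagged like this could be filtered out of capture.
	req, err := http.NewRequest(http.MethodGet, "http://localhost:8899/echo", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set(captureHeaderKey, captureIgnoreValue)
	fmt.Println(req.Header.Get(captureHeaderKey)) // ignore

	// resp, err := http.DefaultClient.Do(req) // would send the tagged request
}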
@@ -2,29 +2,15 @@ package utils

 import (
 	"bytes"
-	"encoding/json"

-	"gopkg.in/yaml.v3"
+	"github.com/goccy/go-yaml"
 )

 func PrettyYaml(data interface{}) (result string, err error) {
-	var marshalled []byte
-	marshalled, err = json.Marshal(data)
-	if err != nil {
-		return
-	}
-
-	var unmarshalled interface{}
-	err = json.Unmarshal(marshalled, &unmarshalled)
-	if err != nil {
-		return
-	}
-
 	buffer := new(bytes.Buffer)
-	encoder := yaml.NewEncoder(buffer)
-	encoder.SetIndent(2)
+	encoder := yaml.NewEncoder(buffer, yaml.Indent(2))

-	err = encoder.Encode(unmarshalled)
+	err = encoder.Encode(data)
 	if err != nil {
 		return
 	}
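After this change PrettyYaml encodes the value directly with github.com/goccy/go-yaml, passing yaml.Indent(2) as an encoder option instead of the json round-trip plus SetIndent that yaml.v3 required. A self-contained sketch of the same pattern (the sample struct is made up for illustration):

package main

import (
	"bytes"
	"fmt"

	"github.com/goccy/go-yaml"
)

type sample struct {
	Name     string   `yaml:"name"`
	Replicas int      `yaml:"replicas"`
	Tags     []string `yaml:"tags"`
}

func prettyYaml(data interface{}) (string, error) {
	buffer := new(bytes.Buffer)
	// goccy/go-yaml takes encode options at construction time.
	encoder := yaml.NewEncoder(buffer, yaml.Indent(2))
	if err := encoder.Encode(data); err != nil {
		return "", err
	}
	return buffer.String(), nil
}

func main() {
	out, err := prettyYaml(sample{Name: "demo", Replicas: 2, Tags: []string{"a", "b"}})
	if err != nil {
		panic(err)
	}
	fmt.Print(out)
}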
@@ -37,3 +37,18 @@ func EqualStringSlices(slice1 []string, slice2 []string) bool {

 	return true
 }
+
+// Diff returns the elements in `a` that aren't in `b`.
+func Diff(a, b []string) []string {
+	mb := make(map[string]struct{}, len(b))
+	for _, x := range b {
+		mb[x] = struct{}{}
+	}
+	var diff []string
+	for _, x := range a {
+		if _, found := mb[x]; !found {
+			diff = append(diff, x)
+		}
+	}
+	return diff
+}
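A usage note on the new Diff helper above: it is asymmetric, returning only the members of the first slice missing from the second, so callers comparing two sets typically invoke it in both directions. A short example (the slice values are made up for illustration):

package main

import "fmt"

// Diff returns the elements in `a` that aren't in `b` (copied from the helper above).
func Diff(a, b []string) []string {
	mb := make(map[string]struct{}, len(b))
	for _, x := range b {
		mb[x] = struct{}{}
	}
	var diff []string
	for _, x := range a {
		if _, found := mb[x]; !found {
			diff = append(diff, x)
		}
	}
	return diff
}

func main() {
	current := []string{"ns-a", "ns-b", "ns-c"}
	desired := []string{"ns-b", "ns-d"}
	fmt.Println(Diff(current, desired)) // [ns-a ns-c] -> present now but not desired
	fmt.Println(Diff(desired, current)) // [ns-d]      -> desired but missing
}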