Compare commits

...

247 Commits

Author SHA1 Message Date
Amir Malka
e1f7e06d45 Merge branch 'master' of github.com:armosec/kubescape into scan-workload 2023-07-20 19:39:17 +03:00
David Wertenteil
fcbcb53995 Merge pull request #1276 from amirmalka/time-based-cached-policies
Time-based cached policies
2023-07-20 16:56:39 +03:00
YiscahLevySilas1
17c43fd366 support related objects (#1272)
* support related objects

Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>

* update pkg versions

Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>

* update go mod

Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>

* fix test

Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>

* fix test

Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>

* only add ids of related resources

Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>

* fixes following review

Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>

* add test for processRule

Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>

---------

Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>
2023-07-20 16:23:58 +03:00
YiscahLevySilas1
d44746cb85 allow adding a fw name when running all (#1286)
* allow adding a fw name when running all

Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>

clean code

Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>

* fix following review

Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>

---------

Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>
2023-07-20 14:07:38 +03:00
Matthias Bertschy
61dac76369 Merge pull request #1283 from kubescape/remove-website
Remove website folder
2023-07-19 16:29:34 +02:00
Amir Malka
bacf15eeb8 cache control inputs
Signed-off-by: Amir Malka <amirm@armosec.io>
2023-07-18 15:56:16 +03:00
Daniel Grunberger
ab634debe4 add cmd
Signed-off-by: Daniel Grunberger <danielgrunberger@armosec.io>
2023-07-17 15:28:41 +03:00
Craig Box
0a5af235e3 Remove website folder
Signed-off-by: Craig Box <craigb@armosec.io>
2023-07-17 20:09:34 +12:00
David Wertenteil
6fec02caff Merge pull request #1281 from XDRAGON2002/issue_1280
fix: stuck spinner
2023-07-17 09:27:26 +03:00
DRAGON
067655d003 fix: stuck spinner
Signed-off-by: DRAGON <anantvijay3@gmail.com>
2023-07-14 01:24:46 +05:30
Amir Malka
e470fce6ed initial implementation of OpenTelemetry metrics collection (#1269)
Signed-off-by: Amir Malka <amirm@armosec.io>
2023-07-10 14:22:26 +03:00
Amir Malka
ea3172eda6 time-based cached policies
Signed-off-by: Amir Malka <amirm@armosec.io>
2023-07-10 10:54:56 +03:00
David Wertenteil
f060d02fbc Merge pull request #1267 from dwertent/submit-untracked-files
feat(file scanning): Submit untracked files
2023-07-06 09:40:21 +03:00
David Wertenteil
43975ddafe Merge pull request #1266 from batazor/patch-1
Update grafana-kubescape-dashboard.json
2023-07-06 09:40:06 +03:00
David Wertenteil
abe0477249 Merge pull request #1265 from dwertent/update-submit-message
Update submit message
2023-07-06 09:39:04 +03:00
David Wertenteil
5f197eb27c submit file scanning
Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-07-06 09:25:34 +03:00
Victor Login
84b43d2b03 Update grafana-kubescape-dashboard.json
Signed-off-by: Victor Login <batazor@evrone.com>
2023-07-05 19:03:27 +02:00
David Wertenteil
b149e00d1a Merge pull request #1264 from dwertent/deprecate-image-controls
core(adaptors): Ignore adaptors when credentials are not set
2023-07-05 17:48:12 +03:00
David Wertenteil
f98b394ec2 Merge pull request #1254 from kubescape/rbac-fix
initialize ns in case we don't have one in YAML
2023-07-05 17:47:42 +03:00
David Wertenteil
492b08c995 Merge pull request #1259 from kubescape/update_regolibrary_version
Update regolibrary version
2023-07-05 17:46:35 +03:00
David Wertenteil
8fa15688fb Merge pull request #1260 from dwertent/deprecate-host-scanner
Deprecated host-scanner from CLI
2023-07-05 17:46:12 +03:00
David Wertenteil
1a3e140e56 Merge pull request #1261 from Oshratn/master
English language fix on Kubescape output
2023-07-05 12:59:19 +03:00
David Wertenteil
72f6988bb4 update messaging based on Oshrat's comments
Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-07-05 10:40:22 +03:00
David Wertenteil
780be45392 update submit message
Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-07-05 10:38:59 +03:00
David Wertenteil
676771e8b3 deprecate the login flags
Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-07-05 10:16:09 +03:00
David Wertenteil
06f5c24b7d ignore adaptors if credentials are not set
Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-07-05 10:13:21 +03:00
David Wertenteil
c17415d6e9 deprecate host-scan-yaml flag
Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-07-05 09:02:21 +03:00
David Wertenteil
b5bed7bfbb remove unused file
Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-07-05 08:59:57 +03:00
Oshrat Nir
3c38021f7c Changed Assistance Remediation to Assisted Remediation
Signed-off-by: Oshrat Nir <oshratn@gmail.com>
2023-07-04 13:13:50 +03:00
David Wertenteil
8989cc1679 Deprecated host-scanner
Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-07-04 09:43:10 +03:00
kooomix
0ab9c32715 fix test jsons 2023-07-02 13:29:28 +03:00
kooomix
868db91801 update regolibrary to v1.0.286-rc.0 2023-07-02 13:25:37 +03:00
Craig Box
aa0fe21a2e Merge pull request #1257 from dwertent/update-armo-landing-page
docs(Installation): update installation steps
2023-06-27 08:22:58 +01:00
David Wertenteil
1b181a47ef Update docs/providers/armo.md
Co-authored-by: Craig Box <craig.box@gmail.com>
2023-06-27 07:42:42 +03:00
David Wertenteil
30487dcd0e Update docs/providers/armo.md
Co-authored-by: Craig Box <craig.box@gmail.com>
2023-06-27 07:42:33 +03:00
David Wertenteil
46ad069fe5 Updating overview
Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-06-26 13:54:00 +03:00
David Wertenteil
05d5de17d5 fixed wording
Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-06-26 09:49:00 +03:00
David Wertenteil
6bc79458b0 Split the installation command from scanning
Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-06-26 09:46:13 +03:00
David Wertenteil
ab85ca2b28 update installation steps
Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-06-26 09:40:21 +03:00
Matthias Bertschy
99938ecbee initialize ns in case we don't have one in YAML
Signed-off-by: Matthias Bertschy <matthias.bertschy@gmail.com>
2023-06-19 07:47:29 +02:00
Matthias Bertschy
e2f8e273ad Merge pull request #1252 from testwill/ioutil
chore: remove refs to deprecated io/ioutil
2023-06-15 08:00:05 +02:00
guoguangwu
be63e1ef7c chore: remove refs to deprecated io/ioutil
Signed-off-by: guoguangwu <guoguangwu@magic-shield.com>
2023-06-14 16:33:24 +08:00
guangwu
5e5b9d564c fix: CVE-2023-28840 CVE-2023-28841 CVE-2023-28842 CVE-2022-41723 etc. (#1221)
* fix: CVE-2023-28840 CVE-2023-28841 CVE-2023-28842 CVE-2022-41723 GHSA-vvpx-j8f3-3w6h CVE-2022-23524 CVE-2022-23525 CVE-2022-23526 CVE-2022-36055 CVE-2023-25165

Signed-off-by: guoguangwu <guoguangwu@magic-shield.com>

* restore go.sum

Signed-off-by: David Wertenteil <dwertent@armosec.io>

---------

Signed-off-by: guoguangwu <guoguangwu@magic-shield.com>
Signed-off-by: David Wertenteil <dwertent@armosec.io>
Co-authored-by: David Wertenteil <dwertent@armosec.io>
2023-06-13 11:39:25 +03:00
YiscahLevySilas1
8ee72895b9 Fix statuses - Manual review and Requires configuration (#1251)
* fix statuses - req. review, configurations, manual

Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>

* update opa-utils version

Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>

* update opa-utils version

Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>

* update opa-utils version

Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>

* use const for inner info

Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>

---------

Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>
2023-06-12 10:38:35 +03:00
Nitish Chauhan
6cefada215 correcting the formatting of the table in pdf output (#1244)
* correcting the formatting of the table in pdf output

Signed-off-by: ntishchauhan0022 <nitishchauhan0022@gmail.com>

* adding some starting unit tests

Signed-off-by: ntishchauhan0022 <nitishchauhan0022@gmail.com>

* resolving the mod error

Signed-off-by: ntishchauhan0022 <nitishchauhan0022@gmail.com>

---------

Signed-off-by: ntishchauhan0022 <nitishchauhan0022@gmail.com>
2023-06-04 15:21:07 +03:00
David Wertenteil
211ee487b3 core(metrics api): Update API default behavior (#1250)
* scan default frameworks

Signed-off-by: David Wertenteil <dwertent@armosec.io>

* wip: update context

Signed-off-by: David Wertenteil <dwertent@armosec.io>

* do not trigger host scan

Signed-off-by: David Wertenteil <dwertent@armosec.io>

* adding unit tests

Signed-off-by: David Wertenteil <dwertent@armosec.io>

---------

Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-06-04 15:12:31 +03:00
David Wertenteil
bbe46c9fab Merge pull request #1247 from kubescape/fix/remove-kubelet-command-line-endpoint
fix: remove deprecated endpoint
2023-05-31 17:17:09 +03:00
Alessio Greggi
ce7fde582c fix: update host-scanner version
Signed-off-by: Alessio Greggi <ale_grey_91@hotmail.it>
2023-05-31 14:14:29 +02:00
David Wertenteil
1c31e1f015 Merge pull request #1246 from dwertent/cli-updates
core(cmd): Minor CLI updates
2023-05-30 11:55:38 +03:00
Alessio Greggi
9e2fe607d8 fix: remove deprecated endpoint
Signed-off-by: Alessio Greggi <ale_grey_91@hotmail.it>
2023-05-30 10:50:31 +02:00
David Wertenteil
5a5ec9b641 wip: remove secretKey and clientID from list cmd
Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-05-28 22:37:48 +03:00
David Wertenteil
24c608e204 wip: add example for exclude-namespaces flag
Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-05-28 22:35:45 +03:00
David Wertenteil
ac43036b4a Merge pull request #1241 from kubescape/hostsensor-improvements
host-scanner improvements
2023-05-28 08:57:11 +03:00
David Wertenteil
03b89047f8 Merge pull request #1243 from anubhav06/update-go-git-url
update kubescape/go-git-url version
2023-05-28 08:46:22 +03:00
Anubhav Gupta
07a5c6488b update kubescape/go-git-url version
Signed-off-by: Anubhav Gupta <mail.anubhav06@gmail.com>
2023-05-26 18:13:39 +05:30
Alessio Greggi
c486b4fed7 feat: add log coupling for hostsensorutils
Signed-off-by: Alessio Greggi <ale_grey_91@hotmail.it>
2023-05-24 14:46:34 +02:00
Alessio Greggi
00c48d756d fix(hostsensorutils): add finalizers deletion
Signed-off-by: Alessio Greggi <ale_grey_91@hotmail.it>
2023-05-24 11:49:15 +02:00
Alessio Greggi
b49563ae8c fix(hostsensorutils): reduce periods of readiness probe
Signed-off-by: Alessio Greggi <ale_grey_91@hotmail.it>
2023-05-24 11:34:04 +02:00
Alessio Greggi
7840ecb5da fix: move host-scanner to kubescape namespace
Signed-off-by: Alessio Greggi <ale_grey_91@hotmail.it>
2023-05-24 09:45:12 +02:00
David Wertenteil
e151c5bf81 Merge pull request #1240 from amirmalka/memory-optimizations
bump opa-utils version for memory optimizations
2023-05-23 20:37:27 +03:00
Amir Malka
225545476c update opa-utils
Signed-off-by: Amir Malka <amirm@armosec.io>
2023-05-23 19:18:45 +03:00
Amir Malka
987f97102d bump opa-utils version for memory optimizations
Signed-off-by: Amir Malka <amirm@armosec.io>
2023-05-22 16:44:11 +03:00
Craig Box
7bffed2afe Merge pull request #1239 from yrs147/master 2023-05-18 16:39:25 +03:00
Hollow Man
3357713903 Add back new line at the end of the file
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-05-18 13:38:35 +03:00
Yash Raj Singh
efd48eab08 Removed instructions and added the wiki link
Co-authored-by: Hollow Man <hollowman@opensuse.org>

Signed-off-by: Yash Raj Singh <98258627+yrs147@users.noreply.github.com>
2023-05-18 13:38:27 +03:00
Yash Raj
231d9c231a Added instructions to setup kubescape locally
Signed-off-by: Yash Raj <yashraj14700728@gmail.com>
2023-05-18 13:34:13 +05:30
Craig Box
91e705a3eb Merge pull request #1238 from HollowMan6/macm1 2023-05-17 16:37:13 +03:00
Songlin Jiang
a92d573cb8 Fix downloading arm64 binary for kubescape
Signed-off-by: Songlin Jiang <songlin.jiang@csc.fi>
2023-05-17 15:34:32 +03:00
David Wertenteil
e8c72b9883 Merge pull request #1235 from kubescape/revert-1213-windows-latest
Revert "Deprecate kubescape-windows-latest"
2023-05-16 10:07:39 +03:00
David Wertenteil
d380b2cb00 Revert "Deprecate kubescape-windows-latest" 2023-05-16 10:03:41 +03:00
Yuval Leibovich
50b3d0f313 Merge pull request #1233 from kubescape/update-compliance-score-with-readme
updating readme file to support compliance
2023-05-15 17:31:40 +03:00
David Wertenteil
474b6d07ed Merge pull request #1234 from kubescape/timeout
start with a new context, extracting span from request
2023-05-15 16:47:25 +03:00
Matthias Bertschy
2cddc4b395 start with a new context, extracting span from request
Signed-off-by: Matthias Bertschy <matthias.bertschy@gmail.com>
2023-05-15 14:55:46 +02:00
Yuval Leibovich
b5fb355a22 updating readme file to support compliance 2023-05-15 15:31:14 +03:00
David Wertenteil
d1bc6d0190 Merge pull request #1213 from HollowMan6/windows-latest
Deprecate kubescape-windows-latest
2023-05-15 13:23:32 +03:00
Amir Malka
0a0ef10d50 Control parallelism of opa rule processing by env var (#1230)
* control parallelism of opa rule processing by env var

Signed-off-by: Amir Malka <amirm@armosec.io>

* go 1.20

Signed-off-by: Amir Malka <amirm@armosec.io>

* update go.mod go.sum

Signed-off-by: Amir Malka <amirm@armosec.io>

---------

Signed-off-by: Amir Malka <amirm@armosec.io>
2023-05-14 14:59:21 +03:00
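
A minimal Go sketch of the pattern #1230 describes: size a worker pool from an environment variable, falling back to the CPU count. The variable name RULE_PROCESSING_WORKERS and the defaults are illustrative assumptions, not Kubescape's actual configuration.

```go
package main

import (
	"fmt"
	"os"
	"runtime"
	"strconv"
	"sync"
)

// workerCount reads the desired parallelism from an environment variable
// (RULE_PROCESSING_WORKERS is a hypothetical name) and falls back to the
// number of CPUs when the variable is unset or invalid.
func workerCount() int {
	if v := os.Getenv("RULE_PROCESSING_WORKERS"); v != "" {
		if n, err := strconv.Atoi(v); err == nil && n > 0 {
			return n
		}
	}
	return runtime.NumCPU()
}

func main() {
	rules := []string{"C-0001", "C-0002", "C-0003", "C-0004"}
	jobs := make(chan string)
	var wg sync.WaitGroup

	// Spawn the env-var-controlled number of workers.
	for i := 0; i < workerCount(); i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for rule := range jobs {
				fmt.Println("processing rule", rule) // stand-in for OPA rule evaluation
			}
		}()
	}
	for _, r := range rules {
		jobs <- r
	}
	close(jobs)
	wg.Wait()
}
```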
David Wertenteil
4523dc8456 Merge pull request #1232 from amirmalka/update-golang-version
Update go version 1.19->1.20
2023-05-14 11:18:18 +03:00
Amir Malka
b26f83d0bd update go version 1.19->1.20
Signed-off-by: Amir Malka <amirm@armosec.io>
2023-05-14 10:04:30 +03:00
David Wertenteil
9cc3053d74 Merge pull request #1214 from HollowMan6/dispatch
Add ref to workflow dispatch
2023-05-11 15:13:24 +01:00
Yuval Leibovich
84842a6a91 Merge pull request #1219 from kubescape/add-system-test
add compliance score system test
2023-05-08 10:26:00 +03:00
YiscahLevySilas1
aff8cc480e add compliance score system test
Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>
2023-05-07 16:07:39 +03:00
David Wertenteil
7feea43421 Merge pull request #1216 from HollowMan6/install
Make powershell Windows installation user path available immediately
2023-05-03 10:58:47 +03:00
David Wertenteil
04ec32c9f4 Merge pull request #1218 from dwertent/hot-fix-2.3.0
fix(adaptors) Check response size
2023-05-03 10:05:17 +03:00
David Wertenteil
b805f22038 add test
Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-05-03 08:58:12 +03:00
David Wertenteil
092f37a636 if the response is empty, return an empty string
Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-05-03 08:39:59 +03:00
Hollow Man
9a2eb46f65 Make powershell Windows installation user path available immediately
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-05-02 00:23:01 +03:00
Hollow Man
c637c1a589 Add ref to workflow dispatch
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-05-01 22:07:50 +03:00
David Wertenteil
7609a4aa5d Merge pull request #1199 from HollowMan6/install
Update installation script
2023-05-01 21:53:16 +03:00
Hollow Man
75d31c22d9 Deprecate kubescape-windows-latest
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-05-01 17:38:46 +03:00
David Wertenteil
b93a97a8c8 Merge pull request #1186 from HollowMan6/dispatch
Invoke packaging workflow to update after release
2023-05-01 16:45:10 +03:00
David Wertenteil
88696ca233 Merge pull request #1169 from HollowMan6/exe
Add kubescape.exe to the release assets
2023-05-01 16:40:04 +03:00
David Wertenteil
87d94d16ff Merge pull request #1212 from kubescape/token
change basic auth username to x-token-auth
2023-05-01 14:56:36 +03:00
Hollow Man
1843bcdaf8 invoke only if the repository owner is kubescape
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-05-01 13:46:39 +03:00
Hollow Man
cdaff7ddbe Revert install.ps1 change, to update after release
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-05-01 13:24:40 +03:00
Hollow Man
ec7bc26f64 Add kubescape.exe to the release assets
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-05-01 13:24:35 +03:00
Matthias Bertschy
75b64d58f3 change basic auth username to x-token-auth
Signed-off-by: Matthias Bertschy <matthias.bertschy@gmail.com>
2023-05-01 10:55:07 +02:00
David Wertenteil
dce1d762c6 Merge pull request #1209 from kubescape/new-threshold-flag
Add compliance score to controls
2023-05-01 11:23:20 +03:00
YiscahLevySilas1
f3225855d0 rerun workflows
Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>
2023-05-01 09:19:09 +03:00
David Wertenteil
5ae421dbc2 Merge pull request #1210 from HollowMan6/master
ci: update before install packages
2023-04-30 15:09:55 +03:00
Hollow Man
d4b75dcb0c ci: update before install packages
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-04-27 15:50:21 +00:00
YiscahLevySilas1
b7935276e3 Merge branch 'master' of github.com:kubescape/kubescape into new-threshold-flag
Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>
2023-04-27 15:56:12 +03:00
YiscahLevySilas1
d6edd818b8 add compliance score to new field in controls for backward compatibility
Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>
2023-04-27 15:53:47 +03:00
David Wertenteil
a73081c816 Merge pull request #1203 from kubescape/fix/remove-outdated-endpoints
fix: remove outdated endpoints
2023-04-27 11:23:18 +03:00
David Wertenteil
dd961b9e55 Merge pull request #1208 from kubescape/fix/hostsensor-http-probe-attributes
fix(hostsensorutils): fix indentation of probe attributes
2023-04-27 11:22:50 +03:00
Alessio Greggi
76ced13a26 fix(hostsensorutils): fix indentation of probe attributes
Signed-off-by: Alessio Greggi <ale_grey_91@hotmail.it>
2023-04-26 16:16:29 +02:00
YiscahLevySilas1
95e88f8581 add compliance-threshold, deprecate fail-threshold (#1197)
* add compliance-threshold, deprecate fail-threshold

Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>

* update opa-utils version

Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>

* update opa-utils version for fix in compliance score

Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>

---------

Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>
2023-04-24 15:33:30 +03:00
David Wertenteil
5955247f01 Merge pull request #1207 from dwertent/fix-workflow
fix(workflow): Fix workflow
2023-04-23 13:26:48 +03:00
David Wertenteil
c0530b4f88 wip: fixed github actions
Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-04-23 11:47:54 +03:00
David Wertenteil
c23d6a17cc wip: update fix command example
Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-04-23 11:45:46 +03:00
David Wertenteil
d448de131f Merge pull request #1148 from HollowMan6/master
arm64 release binaries for CI and Krew
2023-04-23 09:48:05 +03:00
Alessio Greggi
b48c04da63 fix: remove outdated endpoints
Signed-off-by: Alessio Greggi <ale_grey_91@hotmail.it>
2023-04-21 19:47:24 +02:00
David Wertenteil
ecf770c756 Merge pull request #1189 from dwertent/hotfix-2.2.6-add_data
fix(submit): set default report time
2023-04-20 16:39:51 +03:00
Hollow Man
6e33f37aee Update installation script
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-04-20 13:05:51 +03:00
Hollow Man
03f792e968 Revert change to install.sh
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-04-20 11:21:34 +03:00
David Wertenteil
b017d77b86 Merge pull request #1184 from HollowMan6/sarif-fix
feat(sarif): add fix object in generated reports
2023-04-20 11:15:49 +03:00
Craig Box
2cde591180 Merge pull request #1196 from HollowMan6/doc
Move building instructions to wiki, add more installation instructions
2023-04-20 14:55:52 +12:00
YiscahLevySilas1
f25d573f32 update opa-utils version for fix in compliance score
Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>
2023-04-19 18:34:10 +03:00
Hollow Man
ebf3e49f53 Update snap installation
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-04-18 02:02:43 +03:00
YiscahLevySilas1
acaf6e78da update opa-utils version
Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>
2023-04-17 20:25:27 +03:00
YiscahLevySilas1
344e9188f6 add compliance-threshold, deprecate fail-threshold
Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>
2023-04-17 16:08:38 +03:00
Hollow Man
3f69f06df1 Move Building to wiki and installation back to docs
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-04-17 14:10:47 +03:00
Hollow Man
e0b296c124 Move installation instructions to wiki
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-04-16 19:35:11 +03:00
David Wertenteil
108bbd8bc4 Merge pull request #1193 from suhasgumma/master
Fix: Empty Frameworks Column when listing controls
2023-04-16 09:13:38 +03:00
Hollow Man
5c1a41e920 nit
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-04-13 10:54:25 +03:00
Hollow Man
0b8d207615 Add more error check
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-04-13 10:54:25 +03:00
Hollow Man
539b6c51b9 Add unit testcase
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-04-13 10:54:25 +03:00
Hollow Man
19ca590e2f S1023: redundant break statement (gosimple)
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-04-13 10:54:25 +03:00
Hollow Man
4de50f82c0 feat(sarif): add fix object in generated reports
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-04-13 10:54:19 +03:00
Hollow Man
ab41d5dbf4 Invoke workflow to update github action
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-04-12 22:33:02 +03:00
Matthias Bertschy
fa6de6dc3f Merge pull request #1192 from Sindhuinti/Sindhuinti/broken-link
fix: broken link
2023-04-11 14:14:27 +02:00
Suhas Gumma
96e959c3b7 Fix: Empty Frameworks Column when listing controls
Signed-off-by: Suhas Gumma <suhasgumma2001@gmail.com>
2023-04-11 15:20:07 +05:30
Sindhu Inti
28fdee0dd2 fix: broken link
Signed-off-by: Sindhuinti <iamsindhuinti23@gmail.com>
2023-04-11 13:10:31 +05:30
Sindhu Inti
9ce25c45fe fix: broken link 2023-04-11 13:01:49 +05:30
MathoAvito
d44b9f7a31 Change wf (#1190)
* added coveralls coverage tests 

Signed-off-by: Matan Avital <matavital13@gmail.com>
2023-04-09 18:29:23 +03:00
David Wertenteil
c7af6266fd Merge pull request #1185 from HollowMan6/fix-changes
fix(fix): mixed up change summary list
2023-04-09 11:19:44 +02:00
David Wertenteil
91c13381b2 set default report time
Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-04-09 11:15:06 +02:00
Hollow Man
30ad3adbb6 Invoke workflow to update after release
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-04-07 15:49:25 +03:00
Hollow Man
64e3b08641 fix(fix): mixed up change list
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-04-07 01:07:49 +03:00
Hollow Man
6d7a89bb74 Add ARM64 binary installation
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-04-07 00:47:40 +03:00
Hollow Man
e8d92ffd43 Resume test core pkg under ubuntu arm64
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-04-07 00:47:40 +03:00
Hollow Man
48a15e1a8d Disable multi-platform test with commits
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-04-07 00:47:40 +03:00
Hollow Man
d02f15ef6f merge pr scanner build into binary-build-and-e2e-tests
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-04-07 00:47:40 +03:00
Hollow Man
9327f70e1a Fix naming
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-04-07 00:47:40 +03:00
Hollow Man
c498026208 Disable core pkg test for ubuntu arm64
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-04-07 00:47:40 +03:00
Hollow Man
3238555df3 add cross compilation for ubuntu arm64
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-04-07 00:47:40 +03:00
Hollow Man
0c77d89bfc add cross compilation for mac m1
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-04-07 00:47:40 +03:00
yuleib
875deb7ec3 adding compliance score updates (#1181)
Signed-off-by: Yuval Leibovich <yuvall@armosec.io>
2023-04-04 16:03:40 +03:00
David Wertenteil
eae234136b Merge pull request #1178 from YiscahLevySilas1/update-k8s-interface
update version k8s-interface for cloud resources
2023-04-03 13:54:02 +03:00
YiscahLevySilas1
93a35fffbd comment failing test because of many requests
Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>
2023-04-03 10:45:51 +03:00
David Wertenteil
fc97b0ad19 Merge pull request #1179 from kubescape/change_wf
delete BUILD_AND_TEST_LOCAL_KUBESCAPE_CLI input for b-binary-build-an…
2023-04-03 10:26:56 +03:00
YiscahLevySilas1
9a3767ef72 update version k8s-interface for cloud resources
Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>
2023-04-03 09:53:22 +03:00
Matan Shkalim
173eac552c delete BUILD_AND_TEST_LOCAL_KUBESCAPE_CLI input for b-binary-build-and-e2e-tests wf
Signed-off-by: Matan Shkalim <shekel8@gmail.com>
2023-04-03 07:40:09 +01:00
YiscahLevySilas1
9420fd5e79 update version k8s-interface for cloud resources
Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>
2023-04-03 09:34:22 +03:00
Matthias Bertschy
eeda903c76 Merge pull request #1177 from kubescape/update-meeting
Add new meeting location
2023-04-03 07:27:08 +02:00
Craig Box
fd17a87788 Add new meeting location
Changed Zoom URL and added timezone calculator.

Signed-off-by: Craig Box <craigb@armosec.io>
2023-04-03 16:45:26 +12:00
David Wertenteil
1de14ce1e3 Merge pull request #1171 from kubescape/feat/add-progress-bar-during-cloud-resources-download
feat: add progress bar during cloud resources download
2023-04-02 13:53:44 +03:00
David Wertenteil
143d1bb601 Merge pull request #1161 from kubescape/change_wf
change trigger for wf
2023-04-02 13:51:54 +03:00
Alessio Greggi
feb39ed130 test: fix test with new function argument
Signed-off-by: Alessio Greggi <ale_grey_91@hotmail.it>
2023-03-28 16:39:00 +02:00
David Wertenteil
83363d68e6 Merge pull request #1170 from dwertent/fix-get-account-id
fix(config): Load account details
2023-03-28 17:19:56 +03:00
Alessio Greggi
f010364c98 feat: add progress bar during cloud resources download
Signed-off-by: Alessio Greggi <ale_grey_91@hotmail.it>
2023-03-28 16:10:55 +02:00
David Wertenteil
64b8f48469 clean code
Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-03-28 16:54:02 +03:00
David Wertenteil
de8d365919 load account details
Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-03-28 16:03:31 +03:00
David Wertenteil
db2259d3d0 Merge pull request #1167 from dwertent/update-host-scanner-tag
core(host-scanner): Update host scanner image tag
2023-03-26 22:59:12 +03:00
David Wertenteil
7b9ad26e8e update host scanner image tag
Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-03-26 15:07:06 +03:00
Amir Malka
e35029934b updated createTenant path (#1166)
Signed-off-by: Amir Malka <amirm@armosec.io>
2023-03-26 13:21:30 +03:00
Matthias Bertschy
181ebc27e1 Merge pull request #1154 from fredbi/refact/refacf-host-sensor-exports
refact(hostsensorutils): refactors host sensor exports
2023-03-25 09:56:33 +01:00
Frédéric BIDON
a090a296fa refact(hostsensorutils): unexported fields that don't need to be exposed
Also:
* declared scanner resources as an enum type
* replaced stdlib json, added unit tests for skipped resources
* unexported worker pool
* more unexported methods (i.e. everything that is not part of the interface)
* refact(core): clarified mock injection logic and added a few unit tests at the caller's (CLI init utils)

Signed-off-by: Frederic BIDON <fredbi@yahoo.com>
2023-03-25 09:37:24 +01:00
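
A short sketch of the "scanner resources as an enum type" change mentioned above, with illustrative names rather than the package's real identifiers:

```go
package hostsensorutils

// scannerResource models the host-scanner API resources as a typed constant
// set instead of bare strings (the values here are illustrative).
type scannerResource string

const (
	kubeletConfiguration scannerResource = "kubeletConfigurations"
	osRelease            scannerResource = "osRelease"
	kernelVersion        scannerResource = "kernelVersion"
)

// String makes the enum satisfy fmt.Stringer so it prints cleanly in logs.
func (r scannerResource) String() string { return string(r) }
```

A typed string keeps switches over resources checkable and stops arbitrary strings from reaching the API layer.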
Matthias Bertschy
1e1a48bd9a Merge pull request #1064 from fredbi/perf/opaprocessor-process
perf(opaprocessor): run OPA rule compilation and evaluation in parallel
2023-03-24 15:38:14 +01:00
Matthias Bertschy
5923ce5703 Merge pull request #1147 from HollowMan6/install
Change installation path to ~/.kubescape/bin
2023-03-24 12:46:13 +01:00
Hollow Man
d2dcd29089 fix shellcheck warning and info
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-03-24 13:39:15 +02:00
Matthias Bertschy
8a40bab43a Merge pull request #1165 from fredbi/refact/test-utils
Refact(test utils): introduce internal/testutils
2023-03-24 12:38:14 +01:00
Frederic BIDON
dee3a10bac test(utils): introduced internal/testutils package to factorize testing utilities
Signed-off-by: Frederic BIDON <fredbi@yahoo.com>
Signed-off-by: Matthias Bertschy <matthias.bertschy@gmail.com>

Conflicts:
	core/pkg/hostsensorutils/hostsensordeploy_test.go
2023-03-24 11:15:25 +01:00
Matthias Bertschy
9e3ac4b0f7 Merge pull request #1118 from fredbi/chore/refact-kscloud-client
refact(getter): refactor the KS Cloud client
2023-03-24 11:01:31 +01:00
Matthias Bertschy
58f29523a8 Merge pull request #1141 from fredbi/refact/factorize-hostsensor-api-calls
refact(hostsensorutils): refactors the host sensor
2023-03-24 10:52:52 +01:00
Frédéric BIDON
5b62b0b749 addressed review from David: reverted on unconditional loop exit
Signed-off-by: Frédéric BIDON <fredbi@yahoo.com>
2023-03-23 16:56:37 +01:00
Frédéric BIDON
e4f34f6173 refact(host-sensor): refactors the host sensor
This PR factorizes the list of calls to the host-scanner API in a loop.

More godoc-friendly doc strings are added.

Signed-off-by: Frédéric BIDON <fredbi@yahoo.com>
2023-03-23 16:56:37 +01:00
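
The factorization described above, sketched under assumed names: one data-driven loop over endpoints instead of a series of near-identical call sites.

```go
package main

import "fmt"

// The endpoint paths are illustrative, not the host-scanner's real routes.
var endpoints = []string{"/kubeletConfigurations", "/osRelease", "/kernelVersion"}

// fetch stands in for the per-endpoint HTTP call to the host-scanner API.
func fetch(path string) (string, error) {
	return "payload for " + path, nil
}

func main() {
	// One loop replaces repeated call sites; a failure on one endpoint
	// does not abort collection from the others.
	for _, ep := range endpoints {
		data, err := fetch(ep)
		if err != nil {
			fmt.Println("skipping", ep, "error:", err)
			continue
		}
		fmt.Println("collected", len(data), "bytes from", ep)
	}
}
```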
Frédéric BIDON
4a9f26b27c perf(opaprocessor): run OPA rule compilation and evaluation in parallel
This parallelizes the Process() portion of the OPA processor.

The main change is that called methods to evaluate a rule no longer
mutate the internal state of the opaprocessor and allocate maps (less
often, in larger chunks) that are merged at the end of the processing.

Signed-off-by: Frédéric BIDON <fredbi@yahoo.com>
2023-03-23 16:56:21 +01:00
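
The commit body pins down the key idea: evaluation no longer mutates shared processor state, so rules can run in parallel and their result maps are merged once at the end. A self-contained Go sketch of that shape (the names are illustrative, not the opaprocessor API):

```go
package main

import (
	"fmt"
	"sync"
)

// evaluate stands in for compiling and evaluating one OPA rule; it returns
// results in its own map instead of mutating shared processor state.
func evaluate(rule string) map[string]int {
	return map[string]int{rule: len(rule)}
}

func main() {
	rules := []string{"alpha", "beta", "gamma", "delta"}

	results := make(chan map[string]int, len(rules))
	var wg sync.WaitGroup
	for _, r := range rules {
		wg.Add(1)
		go func(rule string) {
			defer wg.Done()
			results <- evaluate(rule) // no locks needed: nothing shared is written
		}(rule)
	}
	wg.Wait()
	close(results)

	// Merge the per-rule maps once, at the end: fewer, larger allocations
	// and no contention in the hot path, as the commit message describes.
	merged := map[string]int{}
	for m := range results {
		for k, v := range m {
			merged[k] = v
		}
	}
	fmt.Println(merged)
}
```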
Frederic BIDON
548955fc16 refact(getter): refactor the KS Cloud client
* Interfaces are unchanged

* Deprecated: low-level API funcs marked for deprecation:
  HttpPost, HttpGetter, HttpDelete (an augmented version of the KS Cloud
  client will expose the post report API, which is currently the sole
  use-case of low-level API)

* Doc: the package is now godoc-friendly

* Style & code layout:
  * listed all exposed types via aliases, for clarity/less confusing
    imports
  * unexported private types
  * factorized query param logic
  * factorized type <-> JSON using generic func & io.Reader
  * "utils" are now limited to a few common utility functions
  * centralized hard-coded strings as (unexported) constants
  * concision: use higher-level http definitions such as constants,
    cookie methods, etc
  * included type-safety guards to verify that interfaces are
    actually implemented by the exported types

* Tests: existing test assertions are unchanged
  * tests are beefed-up to assert proper authentication flow (token & cookie).
  * added unit tests for utility methods

* Perf:
  * unmarshalling API responses now flows without extraneous memory allocation via an intermediate string representation
  * request headers are now passed without extraneous map allocation
  * JSON operations are now fully supported by jsoniter (no longer use encoding/json)

* Changes in functionality:
  * the client is now fully extensible with KSCloudOption
  * use the option functor idiom to keep constructors short
  * methods that used to mute errors (i.e. return nil, nil) now bubble up errors
  * the cookie is now captured in full, not just its value
  (other cookie parameters in the response are stored as well)
  * added a request/response dump option, for debugging
  * added support for SubmitReport and retrieval of UI url's
  * backported utm changes (reports use case)

Signed-off-by: Frederic BIDON <fredbi@yahoo.com>
2023-03-23 16:47:23 +01:00
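
Two techniques named above lend themselves to a short sketch: the option-functor constructor (KSCloudOption) and compile-time type-safety guards. Apart from the KSCloudOption name, everything below is an assumption for illustration, not the real client surface.

```go
package getter

import "net/http"

// KSCloud stands in for the refactored client; the fields are assumptions.
type KSCloud struct {
	httpClient *http.Client
	dumpWire   bool
}

// KSCloudOption is the option functor the commit message mentions.
type KSCloudOption func(*KSCloud)

// WithHTTPClient and WithWireDump are illustrative options.
func WithHTTPClient(c *http.Client) KSCloudOption {
	return func(k *KSCloud) { k.httpClient = c }
}

func WithWireDump() KSCloudOption {
	return func(k *KSCloud) { k.dumpWire = true }
}

// NewKSCloud stays short: set defaults, then apply whatever options came in.
func NewKSCloud(opts ...KSCloudOption) *KSCloud {
	k := &KSCloud{httpClient: http.DefaultClient}
	for _, opt := range opts {
		opt(k)
	}
	return k
}

// Client is a hypothetical interface; the guard below makes the build fail
// if *KSCloud ever stops implementing it.
type Client interface {
	Do(*http.Request) (*http.Response, error)
}

func (k *KSCloud) Do(r *http.Request) (*http.Response, error) {
	return k.httpClient.Do(r)
}

var _ Client = (*KSCloud)(nil)
```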
David Wertenteil
ac2bc6c950 Merge to master - PR number: 1164 2023-03-23 12:49:52 +02:00
MathoAvito
ea27c619d4 Revert "added validation for if ORIGIN_TAG=null" 2023-03-23 12:47:42 +02:00
matanshk
e4150b2bb4 Merge pull request #1163 from kubescape/change-wf
added validation for if ORIGIN_TAG=null
2023-03-23 11:17:06 +02:00
Matan Avital
86c7215a72 added validation for if ORIGIN_TAG=null
Signed-off-by: Matan Avital <matavital13@gmail.com>
2023-03-23 11:16:03 +02:00
Hollow Man
5c24267ee9 check KUBESCAPE_EXEC is not empty before deletion
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-03-21 13:15:21 +02:00
Matan Shkalim
bb576610ff change concurrency in 00-pr-scanner
Signed-off-by: Matan Shkalim <shekel8@gmail.com>
2023-03-21 08:05:40 +00:00
Matan Shkalim
085be86197 remove merge action
Signed-off-by: Matan Shkalim <shekel8@gmail.com>
2023-03-21 08:01:59 +00:00
David Wertenteil
b4180b34e7 core(logs): Enhance logs (#1158)
* adding ks version

Signed-off-by: David Wertenteil <dwertent@armosec.io>

* Initialize scanInfo

Signed-off-by: David Wertenteil <dwertent@armosec.io>

* print if logger level is lower than warning

Signed-off-by: David Wertenteil <dwertent@armosec.io>

* wip: scan default frameworks when scanning files

Signed-off-by: David Wertenteil <dwertent@armosec.io>

* change print to log

Signed-off-by: David Wertenteil <dwertent@armosec.io>

* wip: Add end-line after last log

Signed-off-by: David Wertenteil <dwertent@armosec.io>

* wip: silent spinner when logger is warn

Signed-off-by: David Wertenteil <dwertent@armosec.io>

---------

Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-03-20 17:49:51 +02:00
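
Several of these steps gate output on the logger level (print only below warning, silence the spinner at warn). A sketch of that gating, with illustrative level constants:

```go
package main

import (
	"fmt"
	"os"
)

// Log levels, most verbose first; the values are illustrative.
const (
	levelDebug = iota
	levelInfo
	levelWarning
)

var loggerLevel = levelInfo

// printIfVerbose mirrors the behavior described above: decorative output
// (banners, spinner frames) is emitted only when the logger level is lower
// than warning, so a warn-level run keeps machine-readable output clean.
func printIfVerbose(msg string) {
	if loggerLevel < levelWarning {
		fmt.Fprintln(os.Stderr, msg)
	}
}

func main() {
	printIfVerbose("scanning default frameworks...") // hidden when loggerLevel >= levelWarning
}
```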
Matan Shkalim
6a750671c3 change trigger for wf
Signed-off-by: Matan Shkalim <shekel8@gmail.com>
2023-03-20 15:40:52 +00:00
David Wertenteil
bb5fedc661 Merge to master - PR number: 1160 2023-03-20 17:36:17 +02:00
Matan Avital
678ef2b787 changed ks_branch to release
Signed-off-by: Matan Avital <matavital13@gmail.com>
2023-03-20 17:34:48 +02:00
David Wertenteil
8c238232a1 Merge to master - PR number: 1159 2023-03-20 17:25:56 +02:00
Matan Avital
2ea9e1a596 moved the output TEST_NAMES to wf-preparation job (was check-secret job) and added step export_tests..
Signed-off-by: Matan Avital <matavital13@gmail.com>
2023-03-20 17:22:52 +02:00
matanshk
e788d68f2c Merge pull request #1157 from kubescape/change-wf
Change wf
2023-03-20 14:12:28 +02:00
Matan Avital
62e3d3263d fixed syntax error
Signed-off-by: Matan Avital <matavital13@gmail.com>
2023-03-20 14:11:09 +02:00
Matan Avital
650d489c26 fixed syntax error
Signed-off-by: Matan Avital <matavital13@gmail.com>
2023-03-20 14:09:04 +02:00
matanshk
ea4914057e Merge pull request #1156 from kubescape/change-wf
added input to make the binary build and test dynamic
2023-03-20 13:49:50 +02:00
Matan Avital
100822f48d added input to make the binary build and test dynamic
Signed-off-by: Matan Avital <matavital13@gmail.com>
2023-03-20 13:45:31 +02:00
matanshk
a5f254bebd Merge pull request #1155 from kubescape/change-wf
added CHECKOUT_REPO input parameter
2023-03-19 18:22:12 +02:00
Matan Avital
e3d5a8c3c6 added CHECKOUT_REPO input parameter
Signed-off-by: Matan Avital <matavital13@gmail.com>
2023-03-19 18:19:48 +02:00
Matthias Bertschy
63ff0f5dc9 Merge pull request #1151 from docwhat/patch-1
fix references to kubectl in completion help
2023-03-18 21:55:38 +01:00
David Wertenteil
5173016a1e Merge pull request #1152 from dwertent/update-otel-events
fix(otel): Update otel events
2023-03-16 14:09:58 +02:00
David Wertenteil
4a95e29d5d Merge to master - PR number: 1150 2023-03-16 10:28:44 +02:00
David Wertenteil
d0b5c7c2c2 update host scanner image tag
Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-03-16 09:45:12 +02:00
David Wertenteil
6671ac46f4 change failed to submit message
Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-03-16 09:42:39 +02:00
David Wertenteil
28531859f3 Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-03-15 21:26:10 +02:00
Christian Höltje
4ee209c1ea fix references to kubectl in completion help
Signed-off-by: Christian Höltje <docwhat@gerf.org>
2023-03-15 14:30:38 -04:00
David Wertenteil
4edeec146a Set scanning event
Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-03-15 18:34:02 +02:00
David Wertenteil
ec4a098b1c replace error by warning
Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-03-15 17:17:29 +02:00
David Wertenteil
a29fe367dc Added context to HandleResults
Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-03-15 16:58:02 +02:00
Avraham Shalev
aceb4eb0de add dependencies to httphandler
Signed-off-by: Avraham Shalev <8184528+avrahams@users.noreply.github.com>
2023-03-15 14:49:47 +02:00
David Wertenteil
e7afe45706 Merge to master - PR number: 1149 2023-03-15 14:26:56 +02:00
Avraham Shalev
55ce7086d7 upgrade opa-utils and armo api
Signed-off-by: Avraham Shalev <8184528+avrahams@users.noreply.github.com>
2023-03-15 13:53:30 +02:00
Hollow Man
bb04e98d69 Add prompt for removing old way of installation
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-03-15 00:04:21 +02:00
Hollow Man
0ae4ef2244 Clean uninstall of old installation
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-03-14 22:23:29 +02:00
Hollow Man
f9e38fd6a2 Change installation path to ~/.kubescape/bin
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-03-14 21:16:25 +02:00
Amir Malka
106db84a66 bump go-logger (#1144)
Signed-off-by: Amir Malka <amirm@armosec.io>
2023-03-14 10:00:08 +02:00
David Wertenteil
1930004e60 Merge to master - PR number: 1146 2023-03-14 08:17:49 +02:00
Craig Box
015476bf97 Update CONTRIBUTING.md
Fix the link for correcting the DCO.

Signed-off-by: Craig Box <craigb@armosec.io>
2023-03-14 16:33:26 +13:00
David Wertenteil
1e0b9563a1 Merge to master - PR number: 1129 2023-03-13 13:43:07 +02:00
Alessio Greggi
5aa56b1c0a feat: integrate support to retrieve eks policies
Signed-off-by: Alessio Greggi <ale_grey_91@hotmail.it>
2023-03-13 11:35:07 +01:00
David Wertenteil
fd92411593 Merge pull request #1140 from HollowMan6/master
ci(release): fix publishing krew plugin; add '.exe' extension to Windows binary
2023-03-13 10:42:54 +02:00
David Wertenteil
cb97a424fd Merge pull request #1139 from matthyx/fixcontext
initialize context in Prometheus handler
2023-03-12 16:37:50 +02:00
Hollow Man
2542692f25 Revert add '.exe' to Win release binary
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-03-12 11:58:43 +02:00
Hollow Man
640483e991 ci(release): fix publishing krew plugin; add .exe suffix to Win binary
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-03-12 00:39:34 +02:00
Matthias Bertschy
1004902f51 initialize context in Prometheus handler
Signed-off-by: Matthias Bertschy <matthias.bertschy@gmail.com>
2023-03-09 14:05:26 +01:00
Matthias Bertschy
3b9ce494f5 Merge pull request #1131 from fredbi/test/more-tests-report-receiver
test(reports): adds unit test to the report receiver
2023-03-08 16:56:51 +01:00
Matthias Bertschy
5a37045d9b Merge pull request #1138 from fredbi/test/unit-tests-hostsensorutils
test(hostsensorutils): added unit tests to the hostsensorutils package
2023-03-08 11:12:26 +01:00
Frederic BIDON
91af277a1c fixup unit test: error handling
Signed-off-by: Frederic BIDON <fredbi@yahoo.com>
2023-03-08 08:53:28 +01:00
Frederic BIDON
556962a7e1 test(hostsensorutils): added unit tests to the hostsensorutils package
This PR introduces a (limited) mock for the kubernetes client API.

Signed-off-by: Frederic BIDON <fredbi@yahoo.com>
2023-03-07 20:35:29 +01:00
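
client-go ships a fake clientset that serves API calls from in-memory objects; whether these tests use it or a hand-rolled mock isn't stated here, so treat this as a sketch under that assumption:

```go
package hostsensorutils

import (
	"context"
	"testing"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

// TestListPodsAgainstFakeClient lists pods against client-go's in-memory
// fake clientset; the pod fixture and namespace are illustrative.
func TestListPodsAgainstFakeClient(t *testing.T) {
	client := fake.NewSimpleClientset(&v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "host-scanner-abc", Namespace: "kubescape"},
	})

	pods, err := client.CoreV1().Pods("kubescape").List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		t.Fatal(err)
	}
	if len(pods.Items) != 1 {
		t.Fatalf("expected 1 pod, got %d", len(pods.Items))
	}
}
```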
Frederic BIDON
306da021db test(reports): adds unit test to the report receiver
Signed-off-by: Frederic BIDON <fredbi@yahoo.com>

replace mock

Signed-off-by: Daniel-GrunbergerCA@armosec.com
2023-03-07 19:59:31 +01:00
David Wertenteil
03b0147e39 Merge pull request #1130 from dwertent/update-utm-link-v2
docs(links): Update URLs
2023-03-06 14:08:25 +02:00
Matthias Bertschy
ff9652bd77 Merge pull request #1136 from fredbi/chore/linting-again
chore(linting): run another pass of linting with the rules already in place
2023-03-05 21:17:45 +01:00
Frederic BIDON
7174f49f87 chore(linting): run another pass of linting with the rules already in place
Signed-off-by: Frederic BIDON <fredbi@yahoo.com>
2023-03-05 20:16:37 +01:00
David Wertenteil
7dfbbe7e39 Merge pull request #1133 from amirmalka/remove-otel-middleware-from-some-endpoints
Removed otel middleware from some APIs
2023-03-05 14:26:40 +02:00
Amir Malka
b3079df8ae removed otel middleware from some APIs
Signed-off-by: Amir Malka <amirm@armosec.io>
2023-03-05 11:49:00 +02:00
David Wertenteil
0698c99241 wip: update UTMs & display UTM only on first scan
Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-03-04 23:05:38 +02:00
David Wertenteil
2cda4864e7 wip: do not add message when account ID is empty
Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-03-04 23:05:02 +02:00
David Wertenteil
c2b0e5c0a2 Do not display URL when message is empty
Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-03-04 23:04:26 +02:00
David Wertenteil
6c54aff451 wip: removed unused code
Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-03-04 22:46:20 +02:00
David Wertenteil
dea5649e01 wip: update link in docs
Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-03-04 22:34:08 +02:00
Matthias Bertschy
9e6c9e0f65 Merge pull request #1127 from irLinja/master
refactor: update node scanner daemonset tolerations
2023-03-03 11:49:25 +01:00
Arash Haghighat
3dfd758a82 refactor: update node scanner daemonset tolerations
Signed-off-by: Arash Haghighat <arash@linja.pro>
2023-03-01 16:36:08 +01:00
David Wertenteil
0526f58657 Merge to master - PR number: 1121 2023-02-28 07:40:20 +02:00
Alessio Greggi
e419af6c03 ci: pin workflows versions to fixed commits
Signed-off-by: Alessio Greggi <ale_grey_91@hotmail.it>
2023-02-27 21:43:09 +01:00
Matthias Bertschy
03766ec0cd Merge pull request #1120 from alegrey91/fix/remove-hostnetwork-and-hostport-from-host-scanner-deployment
fix(hostsensorsutils): remove hostNetwork and hostPort from deployment
2023-02-27 19:12:05 +01:00
Alessio Greggi
39e2e34fc0 fix(hostsensorsutils): remove hostNet and hostPort from deployment
Signed-off-by: Alessio Greggi <ale_grey_91@hotmail.it>
2023-02-27 18:20:55 +01:00
David Wertenteil
245331b82a Merge pull request #1119 from amirmalka/added-cluster-name-to-otel-init
added clusterName to otel initialization
2023-02-26 19:27:14 +02:00
Amir Malka
cec4e5ca39 added clusterName to otel initialization
Signed-off-by: Amir Malka <amirm@armosec.io>
2023-02-26 18:07:38 +02:00
168 changed files with 122828 additions and 3932 deletions


@@ -17,7 +17,14 @@ runs:
using: "composite"
steps:
- run: |
SUB='-rc'
if [[ -z "${{ inputs.ORIGINAL_TAG }}" ]]; then
echo "The value of ORIGINAL_TAG is ${{ inputs.ORIGINAL_TAG }}"
echo "Setting the value of ORIGINAL_TAG to ${{ github.ref_name }}"
echo ORIGINAL_TAG="${{ github.ref_name }}" >> $GITHUB_ENV
fi
shell: bash
- run: |
if [[ "${{ inputs.ORIGINAL_TAG }}" == *"${{ inputs.SUB_STRING }}"* ]]; then
echo "Release candidate tag found."
else


@@ -1,9 +1,8 @@
name: 00-pr_scanner
on:
pull_request:
types: [opened, reopened, synchronize, ready_for_review]
branches:
branches:
- 'master'
- 'main'
- 'dev'
@@ -16,15 +15,14 @@ on:
- 'docs/*'
- 'build/*'
- '.github/*'
concurrency:
group: ${{ github.head_ref }}
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
pr-scanner:
permissions:
permissions:
pull-requests: write
uses: ./.github/workflows/a-pr-scanner.yaml
with:


@@ -1,57 +0,0 @@
name: 01-code_review_approved
on:
pull_request_review:
types: [submitted]
branches:
- 'master'
- 'main'
paths-ignore:
- '**.yaml'
- '**.md'
- '**.sh'
- 'website/*'
- 'examples/*'
- 'docs/*'
- 'build/*'
- '.github/*'
concurrency:
group: code-review-approved
cancel-in-progress: true
jobs:
binary-build:
if: ${{ github.event.review.state == 'approved' &&
contains( github.event.pull_request.labels.*.name, 'trigger-integration-test') &&
github.event.pull_request.base.ref == 'master' }} ## run only if labeled as "trigger-integration-test" and base branch is master
uses: ./.github/workflows/b-binary-build-and-e2e-tests.yaml
with:
COMPONENT_NAME: kubescape
CGO_ENABLED: 1
GO111MODULE: ""
GO_VERSION: "1.19"
RELEASE: ""
CLIENT: test
secrets: inherit
merge-to-master:
needs: binary-build
env:
GH_PERSONAL_ACCESS_TOKEN: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
if: ${{ (github.event.review.state == 'approved' && github.event.pull_request.base.ref == 'master') &&
(always() && (contains(needs.*.result, 'success') || contains(needs.*.result, 'skipped')) && !(contains(needs.*.result, 'failure')) && !(contains(needs.*.result, 'cancelled'))) }}
runs-on: ubuntu-latest
steps:
- name: merge-to-master
if: ${{ env.GH_PERSONAL_ACCESS_TOKEN }}
uses: pascalgn/automerge-action@v0.15.5
env:
GITHUB_TOKEN: "${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}"
MERGE_COMMIT_MESSAGE: "Merge to master - PR number: {pullRequest.number}"
MERGE_ERROR_FAIL: "true"
MERGE_METHOD: "merge"
MERGE_LABELS: ""
UPDATE_LABELS: ""

.github/workflows/01-pr-merged.yaml (new file)

@@ -0,0 +1,34 @@
name: 01-pr-merged
on:
pull_request_target:
types: [closed]
branches:
- 'master'
- 'main'
paths-ignore:
- '**.yaml'
- '**.md'
- '**.sh'
- 'website/*'
- 'examples/*'
- 'docs/*'
- 'build/*'
- '.github/*'
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
binary-build:
if: ${{ github.event.pull_request.merged == true && contains( github.event.pull_request.labels.*.name, 'trigger-integration-test') && github.event.pull_request.base.ref == 'master' }} ## run only if labeled as "trigger-integration-test" and base branch is master
uses: ./.github/workflows/b-binary-build-and-e2e-tests.yaml
with:
COMPONENT_NAME: kubescape
CGO_ENABLED: 1
GO111MODULE: ""
GO_VERSION: "1.20"
RELEASE: ""
CLIENT: test
secrets: inherit


@@ -1,23 +1,19 @@
name: 02-create_release
on:
push:
tags:
- 'v*.*.*-rc.*'
- 'v*.*.*-rc.*'
jobs:
retag:
outputs:
NEW_TAG: ${{ steps.tag-calculator.outputs.NEW_TAG }}
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # ratchet:actions/checkout@v3
- id: tag-calculator
uses: ./.github/actions/tag-action
with:
SUB_STRING: "-rc"
binary-build:
needs: [retag]
uses: ./.github/workflows/b-binary-build-and-e2e-tests.yaml
@@ -25,44 +21,27 @@ jobs:
COMPONENT_NAME: kubescape
CGO_ENABLED: 1
GO111MODULE: ""
GO_VERSION: "1.19"
GO_VERSION: "1.20"
RELEASE: ${{ needs.retag.outputs.NEW_TAG }}
CLIENT: release
secrets: inherit
create-release:
permissions:
contents: write
contents: write
needs: [retag, binary-build]
uses: ./.github/workflows/c-create-release.yaml
with:
RELEASE_NAME: "Release ${{ needs.retag.outputs.NEW_TAG }}"
TAG: ${{ needs.retag.outputs.NEW_TAG }}
DRAFT: false
secrets: inherit
publish-krew-plugin:
name: Publish Krew plugin
runs-on: ubuntu-latest
if: "${{ github.repository_owner }} == kubescape"
needs: create-release
steps:
- uses: actions/checkout@v3
with:
submodules: recursive
- name: Update new version in krew-index
env:
# overriding the GITHUB_REF so the action can extract the right tag -> https://github.com/rajatjindal/krew-release-bot/blob/v0.0.43/pkg/cicd/github/actions.go#L25
GITHUB_REF: refs/tags/${{ needs.retag.outputs.NEW_TAG }}
uses: rajatjindal/krew-release-bot@v0.0.43
secrets: inherit
publish-image:
permissions:
id-token: write
packages: write
contents: read
contents: read
uses: ./.github/workflows/d-publish-image.yaml
needs: [ create-release, retag ]
needs: [create-release, retag]
with:
client: "image-release"
image_name: "quay.io/${{ github.repository_owner }}/kubescape"


@@ -1,19 +1,41 @@
name: 03-create_release_digests
name: 03-post_release
on:
release:
types: [ published ]
types: [published]
branches:
- 'master'
- 'main'
- 'master'
- 'main'
jobs:
create_release_digests:
name: Creating digests
post_release:
name: Post release jobs
runs-on: ubuntu-latest
steps:
- name: Digest
uses: MCJack123/ghaction-generate-release-hashes@v1
uses: MCJack123/ghaction-generate-release-hashes@c03f3111b39432dde3edebe401c5a8d1ffbbf917 # ratchet:MCJack123/ghaction-generate-release-hashes@v1
with:
hash-type: sha1
file-name: kubescape-release-digests
- name: Invoke workflow to update packaging
uses: benc-uk/workflow-dispatch@v1
if: github.repository_owner == 'kubescape'
with:
workflow: release.yml
repo: kubescape/packaging
ref: refs/heads/main
token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
- name: Invoke workflow to update homebrew tap
uses: benc-uk/workflow-dispatch@v1
if: github.repository_owner == 'kubescape'
with:
workflow: release.yml
repo: kubescape/homebrew-tap
ref: refs/heads/main
token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
- name: Invoke workflow to update github action
uses: benc-uk/workflow-dispatch@v1
if: github.repository_owner == 'kubescape'
with:
workflow: release.yaml
repo: kubescape/github-action
ref: refs/heads/main
token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}


@@ -0,0 +1,16 @@
name: 04-publish_krew_plugin
on:
push:
tags:
- 'v[0-9]+.[0-9]+.[0-9]+'
jobs:
publish_krew_plugin:
name: Publish Krew plugin
runs-on: ubuntu-latest
if: github.repository_owner == 'kubescape'
steps:
- uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # ratchet:actions/checkout@v3
with:
submodules: recursive
- name: Update new version in krew-index
uses: rajatjindal/krew-release-bot@92da038bbf995803124a8e50ebd438b2f37bbbb0 # ratchet:rajatjindal/krew-release-bot@v0.0.43


@@ -1,5 +1,4 @@
name: a-pr-scanner
on:
workflow_call:
inputs:
@@ -11,27 +10,27 @@ on:
description: 'Client name'
required: true
type: string
UNIT_TESTS_PATH:
required: false
type: string
default: "./..."
jobs:
scanners:
env:
GITGUARDIAN_API_KEY: ${{ secrets.GITGUARDIAN_API_KEY }}
SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }}
GITGUARDIAN_API_KEY: ${{ secrets.GITGUARDIAN_API_KEY }}
SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }}
name: PR Scanner
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # ratchet:actions/checkout@v3
with:
fetch-depth: 0
submodules: recursive
- uses: actions/setup-go@v3 # Install go because go-licenses use it
- uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # Install go because go-licenses use it ratchet:actions/setup-go@v3
name: Installing go
with:
go-version: '1.19'
go-version: '1.20'
cache: true
- name: Scanning - Forbidden Licenses (go-licenses)
id: licenses-scan
continue-on-error: true
@@ -40,142 +39,63 @@ jobs:
go install github.com/google/go-licenses@latest
echo "## Scanning for forbiden licenses ##"
go-licenses check .
- name: Scanning - Credentials (GitGuardian)
if: ${{ env.GITGUARDIAN_API_KEY }}
continue-on-error: true
continue-on-error: true
id: credentials-scan
uses: GitGuardian/ggshield-action@master
uses: GitGuardian/ggshield-action@4ab2994172fadab959240525e6b833d9ae3aca61 # ratchet:GitGuardian/ggshield-action@master
with:
args: -v --all-policies
args: -v --all-policies
env:
GITHUB_PUSH_BEFORE_SHA: ${{ github.event.before }}
GITHUB_PUSH_BASE_SHA: ${{ github.event.base }}
GITHUB_PULL_BASE_SHA: ${{ github.event.pull_request.base.sha }}
GITHUB_DEFAULT_BRANCH: ${{ github.event.repository.default_branch }}
GITGUARDIAN_API_KEY: ${{ secrets.GITGUARDIAN_API_KEY }}
- name: Scanning - Vulnerabilities (Snyk)
if: ${{ env.SNYK_TOKEN }}
id: vulnerabilities-scan
continue-on-error: true
uses: snyk/actions/golang@master
uses: snyk/actions/golang@806182742461562b67788a64410098c9d9b96adb # ratchet:snyk/actions/golang@master
with:
command: test --all-projects
env:
SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }}
- name: Test coverage
id: unit-test
run: go test -v ${{ inputs.UNIT_TESTS_PATH }} -covermode=count -coverprofile=coverage.out
- name: Convert coverage count to lcov format
uses: jandelgado/gcov2lcov-action@v1
- name: Submit coverage tests to Coveralls
continue-on-error: true
uses: coverallsapp/github-action@v1
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
path-to-lcov: coverage.lcov
- name: Comment results to PR
continue-on-error: true # Warning: This might break opening PRs from forks
uses: peter-evans/create-or-update-comment@v2.1.0
uses: peter-evans/create-or-update-comment@5adcb0bb0f9fb3f95ef05400558bdb3f329ee808 # ratchet:peter-evans/create-or-update-comment@v2.1.0
with:
issue-number: ${{ github.event.pull_request.number }}
issue-number: ${{ github.event.pull_request.number }}
body: |
Scan results:
- License scan: ${{ steps.licenses-scan.outcome }}
- Credentials scan: ${{ steps.credentials-scan.outcome }}
- Vulnerabilities scan: ${{ steps.vulnerabilities-scan.outcome }}
reactions: 'eyes'
basic-tests:
needs: scanners
name: Create cross-platform build
runs-on: ${{ matrix.os }}
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
uses: ./.github/workflows/b-binary-build-and-e2e-tests.yaml
with:
COMPONENT_NAME: kubescape
CGO_ENABLED: 1
GO111MODULE: ""
GO_VERSION: "1.20"
RELEASE: ${{ inputs.RELEASE }}
CLIENT: ${{ inputs.CLIENT }}
strategy:
matrix:
os: [ubuntu-20.04, macos-latest, windows-latest]
steps:
- uses: actions/checkout@v3
with:
submodules: recursive
- name: Cache Go modules (Linux)
if: matrix.os == 'ubuntu-latest'
uses: actions/cache@v3
with:
path: |
~/.cache/go-build
~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: Cache Go modules (macOS)
if: matrix.os == 'macos-latest'
uses: actions/cache@v3
with:
path: |
~/Library/Caches/go-build
~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: Cache Go modules (Windows)
if: matrix.os == 'windows-latest'
uses: actions/cache@v3
with:
path: |
~\AppData\Local\go-build
~\go\pkg\mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version: 1.19
- name: Install MSYS2 & libgit2 (Windows)
shell: cmd
run: .\build.bat all
if: matrix.os == 'windows-latest'
- name: Install pkg-config (macOS)
run: brew install pkg-config
if: matrix.os == 'macos-latest'
- name: Install libgit2 (Linux/macOS)
run: make libgit2
if: matrix.os != 'windows-latest'
- name: Test core pkg
run: go test "-tags=static,gitenabled" -v ./...
- name: Test httphandler pkg
run: cd httphandler && go test "-tags=static,gitenabled" -v ./...
- name: Build
env:
RELEASE: ${{ inputs.RELEASE }}
CLIENT: ${{ inputs.CLIENT }}
CGO_ENABLED: 1
run: python3 --version && python3 build.py
- name: Smoke Testing (Windows / MacOS)
env:
RELEASE: ${{ inputs.RELEASE }}
KUBESCAPE_SKIP_UPDATE_CHECK: "true"
run: python3 smoke_testing/init.py ${PWD}/build/kubescape-${{ matrix.os }}
if: matrix.os != 'ubuntu-20.04'
- name: Smoke Testing (Linux)
env:
RELEASE: ${{ inputs.RELEASE }}
KUBESCAPE_SKIP_UPDATE_CHECK: "true"
run: python3 smoke_testing/init.py ${PWD}/build/kubescape-ubuntu-latest
if: matrix.os == 'ubuntu-20.04'
- name: golangci-lint
if: matrix.os == 'ubuntu-20.04'
continue-on-error: true
uses: golangci/golangci-lint-action@v3
with:
version: latest
args: --timeout 10m --build-tags=static
only-new-issues: true
CHECKOUT_REPO: ${{ github.repository }}
secrets: inherit


@@ -13,7 +13,7 @@ on:
type: string
GO_VERSION:
type: string
default: "1.19"
default: "1.20"
GO111MODULE:
required: true
type: string
@@ -22,73 +22,65 @@ on:
default: 1
BINARY_TESTS:
type: string
default: '[
"scan_nsa",
"scan_mitre",
"scan_with_exceptions",
"scan_repository",
"scan_local_file",
"scan_local_glob_files",
"scan_local_list_of_files",
"scan_nsa_and_submit_to_backend",
"scan_mitre_and_submit_to_backend",
"scan_local_repository_and_submit_to_backend",
"scan_repository_from_url_and_submit_to_backend",
"scan_with_exception_to_backend",
"scan_with_custom_framework",
"scan_customer_configuration",
"host_scanner"
]'
default: '[ "scan_nsa", "scan_mitre", "scan_with_exceptions", "scan_repository", "scan_local_file", "scan_local_glob_files", "scan_local_list_of_files", "scan_nsa_and_submit_to_backend", "scan_mitre_and_submit_to_backend", "scan_local_repository_and_submit_to_backend", "scan_repository_from_url_and_submit_to_backend", "scan_with_exception_to_backend", "scan_with_custom_framework", "scan_customer_configuration", "host_scanner", "scan_compliance_score" ]'
CHECKOUT_REPO:
required: false
type: string
jobs:
check-secret:
wf-preparation:
name: secret-validator
runs-on: ubuntu-latest
outputs:
TEST_NAMES: ${{ steps.export_tests_to_env.outputs.TEST_NAMES }}
is-secret-set: ${{ steps.check-secret-set.outputs.is-secret-set }}
steps:
- name: check if the necessary secrets are set in github secrets
id: check-secret-set
env:
CUSTOMER: ${{ secrets.CUSTOMER }}
USERNAME: ${{ secrets.USERNAME }}
PASSWORD: ${{ secrets.PASSWORD }}
CLIENT_ID: ${{ secrets.CLIENT_ID_PROD }}
SECRET_KEY: ${{ secrets.SECRET_KEY_PROD }}
REGISTRY_USERNAME: ${{ secrets.REGISTRY_USERNAME }}
REGISTRY_PASSWORD: ${{ secrets.REGISTRY_PASSWORD }}
run: |
echo "is-secret-set=${{ env.CUSTOMER != '' &&
env.USERNAME != '' &&
env.PASSWORD != '' &&
env.CLIENT_ID != '' &&
env.SECRET_KEY != '' &&
env.REGISTRY_USERNAME != '' &&
env.REGISTRY_PASSWORD != ''
}}" >> $GITHUB_OUTPUT
CUSTOMER: ${{ secrets.CUSTOMER }}
USERNAME: ${{ secrets.USERNAME }}
PASSWORD: ${{ secrets.PASSWORD }}
CLIENT_ID: ${{ secrets.CLIENT_ID_PROD }}
SECRET_KEY: ${{ secrets.SECRET_KEY_PROD }}
REGISTRY_USERNAME: ${{ secrets.REGISTRY_USERNAME }}
REGISTRY_PASSWORD: ${{ secrets.REGISTRY_PASSWORD }}
run: "echo \"is-secret-set=${{ env.CUSTOMER != '' && \n env.USERNAME != '' &&\n env.PASSWORD != '' &&\n env.CLIENT_ID != '' &&\n env.SECRET_KEY != '' &&\n env.REGISTRY_USERNAME != '' &&\n env.REGISTRY_PASSWORD != ''\n }}\" >> $GITHUB_OUTPUT\n"
- id: export_tests_to_env
name: set test name
run: |
echo "TEST_NAMES=$input" >> $GITHUB_OUTPUT
env:
input: ${{ inputs.BINARY_TESTS }}
binary-build:
name: Create cross-platform build
outputs:
TEST_NAMES: ${{ steps.export_tests_to_env.outputs.TEST_NAMES }}
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
GOARCH: ${{ matrix.arch }}
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ubuntu-20.04, macos-latest, windows-latest]
arch: ["", arm64]
exclude:
- os: windows-latest
arch: arm64
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # ratchet:actions/checkout@v3
with:
repository: ${{inputs.CHECKOUT_REPO}}
fetch-depth: 0
submodules: recursive
- name: Cache Go modules (Linux)
if: matrix.os == 'ubuntu-20.04'
uses: actions/cache@v3
if: matrix.os == 'ubuntu-20.04'
uses: actions/cache@69d9d449aced6a2ede0bc19182fadc3a0a42d2b0 # ratchet:actions/cache@v3
with:
path: |
~/.cache/go-build
@@ -98,8 +90,8 @@ jobs:
${{ runner.os }}-go-
- name: Cache Go modules (macOS)
if: matrix.os == 'macos-latest'
uses: actions/cache@v3
if: matrix.os == 'macos-latest'
uses: actions/cache@69d9d449aced6a2ede0bc19182fadc3a0a42d2b0 # ratchet:actions/cache@v3
with:
path: |
~/Library/Caches/go-build
@@ -110,7 +102,7 @@ jobs:
- name: Cache Go modules (Windows)
if: matrix.os == 'windows-latest'
uses: actions/cache@v3
uses: actions/cache@69d9d449aced6a2ede0bc19182fadc3a0a42d2b0 # ratchet:actions/cache@v3
with:
path: |
~\AppData\Local\go-build
@@ -119,12 +111,29 @@ jobs:
restore-keys: |
${{ runner.os }}-go-
- uses: actions/setup-go@v3
- uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # ratchet:actions/setup-go@v3
name: Installing go
with:
go-version: ${{ inputs.GO_VERSION }}
cache: true
- name: start ${{ matrix.arch }} environment in container
run: |
sudo apt-get update
sudo apt-get install -y binfmt-support qemu-user-static
sudo docker run --platform linux/${{ matrix.arch }} -e RELEASE=${{ inputs.RELEASE }} \
-e CLIENT=${{ inputs.CLIENT }} -e CGO_ENABLED=${{ inputs.CGO_ENABLED }} \
-e KUBESCAPE_SKIP_UPDATE_CHECK=true -e GOARCH=${{ matrix.arch }} -v ${PWD}:/work \
-w /work -v ~/go/pkg/mod:/root/go/pkg/mod -v ~/.cache/go-build:/root/.cache/go-build \
-d --name build golang:${{ inputs.GO_VERSION }}-bullseye sleep 21600
sudo docker ps
DOCKER_CMD="sudo docker exec build"
${DOCKER_CMD} apt update
${DOCKER_CMD} apt install -y cmake python3
${DOCKER_CMD} git config --global --add safe.directory '*'
echo "DOCKER_CMD=${DOCKER_CMD}" >> $GITHUB_ENV;
if: matrix.os == 'ubuntu-20.04' && matrix.arch != ''
- name: Install MSYS2 & libgit2 (Windows)
shell: cmd
run: .\build.bat all
@@ -135,79 +144,80 @@ jobs:
if: matrix.os == 'macos-latest'
- name: Install libgit2 (Linux/macOS)
run: make libgit2
run: ${{ env.DOCKER_CMD }} make libgit2${{ matrix.arch }}
if: matrix.os != 'windows-latest'
- name: Test core pkg
run: go test "-tags=static,gitenabled" -v ./...
run: ${{ env.DOCKER_CMD }} go test "-tags=static,gitenabled" -v ./...
if: "!startsWith(github.ref, 'refs/tags') && matrix.os == 'ubuntu-20.04' && matrix.arch == '' || startsWith(github.ref, 'refs/tags') && (matrix.os != 'macos-latest' || matrix.arch != 'arm64')"
- name: Test httphandler pkg
run: cd httphandler && go test "-tags=static,gitenabled" -v ./...
run: ${{ env.DOCKER_CMD }} sh -c 'cd httphandler && go test "-tags=static,gitenabled" -v ./...'
if: "!startsWith(github.ref, 'refs/tags') && matrix.os == 'ubuntu-20.04' && matrix.arch == '' || startsWith(github.ref, 'refs/tags') && (matrix.os != 'macos-latest' || matrix.arch != 'arm64')"
- name: Build
env:
RELEASE: ${{ inputs.RELEASE }}
CLIENT: ${{ inputs.CLIENT }}
CGO_ENABLED: ${{ inputs.CGO_ENABLED }}
run: python3 --version && python3 build.py
run: ${{ env.DOCKER_CMD }} python3 --version && ${{ env.DOCKER_CMD }} python3 build.py
- name: Smoke Testing (Windows / MacOS)
env:
RELEASE: ${{ inputs.RELEASE }}
RELEASE: ${{ inputs.RELEASE }}
KUBESCAPE_SKIP_UPDATE_CHECK: "true"
run: python3 smoke_testing/init.py ${PWD}/build/kubescape-${{ matrix.os }}
if: matrix.os != 'ubuntu-20.04'
if: startsWith(github.ref, 'refs/tags') && matrix.os != 'ubuntu-20.04' && matrix.arch == ''
- name: Smoke Testing (Linux)
- name: Smoke Testing (Linux amd64)
env:
RELEASE: ${{ inputs.RELEASE }}
RELEASE: ${{ inputs.RELEASE }}
KUBESCAPE_SKIP_UPDATE_CHECK: "true"
run: python3 smoke_testing/init.py ${PWD}/build/kubescape-ubuntu-latest
if: matrix.os == 'ubuntu-20.04'
run: ${{ env.DOCKER_CMD }} python3 smoke_testing/init.py ${PWD}/build/kubescape-ubuntu-latest
if: matrix.os == 'ubuntu-20.04' && matrix.arch == ''
- name: Smoke Testing (Linux ${{ matrix.arch }})
env:
RELEASE: ${{ inputs.RELEASE }}
KUBESCAPE_SKIP_UPDATE_CHECK: "true"
run: ${{ env.DOCKER_CMD }} python3 smoke_testing/init.py ./build/kubescape-${{ matrix.arch }}-ubuntu-latest
if: startsWith(github.ref, 'refs/tags') && matrix.os == 'ubuntu-20.04' && matrix.arch != ''
- name: golangci-lint
if: matrix.os == 'ubuntu-20.04'
if: matrix.os == 'ubuntu-20.04'
continue-on-error: true
uses: golangci/golangci-lint-action@v3
uses: golangci/golangci-lint-action@08e2f20817b15149a52b5b3ebe7de50aff2ba8c5 # ratchet:golangci/golangci-lint-action@v3
with:
version: latest
args: --timeout 10m --build-tags=static
only-new-issues: true
- id: export_tests_to_env
name: set test name
run: |
echo "TEST_NAMES=$input" >> $GITHUB_OUTPUT
env:
input: ${{ inputs.BINARY_TESTS }}
- uses: actions/upload-artifact@v3.1.1
- uses: actions/upload-artifact@83fd05a356d7e2593de66fc9913b3002723633cb # ratchet:actions/upload-artifact@v3.1.1
name: Upload artifact (Linux)
if: matrix.os == 'ubuntu-20.04'
with:
name: kubescape-ubuntu-latest
name: kubescape${{ matrix.arch }}-ubuntu-latest
path: build/
if-no-files-found: error
- uses: actions/upload-artifact@v3.1.1
- uses: actions/upload-artifact@83fd05a356d7e2593de66fc9913b3002723633cb # ratchet:actions/upload-artifact@v3.1.1
name: Upload artifact (MacOS, Win)
if: matrix.os != 'ubuntu-20.04'
with:
name: kubescape-${{ matrix.os }}
name: kubescape${{ matrix.arch }}-${{ matrix.os }}
path: build/
if-no-files-found: error
run-tests:
strategy:
fail-fast: false
fail-fast: false
matrix:
TEST: ${{ fromJson(needs.binary-build.outputs.TEST_NAMES) }}
needs: [check-secret, binary-build]
if: needs.check-secret.outputs.is-secret-set == 'true'
TEST: ${{ fromJson(needs.wf-preparation.outputs.TEST_NAMES) }}
needs: [wf-preparation, binary-build]
if: ${{ (needs.wf-preparation.outputs.is-secret-set == 'true') && (always() && (contains(needs.*.result, 'success') || contains(needs.*.result, 'skipped')) && !(contains(needs.*.result, 'failure')) && !(contains(needs.*.result, 'cancelled'))) }}
runs-on: ubuntu-latest # This cannot change
steps:
- uses: actions/download-artifact@v3.0.2
- uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # ratchet:actions/download-artifact@v3.0.2
id: download-artifact
with:
name: kubescape-ubuntu-latest
@@ -219,12 +229,12 @@ jobs:
run: chmod +x -R ${{steps.download-artifact.outputs.download-path}}/kubescape-ubuntu-latest
- name: Checkout systests repo
uses: actions/checkout@v3
uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # ratchet:actions/checkout@v3
with:
repository: armosec/system-tests
path: .
- uses: actions/setup-python@v4
- uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435 # ratchet:actions/setup-python@v4
with:
python-version: '3.8.13'
cache: 'pip'
@@ -234,16 +244,16 @@ jobs:
- name: Generate uuid
id: uuid
run: |
run: |
echo "RANDOM_UUID=$(uuidgen)" >> $GITHUB_OUTPUT
- name: Create k8s Kind Cluster
id: kind-cluster-install
uses: helm/kind-action@v1.3.0
uses: helm/kind-action@d08cf6ff1575077dee99962540d77ce91c62387d # ratchet:helm/kind-action@v1.3.0
with:
cluster_name: ${{ steps.uuid.outputs.RANDOM_UUID }}
- name: run-tests
- name: run-tests-on-local-built-kubescape
env:
CUSTOMER: ${{ secrets.CUSTOMER }}
USERNAME: ${{ secrets.USERNAME }}
@@ -252,7 +262,6 @@ jobs:
SECRET_KEY: ${{ secrets.SECRET_KEY_PROD }}
REGISTRY_USERNAME: ${{ secrets.REGISTRY_USERNAME }}
REGISTRY_PASSWORD: ${{ secrets.REGISTRY_PASSWORD }}
run: |
echo "Test history:"
echo " ${{ matrix.TEST }} " >/tmp/testhistory
@@ -266,14 +275,12 @@ jobs:
--duration 3 \
--logger DEBUG \
--kwargs kubescape=${{steps.download-artifact.outputs.download-path}}/kubescape-ubuntu-latest
deactivate
- name: Test Report
uses: mikepenz/action-junit-report@v3.6.1
uses: mikepenz/action-junit-report@6e9933f4a97f4d2b99acef4d7b97924466037882 # ratchet:mikepenz/action-junit-report@v3.6.1
if: always() # always run even if the previous step fails
with:
report_paths: '**/results_xml_format/**.xml'
commit: ${{github.event.workflow_run.head_sha}}

View File

@@ -15,43 +15,58 @@ on:
required: false
type: boolean
default: false
jobs:
create-release:
name: create-release
runs-on: ubuntu-latest
env:
MAC_OS: macos-latest
UBUNTU_OS: ubuntu-latest
WINDOWS_OS: windows-latest
# permissions:
# contents: write
# contents: write
steps:
- uses: actions/download-artifact@v3.0.2
- uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # ratchet:actions/download-artifact@v3.0.2
id: download-artifact
with:
path: .
# TODO: kubescape-windows-latest is deprecated and should be removed
- name: Get kubescape.exe from kubescape-windows-latest
run: cp ./kubescape-${{ env.WINDOWS_OS }}/kubescape-${{ env.WINDOWS_OS }} ./kubescape-${{ env.WINDOWS_OS }}/kubescape.exe
- name: Set release token
run: |
if [ "${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}" != "" ]; then
echo "TOKEN=${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}" >> $GITHUB_ENV;
else
echo "TOKEN=${{ secrets.GITHUB_TOKEN }}" >> $GITHUB_ENV;
fi
- name: Release
uses: softprops/action-gh-release@v1
env:
MAC_OS: macos-latest
UBUNTU_OS: ubuntu-latest
WINDOWS_OS: windows-latest
uses: softprops/action-gh-release@de2c0eb89ae2a093876385947365aca7b0e5f844 # ratchet:softprops/action-gh-release@v1
with:
token: ${{ secrets.GITHUB_TOKEN }}
token: ${{ env.TOKEN }}
name: ${{ inputs.RELEASE_NAME }}
tag_name: ${{ inputs.TAG }}
body: ${{ github.event.pull_request.body }}
draft: ${{ inputs.DRAFT }}
fail_on_unmatched_files: true
prerelease: false
# TODO: kubescape-windows-latest is deprecated and should be removed
files: |
./kubescape-${{ env.WINDOWS_OS }}/kubescape-${{ env.WINDOWS_OS }}
./kubescape-${{ env.MAC_OS }}/kubescape-${{ env.MAC_OS }}
./kubescape-${{ env.MAC_OS }}/kubescape-${{ env.MAC_OS }}.sha256
./kubescape-${{ env.MAC_OS }}/kubescape-${{ env.MAC_OS }}.tar.gz
./kubescape-${{ env.UBUNTU_OS }}/kubescape-${{ env.UBUNTU_OS }}
./kubescape-${{ env.UBUNTU_OS }}/kubescape-${{ env.UBUNTU_OS }}.sha256
./kubescape-${{ env.UBUNTU_OS }}/kubescape-${{ env.UBUNTU_OS }}.tar.gz
./kubescape-${{ env.WINDOWS_OS }}/kubescape-${{ env.WINDOWS_OS }}
./kubescape-${{ env.WINDOWS_OS }}/kubescape.exe
./kubescape-${{ env.WINDOWS_OS }}/kubescape-${{ env.WINDOWS_OS }}.sha256
./kubescape-${{ env.WINDOWS_OS }}/kubescape-${{ env.WINDOWS_OS }}.tar.gz
./kubescapearm64-${{ env.MAC_OS }}/kubescape-arm64-${{ env.MAC_OS }}
./kubescapearm64-${{ env.MAC_OS }}/kubescape-arm64-${{ env.MAC_OS }}.sha256
./kubescapearm64-${{ env.MAC_OS }}/kubescape-arm64-${{ env.MAC_OS }}.tar.gz
./kubescapearm64-${{ env.UBUNTU_OS }}/kubescape-arm64-${{ env.UBUNTU_OS }}
./kubescapearm64-${{ env.UBUNTU_OS }}/kubescape-arm64-${{ env.UBUNTU_OS }}.sha256
./kubescapearm64-${{ env.UBUNTU_OS }}/kubescape-arm64-${{ env.UBUNTU_OS }}.tar.gz

View File

@@ -1,5 +1,4 @@
name: d-publish-image
on:
workflow_call:
inputs:
@@ -25,7 +24,6 @@ on:
default: true
type: boolean
description: 'support amd64/arm64'
jobs:
check-secret:
name: check if QUAYIO_REGISTRY_USERNAME & QUAYIO_REGISTRY_PASSWORD is set in github secrets
@@ -36,44 +34,36 @@ jobs:
- name: check if QUAYIO_REGISTRY_USERNAME & QUAYIO_REGISTRY_PASSWORD is set in github secrets
id: check-secret-set
env:
QUAYIO_REGISTRY_USERNAME: ${{ secrets.QUAYIO_REGISTRY_USERNAME }}
QUAYIO_REGISTRY_PASSWORD: ${{ secrets.QUAYIO_REGISTRY_PASSWORD }}
QUAYIO_REGISTRY_USERNAME: ${{ secrets.QUAYIO_REGISTRY_USERNAME }}
QUAYIO_REGISTRY_PASSWORD: ${{ secrets.QUAYIO_REGISTRY_PASSWORD }}
run: |
echo "is-secret-set=${{ env.QUAYIO_REGISTRY_USERNAME != '' && env.QUAYIO_REGISTRY_PASSWORD != '' }}" >> $GITHUB_OUTPUT
echo "is-secret-set=${{ env.QUAYIO_REGISTRY_USERNAME != '' && env.QUAYIO_REGISTRY_PASSWORD != '' }}" >> $GITHUB_OUTPUT
build-image:
needs: [check-secret]
if: needs.check-secret.outputs.is-secret-set == 'true'
name: Build image and upload to registry
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # ratchet:actions/checkout@v3
with:
submodules: recursive
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
uses: docker/setup-qemu-action@e81a89b1732b9c48d79cd809d8d81d79c4647a18 # ratchet:docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
uses: docker/setup-buildx-action@f03ac48505955848960e80bbb68046aa35c7b9e7 # ratchet:docker/setup-buildx-action@v2
- name: Login to Quay.io
env:
QUAY_PASSWORD: ${{ secrets.QUAYIO_REGISTRY_PASSWORD }}
QUAY_USERNAME: ${{ secrets.QUAYIO_REGISTRY_USERNAME }}
run: docker login -u="${QUAY_USERNAME}" -p="${QUAY_PASSWORD}" quay.io
- name: Build and push image
if: ${{ inputs.support_platforms }}
run: docker buildx build . --file build/Dockerfile --tag ${{ inputs.image_name }}:${{ inputs.image_tag }} --tag ${{ inputs.image_name }}:latest --build-arg image_version=${{ inputs.image_tag }} --build-arg client=${{ inputs.client }} --push --platform linux/amd64,linux/arm64
- name: Build and push image without amd64/arm64 support
if: ${{ !inputs.support_platforms }}
run: docker buildx build . --file build/Dockerfile --tag ${{ inputs.image_name }}:${{ inputs.image_tag }} --tag ${{ inputs.image_name }}:latest --build-arg image_version=${{ inputs.image_tag }} --build-arg client=${{ inputs.client }} --push
run: docker buildx build . --file build/Dockerfile --tag ${{ inputs.image_name }}:${{ inputs.image_tag }} --tag ${{ inputs.image_name }}:latest --build-arg image_version=${{ inputs.image_tag }} --build-arg client=${{ inputs.client }} --push
- name: Install cosign
uses: sigstore/cosign-installer@main
uses: sigstore/cosign-installer@4079ad3567a89f68395480299c77e40170430341 # ratchet:sigstore/cosign-installer@main
with:
cosign-release: 'v1.12.0'
- name: sign kubescape container image
@@ -81,5 +71,4 @@ jobs:
env:
COSIGN_EXPERIMENTAL: "true"
run: |
cosign sign --force ${{ inputs.image_name }}
cosign sign --force ${{ inputs.image_name }}

View File

@@ -1,23 +1,19 @@
on:
issues:
types: [opened, labeled]
jobs:
open_PR_message:
if: github.event.label.name == 'typo'
runs-on: ubuntu-latest
steps:
- uses: ben-z/actions-comment-on-issue@1.0.2
- uses: ben-z/actions-comment-on-issue@10be23f9c43ac792663043420fda29dde07e2f0f # ratchet:ben-z/actions-comment-on-issue@1.0.2
with:
message: "Hello! :wave:\n\nThis issue is being automatically closed. Please open a PR with a relevant fix."
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
auto_close_issues:
runs-on: ubuntu-latest
steps:
- uses: lee-dohm/close-matching-issues@v2
- uses: lee-dohm/close-matching-issues@e9e43aad2fa6f06a058cedfd8fb975fd93b56d8f # ratchet:lee-dohm/close-matching-issues@v2
with:
query: 'label:typo'
token: ${{ secrets.GITHUB_TOKEN }}

View File

@@ -16,15 +16,27 @@ spec:
arch: amd64
{{ addURIAndSha "https://github.com/kubescape/kubescape/releases/download/{{ .TagName }}/kubescape-macos-latest.tar.gz" .TagName }}
bin: kubescape
- selector:
matchLabels:
os: darwin
arch: arm64
{{ addURIAndSha "https://github.com/kubescape/kubescape/releases/download/{{ .TagName }}/kubescape-arm64-macos-latest.tar.gz" .TagName }}
bin: kubescape
- selector:
matchLabels:
os: linux
arch: amd64
{{ addURIAndSha "https://github.com/kubescape/kubescape/releases/download/{{ .TagName }}/kubescape-ubuntu-latest.tar.gz" .TagName }}
bin: kubescape
- selector:
matchLabels:
os: linux
arch: arm64
{{ addURIAndSha "https://github.com/kubescape/kubescape/releases/download/{{ .TagName }}/kubescape-arm64-ubuntu-latest.tar.gz" .TagName }}
bin: kubescape
- selector:
matchLabels:
os: windows
arch: amd64
{{ addURIAndSha "https://github.com/kubescape/kubescape/releases/download/{{ .TagName }}/kubescape-windows-latest.tar.gz" .TagName }}
bin: kubescape
bin: kubescape.exe

View File

@@ -15,6 +15,10 @@ so the maintainers are able to help guide you and let you know if you are going
Please follow our [code of conduct](CODE_OF_CONDUCT.md) in all of your interactions within the project.
## Build and test locally
Please follow the [instructions here](https://github.com/kubescape/kubescape/wiki/Building).
## Pull Request Process
1. Ensure any install or build dependencies are removed before the end of the layer when doing a
@@ -47,7 +51,7 @@ Add [`-s`](https://git-scm.com/docs/git-commit#Documentation/git-commit.txt--s)
```git commit -s -m "Fix issue 64738"```
This is tedious, and if you forget, you'll have to [amend your commit](#f)
This is tedious, and if you forget, you'll have to [amend your commit](#fixing-a-commit-where-the-dco-failed).
### Configure a repository to always include sign off

View File

@@ -10,6 +10,14 @@ libgit2:
-git submodule update --init --recursive
cd git2go; make install-static
# build and install libgit2 for macOS m1
libgit2arm64:
git submodule update --init --recursive
if [ "$(shell uname -s)" = "Darwin" ]; then \
sed -i '' 's/cmake -D/cmake -DCMAKE_OSX_ARCHITECTURES="arm64" -D/' git2go/script/build-libgit2.sh; \
fi
cd git2go; make install-static
# go build tags
TAGS = "gitenabled,static"

View File

@@ -37,11 +37,11 @@ curl -s https://raw.githubusercontent.com/kubescape/kubescape/master/install.sh
Learn more about:
* [Installing Kubescape](docs/getting-started.md#install-kubescape)
* [Installing Kubescape](docs/installation.md)
* [Running your first scan](docs/getting-started.md#run-your-first-scan)
* [Usage](docs/getting-started.md#examples)
* [Architecture](docs/architecture.md)
* [Building Kubescape from source](docs/building.md)
* [Building Kubescape from source](https://github.com/kubescape/kubescape/wiki/Building)
_Did you know you can use Kubescape in all these places?_
@@ -65,7 +65,7 @@ It retrieves Kubernetes objects from the API server and runs a set of [Rego snip
Kubescape is an open source project, we welcome your feedback and ideas for improvement. We are part of the Kubernetes community and are building more tests and controls as the ecosystem develops.
We hold [community meetings](https://us02web.zoom.us/j/84020231442) on Zoom, on the first Tuesday of every month, at 14:00 GMT.
We hold [community meetings](https://zoom.us/j/95174063585) on Zoom, on the first Tuesday of every month, at 14:00 GMT. ([See that in your local time zone](https://time.is/compare/1400_in_GMT)).
The Kubescape project follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).

View File

@@ -6,6 +6,7 @@ import subprocess
import tarfile
BASE_GETTER_CONST = "github.com/kubescape/kubescape/v2/core/cautils/getter"
CURRENT_PLATFORM = platform.system()
platformSuffixes = {
"Windows": "windows-latest",
@@ -24,11 +25,15 @@ def get_build_dir():
def get_package_name():
current_platform = platform.system()
if CURRENT_PLATFORM not in platformSuffixes: raise OSError("Platform %s is not supported!" % (CURRENT_PLATFORM))
if current_platform not in platformSuffixes: raise OSError("Platform %s is not supported!" % (current_platform))
# # TODO: kubescape-windows-latest is deprecated and should be removed
# if CURRENT_PLATFORM == "Windows": return "kubescape.exe"
return "kubescape-" + platformSuffixes[current_platform]
package_name = "kubescape-"
if os.getenv("GOARCH"):
package_name += os.getenv("GOARCH") + "-"
return package_name + platformSuffixes[CURRENT_PLATFORM]
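For clarity, the artifact-naming rule that get_package_name now implements, expressed as a small Go sketch (the function name and the example suffix are illustrative, not repository code):

```go
package main

import (
	"fmt"
	"os"
)

// Sketch of the naming rule above: "kubescape-" plus an optional
// "<GOARCH>-" infix plus a platform suffix.
func packageName(platformSuffix string) string {
	name := "kubescape-"
	if arch := os.Getenv("GOARCH"); arch != "" {
		name += arch + "-" // e.g. "arm64-" for the cross-built binaries
	}
	return name + platformSuffix
}

func main() {
	// Prints kubescape-ubuntu-latest, or kubescape-arm64-ubuntu-latest with GOARCH=arm64
	fmt.Println(packageName("ubuntu-latest"))
}
```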
def main():
@@ -76,7 +81,10 @@ def main():
kube_sha.write(sha256.hexdigest())
with tarfile.open(tar_file, 'w:gz') as archive:
archive.add(ks_file, "kubescape")
name = "kubescape"
if CURRENT_PLATFORM == "Windows":
name += ".exe"
archive.add(ks_file, name)
archive.add("LICENSE", "LICENSE")
print("Build Done")

View File

@@ -1,4 +1,4 @@
FROM golang:1.19-alpine as builder
FROM golang:1.20-alpine as builder
ARG image_version
ARG client

View File

@@ -15,8 +15,8 @@ var completionCmdExamples = fmt.Sprintf(`
$ echo 'source <(%[1]s completion bash)' >> ~/.bashrc
# Enable ZSH shell autocompletion
$ source <(kubectl completion zsh)
$ echo 'source <(kubectl completion zsh)' >> "${fpath[1]}/_kubectl"
$ source <(%[1]s completion zsh)
$ echo 'source <(%[1]s completion zsh)' >> "${fpath[1]}/_%[1]s"
`, cautils.ExecName())
func GetCompletionCmd() *cobra.Command {

View File

@@ -17,7 +17,7 @@ var fixCmdExamples = fmt.Sprintf(`
Use with caution, this command will change your files in-place.
# Fix kubernetes YAML manifest files based on a scan command output (output.json)
1) %[1]s scan --format json --format-version v2 --output output.json
1) %[1]s scan . --format json --output output.json
2) %[1]s fix output.json
`, cautils.ExecName())

View File

@@ -63,8 +63,6 @@ func GetListCmd(ks meta.IKubescape) *cobra.Command {
},
}
listCmd.PersistentFlags().StringVarP(&listPolicies.Credentials.Account, "account", "", "", "Kubescape SaaS account ID. Default will load account ID from cache")
listCmd.PersistentFlags().StringVarP(&listPolicies.Credentials.ClientID, "client-id", "", "", "Kubescape SaaS client ID. Default will load client ID from cache, read more - https://hub.armosec.io/docs/authentication")
listCmd.PersistentFlags().StringVarP(&listPolicies.Credentials.SecretKey, "secret-key", "", "", "Kubescape SaaS secret key. Default will load secret key from cache, read more - https://hub.armosec.io/docs/authentication")
listCmd.PersistentFlags().StringVar(&listPolicies.Format, "format", "pretty-print", "output format. supported: 'pretty-print'/'json'")
listCmd.PersistentFlags().MarkDeprecated("id", "Control ID's are included in list outputs")

View File

@@ -85,6 +85,11 @@ func initEnvironment() {
if len(urlSlices) >= 4 {
ksAuthURL = urlSlices[3]
}
getter.SetKSCloudAPIConnector(getter.NewKSCloudAPICustomized(ksEventReceiverURL, ksBackendURL, ksFrontendURL, ksAuthURL))
getter.SetKSCloudAPIConnector(getter.NewKSCloudAPICustomized(
ksBackendURL, ksAuthURL,
getter.WithReportURL(ksEventReceiverURL),
getter.WithFrontendURL(ksFrontendURL),
))
}
}
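The hunk above replaces the positional four-URL constructor with a functional-options call. For readers unfamiliar with the pattern, here is a minimal Go sketch; the option names WithReportURL and WithFrontendURL are taken from the diff, while the struct layout and constructor body are hypothetical:

```go
package main

type apiClient struct {
	backendURL  string
	authURL     string
	reportURL   string
	frontendURL string
}

type Option func(*apiClient)

func WithReportURL(u string) Option   { return func(c *apiClient) { c.reportURL = u } }
func WithFrontendURL(u string) Option { return func(c *apiClient) { c.frontendURL = u } }

// newKSCloudAPICustomized mimics the call shape in the diff: required URLs
// are positional, everything else is an option applied over the defaults.
func newKSCloudAPICustomized(backendURL, authURL string, opts ...Option) *apiClient {
	c := &apiClient{backendURL: backendURL, authURL: authURL}
	for _, opt := range opts {
		opt(c) // apply each optional setting
	}
	return c
}

func main() {
	_ = newKSCloudAPICustomized("https://api.example", "https://auth.example",
		WithReportURL("https://report.example"))
}
```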

View File

@@ -14,7 +14,6 @@ import (
"github.com/kubescape/kubescape/v2/core/cautils"
"github.com/kubescape/kubescape/v2/core/meta"
"github.com/enescakir/emoji"
"github.com/spf13/cobra"
)
@@ -106,11 +105,14 @@ func getControlCmd(ks meta.IKubescape, scanInfo *cautils.ScanInfo) *cobra.Comman
logger.L().Fatal(err.Error())
}
if !scanInfo.VerboseMode {
cautils.SimpleDisplay(os.Stderr, "%s Run with '--verbose'/'-v' flag for detailed resources view\n\n", emoji.Detective)
logger.L().Info("Run with '--verbose'/'-v' flag for detailed resources view\n")
}
if results.GetRiskScore() > float32(scanInfo.FailThreshold) {
logger.L().Fatal("scan risk-score is above permitted threshold", helpers.String("risk-score", fmt.Sprintf("%.2f", results.GetRiskScore())), helpers.String("fail-threshold", fmt.Sprintf("%.2f", scanInfo.FailThreshold)))
}
if results.GetComplianceScore() < float32(scanInfo.ComplianceThreshold) {
logger.L().Fatal("scan compliance-score is below permitted threshold", helpers.String("compliance score", fmt.Sprintf("%.2f", results.GetComplianceScore())), helpers.String("compliance-threshold", fmt.Sprintf("%.2f", scanInfo.ComplianceThreshold)))
}
enforceSeverityThresholds(results.GetResults().SummaryDetails.GetResourcesSeverityCounters(), scanInfo, terminateOnExceedingSeverity)
return nil

View File

@@ -119,11 +119,14 @@ func getFrameworkCmd(ks meta.IKubescape, scanInfo *cautils.ScanInfo) *cobra.Comm
logger.L().Fatal(err.Error())
}
if !scanInfo.VerboseMode {
cautils.SimpleDisplay(os.Stderr, "Run with '--verbose'/'-v' flag for detailed resources view\n\n")
logger.L().Info("Run with '--verbose'/'-v' flag for detailed resources view\n")
}
if results.GetRiskScore() > float32(scanInfo.FailThreshold) {
logger.L().Fatal("scan risk-score is above permitted threshold", helpers.String("risk-score", fmt.Sprintf("%.2f", results.GetRiskScore())), helpers.String("fail-threshold", fmt.Sprintf("%.2f", scanInfo.FailThreshold)))
}
if results.GetComplianceScore() < float32(scanInfo.ComplianceThreshold) {
logger.L().Fatal("scan compliance-score is below permitted threshold", helpers.String("compliance-score", fmt.Sprintf("%.2f", results.GetComplianceScore())), helpers.String("compliance-threshold", fmt.Sprintf("%.2f", scanInfo.ComplianceThreshold)))
}
enforceSeverityThresholds(results.GetData().Report.SummaryDetails.GetResourcesSeverityCounters(), scanInfo, terminateOnExceedingSeverity)
return nil
@@ -204,6 +207,9 @@ func validateFrameworkScanInfo(scanInfo *cautils.ScanInfo) error {
if scanInfo.Submit && scanInfo.Local {
return fmt.Errorf("you can use `keep-local` or `submit`, but not both")
}
if 100 < scanInfo.ComplianceThreshold || 0 > scanInfo.ComplianceThreshold {
return fmt.Errorf("bad argument: out of range threshold")
}
if 100 < scanInfo.FailThreshold || 0 > scanInfo.FailThreshold {
return fmt.Errorf("bad argument: out of range threshold")
}
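The two new checks cut in opposite directions, which is easy to misread; a one-line sketch (not the repository's code) of the combined semantics:

```go
package main

// A scan fails when the risk score climbs above --fail-threshold, or when
// the compliance score drops below --compliance-threshold.
func thresholdsViolated(risk, compliance, failThreshold, complianceThreshold float32) bool {
	return risk > failThreshold || compliance < complianceThreshold
}

func main() {
	_ = thresholdsViolated(42.0, 81.5, 100, 0) // false with the default thresholds
}
```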

View File

@@ -16,7 +16,7 @@ var scanCmdExamples = fmt.Sprintf(`
Scan command is for scanning an existing cluster or kubernetes manifest files based on pre-defined frameworks
# Scan current cluster with all frameworks
%[1]s scan --enable-host-scan --verbose
%[1]s scan
# Scan kubernetes YAML manifest files
%[1]s scan .
@@ -43,7 +43,7 @@ func GetScanCommand(ks meta.IKubescape) *cobra.Command {
Args: func(cmd *cobra.Command, args []string) error {
if len(args) > 0 {
if args[0] != "framework" && args[0] != "control" {
return getFrameworkCmd(ks, &scanInfo).RunE(cmd, append([]string{"all"}, args...))
return getFrameworkCmd(ks, &scanInfo).RunE(cmd, append([]string{strings.Join(getter.NativeFrameworks, ",")}, args...))
}
}
return nil
@@ -66,15 +66,14 @@ func GetScanCommand(ks meta.IKubescape) *cobra.Command {
scanCmd.PersistentFlags().StringVarP(&scanInfo.Credentials.Account, "account", "", "", "Kubescape SaaS account ID. Default will load account ID from cache")
scanCmd.PersistentFlags().BoolVar(&scanInfo.CreateAccount, "create-account", false, "Create a Kubescape SaaS account if an account ID is not found in cache. After creating the account, the account ID will be saved in cache. In addition, the scanning results will be uploaded to the Kubescape SaaS")
scanCmd.PersistentFlags().StringVarP(&scanInfo.Credentials.ClientID, "client-id", "", "", "Kubescape SaaS client ID. Default will load client ID from cache, read more - https://hub.armosec.io/docs/authentication")
scanCmd.PersistentFlags().StringVarP(&scanInfo.Credentials.SecretKey, "secret-key", "", "", "Kubescape SaaS secret key. Default will load secret key from cache, read more - https://hub.armosec.io/docs/authentication")
scanCmd.PersistentFlags().StringVarP(&scanInfo.KubeContext, "kube-context", "", "", "Kube context. Default will use the current-context")
scanCmd.PersistentFlags().StringVar(&scanInfo.ControlsInputs, "controls-config", "", "Path to a controls-config obj. If not set will download controls-config from ARMO management portal")
scanCmd.PersistentFlags().StringVar(&scanInfo.UseExceptions, "exceptions", "", "Path to an exceptions obj. If not set will download exceptions from ARMO management portal")
scanCmd.PersistentFlags().StringVar(&scanInfo.UseArtifactsFrom, "use-artifacts-from", "", "Load artifacts from local directory. If not used will download them")
scanCmd.PersistentFlags().StringVarP(&scanInfo.ExcludedNamespaces, "exclude-namespaces", "e", "", "Namespaces to exclude from scanning. Notice, when running with `exclude-namespace` kubescape does not scan cluster-scoped objects.")
scanCmd.PersistentFlags().StringVarP(&scanInfo.ExcludedNamespaces, "exclude-namespaces", "e", "", "Namespaces to exclude from scanning, e.g. --exclude-namespaces ns-a,ns-b. Note: when running with `--exclude-namespaces`, kubescape does not scan cluster-scoped objects.")
scanCmd.PersistentFlags().Float32VarP(&scanInfo.FailThreshold, "fail-threshold", "t", 100, "Failure threshold is the percent above which the command fails and returns exit code 1")
scanCmd.PersistentFlags().Float32VarP(&scanInfo.ComplianceThreshold, "compliance-threshold", "", 0, "Compliance threshold is the percent below which the command fails and returns exit code 1")
scanCmd.PersistentFlags().StringVar(&scanInfo.FailThresholdSeverity, "severity-threshold", "", "Severity threshold is the severity of failed controls at which the command fails and returns exit code 1")
scanCmd.PersistentFlags().StringVarP(&scanInfo.Format, "format", "f", "", `Output file format. Supported formats: "pretty-printer", "json", "junit", "prometheus", "pdf", "html", "sarif"`)
@@ -93,9 +92,14 @@ func GetScanCommand(ks meta.IKubescape) *cobra.Command {
scanCmd.PersistentFlags().BoolVarP(&scanInfo.PrintAttackTree, "print-attack-tree", "", false, "Print attack tree")
scanCmd.PersistentFlags().MarkDeprecated("silent", "use '--logger' flag instead. Flag will be removed at 1.May.2022")
scanCmd.PersistentFlags().MarkDeprecated("fail-threshold", "use '--compliance-threshold' flag instead. Flag will be removed at 1.Dec.2023")
scanCmd.PersistentFlags().StringVarP(&scanInfo.Credentials.ClientID, "client-id", "", "", "Kubescape SaaS client ID. Default will load client ID from cache, read more - https://hub.armosec.io/docs/authentication")
scanCmd.PersistentFlags().StringVarP(&scanInfo.Credentials.SecretKey, "secret-key", "", "", "Kubescape SaaS secret key. Default will load secret key from cache, read more - https://hub.armosec.io/docs/authentication")
scanCmd.PersistentFlags().MarkDeprecated("client-id", "login to Kubescape SaaS will be unsupported, please contact the Kubescape maintainers for more information")
scanCmd.PersistentFlags().MarkDeprecated("secret-key", "login to Kubescape SaaS will be unsupported, please contact the Kubescape maintainers for more information")
// hidden flags
scanCmd.PersistentFlags().MarkHidden("host-scan-yaml") // this flag should be used very cautiously. We prefer users will not use it at all unless the DaemonSet can not run pods on the nodes
scanCmd.PersistentFlags().MarkHidden("omit-raw-resources")
scanCmd.PersistentFlags().MarkHidden("print-attack-tree")
@@ -105,9 +109,15 @@ func GetScanCommand(ks meta.IKubescape) *cobra.Command {
hostF := scanCmd.PersistentFlags().VarPF(&scanInfo.HostSensorEnabled, "enable-host-scan", "", "Deploy Kubescape host-sensor daemonset in the scanned cluster. It is deleted right after the data is collected. Required to collect valuable data from cluster nodes for certain controls. Yaml file: https://github.com/kubescape/kubescape/blob/master/core/pkg/hostsensorutils/hostsensor.yaml")
hostF.NoOptDefVal = "true"
hostF.DefValue = "false, for no TTY in stdin"
scanCmd.PersistentFlags().MarkHidden("enable-host-scan")
scanCmd.PersistentFlags().MarkDeprecated("enable-host-scan", "To activate the host scanner capability, proceed with the installation of the kubescape operator chart found here: https://github.com/kubescape/helm-charts/tree/main/charts/kubescape-cloud-operator. The flag will be removed at 1.Dec.2023")
scanCmd.PersistentFlags().MarkHidden("host-scan-yaml") // this flag should be used very cautiously. We prefer users will not use it at all unless the DaemonSet can not run pods on the nodes
scanCmd.PersistentFlags().MarkDeprecated("host-scan-yaml", "To activate the host scanner capability, proceed with the installation of the kubescape operator chart found here: https://github.com/kubescape/helm-charts/tree/main/charts/kubescape-cloud-operator. The flag will be removed at 1.Dec.2023")
scanCmd.AddCommand(getControlCmd(ks, &scanInfo))
scanCmd.AddCommand(getFrameworkCmd(ks, &scanInfo))
scanCmd.AddCommand(getWorkloadCmd(ks, &scanInfo))
return scanCmd
}

cmd/scan/workload.go (new file, 75 lines)
View File

@@ -0,0 +1,75 @@
package scan
import (
"context"
"fmt"
"strings"
logger "github.com/kubescape/go-logger"
"github.com/kubescape/kubescape/v2/core/cautils"
"github.com/kubescape/kubescape/v2/core/meta"
"github.com/spf13/cobra"
)
var (
workloadExample = fmt.Sprintf(`
# Scan a workload
%[1]s scan workload <kind>/<name>
# Scan a workload in a specific namespace
%[1]s scan workload <kind>/<name> --namespace <namespace>
# Scan a workload from a file path
%[1]s scan workload <kind>/<name> --file-path <file path>
# Scan a workload from a helm-chart template
%[1]s scan workload <kind>/<name> --chart-path <chart path>
`, cautils.ExecName())
)
var namespace string
// controlCmd represents the control command
func getWorkloadCmd(ks meta.IKubescape, scanInfo *cautils.ScanInfo) *cobra.Command {
workloadCmd := &cobra.Command{
Use: "workload <kind>/<name> [`<glob pattern>`/`-`] [flags]",
Short: fmt.Sprint("The workload you wish to scan"),
Example: workloadExample,
Args: func(cmd *cobra.Command, args []string) error {
if len(args) != 1 {
return fmt.Errorf("usage: <kind>/<name>")
}
wlIdentifier := strings.Split(args[0], "/")
if len(wlIdentifier) != 2 || wlIdentifier[0] == "" || wlIdentifier[1] == "" {
return fmt.Errorf("usage: <kind>/<name>")
}
return nil
},
RunE: func(cmd *cobra.Command, args []string) error {
var wlIdentifier string
if namespace != "" {
wlIdentifier = fmt.Sprintf("%s/", namespace)
}
wlIdentifier += args[0]
scanInfo.WorkloadIdentifier = wlIdentifier
ctx := context.TODO()
_, err := ks.Scan(ctx, scanInfo)
if err != nil {
logger.L().Fatal(err.Error())
}
return nil
},
}
workloadCmd.PersistentFlags().StringVarP(&namespace, "namespace", "n", "", "Namespace of the workload. Default will be empty.")
return workloadCmd
}

View File

@@ -94,6 +94,9 @@ type ITenantConfig interface {
// ============================ Local Config ============================================
// ======================================================================================
// Config when scanning YAML files or URL but not a Kubernetes cluster
var _ ITenantConfig = &LocalConfig{}
type LocalConfig struct {
backendAPI getter.IBackend
configObj *ConfigObj
@@ -146,6 +149,8 @@ func NewLocalConfig(
}
logger.L().Debug("Kubescape Cloud URLs", helpers.String("api", lc.backendAPI.GetCloudAPIURL()), helpers.String("auth", lc.backendAPI.GetCloudAuthURL()), helpers.String("report", lc.backendAPI.GetCloudReportURL()), helpers.String("UI", lc.backendAPI.GetCloudUIURL()))
initializeCloudAPI(lc)
return lc
}
@@ -220,6 +225,8 @@ KS_SECRET_KEY
TODO - support:
KS_CACHE // path to cached files
*/
var _ ITenantConfig = &ClusterConfig{}
type ClusterConfig struct {
backendAPI getter.IBackend
k8s *k8sinterface.KubernetesApi
@@ -235,7 +242,7 @@ func NewClusterConfig(k8s *k8sinterface.KubernetesApi, backendAPI getter.IBacken
backendAPI: backendAPI,
configObj: &ConfigObj{},
configMapName: getConfigMapName(),
configMapNamespace: getConfigMapNamespace(),
configMapNamespace: GetConfigMapNamespace(),
}
// first, load from configMap
@@ -288,6 +295,8 @@ func NewClusterConfig(k8s *k8sinterface.KubernetesApi, backendAPI getter.IBacken
}
logger.L().Debug("Kubescape Cloud URLs", helpers.String("api", c.backendAPI.GetCloudAPIURL()), helpers.String("auth", c.backendAPI.GetCloudAuthURL()), helpers.String("report", c.backendAPI.GetCloudReportURL()), helpers.String("UI", c.backendAPI.GetCloudUIURL()))
initializeCloudAPI(c)
return c
}
@@ -548,7 +557,8 @@ func getConfigMapName() string {
return "kubescape"
}
func getConfigMapNamespace() string {
// GetConfigMapNamespace returns the namespace of the cluster config, which is the same for all in-cluster components
func GetConfigMapNamespace() string {
if n := os.Getenv("KS_DEFAULT_CONFIGMAP_NAMESPACE"); n != "" {
return n
}
@@ -622,3 +632,15 @@ func updateCloudURLs(configObj *ConfigObj) {
}
}
func initializeCloudAPI(c ITenantConfig) {
cloud := getter.GetKSCloudAPIConnector()
cloud.SetAccountID(c.GetAccountID())
cloud.SetClientID(c.GetClientID())
cloud.SetSecretKey(c.GetSecretKey())
cloud.SetCloudAuthURL(c.GetCloudAuthURL())
cloud.SetCloudReportURL(c.GetCloudReportURL())
cloud.SetCloudUIURL(c.GetCloudUIURL())
cloud.SetCloudAPIURL(c.GetCloudAPIURL())
getter.SetKSCloudAPIConnector(cloud)
}

View File

@@ -5,6 +5,7 @@ import (
"os"
"testing"
"github.com/kubescape/kubescape/v2/core/cautils/getter"
"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
)
@@ -268,3 +269,64 @@ func TestUpdateCloudURLs(t *testing.T) {
updateCloudURLs(co)
assert.Equal(t, co.CloudAPIURL, mockCloudAPIURL)
}
func Test_initializeCloudAPI(t *testing.T) {
type args struct {
c ITenantConfig
}
tests := []struct {
name string
args args
}{
{
name: "test",
args: args{
c: mockClusterConfig(),
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
initializeCloudAPI(tt.args.c)
cloud := getter.GetKSCloudAPIConnector()
assert.Equal(t, tt.args.c.GetCloudAPIURL(), cloud.GetCloudAPIURL())
assert.Equal(t, tt.args.c.GetCloudAuthURL(), cloud.GetCloudAuthURL())
assert.Equal(t, tt.args.c.GetCloudUIURL(), cloud.GetCloudUIURL())
assert.Equal(t, tt.args.c.GetCloudReportURL(), cloud.GetCloudReportURL())
assert.Equal(t, tt.args.c.GetAccountID(), cloud.GetAccountID())
assert.Equal(t, tt.args.c.GetClientID(), cloud.GetClientID())
assert.Equal(t, tt.args.c.GetSecretKey(), cloud.GetSecretKey())
})
}
}
func TestGetConfigMapNamespace(t *testing.T) {
tests := []struct {
name string
env string
want string
}{
{
name: "no env",
want: "default",
},
{
name: "default ns",
env: "kubescape",
want: "kubescape",
},
{
name: "custom ns",
env: "my-ns",
want: "my-ns",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if tt.env != "" {
t.Setenv("KS_DEFAULT_CONFIGMAP_NAMESPACE", tt.env) // restored after the subtest, so cases don't leak into each other
}
assert.Equalf(t, tt.want, GetConfigMapNamespace(), "GetConfigMapNamespace()")
})
}
}

View File

@@ -6,6 +6,8 @@ import (
spinnerpkg "github.com/briandowns/spinner"
"github.com/fatih/color"
logger "github.com/kubescape/go-logger"
"github.com/kubescape/go-logger/helpers"
"github.com/mattn/go-isatty"
"github.com/schollz/progressbar/v3"
)
@@ -22,6 +24,10 @@ var DescriptionDisplay = color.New(color.Faint, color.FgWhite).FprintfFunc()
var spinner *spinnerpkg.Spinner
func StartSpinner() {
if helpers.ToLevel(logger.L().GetLevel()) >= helpers.WarningLevel {
return
}
if spinner != nil {
if !spinner.Active() {
spinner.Start()
@@ -42,8 +48,8 @@ func StopSpinner() {
}
type ProgressHandler struct {
title string
pb *progressbar.ProgressBar
title string
}
func NewProgressHandler(title string) *ProgressHandler {
@@ -51,11 +57,11 @@ func NewProgressHandler(title string) *ProgressHandler {
}
func (p *ProgressHandler) Start(allSteps int) {
if isatty.IsTerminal(os.Stderr.Fd()) {
p.pb = progressbar.Default(int64(allSteps), p.title)
} else {
if !isatty.IsTerminal(os.Stderr.Fd()) || helpers.ToLevel(logger.L().GetLevel()) >= helpers.WarningLevel {
p.pb = progressbar.DefaultSilent(int64(allSteps), p.title)
return
}
p.pb = progressbar.Default(int64(allSteps), p.title)
}
func (p *ProgressHandler) ProgressJob(step int, message string) {

View File

@@ -0,0 +1,32 @@
package cautils
import (
"testing"
"github.com/kubescape/go-logger"
)
func TestStartSpinner(t *testing.T) {
tests := []struct {
name string
loggerLevel string
enabled bool
}{
{
name: "TestStartSpinner - disabled",
loggerLevel: "warning",
enabled: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
logger.L().SetLevel(tt.loggerLevel)
StartSpinner()
if !tt.enabled {
if spinner != nil {
t.Errorf("spinner should be nil")
}
}
})
}
}

View File

@@ -48,7 +48,7 @@ func LoadResourcesFromHelmCharts(ctx context.Context, basePath string) (map[stri
if err == nil {
wls, errs := chart.GetWorkloadsWithDefaultValues()
if len(errs) > 0 {
logger.L().Ctx(ctx).Error(fmt.Sprintf("Rendering of Helm chart template '%s', failed: %v", chart.GetName(), errs))
logger.L().Ctx(ctx).Warning(fmt.Sprintf("Rendering of Helm chart template '%s', failed: %v", chart.GetName(), errs))
continue
}
@@ -88,7 +88,7 @@ func LoadResourcesFromKustomizeDirectory(ctx context.Context, basePath string) (
kustomizeDirectoryName := GetKustomizeDirectoryName(newBasePath)
if len(errs) > 0 {
logger.L().Ctx(ctx).Error(fmt.Sprintf("Rendering yaml from Kustomize failed: %v", errs))
logger.L().Ctx(ctx).Warning(fmt.Sprintf("Rendering yaml from Kustomize failed: %v", errs))
}
for k, v := range wls {
@@ -100,15 +100,16 @@ func LoadResourcesFromKustomizeDirectory(ctx context.Context, basePath string) (
func LoadResourcesFromFiles(ctx context.Context, input, rootPath string) map[string][]workloadinterface.IMetadata {
files, errs := listFiles(input)
if len(errs) > 0 {
logger.L().Ctx(ctx).Error(fmt.Sprintf("%v", errs))
logger.L().Ctx(ctx).Warning(fmt.Sprintf("%v", errs))
}
if len(files) == 0 {
logger.L().Ctx(ctx).Error("no files found to scan", helpers.String("input", input))
return nil
}
workloads, errs := loadFiles(rootPath, files)
if len(errs) > 0 {
logger.L().Ctx(ctx).Error(fmt.Sprintf("%v", errs))
logger.L().Ctx(ctx).Warning(fmt.Sprintf("%v", errs))
}
return workloads

View File

@@ -1,24 +1,61 @@
package getter
type FeLoginData struct {
Secret string `json:"secret"`
ClientId string `json:"clientId"`
}
import (
"github.com/armosec/armoapi-go/armotypes"
"github.com/kubescape/opa-utils/reporthandling"
"github.com/kubescape/opa-utils/reporthandling/attacktrack/v1alpha1"
reporthandlingv2 "github.com/kubescape/opa-utils/reporthandling/v2"
)
type FeLoginResponse struct {
Token string `json:"accessToken"`
RefreshToken string `json:"refreshToken"`
Expires string `json:"expires"`
ExpiresIn int32 `json:"expiresIn"`
}
// NativeFrameworks identifies all pre-built, native frameworks.
var NativeFrameworks = []string{"allcontrols", "nsa", "mitre"}
type KSCloudSelectCustomer struct {
SelectedCustomerGuid string `json:"selectedCustomer"`
}
type (
// TenantResponse holds the credentials for a tenant.
TenantResponse struct {
TenantID string `json:"tenantId"`
Token string `json:"token"`
Expires string `json:"expires"`
AdminMail string `json:"adminMail,omitempty"`
}
type TenantResponse struct {
TenantID string `json:"tenantId"`
Token string `json:"token"`
Expires string `json:"expires"`
AdminMail string `json:"adminMail,omitempty"`
}
// AttackTrack is an alias to the API type definition for attack tracks.
AttackTrack = v1alpha1.AttackTrack
// Framework is an alias to the API type definition for a framework.
Framework = reporthandling.Framework
// Control is an alias to the API type definition for a control.
Control = reporthandling.Control
// PostureExceptionPolicy is an alias to the API type definition for posture exception policy.
PostureExceptionPolicy = armotypes.PostureExceptionPolicy
// CustomerConfig is an alias to the API type definition for a customer configuration.
CustomerConfig = armotypes.CustomerConfig
// PostureReport is an alias to the API type definition for a posture report.
PostureReport = reporthandlingv2.PostureReport
)
type (
// internal data descriptors
// feLoginData describes the input to a login challenge.
feLoginData struct {
Secret string `json:"secret"`
ClientId string `json:"clientId"`
}
// feLoginResponse describes the response to a login challenge.
feLoginResponse struct {
Token string `json:"accessToken"`
RefreshToken string `json:"refreshToken"`
Expires string `json:"expires"`
ExpiresIn int32 `json:"expiresIn"`
}
ksCloudSelectCustomer struct {
SelectedCustomerGuid string `json:"selectedCustomer"`
}
)
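The rewrite above re-exports the API types as aliases (note the =) rather than new type definitions. A toy sketch, with hypothetical names, of why that distinction matters for call-site compatibility:

```go
package main

type widget struct{ name string }

type Widget = widget // alias: Widget and widget are one and the same type
type Gadget widget   // definition: Gadget is a new, distinct type

func main() {
	w := widget{name: "w"}
	var a Widget = w // no conversion needed across an alias
	g := Gadget(w)   // a defined type requires an explicit conversion
	_ = a
	_ = g
}
```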

View File

@@ -0,0 +1,8 @@
// Package getter provides functionality to retrieve policy objects.
//
// It comes with 3 implementations:
//
// * KSCloudAPI is a client for the KS Cloud SaaS API
// * LoadPolicy exposes policy objects stored in a local repository
// * DownloadReleasedPolicy downloads policy objects from the policy library released on GitHub: https://github.com/kubescape/regolibrary
package getter

View File

@@ -14,6 +14,12 @@ import (
// =======================================================================================================================
// ======================================== DownloadReleasedPolicy =======================================================
// =======================================================================================================================
var (
_ IPolicyGetter = &DownloadReleasedPolicy{}
_ IExceptionsGetter = &DownloadReleasedPolicy{}
_ IAttackTracksGetter = &DownloadReleasedPolicy{}
_ IControlsInputsGetter = &DownloadReleasedPolicy{}
)
// Use gitregostore to get policies from github release
type DownloadReleasedPolicy struct {
@@ -72,12 +78,12 @@ func (drp *DownloadReleasedPolicy) ListControls() ([]string, error) {
}
var controlsFrameworksList [][]string
for _, control := range controls {
controlsFrameworksList = append(controlsFrameworksList, control.FrameworkNames)
controlsFrameworksList = append(controlsFrameworksList, drp.gs.GetOpaFrameworkListByControlID(control.ControlID))
}
controlsNamesWithIDsandFrameworksList := make([]string, len(controlsIDsList))
// by design all slices have the same length
for i := range controlsIDsList {
controlsNamesWithIDsandFrameworksList[i] = fmt.Sprintf("%v|%v|%v", controlsIDsList[i], controlsNamesList[i], strings.Join(controlsFrameworksList[i], ","))
controlsNamesWithIDsandFrameworksList[i] = fmt.Sprintf("%v|%v|%v", controlsIDsList[i], controlsNamesList[i], strings.Join(controlsFrameworksList[i], ", "))
}
return controlsNamesWithIDsandFrameworksList, nil
}

View File

@@ -9,6 +9,8 @@ import (
"strings"
"testing"
"github.com/kubescape/kubescape/v2/internal/testutils"
jsoniter "github.com/json-iterator/go"
"github.com/stretchr/testify/require"
)
@@ -32,7 +34,7 @@ func TestReleasedPolicy(t *testing.T) {
require.NoError(t, err)
require.NotEmpty(t, controlIDs)
sampleSize := min(len(controlIDs), 10)
sampleSize := int(min(int64(len(controlIDs)), 10))
for _, toPin := range controlIDs[:sampleSize] {
// Example of a returned "ID": `C-0154|Ensure_that_the_--client-cert-auth_argument_is_set_to_true|`
@@ -128,14 +130,6 @@ func TestReleasedPolicy(t *testing.T) {
})
}
func min(a, b int) int {
if a > b {
return b
}
return a
}
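The local min helper is dropped and the call site now applies min to int64 values; where that min comes from is not shown in this diff. For reference, a generic sketch that subsumes per-type helpers, assuming Go 1.21+:

```go
package main

import "cmp"

// minOf works for any ordered type, so one helper replaces per-type
// functions like the removed int version. (Go 1.21 also ships a built-in
// min; this diff does not show which one the new call site uses.)
func minOf[T cmp.Ordered](a, b T) T {
	if a < b {
		return a
	}
	return b
}

func main() {
	_ = minOf(len("abc"), 10) // 3
}
```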
func hydrateReleasedPolicyFromMock(t testing.TB, p *DownloadReleasedPolicy) {
regoFile := testRegoFile("policy")
@@ -161,10 +155,10 @@ func hydrateReleasedPolicyFromMock(t testing.TB, p *DownloadReleasedPolicy) {
require.NoError(t, err)
require.NoError(t,
json.Unmarshal(buf, p.gs),
jsoniter.Unmarshal(buf, p.gs),
)
}
func testRegoFile(framework string) string {
return filepath.Join(currentDir(), "testdata", fmt.Sprintf("%s.json", framework))
return filepath.Join(testutils.CurrentDir(), "testdata", fmt.Sprintf("%s.json", framework))
}

View File

@@ -1,47 +0,0 @@
package getter
import (
"github.com/armosec/armoapi-go/armotypes"
"github.com/kubescape/opa-utils/reporthandling"
"github.com/kubescape/opa-utils/reporthandling/attacktrack/v1alpha1"
)
type IPolicyGetter interface {
GetFramework(name string) (*reporthandling.Framework, error)
GetFrameworks() ([]reporthandling.Framework, error)
GetControl(ID string) (*reporthandling.Control, error)
ListFrameworks() ([]string, error)
ListControls() ([]string, error)
}
type IExceptionsGetter interface {
GetExceptions(clusterName string) ([]armotypes.PostureExceptionPolicy, error)
}
type IBackend interface {
GetAccountID() string
GetClientID() string
GetSecretKey() string
GetCloudReportURL() string
GetCloudAPIURL() string
GetCloudUIURL() string
GetCloudAuthURL() string
SetAccountID(accountID string)
SetClientID(clientID string)
SetSecretKey(secretKey string)
SetCloudReportURL(cloudReportURL string)
SetCloudAPIURL(cloudAPIURL string)
SetCloudUIURL(cloudUIURL string)
SetCloudAuthURL(cloudAuthURL string)
GetTenant() (*TenantResponse, error)
}
type IControlsInputsGetter interface {
GetControlsInputs(clusterName string) (map[string][]string, error)
}
type IAttackTracksGetter interface {
GetAttackTracks() ([]v1alpha1.AttackTrack, error)
}

View File

@@ -10,19 +10,23 @@ import (
"strings"
)
// GetDefaultPath returns a location under the local dot files for kubescape.
//
// This is typically located under $HOME/.kubescape
func GetDefaultPath(name string) string {
return filepath.Join(DefaultLocalStore, name)
}
func SaveInFile(policy interface{}, pathStr string) error {
encodedData, err := json.MarshalIndent(policy, "", " ")
// SaveInFile serializes any object as a JSON file.
func SaveInFile(object interface{}, targetFile string) error {
encodedData, err := json.MarshalIndent(object, "", " ")
if err != nil {
return err
}
err = os.WriteFile(pathStr, encodedData, 0644) //nolint:gosec
err = os.WriteFile(targetFile, encodedData, 0644) //nolint:gosec
if err != nil {
if os.IsNotExist(err) {
pathDir := filepath.Dir(pathStr)
pathDir := filepath.Dir(targetFile)
// pathDir could contain subdirectories
if erm := os.MkdirAll(pathDir, 0755); erm != nil {
return erm
@@ -31,7 +35,7 @@ func SaveInFile(policy interface{}, pathStr string) error {
return err
}
err = os.WriteFile(pathStr, encodedData, 0644) //nolint:gosec
err = os.WriteFile(targetFile, encodedData, 0644) //nolint:gosec
if err != nil {
return err
}
@@ -39,6 +43,9 @@ func SaveInFile(policy interface{}, pathStr string) error {
return nil
}
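A hypothetical call site for the two helpers documented above, written as if inside package getter; the frameworks subdirectory and policy struct are invented for illustration:

```go
package getter

type examplePolicy struct {
	Name string `json:"name"`
}

func saveExamplePolicy() error {
	target := GetDefaultPath("frameworks/nsa.json") // lands under $HOME/.kubescape
	// SaveInFile pretty-prints the object as JSON and, per the hunk above,
	// creates missing parent directories on demand.
	return SaveInFile(examplePolicy{Name: "nsa"}, target)
}
```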
// HttpDelete provides a low-level capability to send a HTTP DELETE request and serialize the response as a string.
//
// Deprecated: use methods of the KSCloudAPI client instead.
func HttpDelete(httpClient *http.Client, fullURL string, headers map[string]string) (string, error) {
req, err := http.NewRequest("DELETE", fullURL, nil)
@@ -58,8 +65,10 @@ func HttpDelete(httpClient *http.Client, fullURL string, headers map[string]stri
return respStr, nil
}
// HttpGetter provides a low-level capability to send a HTTP GET request and serialize the response as a string.
//
// Deprecated: use methods of the KSCloudAPI client instead.
func HttpGetter(httpClient *http.Client, fullURL string, headers map[string]string) (string, error) {
req, err := http.NewRequest("GET", fullURL, nil)
if err != nil {
return "", err
@@ -77,8 +86,10 @@ func HttpGetter(httpClient *http.Client, fullURL string, headers map[string]stri
return respStr, nil
}
// HttpPost provides a low-level capability to send a HTTP POST request and serialize the response as a string.
//
// Deprecated: use methods of the KSCloudAPI client instead.
func HttpPost(httpClient *http.Client, fullURL string, headers map[string]string, body []byte) (string, error) {
req, err := http.NewRequest("POST", fullURL, bytes.NewReader(body))
if err != nil {
return "", err
@@ -103,7 +114,7 @@ func setHeaders(req *http.Request, headers map[string]string) {
}
}
// HTTPRespToString parses the body as string and checks the HTTP status code, it closes the body reader at the end
// httpRespToString parses the body as string and checks the HTTP status code, it closes the body reader at the end
func httpRespToString(resp *http.Response) (string, error) {
if resp == nil || resp.Body == nil {
return "", nil
@@ -113,6 +124,7 @@ func httpRespToString(resp *http.Response) (string, error) {
if resp.ContentLength > 0 {
strBuilder.Grow(int(resp.ContentLength))
}
_, err := io.Copy(&strBuilder, resp.Body)
respStr := strBuilder.String()
if err != nil {

View File

@@ -1,6 +1,7 @@
package getter
import (
"net/http"
"os"
"path/filepath"
"testing"
@@ -66,3 +67,31 @@ func TestSaveInFile(t *testing.T) {
require.Error(t, SaveInFile(badPolicy, target))
})
}
func TestHttpMethods(t *testing.T) {
client := http.DefaultClient
hdrs := map[string]string{"key": "value"}
srv := mockAPIServer(t)
t.Cleanup(srv.Close)
t.Run("HttpGetter should GET", func(t *testing.T) {
resp, err := HttpGetter(client, srv.URL(pathTestGet), hdrs)
require.NoError(t, err)
require.EqualValues(t, "body-get", resp)
})
t.Run("HttpPost should POST", func(t *testing.T) {
body := []byte("body-post")
resp, err := HttpPost(client, srv.URL(pathTestPost), hdrs, body)
require.NoError(t, err)
require.EqualValues(t, string(body), resp)
})
t.Run("HttpDelete should DELETE", func(t *testing.T) {
resp, err := HttpDelete(client, srv.URL(pathTestDelete), hdrs)
require.NoError(t, err)
require.EqualValues(t, "body-delete", resp)
})
}

View File

@@ -0,0 +1,55 @@
package getter
import (
"github.com/armosec/armoapi-go/armotypes"
"github.com/kubescape/opa-utils/reporthandling"
"github.com/kubescape/opa-utils/reporthandling/attacktrack/v1alpha1"
)
type (
// IPolicyGetter knows how to retrieve policies, i.e. frameworks and their controls.
IPolicyGetter interface {
GetFramework(name string) (*reporthandling.Framework, error)
GetFrameworks() ([]reporthandling.Framework, error)
GetControl(ID string) (*reporthandling.Control, error)
ListFrameworks() ([]string, error)
ListControls() ([]string, error)
}
// IExceptionsGetter knows how to retrieve exceptions.
IExceptionsGetter interface {
GetExceptions(clusterName string) ([]armotypes.PostureExceptionPolicy, error)
}
// IControlsInputsGetter knows how to retrieve controls inputs.
IControlsInputsGetter interface {
GetControlsInputs(clusterName string) (map[string][]string, error)
}
// IAttackTracksGetter knows how to retrieve attack tracks.
IAttackTracksGetter interface {
GetAttackTracks() ([]v1alpha1.AttackTrack, error)
}
// IBackend knows how to configure a KS Cloud client
IBackend interface {
GetAccountID() string
GetClientID() string
GetSecretKey() string
GetCloudReportURL() string
GetCloudAPIURL() string
GetCloudUIURL() string
GetCloudAuthURL() string
SetAccountID(accountID string)
SetClientID(clientID string)
SetSecretKey(secretKey string)
SetCloudReportURL(cloudReportURL string)
SetCloudAPIURL(cloudAPIURL string)
SetCloudUIURL(cloudUIURL string)
SetCloudAuthURL(cloudAuthURL string)
GetTenant() (*TenantResponse, error)
}
)
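Since all three implementations named in the package comment satisfy these interfaces, a consumer can stay implementation-agnostic. A minimal sketch against IPolicyGetter:

```go
package getter

import "fmt"

// printFrameworkNames depends only on the IPolicyGetter interface above,
// so it works unchanged with KSCloudAPI, LoadPolicy, or
// DownloadReleasedPolicy.
func printFrameworkNames(g IPolicyGetter) error {
	names, err := g.ListFrameworks()
	if err != nil {
		return err
	}
	for _, name := range names {
		fmt.Println(name)
	}
	return nil
}
```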

View File

@@ -1,16 +1,13 @@
package getter
import (
"io"
"strings"
stdjson "encoding/json"
jsoniter "github.com/json-iterator/go"
)
var (
json jsoniter.API
)
var json jsoniter.API
func init() {
// NOTE(fredbi): attention, this configuration rounds floats down to 6 digits
@@ -18,9 +15,24 @@ func init() {
json = jsoniter.ConfigFastest
}
// JSONDecoder returns JSON decoder for given string
func JSONDecoder(origin string) *stdjson.Decoder {
dec := stdjson.NewDecoder(strings.NewReader(origin))
// JSONDecoder provides a low-level utility that returns a JSON decoder for given string.
//
// Deprecated: use higher level methods from the KSCloudAPI client instead.
func JSONDecoder(origin string) *jsoniter.Decoder {
dec := jsoniter.NewDecoder(strings.NewReader(origin))
dec.UseNumber()
return dec
}
func decode[T any](rdr io.Reader) (T, error) {
var receiver T
dec := newDecoder(rdr)
err := dec.Decode(&receiver)
return receiver, err
}
func newDecoder(rdr io.Reader) *jsoniter.Decoder {
return json.NewDecoder(rdr)
}
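A hypothetical call site for the new generic decode helper, showing how the type parameter removes the need to pre-declare a receiver variable; tenantStub is illustrative only:

```go
package getter

import (
	"io"
	"strings"
)

type tenantStub struct {
	TenantID string `json:"tenantId"`
}

// decodeTenant names the target type once, in the type parameter.
func decodeTenant(r io.Reader) (tenantStub, error) {
	return decode[tenantStub](r)
}

func exampleDecode() (tenantStub, error) {
	return decodeTenant(strings.NewReader(`{"tenantId":"1234"}`))
}
```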

File diff suppressed because it is too large

View File

@@ -1,9 +1,16 @@
package getter
import (
"os"
"path/filepath"
"testing"
"github.com/armosec/armoapi-go/armotypes"
jsoniter "github.com/json-iterator/go"
"github.com/kubescape/kubescape/v2/internal/testutils"
"github.com/kubescape/opa-utils/reporthandling"
"github.com/kubescape/opa-utils/reporthandling/attacktrack/v1alpha1"
"github.com/stretchr/testify/require"
)
func mockAttackTracks() []v1alpha1.AttackTrack {
@@ -263,11 +270,25 @@ func mockCustomerConfig(cluster, scope string) func() *armotypes.CustomerConfig
}
}
func mockLoginResponse() *FeLoginResponse {
return &FeLoginResponse{
func mockLoginResponse() *feLoginResponse {
return &feLoginResponse{
Token: "access-token",
RefreshToken: "refresh-token",
Expires: "expiry-time",
ExpiresIn: 123,
}
}
func mockPostureReport(t testing.TB, reportID, cluster string) *PostureReport {
fixture := filepath.Join(testutils.CurrentDir(), "testdata", "mock_posture_report.json")
buf, err := os.ReadFile(fixture)
require.NoError(t, err)
var report PostureReport
require.NoError(t,
jsoniter.Unmarshal(buf, &report),
)
return &report
}

File diff suppressed because it is too large

View File

@@ -1,199 +0,0 @@
package getter
import (
"bytes"
"fmt"
"net/http"
"net/url"
"strings"
)
var NativeFrameworks = []string{"allcontrols", "nsa", "mitre"}
func (api *KSCloudAPI) getFrameworkURL(frameworkName string) string {
u := url.URL{}
u.Scheme, u.Host = parseHost(api.GetCloudAPIURL())
u.Path = "api/v1/armoFrameworks"
q := u.Query()
q.Add("customerGUID", api.getCustomerGUIDFallBack())
if isNativeFramework(frameworkName) {
q.Add("frameworkName", strings.ToUpper(frameworkName))
} else {
// For customer framework has to be the way it was added
q.Add("frameworkName", frameworkName)
}
u.RawQuery = q.Encode()
return u.String()
}
func (api *KSCloudAPI) getAttackTracksURL() string {
u := url.URL{}
u.Scheme, u.Host = parseHost(api.GetCloudAPIURL())
u.Path = "api/v1/attackTracks"
q := u.Query()
q.Add("customerGUID", api.getCustomerGUIDFallBack())
u.RawQuery = q.Encode()
return u.String()
}
func (api *KSCloudAPI) getListFrameworkURL() string {
u := url.URL{}
u.Scheme, u.Host = parseHost(api.GetCloudAPIURL())
u.Path = "api/v1/armoFrameworks"
q := u.Query()
q.Add("customerGUID", api.getCustomerGUIDFallBack())
u.RawQuery = q.Encode()
return u.String()
}
func (api *KSCloudAPI) getExceptionsURL(clusterName string) string {
u := url.URL{}
u.Scheme, u.Host = parseHost(api.GetCloudAPIURL())
u.Path = "api/v1/armoPostureExceptions"
q := u.Query()
q.Add("customerGUID", api.getCustomerGUIDFallBack())
// if clusterName != "" { // TODO - fix customer name support in Armo BE
// q.Add("clusterName", clusterName)
// }
u.RawQuery = q.Encode()
return u.String()
}
func (api *KSCloudAPI) exceptionsURL(exceptionsPolicyName string) string {
u := url.URL{}
u.Scheme, u.Host = parseHost(api.GetCloudAPIURL())
u.Path = "api/v1/postureExceptionPolicy"
q := u.Query()
q.Add("customerGUID", api.getCustomerGUIDFallBack())
if exceptionsPolicyName != "" { // for delete
q.Add("policyName", exceptionsPolicyName)
}
u.RawQuery = q.Encode()
return u.String()
}
func (api *KSCloudAPI) getAccountConfigDefault(clusterName string) string {
config := api.getAccountConfig(clusterName)
url := config + "&scope=customer"
return url
}
func (api *KSCloudAPI) getAccountConfig(clusterName string) string {
u := url.URL{}
u.Scheme, u.Host = parseHost(api.GetCloudAPIURL())
u.Path = "api/v1/armoCustomerConfiguration"
q := u.Query()
q.Add("customerGUID", api.getCustomerGUIDFallBack())
if clusterName != "" { // TODO - fix customer name support in Armo BE
q.Add("clusterName", clusterName)
}
u.RawQuery = q.Encode()
return u.String()
}
func (api *KSCloudAPI) getAccountURL() string {
u := url.URL{}
u.Scheme, u.Host = parseHost(api.GetCloudAPIURL())
u.Path = "api/v1/createTenant"
return u.String()
}
func (api *KSCloudAPI) getApiToken() string {
u := url.URL{}
u.Scheme, u.Host = parseHost(api.GetCloudAuthURL())
u.Path = "identity/resources/auth/v1/api-token"
return u.String()
}
func (api *KSCloudAPI) getOpenidCustomers() string {
u := url.URL{}
u.Scheme, u.Host = parseHost(api.GetCloudAPIURL())
u.Path = "api/v1/openid_customers"
return u.String()
}
func (api *KSCloudAPI) getAuthCookie() (string, error) {
selectCustomer := KSCloudSelectCustomer{SelectedCustomerGuid: api.accountID}
requestBody, _ := json.Marshal(selectCustomer)
client := &http.Client{}
httpRequest, err := http.NewRequest(http.MethodPost, api.getOpenidCustomers(), bytes.NewBuffer(requestBody))
if err != nil {
return "", err
}
httpRequest.Header.Set("Content-Type", "application/json")
httpRequest.Header.Set("Authorization", fmt.Sprintf("Bearer %s", api.feToken.Token))
httpResponse, err := client.Do(httpRequest)
if err != nil {
return "", err
}
defer httpResponse.Body.Close()
if httpResponse.StatusCode != http.StatusOK {
return "", fmt.Errorf("failed to get cookie from %s: status %d", api.getOpenidCustomers(), httpResponse.StatusCode)
}
cookies := httpResponse.Header.Get("set-cookie")
if len(cookies) == 0 {
return "", fmt.Errorf("no cookie field in response from %s", api.getOpenidCustomers())
}
authCookie := ""
for _, cookie := range strings.Split(cookies, ";") {
kv := strings.Split(cookie, "=")
if kv[0] == "auth" {
authCookie = kv[1]
}
}
if len(authCookie) == 0 {
return "", fmt.Errorf("no auth cookie field in response from %s", api.getOpenidCustomers())
}
return authCookie, nil
}
func (api *KSCloudAPI) appendAuthHeaders(headers map[string]string) {
if api.feToken.Token != "" {
headers["Authorization"] = fmt.Sprintf("Bearer %s", api.feToken.Token)
}
if api.authCookie != "" {
headers["Cookie"] = fmt.Sprintf("auth=%s", api.authCookie)
}
}
func (api *KSCloudAPI) getCustomerGUIDFallBack() string {
if api.accountID != "" {
return api.accountID
}
return "11111111-1111-1111-1111-111111111111"
}
func parseHost(host string) (string, string) {
if strings.HasPrefix(host, "http://") {
return "http", strings.Replace(host, "http://", "", 1)
}
// default scheme
return "https", strings.Replace(host, "https://", "", 1)
}
func isNativeFramework(framework string) bool {
return contains(NativeFrameworks, framework)
}
func contains(s []string, str string) bool {
for _, v := range s {
if strings.EqualFold(v, str) {
return true
}
}
return false
}


@@ -0,0 +1,202 @@
package getter
import (
"context"
"fmt"
"log"
"net/http"
"net/http/httputil"
"time"
)
type (
// KSCloudOption allows configuring the behavior of the KS Cloud client.
KSCloudOption func(*ksCloudOptions)
// ksCloudOptions holds all the configurable parts of the KS Cloud client.
ksCloudOptions struct {
httpClient *http.Client
cloudReportURL string
cloudUIURL string
timeout *time.Duration
withTrace bool
}
// requestOption instructs post/get/delete to alter the outgoing request
requestOption func(*requestOptions)
// requestOptions knows how to enrich a request with headers
requestOptions struct {
withJSON bool
withToken string
withCookie *http.Cookie
withTrace bool
headers map[string]string
reqContext context.Context
}
)
// KS Cloud client options
// WithHTTPClient overrides the default http.Client used by the KS Cloud client.
func WithHTTPClient(client *http.Client) KSCloudOption {
return func(o *ksCloudOptions) {
o.httpClient = client
}
}
// WithTimeout sets a global timeout on all operations performed by the KS Cloud client.
//
// A value of 0 means no timeout.
//
// The default is 61s.
func WithTimeout(timeout time.Duration) KSCloudOption {
duration := timeout
return func(o *ksCloudOptions) {
o.timeout = &duration
}
}
// WithReportURL specifies the URL to post reports.
func WithReportURL(u string) KSCloudOption {
return func(o *ksCloudOptions) {
o.cloudReportURL = u
}
}
// WithFrontendURL specifies the URL to access the KS Cloud UI.
func WithFrontendURL(u string) KSCloudOption {
return func(o *ksCloudOptions) {
o.cloudUIURL = u
}
}
// WithTrace toggles request dumps for inspection & debugging.
func WithTrace(enabled bool) KSCloudOption {
return func(o *ksCloudOptions) {
o.withTrace = enabled
}
}
var defaultClient = &http.Client{
Timeout: 61 * time.Second,
}
// ksCloudOptionsWithDefaults sets defaults for the KS client and applies overrides.
func ksCloudOptionsWithDefaults(opts []KSCloudOption) *ksCloudOptions {
options := &ksCloudOptions{
httpClient: defaultClient,
}
for _, apply := range opts {
apply(options)
}
if options.timeout != nil {
// non-default timeout (0 means no timeout)
// clone the client and override the timeout
client := *options.httpClient
client.Timeout = *options.timeout
options.httpClient = &client
}
return options
}
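As a usage sketch of these functional options (relying on the NewKSCloudAPICustomized constructor exercised in the tests further below; the hosts and values are placeholders):

ks := NewKSCloudAPICustomized(
	"api.example.com", "auth.example.com", // required API and auth hosts
	WithHTTPClient(http.DefaultClient), // e.g. inject a client with a proxy
	WithTimeout(30*time.Second),        // overrides the 61s default; 0 disables the timeout
	WithTrace(true),                    // dump requests and responses while debugging
)
_ = ks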
// http request options
// withContentJSON sets JSON content type for a request
func withContentJSON(enabled bool) requestOption {
return func(o *requestOptions) {
o.withJSON = enabled
}
}
// withToken sets an Authorization header for a request
func withToken(token string) requestOption {
return func(o *requestOptions) {
o.withToken = token
}
}
// withCookie sets an authentication cookie for a request
func withCookie(cookie *http.Cookie) requestOption {
return func(o *requestOptions) {
o.withCookie = cookie
}
}
// withExtraHeaders adds extra headers to a request
func withExtraHeaders(headers map[string]string) requestOption {
return func(o *requestOptions) {
o.headers = headers
}
}
/* not used yet
// withContext sets the context of a request.
//
// By default, context.Background() is used.
func withContext(ctx context.Context) requestOption {
return func(o *requestOptions) {
o.reqContext = ctx
}
}
*/
// withTrace dumps requests for debugging
func withTrace(enabled bool) requestOption {
return func(o *requestOptions) {
o.withTrace = enabled
}
}
func (o *requestOptions) setHeaders(req *http.Request) {
if o.withJSON {
req.Header.Set("Content-Type", "application/json")
}
if len(o.withToken) > 0 {
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", o.withToken))
}
if o.withCookie != nil {
req.AddCookie(o.withCookie)
}
for k, v := range o.headers {
req.Header.Set(k, v)
}
}
// traceReq dumps the content of an outgoing request for inspecting or debugging the client.
func (o *requestOptions) traceReq(req *http.Request) {
if !o.withTrace {
return
}
dump, _ := httputil.DumpRequestOut(req, true)
log.Printf("%s\n", dump)
}
// traceResp dumps the content of an API response for inspecting or debugging the client.
func (o *requestOptions) traceResp(resp *http.Response) {
if !o.withTrace {
return
}
dump, _ := httputil.DumpResponse(resp, true)
log.Printf("%s\n", dump)
}
func requestOptionsWithDefaults(opts []requestOption) *requestOptions {
o := &requestOptions{
reqContext: context.Background(),
}
for _, apply := range opts {
apply(o)
}
return o
}
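Internally, a get/post helper would apply these options along the lines of the following sketch (the URL, token, and header values are hypothetical; only the helpers defined above are used):

func exampleRequest() (*http.Request, error) {
	req, err := http.NewRequest(http.MethodGet, "https://api.example.com/path", nil)
	if err != nil {
		return nil, err
	}
	o := requestOptionsWithDefaults([]requestOption{
		withContentJSON(true),
		withToken("access-token"),
		withExtraHeaders(map[string]string{"X-Request-Source": "kubescape"}),
	})
	o.setHeaders(req) // sets Content-Type, Authorization and the extra headers
	o.traceReq(req)   // no-op unless withTrace(true) was also applied
	return req, nil
}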


@@ -24,9 +24,13 @@ var (
ErrIDRequired = errors.New("missing required input control ID")
ErrFrameworkNotMatching = errors.New("framework from file not matching")
ErrControlNotMatching = errors.New("framework from file not matching")
)
_ IPolicyGetter = &LoadPolicy{}
_ IExceptionsGetter = &LoadPolicy{}
var (
_ IPolicyGetter = &LoadPolicy{}
_ IExceptionsGetter = &LoadPolicy{}
_ IAttackTracksGetter = &LoadPolicy{}
_ IControlsInputsGetter = &LoadPolicy{}
)
func getCacheDir() string {


@@ -4,9 +4,9 @@ import (
"fmt"
"os"
"path/filepath"
"runtime"
"testing"
"github.com/kubescape/kubescape/v2/internal/testutils"
"github.com/stretchr/testify/require"
)
@@ -387,7 +387,7 @@ func TestLoadPolicy(t *testing.T) {
}
func testFrameworkFile(framework string) string {
return filepath.Join(currentDir(), "testdata", fmt.Sprintf("%s.json", framework))
return filepath.Join(testutils.CurrentDir(), "testdata", fmt.Sprintf("%s.json", framework))
}
func writeTempJSONControlInputs(t testing.TB) (string, map[string][]string) {
@@ -408,9 +408,3 @@ func writeTempJSONControlInputs(t testing.TB) (string, map[string][]string) {
return fileName, mock
}
func currentDir() string {
_, filename, _, _ := runtime.Caller(1)
return filepath.Dir(filename)
}


@@ -6,6 +6,7 @@
},
"creationTime": "",
"description": "Testing MITRE for Kubernetes as suggested by microsoft in https://www.microsoft.com/security/blog/wp-content/uploads/2020/04/k8s-matrix.png",
"typeTags": ["compliance"],
"controls": [
{
"guid": "",


@@ -6,6 +6,7 @@
},
"creationTime": "",
"description": "Implement NSA security advices for K8s ",
"typeTags": ["compliance"],
"controls": [
{
"guid": "",

File diff suppressed because it is too large.


@@ -25789,7 +25789,7 @@
},
{
"guid": "",
"name": "exclude-kubescape-host-scanner-resources",
"name": "exclude-host-scanner-resources",
"attributes": {
"systemException": true
},
@@ -25804,7 +25804,7 @@
"attributes": {
"kind": "DaemonSet",
"name": "host-scanner",
"namespace": "kubescape-host-scanner"
"namespace": "kubescape"
}
}
],


@@ -0,0 +1,65 @@
package getter
import (
"net/url"
"path"
)
// buildAPIURL builds a URL pointing to the API backend.
func (api *KSCloudAPI) buildAPIURL(pth string, pairs ...string) string {
return buildQuery(url.URL{
Scheme: api.scheme,
Host: api.host,
Path: pth,
}, pairs...)
}
// buildUIURL builds a URL pointing to the UI frontend.
func (api *KSCloudAPI) buildUIURL(pth string, pairs ...string) string {
return buildQuery(url.URL{
Scheme: api.uischeme,
Host: api.uihost,
Path: pth,
}, pairs...)
}
// buildAuthURL builds a URL pointing to the authentication endpoint.
func (api *KSCloudAPI) buildAuthURL(pth string, pairs ...string) string {
return buildQuery(url.URL{
Scheme: api.authscheme,
Host: api.authhost,
Path: pth,
}, pairs...)
}
// buildReportURL builds a URL pointing to the reporting endpoint.
func (api *KSCloudAPI) buildReportURL(pth string, pairs ...string) string {
return buildQuery(url.URL{
Scheme: api.reportscheme,
Host: api.reporthost,
Path: pth,
}, pairs...)
}
// buildQuery builds a URL with query params.
//
// Params are provided in pairs (param name, value).
func buildQuery(u url.URL, pairs ...string) string {
if len(pairs)%2 != 0 {
panic("dev error: buildURL accepts query params in (name, value) pairs")
}
q := u.Query()
for i := 0; i < len(pairs)-1; i += 2 {
param := pairs[i]
value := pairs[i+1]
q.Add(param, value)
}
u.RawQuery = q.Encode()
u.Path = path.Clean(u.Path)
return u.String()
}


@@ -0,0 +1,86 @@
package getter
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestBuildURL(t *testing.T) {
t.Parallel()
ks := NewKSCloudAPICustomized(
"api.example.com", "auth.example.com", // required
WithFrontendURL("ui.example.com"), // optional
WithReportURL("report.example.com"), // optional
)
t.Run("should build API URL with query params on https host", func(t *testing.T) {
require.Equal(t,
"https://api.example.com/path?q1=v1&q2=v2",
ks.buildAPIURL("/path", "q1", "v1", "q2", "v2"),
)
})
t.Run("should build API URL with query params on http host", func(t *testing.T) {
ku := NewKSCloudAPICustomized("http://api.example.com", "auth.example.com")
require.Equal(t,
"http://api.example.com/path?q1=v1&q2=v2",
ku.buildAPIURL("/path", "q1", "v1", "q2", "v2"),
)
})
t.Run("should panic when params are not provided in pairs", func(t *testing.T) {
require.Panics(t, func() {
// notice how the linter detects wrong args
_ = ks.buildAPIURL("/path", "q1", "v1", "q2") //nolint:staticcheck
})
})
t.Run("should build UI URL with query params on https host", func(t *testing.T) {
require.Equal(t,
"https://ui.example.com/path?q1=v1&q2=v2",
ks.buildUIURL("/path", "q1", "v1", "q2", "v2"),
)
})
t.Run("should build report URL with query params on https host", func(t *testing.T) {
require.Equal(t,
"https://report.example.com/path?q1=v1&q2=v2",
ks.buildReportURL("/path", "q1", "v1", "q2", "v2"),
)
})
}
func TestViewURL(t *testing.T) {
t.Parallel()
ks := NewKSCloudAPICustomized(
"api.example.com", "auth.example.com", // required
WithFrontendURL("ui.example.com"), // optional
WithReportURL("report.example.com"), // optional
)
ks.SetAccountID("me")
ks.SetInvitationToken("invite")
t.Run("should render UI report URL", func(t *testing.T) {
require.Equal(t, "https://ui.example.com/repository-scanning/xyz", ks.ViewReportURL("xyz"))
})
t.Run("should render UI dashboard URL", func(t *testing.T) {
require.Equal(t, "https://ui.example.com/dashboard", ks.ViewDashboardURL())
})
t.Run("should render UI RBAC URL", func(t *testing.T) {
require.Equal(t, "https://ui.example.com/rbac-visualizer", ks.ViewRBACURL())
})
t.Run("should render UI scan URL", func(t *testing.T) {
require.Equal(t, "https://ui.example.com/compliance/cluster", ks.ViewScanURL("cluster"))
})
t.Run("should render UI sign URL", func(t *testing.T) {
require.Equal(t, "https://ui.example.com/account/sign-up?customerGUID=me&invitationToken=invite&utm_medium=createaccount&utm_source=ARMOgithub", ks.ViewSignURL())
})
}


@@ -0,0 +1,82 @@
package getter
import (
"fmt"
"io"
"net/http"
"strings"
)
// parseHost picks a host from a hostname or a URL and detects the scheme.
//
// The default scheme is https. This may be altered by specifying an explicit http://hostname URL.
func parseHost(host string) (string, string) {
if strings.HasPrefix(host, "http://") {
return "http", strings.Replace(host, "http://", "", 1) // cut... index ...
}
// default scheme
return "https", strings.Replace(host, "https://", "", 1)
}
func isNativeFramework(framework string) bool {
return contains(NativeFrameworks, framework)
}
func contains(s []string, str string) bool {
for _, v := range s {
if strings.EqualFold(v, str) {
return true
}
}
return false
}
func min(a, b int64) int64 {
if a < b {
return a
}
return b
}
// errAPI reports an API error, with a cap on the length of the error message.
func errAPI(resp *http.Response) error {
const maxSize = 1024
reason := new(strings.Builder)
if resp.Body != nil {
size := min(resp.ContentLength, maxSize)
if size > 0 {
reason.Grow(int(size))
}
_, _ = io.CopyN(reason, resp.Body, size)
defer resp.Body.Close()
}
return fmt.Errorf("http-error: '%s', reason: '%s'", resp.Status, reason.String())
}
// errAuth returns an authentication error.
//
// Authentication errors upon login yield a less detailed message.
func errAuth(resp *http.Response) error {
return fmt.Errorf("error authenticating: %d", resp.StatusCode)
}
func readString(rdr io.Reader, sizeHint int64) (string, error) {
// if the response is empty, return an empty string
if sizeHint < 0 {
return "", nil
}
var b strings.Builder
b.Grow(int(sizeHint))
_, err := io.Copy(&b, rdr)
return b.String(), err
}
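To make the capping behavior of errAPI concrete, here is a small hypothetical example (all response values are made up):

func exampleErrAPI() error {
	resp := &http.Response{
		Status:        "503 Service Unavailable",
		StatusCode:    http.StatusServiceUnavailable,
		ContentLength: 5,
		Body:          io.NopCloser(strings.NewReader("oops!")),
	}
	// yields: http-error: '503 Service Unavailable', reason: 'oops!'
	return errAPI(resp)
}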


@@ -0,0 +1,99 @@
package getter
import (
"io"
"testing"
"github.com/stretchr/testify/require"
)
func TestParseHost(t *testing.T) {
t.Parallel()
t.Run("should recognize http scheme", func(t *testing.T) {
t.Parallel()
const input = "http://localhost:7555"
scheme, host := parseHost(input)
require.Equal(t, "http", scheme)
require.Equal(t, "localhost:7555", host)
})
t.Run("should recognize https scheme", func(t *testing.T) {
t.Parallel()
const input = "https://localhost:7555"
scheme, host := parseHost(input)
require.Equal(t, "https", scheme)
require.Equal(t, "localhost:7555", host)
})
t.Run("should adopt https scheme by default", func(t *testing.T) {
t.Parallel()
const input = "portal-dev.armo.cloud"
scheme, host := parseHost(input)
require.Equal(t, "https", scheme)
require.Equal(t, "portal-dev.armo.cloud", host)
})
}
func TestIsNativeFramework(t *testing.T) {
t.Parallel()
require.Truef(t, isNativeFramework("nSa"), "expected nsa to be native (case insensitive)")
require.Falsef(t, isNativeFramework("foo"), "expected framework to be custom")
}
func Test_readString(t *testing.T) {
type args struct {
rdr io.Reader
sizeHint int64
}
tests := []struct {
name string
args args
want string
wantErr bool
}{
{
name: "should return empty string if sizeHint is negative",
args: args{
rdr: nil,
sizeHint: -1,
},
want: "",
wantErr: false,
},
{
name: "should return empty string if sizeHint is zero",
args: args{
rdr: &io.LimitedReader{},
sizeHint: 0,
},
want: "",
wantErr: false,
},
{
name: "should return empty string if sizeHint is positive",
args: args{
rdr: &io.LimitedReader{},
sizeHint: 1,
},
want: "",
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := readString(tt.args.rdr, tt.args.sizeHint)
if (err != nil) != tt.wantErr {
t.Errorf("readString() error = %v, wantErr %v", err, tt.wantErr)
return
}
if got != tt.want {
t.Errorf("readString() = %v, want %v", got, tt.want)
}
})
}
}


@@ -3,6 +3,7 @@ package cautils
import (
"github.com/kubescape/k8s-interface/workloadinterface"
"github.com/kubescape/opa-utils/reporthandling"
helpersv1 "github.com/kubescape/opa-utils/reporthandling/helpers/v1"
"github.com/kubescape/opa-utils/reporthandling/results/v1/reportsummary"
)
@@ -54,10 +55,8 @@ func controlReportV2ToV1(opaSessionObj *OPASessionObj, frameworkName string, con
crv1.Remediation = crv2.Remediation
rulesv1 := map[string]reporthandling.RuleReport{}
iter := crv2.ListResourcesIDs().All()
for iter.HasNext() {
resourceID := iter.Next()
l := helpersv1.GetAllListsFromPool()
for resourceID := range crv2.ListResourcesIDs(l).All() {
if result, ok := opaSessionObj.ResourcesResult[resourceID]; ok {
for _, rulev2 := range result.ListRulesOfControl(crv2.GetID(), "") {
@@ -104,6 +103,7 @@ func controlReportV2ToV1(opaSessionObj *OPASessionObj, frameworkName string, con
}
}
}
helpersv1.PutAllListsToPool(l)
if len(rulesv1) > 0 {
for i := range rulesv1 {
crv1.RuleReports = append(crv1.RuleReports, rulesv1[i])


@@ -118,7 +118,8 @@ type ScanInfo struct {
IncludeNamespaces string //
InputPatterns []string // Yaml files input patterns
Silent bool // Silent mode - Do not print progress logs
FailThreshold float32 // Failure score threshold
FailThreshold float32 // DEPRECATED - Failure score threshold
ComplianceThreshold float32 // Compliance score threshold
FailThresholdSeverity string // Severity at and above which the command should fail
Submit bool // Submit results to Kubescape Cloud BE
CreateAccount bool // Create account in Kubescape Cloud BE if no account found in local cache
@@ -132,6 +133,7 @@ type ScanInfo struct {
ScanAll bool // true if scan all frameworks
OmitRawResources bool // true if omit raw resources from the output
PrintAttackTree bool // true if print attack tree
WorkloadIdentifier string // workload identifier for workload scan
}
type Getters struct {
@@ -226,7 +228,7 @@ func (scanInfo *ScanInfo) contains(policyName string) bool {
func scanInfoToScanMetadata(ctx context.Context, scanInfo *ScanInfo) *reporthandlingv2.Metadata {
metadata := &reporthandlingv2.Metadata{}
metadata.ScanMetadata.Format = scanInfo.Format
metadata.ScanMetadata.Formats = []string{scanInfo.Format}
metadata.ScanMetadata.FormatVersion = scanInfo.FormatVersion
metadata.ScanMetadata.Submit = scanInfo.Submit
@@ -250,6 +252,7 @@ func scanInfoToScanMetadata(ctx context.Context, scanInfo *ScanInfo) *reporthand
metadata.ScanMetadata.KubescapeVersion = BuildNumber
metadata.ScanMetadata.VerboseMode = scanInfo.VerboseMode
metadata.ScanMetadata.FailThreshold = scanInfo.FailThreshold
metadata.ScanMetadata.ComplianceThreshold = scanInfo.ComplianceThreshold
metadata.ScanMetadata.HostScanner = scanInfo.HostSensorEnabled.GetBool()
metadata.ScanMetadata.VerboseMode = scanInfo.VerboseMode
metadata.ScanMetadata.ControlsInputs = scanInfo.ControlsInputs
@@ -335,14 +338,23 @@ func setContextMetadata(ctx context.Context, contextMetadata *reporthandlingv2.C
}
contextMetadata.RepoContextMetadata = context
case ContextDir:
contextMetadata.DirectoryContextMetadata = &reporthandlingv2.DirectoryContextMetadata{
BasePath: getAbsPath(input),
HostName: getHostname(),
contextMetadata.RepoContextMetadata = &reporthandlingv2.RepoContextMetadata{
Provider: "none",
Repo: fmt.Sprintf("path@%s", getAbsPath(input)),
Owner: getHostname(),
Branch: "none",
DefaultBranch: "none",
LocalRootPath: getAbsPath(input),
}
case ContextFile:
contextMetadata.FileContextMetadata = &reporthandlingv2.FileContextMetadata{
FilePath: getAbsPath(input),
HostName: getHostname(),
contextMetadata.RepoContextMetadata = &reporthandlingv2.RepoContextMetadata{
Provider: "none",
Repo: fmt.Sprintf("file@%s", getAbsPath(input)),
Owner: getHostname(),
Branch: "none",
DefaultBranch: "none",
LocalRootPath: getAbsPath(input),
}
case ContextGitLocal:
// local


@@ -2,6 +2,9 @@ package cautils
import (
"fmt"
"os"
"sort"
"strconv"
"strings"
)
@@ -42,3 +45,32 @@ func StringInSlice(strSlice []string, str string) int {
}
return ValueNotFound
}
// StringSlicesAreEqual reports whether a and b contain the same elements,
// regardless of order. Note that it sorts both slices in place.
func StringSlicesAreEqual(a, b []string) bool {
if len(a) != len(b) {
return false
}
sort.Strings(a)
sort.Strings(b)
for i := range a {
if a[i] != b[i] {
return false
}
}
return true
}
func ParseIntEnvVar(varName string, defaultValue int) (int, error) {
varValue, exists := os.LookupEnv(varName)
if !exists {
return defaultValue, nil
}
intValue, err := strconv.Atoi(varValue)
if err != nil {
return defaultValue, fmt.Errorf("failed to parse %s env var as int: %w", varName, err)
}
return intValue, nil
}


@@ -2,8 +2,11 @@ package cautils
import (
"fmt"
"os"
"strings"
"testing"
"github.com/stretchr/testify/assert"
)
func TestConvertLabelsToString(t *testing.T) {
@@ -33,3 +36,102 @@ func TestConvertStringToLabels(t *testing.T) {
t.Errorf("%s != %s", fmt.Sprintf("%v", rstrMap), fmt.Sprintf("%v", strMap))
}
}
func TestParseIntEnvVar(t *testing.T) {
testCases := []struct {
expectedErr string
name string
varName string
varValue string
defaultValue int
expected int
}{
{
name: "Variable does not exist",
varName: "DOES_NOT_EXIST",
varValue: "",
defaultValue: 123,
expected: 123,
expectedErr: "",
},
{
name: "Variable exists and is a valid integer",
varName: "MY_VAR",
varValue: "456",
defaultValue: 123,
expected: 456,
expectedErr: "",
},
{
name: "Variable exists but is not a valid integer",
varName: "MY_VAR",
varValue: "not_an_integer",
defaultValue: 123,
expected: 123,
expectedErr: "failed to parse MY_VAR env var as int",
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
if tc.varValue != "" {
os.Setenv(tc.varName, tc.varValue)
} else {
os.Unsetenv(tc.varName)
}
actual, err := ParseIntEnvVar(tc.varName, tc.defaultValue)
if tc.expectedErr != "" {
assert.NotNil(t, err)
assert.ErrorContains(t, err, tc.expectedErr)
} else {
assert.Nil(t, err)
}
assert.Equalf(t, tc.expected, actual, "unexpected result")
})
}
}
func TestStringSlicesAreEqual(t *testing.T) {
tt := []struct {
name string
a []string
b []string
want bool
}{
{
name: "equal unsorted slices",
a: []string{"foo", "bar", "baz"},
b: []string{"baz", "foo", "bar"},
want: true,
},
{
name: "equal sorted slices",
a: []string{"bar", "baz", "foo"},
b: []string{"bar", "baz", "foo"},
want: true,
},
{
name: "unequal slices",
a: []string{"foo", "bar", "baz"},
b: []string{"foo", "bar", "qux"},
want: false,
},
{
name: "different length slices",
a: []string{"foo", "bar", "baz"},
b: []string{"foo", "bar"},
want: false,
},
}
for _, tc := range tt {
t.Run(tc.name, func(t *testing.T) {
got := StringSlicesAreEqual(tc.a, tc.b)
if got != tc.want {
t.Errorf("StringSlicesAreEqual(%v, %v) = %v; want %v", tc.a, tc.b, got, tc.want)
}
})
}
}


@@ -27,6 +27,7 @@ var (
cloudapis.CloudProviderDescribeKind,
cloudapis.CloudProviderDescribeRepositoriesKind,
cloudapis.CloudProviderListEntitiesForPoliciesKind,
cloudapis.CloudProviderPolicyVersionKind,
string(cloudsupport.TypeApiServerInfo),
}
)


@@ -84,7 +84,7 @@ func downloadArtifacts(ctx context.Context, downloadInfo *metav1.DownloadInfo) e
}
for artifact := range artifacts {
if err := downloadArtifact(ctx, &metav1.DownloadInfo{Target: artifact, Path: downloadInfo.Path, FileName: fmt.Sprintf("%s.json", artifact)}, artifacts); err != nil {
logger.L().Ctx(ctx).Error("error downloading", helpers.String("artifact", artifact), helpers.Error(err))
logger.L().Ctx(ctx).Warning("error downloading", helpers.String("artifact", artifact), helpers.Error(err))
}
}
return nil


@@ -11,9 +11,11 @@ import (
"github.com/kubescape/kubescape/v2/core/pkg/fixhandler"
)
const NoChangesApplied = "No changes were applied."
const NoResourcesToFix = "No issues to fix."
const ConfirmationQuestion = "Would you like to apply the changes to the files above? [y|n]: "
const (
noChangesApplied = "No changes were applied."
noResourcesToFix = "No issues to fix."
confirmationQuestion = "Would you like to apply the changes to the files above? [y|n]: "
)
func (ks *Kubescape) Fix(ctx context.Context, fixInfo *metav1.FixInfo) error {
logger.L().Info("Reading report file...")
@@ -25,19 +27,19 @@ func (ks *Kubescape) Fix(ctx context.Context, fixInfo *metav1.FixInfo) error {
resourcesToFix := handler.PrepareResourcesToFix(ctx)
if len(resourcesToFix) == 0 {
logger.L().Info(NoResourcesToFix)
logger.L().Info(noResourcesToFix)
return nil
}
handler.PrintExpectedChanges(resourcesToFix)
if fixInfo.DryRun {
logger.L().Info(NoChangesApplied)
logger.L().Info(noChangesApplied)
return nil
}
if !fixInfo.NoConfirm && !userConfirmed() {
logger.L().Info(NoChangesApplied)
logger.L().Info(noChangesApplied)
return nil
}
@@ -46,7 +48,7 @@ func (ks *Kubescape) Fix(ctx context.Context, fixInfo *metav1.FixInfo) error {
if len(errors) > 0 {
for _, err := range errors {
logger.L().Ctx(ctx).Error(err.Error())
logger.L().Ctx(ctx).Warning(err.Error())
}
return fmt.Errorf("Failed to fix some resources, check the logs for more details")
}
@@ -58,7 +60,7 @@ func userConfirmed() bool {
var input string
for {
fmt.Printf(ConfirmationQuestion)
fmt.Println(confirmationQuestion)
if _, err := fmt.Scanln(&input); err != nil {
continue
}


@@ -66,8 +66,9 @@ func getRBACHandler(tenantConfig cautils.ITenantConfig, k8s *k8sinterface.Kubern
}
func getReporter(ctx context.Context, tenantConfig cautils.ITenantConfig, reportID string, submit, fwScan bool, scanningContext cautils.ScanningContext) reporter.IReport {
ctx, span := otel.Tracer("").Start(ctx, "getReporter")
_, span := otel.Tracer("").Start(ctx, "getReporter")
defer span.End()
if submit {
submitData := reporterv2.SubmitContextScan
if scanningContext != cautils.ContextCluster {
@@ -77,7 +78,7 @@ func getReporter(ctx context.Context, tenantConfig cautils.ITenantConfig, report
}
if tenantConfig.GetAccountID() == "" {
// Add link only when scanning a cluster using a framework
return reporterv2.NewReportMock("https://hub.armosec.io/docs/installing-kubescape", "run kubescape with the '--account' flag")
return reporterv2.NewReportMock("", "")
}
var message string
if !fwScan {
@@ -90,35 +91,48 @@ func getReporter(ctx context.Context, tenantConfig cautils.ITenantConfig, report
func getResourceHandler(ctx context.Context, scanInfo *cautils.ScanInfo, tenantConfig cautils.ITenantConfig, k8s *k8sinterface.KubernetesApi, hostSensorHandler hostsensorutils.IHostSensor, registryAdaptors *resourcehandler.RegistryAdaptors) resourcehandler.IResourceHandler {
ctx, span := otel.Tracer("").Start(ctx, "getResourceHandler")
defer span.End()
if len(scanInfo.InputPatterns) > 0 || k8s == nil {
// scanInfo.HostSensor.SetBool(false)
return resourcehandler.NewFileResourceHandler(ctx, scanInfo.InputPatterns, registryAdaptors)
return resourcehandler.NewFileResourceHandler(ctx, scanInfo.InputPatterns)
}
getter.GetKSCloudAPIConnector()
rbacObjects := getRBACHandler(tenantConfig, k8s, scanInfo.Submit)
return resourcehandler.NewK8sResourceHandler(k8s, getFieldSelector(scanInfo), hostSensorHandler, rbacObjects, registryAdaptors)
}
// getHostSensorHandler yields an IHostSensor that knows how to collect a host's scanned resources.
//
// A noop sensor is returned whenever host scanning is disabled or an error prevented the scanner from deploying properly.
func getHostSensorHandler(ctx context.Context, scanInfo *cautils.ScanInfo, k8s *k8sinterface.KubernetesApi) hostsensorutils.IHostSensor {
if !k8sinterface.IsConnectedToCluster() || k8s == nil {
return &hostsensorutils.HostSensorHandlerMock{}
}
const wantsHostSensorControls = true // defaults to disabling the scanner if not explicitly enabled (TODO(fredbi): should be addressed by injecting ScanInfo defaults)
hostSensorVal := scanInfo.HostSensorEnabled.Get()
hasHostSensorControls := true
// we need to determine which controls need the host scanner
if scanInfo.HostSensorEnabled.Get() == nil && hasHostSensorControls {
scanInfo.HostSensorEnabled.SetBool(false) // default - do not run host scanner
}
if hostSensorVal := scanInfo.HostSensorEnabled.Get(); hostSensorVal != nil && *hostSensorVal {
switch {
case !k8sinterface.IsConnectedToCluster() || k8s == nil: // TODO(fred): fix race condition on global KSConfig there
return hostsensorutils.NewHostSensorHandlerMock()
case hostSensorVal != nil && *hostSensorVal:
hostSensorHandler, err := hostsensorutils.NewHostSensorHandler(k8s, scanInfo.HostSensorYamlPath)
if err != nil {
logger.L().Ctx(ctx).Warning(fmt.Sprintf("failed to create host scanner: %s", err.Error()))
return &hostsensorutils.HostSensorHandlerMock{}
return hostsensorutils.NewHostSensorHandlerMock()
}
return hostSensorHandler
case hostSensorVal == nil && wantsHostSensorControls:
// TODO: we need to determine which controls need the host scanner
scanInfo.HostSensorEnabled.SetBool(false)
fallthrough
default:
return hostsensorutils.NewHostSensorHandlerMock()
}
return &hostsensorutils.HostSensorHandlerMock{}
}
func getFieldSelector(scanInfo *cautils.ScanInfo) resourcehandler.IFieldSelector {
if scanInfo.IncludeNamespaces != "" {
return resourcehandler.NewIncludeSelector(scanInfo.IncludeNamespaces)
@@ -169,12 +183,6 @@ func setSubmitBehavior(scanInfo *cautils.ScanInfo, tenantConfig cautils.ITenantC
return
}
scanningContext := scanInfo.GetScanningContext()
if scanningContext == cautils.ContextFile || scanningContext == cautils.ContextDir {
scanInfo.Submit = false
return
}
if scanInfo.Local {
scanInfo.Submit = false
return


@@ -7,8 +7,11 @@ import (
"github.com/kubescape/go-logger"
"github.com/kubescape/go-logger/helpers"
"github.com/kubescape/k8s-interface/k8sinterface"
"github.com/kubescape/kubescape/v2/core/cautils"
"github.com/kubescape/kubescape/v2/core/pkg/hostsensorutils"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func Test_getUIPrinter(t *testing.T) {
@@ -105,3 +108,71 @@ func Test_getUIPrinter(t *testing.T) {
})
}
}
func TestGetSensorHandler(t *testing.T) {
t.Parallel()
ctx := context.Background()
t.Run("should return mock sensor if not k8s interface is provided", func(t *testing.T) {
t.Parallel()
scanInfo := &cautils.ScanInfo{}
var k8s *k8sinterface.KubernetesApi
sensor := getHostSensorHandler(ctx, scanInfo, k8s)
require.NotNil(t, sensor)
_, isMock := sensor.(*hostsensorutils.HostSensorHandlerMock)
require.True(t, isMock)
})
t.Run("should return mock sensor if the sensor is not enabled", func(t *testing.T) {
t.Parallel()
scanInfo := &cautils.ScanInfo{}
k8s := &k8sinterface.KubernetesApi{}
sensor := getHostSensorHandler(ctx, scanInfo, k8s)
require.NotNil(t, sensor)
_, isMock := sensor.(*hostsensorutils.HostSensorHandlerMock)
require.True(t, isMock)
})
t.Run("should return mock sensor if the sensor is disabled", func(t *testing.T) {
t.Parallel()
falseFlag := cautils.NewBoolPtr(nil)
falseFlag.SetBool(false)
scanInfo := &cautils.ScanInfo{
HostSensorEnabled: falseFlag,
}
k8s := &k8sinterface.KubernetesApi{}
sensor := getHostSensorHandler(ctx, scanInfo, k8s)
require.NotNil(t, sensor)
_, isMock := sensor.(*hostsensorutils.HostSensorHandlerMock)
require.True(t, isMock)
})
t.Run("should return mock sensor if the sensor is enabled, but can't deploy (nil)", func(t *testing.T) {
t.Parallel()
falseFlag := cautils.NewBoolPtr(nil)
falseFlag.SetBool(true)
scanInfo := &cautils.ScanInfo{
HostSensorEnabled: falseFlag,
}
var k8s *k8sinterface.KubernetesApi
sensor := getHostSensorHandler(ctx, scanInfo, k8s)
require.NotNil(t, sensor)
_, isMock := sensor.(*hostsensorutils.HostSensorHandlerMock)
require.True(t, isMock)
})
// TODO(fredbi): need to share the k8s client mock to test a happy path / deployment failure path
}


@@ -27,13 +27,13 @@ type componentInterfaces struct {
tenantConfig cautils.ITenantConfig
resourceHandler resourcehandler.IResourceHandler
report reporter.IReport
outputPrinters []printer.IPrinter
uiPrinter printer.IPrinter
hostSensorHandler hostsensorutils.IHostSensor
outputPrinters []printer.IPrinter
}
func getInterfaces(ctx context.Context, scanInfo *cautils.ScanInfo) componentInterfaces {
ctx, span := otel.Tracer("").Start(ctx, "getInterfaces")
ctx, span := otel.Tracer("").Start(ctx, "setup interfaces")
defer span.End()
// ================== setup k8s interface object ======================================
@@ -74,16 +74,12 @@ func getInterfaces(ctx context.Context, scanInfo *cautils.ScanInfo) componentInt
hostSensorHandler := getHostSensorHandler(ctx, scanInfo, k8s)
if err := hostSensorHandler.Init(ctxHostScanner); err != nil {
logger.L().Ctx(ctxHostScanner).Error("failed to init host scanner", helpers.Error(err))
hostSensorHandler = &hostsensorutils.HostSensorHandlerMock{}
hostSensorHandler = hostsensorutils.NewHostSensorHandlerMock()
}
spanHostScanner.End()
// ================== setup registry adaptors ======================================
registryAdaptors, err := resourcehandler.NewRegistryAdaptors()
if err != nil {
logger.L().Ctx(ctx).Error("failed to initialize registry adaptors", helpers.Error(err))
}
registryAdaptors, _ := resourcehandler.NewRegistryAdaptors()
// ================== setup resource collector object ======================================
@@ -119,12 +115,10 @@ func getInterfaces(ctx context.Context, scanInfo *cautils.ScanInfo) componentInt
}
func (ks *Kubescape) Scan(ctx context.Context, scanInfo *cautils.ScanInfo) (*resultshandling.ResultsHandler, error) {
ctx, spanScan := otel.Tracer("").Start(ctx, "kubescape.Scan")
defer spanScan.End()
ctxInit, spanInit := otel.Tracer("").Start(ctx, "initialization")
logger.L().Info("Kubescape scanner starting")
// ===================== Initialization =====================
ctxInit, spanInit := otel.Tracer("").Start(ctx, "initialization")
scanInfo.Init(ctxInit) // initialize scan info
interfaces := getInterfaces(ctxInit, scanInfo)
@@ -137,10 +131,10 @@ func (ks *Kubescape) Scan(ctx context.Context, scanInfo *cautils.ScanInfo) (*res
downloadReleasedPolicy := getter.NewDownloadReleasedPolicy() // download config inputs from github release
// set policy getter only after setting the customerGUID
scanInfo.Getters.PolicyGetter = getPolicyGetter(ctx, scanInfo.UseFrom, interfaces.tenantConfig.GetTenantEmail(), scanInfo.FrameworkScan, downloadReleasedPolicy)
scanInfo.Getters.ControlsInputsGetter = getConfigInputsGetter(ctx, scanInfo.ControlsInputs, interfaces.tenantConfig.GetAccountID(), downloadReleasedPolicy)
scanInfo.Getters.ExceptionsGetter = getExceptionsGetter(ctx, scanInfo.UseExceptions, interfaces.tenantConfig.GetAccountID(), downloadReleasedPolicy)
scanInfo.Getters.AttackTracksGetter = getAttackTracksGetter(ctx, scanInfo.AttackTracks, interfaces.tenantConfig.GetAccountID(), downloadReleasedPolicy)
scanInfo.Getters.PolicyGetter = getPolicyGetter(ctxInit, scanInfo.UseFrom, interfaces.tenantConfig.GetTenantEmail(), scanInfo.FrameworkScan, downloadReleasedPolicy)
scanInfo.Getters.ControlsInputsGetter = getConfigInputsGetter(ctxInit, scanInfo.ControlsInputs, interfaces.tenantConfig.GetAccountID(), downloadReleasedPolicy)
scanInfo.Getters.ExceptionsGetter = getExceptionsGetter(ctxInit, scanInfo.UseExceptions, interfaces.tenantConfig.GetAccountID(), downloadReleasedPolicy)
scanInfo.Getters.AttackTracksGetter = getAttackTracksGetter(ctxInit, scanInfo.AttackTracks, interfaces.tenantConfig.GetAccountID(), downloadReleasedPolicy)
// TODO - list supported frameworks/controls
if scanInfo.ScanAll {
@@ -150,40 +144,53 @@ func (ks *Kubescape) Scan(ctx context.Context, scanInfo *cautils.ScanInfo) (*res
// remove host scanner components
defer func() {
if err := interfaces.hostSensorHandler.TearDown(); err != nil {
logger.L().Ctx(ctxInit).Error("failed to tear down host scanner", helpers.Error(err))
logger.L().Ctx(ctx).Error("failed to tear down host scanner", helpers.Error(err))
}
}()
resultsHandling := resultshandling.NewResultsHandler(interfaces.report, interfaces.outputPrinters, interfaces.uiPrinter)
spanInit.End()
// ===================== policies & resources =====================
ctxPolicies, spanPolicies := otel.Tracer("").Start(ctx, "policies & resources")
policyHandler := policyhandler.NewPolicyHandler(interfaces.resourceHandler)
scanData, err := policyHandler.CollectResources(ctxPolicies, scanInfo.PolicyIdentifier, scanInfo)
// ===================== policies =====================
ctxPolicies, spanPolicies := otel.Tracer("").Start(ctxInit, "policies")
policyHandler := policyhandler.NewPolicyHandler()
scanData, err := policyHandler.CollectPolicies(ctxPolicies, scanInfo.PolicyIdentifier, scanInfo)
if err != nil {
spanInit.End()
return resultsHandling, err
}
spanPolicies.End()
// ===================== resources =====================
ctxResources, spanResources := otel.Tracer("").Start(ctxInit, "resources")
err = resourcehandler.CollectResources(ctxResources, interfaces.resourceHandler, scanInfo.PolicyIdentifier, scanData, cautils.NewProgressHandler(""))
if err != nil {
spanInit.End()
return resultsHandling, err
}
spanResources.End()
spanInit.End()
// ========================= opa testing =====================
ctxOpa, spanOpa := otel.Tracer("").Start(ctx, "opa testing")
defer spanOpa.End()
deps := resources.NewRegoDependenciesData(k8sinterface.GetK8sConfig(), interfaces.tenantConfig.GetContextName())
reportResults := opaprocessor.NewOPAProcessor(scanData, deps)
if err := reportResults.ProcessRulesListenner(ctxOpa, cautils.NewProgressHandler("")); err != nil {
if err := reportResults.ProcessRulesListener(ctxOpa, cautils.NewProgressHandler("")); err != nil {
// TODO - do something
return resultsHandling, fmt.Errorf("%w", err)
}
spanOpa.End()
// ======================== prioritization ===================
_, spanPrioritization := otel.Tracer("").Start(ctx, "prioritization")
if priotizationHandler, err := resourcesprioritization.NewResourcesPrioritizationHandler(ctx, scanInfo.Getters.AttackTracksGetter, scanInfo.PrintAttackTree); err != nil {
logger.L().Ctx(ctx).Warning("failed to get attack tracks, this may affect the scanning results", helpers.Error(err))
} else if err := priotizationHandler.PrioritizeResources(scanData); err != nil {
return resultsHandling, fmt.Errorf("%w", err)
if scanInfo.PrintAttackTree {
_, spanPrioritization := otel.Tracer("").Start(ctxOpa, "prioritization")
if priotizationHandler, err := resourcesprioritization.NewResourcesPrioritizationHandler(ctxOpa, scanInfo.Getters.AttackTracksGetter, scanInfo.PrintAttackTree); err != nil {
logger.L().Ctx(ctx).Warning("failed to get attack tracks, this may affect the scanning results", helpers.Error(err))
} else if err := priotizationHandler.PrioritizeResources(scanData); err != nil {
return resultsHandling, fmt.Errorf("%w", err)
}
spanPrioritization.End()
}
spanPrioritization.End()
// ========================= results handling =====================
resultsHandling.SetData(scanData)


@@ -43,7 +43,7 @@ func (ks *Kubescape) SubmitExceptions(ctx context.Context, credentials *cautils.
// load cached config
tenantConfig := getTenantConfig(credentials, "", "", getKubernetesApi())
if err := tenantConfig.SetTenant(); err != nil {
logger.L().Ctx(ctx).Error("failed setting account ID", helpers.Error(err))
logger.L().Ctx(ctx).Warning("failed setting account ID", helpers.Error(err))
}
// load exceptions from file

core/metrics/metrics.go

@@ -0,0 +1,59 @@
package metrics
import (
"context"
"fmt"
"sync"
"github.com/kubescape/go-logger"
"github.com/kubescape/go-logger/helpers"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/metric"
)
const (
METER_NAME = "github.com/kubescape/kubescape/v2"
METRIC_NAME_PREFIX = "kubescape"
)
var initOnce sync.Once
// Metrics are defined here
var (
kubernetesResourcesCount metric.Int64UpDownCounter
workerNodesCount metric.Int64UpDownCounter
)
// Init initializes the metrics
func Init() {
initOnce.Do(func() {
var err error
meterProvider := otel.GetMeterProvider()
meter := meterProvider.Meter(METER_NAME)
metricName := func(name string) string {
return fmt.Sprintf("%s_%s", METRIC_NAME_PREFIX, name)
}
if kubernetesResourcesCount, err = meter.Int64UpDownCounter(metricName("kubernetes_resources_count")); err != nil {
logger.L().Error("failed to register instrument", helpers.Error(err))
}
if workerNodesCount, err = meter.Int64UpDownCounter(metricName("worker_nodes_count")); err != nil {
logger.L().Error("failed to register instrument", helpers.Error(err))
}
})
}
// UpdateKubernetesResourcesCount updates the kubernetes resources count metric
func UpdateKubernetesResourcesCount(ctx context.Context, value int64) {
if kubernetesResourcesCount != nil {
kubernetesResourcesCount.Add(ctx, value)
}
}
// UpdateWorkerNodesCount updates the worker nodes count metric
func UpdateWorkerNodesCount(ctx context.Context, value int64) {
if workerNodesCount != nil {
workerNodesCount.Add(ctx, value)
}
}
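A minimal usage sketch from a hypothetical caller, assuming an OpenTelemetry meter provider has been registered elsewhere (e.g. via otel.SetMeterProvider at startup); the function name and counts below are illustrative:

func reportClusterSize(ctx context.Context, resourceCount, nodeCount int64) {
	metrics.Init() // idempotent, guarded by sync.Once
	metrics.UpdateKubernetesResourcesCount(ctx, resourceCount)
	metrics.UpdateWorkerNodesCount(ctx, nodeCount)
}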


@@ -20,7 +20,7 @@ type FixHandler struct {
// ResourceFixInfo holds the information about the resource that needs to be fixed
type ResourceFixInfo struct {
YamlExpressions map[string]*armotypes.FixPath
YamlExpressions map[string]armotypes.FixPath
Resource *reporthandling.Resource
FilePath string
DocumentIndex int
@@ -58,7 +58,7 @@ func withNewline(content, targetNewline string) string {
replaceNewlines := map[string]bool{
unixNewline: true,
windowsNewline: true,
oldMacNewline: true,
oldMacNewline: true,
}
replaceNewlines[targetNewline] = false


@@ -4,7 +4,7 @@ import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"io"
"os"
"path"
"path/filepath"
@@ -36,7 +36,7 @@ func NewFixHandler(fixInfo *metav1.FixInfo) (*FixHandler, error) {
return nil, err
}
defer jsonFile.Close()
byteValue, _ := ioutil.ReadAll(jsonFile)
byteValue, _ := io.ReadAll(jsonFile)
var reportObj reporthandlingv2.PostureReport
if err = json.Unmarshal(byteValue, &reportObj); err != nil {
@@ -142,20 +142,20 @@ func (h *FixHandler) PrepareResourcesToFix(ctx context.Context) []ResourceFixInf
relativePath, documentIndex, err := h.getFilePathAndIndex(resourcePath)
if err != nil {
logger.L().Ctx(ctx).Error("Skipping invalid resource path: " + resourcePath)
logger.L().Ctx(ctx).Warning("Skipping invalid resource path: " + resourcePath)
continue
}
absolutePath := path.Join(h.localBasePath, relativePath)
if _, err := os.Stat(absolutePath); err != nil {
logger.L().Ctx(ctx).Error("Skipping missing file: " + absolutePath)
logger.L().Ctx(ctx).Warning("Skipping missing file: " + absolutePath)
continue
}
rfi := ResourceFixInfo{
FilePath: absolutePath,
Resource: resourceObj,
YamlExpressions: make(map[string]*armotypes.FixPath, 0),
YamlExpressions: make(map[string]armotypes.FixPath, 0),
DocumentIndex: documentIndex,
}
@@ -185,7 +185,7 @@ func (h *FixHandler) PrintExpectedChanges(resourcesToFix []ResourceFixInfo) {
i := 1
for _, fixPath := range resourceFixInfo.YamlExpressions {
sb.WriteString(fmt.Sprintf("\t%d) %s = %s\n", i, (*fixPath).Path, (*fixPath).Value))
sb.WriteString(fmt.Sprintf("\t%d) %s = %s\n", i, fixPath.Path, fixPath.Value))
i++
}
sb.WriteString("\n------\n")
@@ -201,14 +201,14 @@ func (h *FixHandler) ApplyChanges(ctx context.Context, resourcesToFix []Resource
fileYamlExpressions := h.getFileYamlExpressions(resourcesToFix)
for filepath, yamlExpression := range fileYamlExpressions {
fileAsString, err := getFileString(filepath)
fileAsString, err := GetFileString(filepath)
if err != nil {
errors = append(errors, err)
continue
}
fixedYamlString, err := h.ApplyFixToContent(ctx, fileAsString, yamlExpression)
fixedYamlString, err := ApplyFixToContent(ctx, fileAsString, yamlExpression)
if err != nil {
errors = append(errors, fmt.Errorf("Failed to fix file %s: %w ", filepath, err))
@@ -220,7 +220,7 @@ func (h *FixHandler) ApplyChanges(ctx context.Context, resourcesToFix []Resource
err = writeFixesToFile(filepath, fixedYamlString)
if err != nil {
logger.L().Ctx(ctx).Error(fmt.Sprintf("Failed to write fixes to file %s, %v", filepath, err.Error()))
logger.L().Ctx(ctx).Warning(fmt.Sprintf("Failed to write fixes to file %s, %v", filepath, err.Error()))
errors = append(errors, err)
}
}
@@ -242,7 +242,7 @@ func (h *FixHandler) getFilePathAndIndex(filePathWithIndex string) (filePath str
}
}
func (h *FixHandler) ApplyFixToContent(ctx context.Context, yamlAsString, yamlExpression string) (fixedString string, err error) {
func ApplyFixToContent(ctx context.Context, yamlAsString, yamlExpression string) (fixedString string, err error) {
newline := determineNewlineSeparator(yamlAsString)
yamlLines := strings.Split(yamlAsString, newline)
@@ -259,9 +259,9 @@ func (h *FixHandler) ApplyFixToContent(ctx context.Context, yamlAsString, yamlEx
return "", err
}
fileFixInfo := getFixInfo(ctx, originalRootNodes, fixedRootNodes)
fixInfo := getFixInfo(ctx, originalRootNodes, fixedRootNodes)
fixedYamlLines := getFixedYamlLines(yamlLines, fileFixInfo, newline)
fixedYamlLines := getFixedYamlLines(yamlLines, fixInfo, newline)
fixedString = getStringFromSlice(fixedYamlLines, newline)
@@ -270,7 +270,9 @@ func (h *FixHandler) ApplyFixToContent(ctx context.Context, yamlAsString, yamlEx
func (h *FixHandler) getFileYamlExpressions(resourcesToFix []ResourceFixInfo) map[string]string {
fileYamlExpressions := make(map[string]string, 0)
for _, resourceToFix := range resourcesToFix {
for _, toPin := range resourcesToFix {
resourceToFix := toPin
singleExpression := reduceYamlExpressions(&resourceToFix)
resourceFilePath := resourceToFix.FilePath
@@ -299,8 +301,8 @@ func (rfi *ResourceFixInfo) addYamlExpressionsFromResourceAssociatedControl(docu
continue
}
yamlExpression := fixPathToValidYamlExpression(rulePaths.FixPath.Path, rulePaths.FixPath.Value, documentIndex)
rfi.YamlExpressions[yamlExpression] = &rulePaths.FixPath
yamlExpression := FixPathToValidYamlExpression(rulePaths.FixPath.Path, rulePaths.FixPath.Value, documentIndex)
rfi.YamlExpressions[yamlExpression] = rulePaths.FixPath
}
}
}
@@ -315,7 +317,7 @@ func reduceYamlExpressions(resource *ResourceFixInfo) string {
return strings.Join(expressions, " | ")
}
func fixPathToValidYamlExpression(fixPath, value string, documentIndexInYaml int) string {
func FixPathToValidYamlExpression(fixPath, value string, documentIndexInYaml int) string {
isStringValue := true
if _, err := strconv.ParseBool(value); err == nil {
isStringValue = false
@@ -338,8 +340,8 @@ func joinStrings(inputStrings ...string) string {
return strings.Join(inputStrings, "")
}
func getFileString(filepath string) (string, error) {
bytes, err := ioutil.ReadFile(filepath)
func GetFileString(filepath string) (string, error) {
bytes, err := os.ReadFile(filepath)
if err != nil {
return "", fmt.Errorf("Error reading file %s", filepath)
@@ -349,7 +351,7 @@ func getFileString(filepath string) (string, error) {
}
func writeFixesToFile(filepath, content string) error {
err := ioutil.WriteFile(filepath, []byte(content), 0644)
err := os.WriteFile(filepath, []byte(content), 0644) //nolint:gosec
if err != nil {
return fmt.Errorf("Error writing fixes to file: %w", err)


@@ -8,6 +8,7 @@ import (
logger "github.com/kubescape/go-logger"
metav1 "github.com/kubescape/kubescape/v2/core/meta/datastructures/v1"
"github.com/kubescape/kubescape/v2/internal/testutils"
reporthandlingv2 "github.com/kubescape/opa-utils/reporthandling/v2"
"github.com/mikefarah/yq/v4/pkg/yqlib"
"github.com/stretchr/testify/assert"
@@ -32,11 +33,6 @@ func NewFixHandlerMock() (*FixHandler, error) {
}, nil
}
func getTestdataPath() string {
currentDir, _ := os.Getwd()
return filepath.Join(currentDir, "testdata")
}
func getTestCases() []indentationTestCase {
indentationTestCases := []indentationTestCase{
// Insertion Scenarios
@@ -123,7 +119,7 @@ func getTestCases() []indentationTestCase {
},
{
"removes/tc-04-00-input.yaml",
`del(select(di==0).spec.containers[0].securityContext) |
`del(select(di==0).spec.containers[0].securityContext) |
del(select(di==1).spec.containers[1])`,
"removes/tc-04-01-expected.yaml",
},
@@ -177,9 +173,8 @@ func TestApplyFixKeepsFormatting(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.inputFile, func(t *testing.T) {
getTestDataPath := func(filename string) string {
currentDir, _ := os.Getwd()
currentFile := "testdata/" + filename
return filepath.Join(currentDir, currentFile)
return filepath.Join(testutils.CurrentDir(), currentFile)
}
input, _ := os.ReadFile(getTestDataPath(tc.inputFile))
@@ -187,9 +182,7 @@ func TestApplyFixKeepsFormatting(t *testing.T) {
want := string(wantRaw)
expression := tc.yamlExpression
h, _ := NewFixHandlerMock()
got, _ := h.ApplyFixToContent(context.TODO(), string(input), expression)
got, _ := ApplyFixToContent(context.TODO(), string(input), expression)
assert.Equalf(
t, want, got,
@@ -246,7 +239,7 @@ func Test_fixPathToValidYamlExpression(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := fixPathToValidYamlExpression(tt.args.fixPath, tt.args.value, tt.args.documentIndexInYaml); got != tt.want {
if got := FixPathToValidYamlExpression(tt.args.fixPath, tt.args.value, tt.args.documentIndexInYaml); got != tt.want {
t.Errorf("fixPathToValidYamlExpression() = %v, want %v", got, tt.want)
}
})


@@ -74,9 +74,6 @@ func adjustFixedListLines(originalList, fixedList *[]nodeInfo) {
node.node.Line += differenceAtTop
}
}
return
}
func enocodeIntoYaml(parentNode *yaml.Node, nodeList *[]nodeInfo, tracker int) (string, error) {
@@ -309,7 +306,7 @@ func readDocuments(ctx context.Context, reader io.Reader, decoder yqlib.Decoder)
func safelyCloseFile(ctx context.Context, file *os.File) {
err := file.Close()
if err != nil {
logger.L().Ctx(ctx).Error("Error Closing File")
logger.L().Ctx(ctx).Warning("Error Closing File")
}
}


@@ -6,13 +6,13 @@ metadata:
k8s-app: kubescape-host-scanner
kubernetes.io/metadata.name: kubescape-host-scanner
tier: kubescape-host-scanner-control-plane
name: kubescape-host-scanner
name: kubescape
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: host-scanner
namespace: kubescape-host-scanner
namespace: kubescape
labels:
app: host-scanner
k8s-app: kubescape-host-scanner
@@ -27,17 +27,12 @@ spec:
name: host-scanner
spec:
tolerations:
# this toleration is to have the DaemonSet runnable on master nodes
# this toleration is to have the DaemonSet runnable on all nodes (including masters)
# remove it if your masters can't run pods
- key: node-role.kubernetes.io/control-plane
operator: Exists
effect: NoSchedule
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
- operator: Exists
containers:
- name: host-sensor
image: quay.io/kubescape/host-scanner:v1.0.45
image: quay.io/kubescape/host-scanner:v1.0.61
securityContext:
allowPrivilegeEscalation: true
privileged: true
@@ -45,7 +40,6 @@ spec:
procMount: Unmasked
ports:
- name: scanner # Do not change port name
hostPort: 7888
containerPort: 7888
protocol: TCP
resources:
@@ -58,12 +52,17 @@ spec:
volumeMounts:
- mountPath: /host_fs
name: host-filesystem
readinessProbe:
startupProbe:
httpGet:
path: /kernelVersion
path: /readyz
port: 7888
initialDelaySeconds: 1
periodSeconds: 1
failureThreshold: 30
periodSeconds: 1
livenessProbe:
httpGet:
path: /healthz
port: 7888
periodSeconds: 10
terminationGracePeriodSeconds: 120
dnsPolicy: ClusterFirstWithHostNet
automountServiceAccountToken: false
@@ -72,6 +71,5 @@ spec:
path: /
type: Directory
name: host-filesystem
hostNetwork: true
hostPID: true
hostIPC: true


@@ -0,0 +1,23 @@
package hostsensorutils
import (
"context"
"testing"
"github.com/stretchr/testify/require"
)
func TestHostSensorHandlerMock(t *testing.T) {
ctx := context.Background()
h := &HostSensorHandlerMock{}
require.NoError(t, h.Init(ctx))
envelope, status, err := h.CollectResources(ctx)
require.Empty(t, envelope)
require.Nil(t, status)
require.NoError(t, err)
require.Empty(t, h.GetNamespace())
require.NoError(t, h.TearDown())
}


@@ -3,7 +3,6 @@ package hostsensorutils
import (
"context"
_ "embed"
"encoding/json"
"fmt"
"os"
"sync"
@@ -18,6 +17,7 @@ import (
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/watch"
)
@@ -27,40 +27,45 @@ var (
namespaceWasPresent bool
)
const PortName string = "scanner"
const portName string = "scanner"
// HostSensorHandler is a client that interacts with a host-scanner component deployed on nodes.
//
// The API exposed by the host sensor is defined here: https://github.com/kubescape/host-scanner
type HostSensorHandler struct {
HostSensorPort int32
HostSensorPodNames map[string]string //map from pod names to node names
HostSensorUnscheduledPodNames map[string]string //map from pod names to node names
IsReady <-chan bool //readonly chan
hostSensorPort int32
hostSensorPodNames map[string]string //map from pod names to node names
hostSensorUnscheduledPodNames map[string]string //map from pod names to node names
k8sObj *k8sinterface.KubernetesApi
DaemonSet *appsv1.DaemonSet
daemonSet *appsv1.DaemonSet
podListLock sync.RWMutex
gracePeriod int64
workerPool workerPool
}
// NewHostSensorHandler builds a new http client to the host-scanner API.
func NewHostSensorHandler(k8sObj *k8sinterface.KubernetesApi, hostSensorYAMLFile string) (*HostSensorHandler, error) {
if k8sObj == nil {
return nil, fmt.Errorf("nil k8s interface received")
}
if hostSensorYAMLFile != "" {
d, err := loadHostSensorFromFile(hostSensorYAMLFile)
if err != nil {
return nil, fmt.Errorf("failed to load host-scan yaml file, reason: %s", err.Error())
return nil, fmt.Errorf("failed to load host-scanner yaml file, reason: %w", err)
}
hostSensorYAML = d
}
hsh := &HostSensorHandler{
k8sObj: k8sObj,
HostSensorPodNames: map[string]string{},
HostSensorUnscheduledPodNames: map[string]string{},
hostSensorPodNames: map[string]string{},
hostSensorUnscheduledPodNames: map[string]string{},
gracePeriod: int64(15),
workerPool: NewWorkerPool(),
workerPool: newWorkerPool(),
}
// Don't deploy on cluster with no nodes. Some cloud providers prevents termination of K8s objects for cluster with no nodes!!!
// Don't deploy on a cluster with no nodes. Some cloud providers prevent the termination of K8s objects for clusters with no nodes!
if nodeList, err := k8sObj.KubernetesClient.CoreV1().Nodes().List(k8sObj.Context, metav1.ListOptions{}); err != nil || len(nodeList.Items) == 0 {
if err == nil {
err = fmt.Errorf("no nodes to scan")
@@ -71,25 +76,30 @@ func NewHostSensorHandler(k8sObj *k8sinterface.KubernetesApi, hostSensorYAMLFile
return hsh, nil
}
// Init deploys the host-scanner and starts watching the pods on the host.
func (hsh *HostSensorHandler) Init(ctx context.Context) error {
// deploy the YAML
// store namespace + port
// store pod names
// make sure all pods are running; after X seconds, treat them as running anyway and log an error for the pods that are not running yet
logger.L().Info("Installing host scanner")
logger.L().Debug("The host scanner is a DaemonSet that runs on each node in the cluster. The DaemonSet will be running in it's own namespace and will be deleted once the scan is completed. If you do not wish to install the host scanner, please run the scan without the --enable-host-scan flag.")
// log is used to avoid log duplication
// coming from the different host-scanner instances
log := NewLogCoupling()
cautils.StartSpinner()
defer cautils.StopSpinner()
if err := hsh.applyYAML(ctx); err != nil {
cautils.StopSpinner()
return fmt.Errorf("failed to apply host scanner YAML, reason: %v", err)
}
hsh.populatePodNamesToNodeNames(ctx)
hsh.populatePodNamesToNodeNames(ctx, log)
if err := hsh.checkPodForEachNode(); err != nil {
logger.L().Ctx(ctx).Error("failed to validate host-sensor pods status", helpers.Error(err))
logger.L().Ctx(ctx).Warning(failedToValidateHostSensorPodStatus, helpers.Error(err))
}
cautils.StopSpinner()
return nil
}
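
For orientation, here is a minimal sketch of the intended lifecycle of this handler, as suggested by the diff above. The import path and the k8sinterface constructor are assumptions, not taken from this diff:

package main

import (
	"context"
	"log"

	"github.com/kubescape/k8s-interface/k8sinterface"
	"github.com/kubescape/kubescape/v2/core/pkg/hostsensorutils" // assumed import path
)

func main() {
	ctx := context.Background()
	k8s := k8sinterface.NewKubernetesApi() // assumes a reachable cluster via the usual kubeconfig resolution

	// an empty YAML path falls back to the embedded host-scanner manifest
	h, err := hostsensorutils.NewHostSensorHandler(k8s, "")
	if err != nil {
		log.Fatal(err)
	}
	// Init deploys the DaemonSet and starts watching its pods
	if err := h.Init(ctx); err != nil {
		log.Fatal(err)
	}
	defer func() {
		// TearDown deletes the DaemonSet and, if Kubescape created it, the namespace
		if err := h.TearDown(); err != nil {
			log.Println(err)
		}
	}()

	envelopes, status, err := h.CollectResources(ctx)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("collected %d envelopes, %d resource kinds skipped", len(envelopes), len(status))
}
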
@@ -104,10 +114,12 @@ func (hsh *HostSensorHandler) checkNamespaceWasPresent(namespace string) bool {
if err != nil {
return false
}
// check also if it is in "Active" state.
if ns.Status.Phase != corev1.NamespaceActive {
return false
}
return true
}
@@ -123,7 +135,7 @@ func (hsh *HostSensorHandler) applyYAML(ctx context.Context) error {
}
// Get namespace name
namespaceName := ""
namespaceName := cautils.GetConfigMapNamespace()
for i := range workloads {
if workloads[i].GetKind() == "Namespace" {
namespaceName = workloads[i].GetName()
@@ -141,6 +153,7 @@ func (hsh *HostSensorHandler) applyYAML(ctx context.Context) error {
}
// set namespace in all objects
if w.GetKind() != "Namespace" {
logger.L().Debug("Setting namespace", helpers.String("kind", w.GetKind()), helpers.String("name", w.GetName()), helpers.String("namespace", namespaceName))
w.SetNamespace(namespaceName)
}
// Get container port
@@ -148,18 +161,17 @@ func (hsh *HostSensorHandler) applyYAML(ctx context.Context) error {
containers, err := w.GetContainers()
if err != nil {
if erra := hsh.tearDownNamespace(namespaceName); erra != nil {
logger.L().Ctx(ctx).Warning("failed to tear down namespace", helpers.Error(erra))
logger.L().Ctx(ctx).Warning(failedToTeardownNamespace, helpers.Error(erra))
}
return fmt.Errorf("container not found in DaemonSet: %v", err)
}
for j := range containers {
for k := range containers[j].Ports {
if containers[j].Ports[k].Name == PortName {
hsh.HostSensorPort = containers[j].Ports[k].ContainerPort
if containers[j].Ports[k].Name == portName {
hsh.hostSensorPort = containers[j].Ports[k].ContainerPort
}
}
}
}
// Apply workload
@@ -173,7 +185,7 @@ func (hsh *HostSensorHandler) applyYAML(ctx context.Context) error {
}
if e != nil {
if erra := hsh.tearDownNamespace(namespaceName); erra != nil {
logger.L().Ctx(ctx).Warning("failed to tear down namespace", helpers.Error(erra))
logger.L().Ctx(ctx).Warning(failedToTeardownNamespace, helpers.Error(erra))
}
return fmt.Errorf("failed to create/update YAML, reason: %v", e)
}
@@ -183,20 +195,21 @@ func (hsh *HostSensorHandler) applyYAML(ctx context.Context) error {
b, err := json.Marshal(newWorkload.GetObject())
if err != nil {
if erra := hsh.tearDownNamespace(namespaceName); erra != nil {
logger.L().Ctx(ctx).Warning("failed to tear down namespace", helpers.Error(erra))
logger.L().Ctx(ctx).Warning(failedToTeardownNamespace, helpers.Error(erra))
}
return fmt.Errorf("failed to Marshal YAML of DaemonSet, reason: %v", err)
}
var ds appsv1.DaemonSet
if err := json.Unmarshal(b, &ds); err != nil {
if erra := hsh.tearDownNamespace(namespaceName); erra != nil {
logger.L().Ctx(ctx).Warning("failed to tear down namespace", helpers.Error(erra))
logger.L().Ctx(ctx).Warning(failedToTeardownNamespace, helpers.Error(erra))
}
return fmt.Errorf("failed to Unmarshal YAML of DaemonSet, reason: %v", err)
}
hsh.DaemonSet = &ds
hsh.daemonSet = &ds
}
}
return nil
}
@@ -207,52 +220,56 @@ func (hsh *HostSensorHandler) checkPodForEachNode() error {
if err != nil {
return fmt.Errorf("in checkPodsForEveryNode, failed to get nodes list: %v", nodesList)
}
hsh.podListLock.RLock()
podsNum := len(hsh.HostSensorPodNames)
unschedPodNum := len(hsh.HostSensorUnscheduledPodNames)
podsNum := len(hsh.hostSensorPodNames)
unschedPodNum := len(hsh.hostSensorUnscheduledPodNames)
hsh.podListLock.RUnlock()
if len(nodesList.Items) <= podsNum+unschedPodNum {
break
}
if time.Now().After(deadline) {
hsh.podListLock.RLock()
podsMap := hsh.HostSensorPodNames
podsMap := hsh.hostSensorPodNames
hsh.podListLock.RUnlock()
return fmt.Errorf("host-sensor pods number (%d) differ than nodes number (%d) after deadline exceeded. Kubescape will take data only from the pods below: %v",
return fmt.Errorf("host-scanner pods number (%d) differ than nodes number (%d) after deadline exceeded. Kubescape will take data only from the pods below: %v",
podsNum, len(nodesList.Items), podsMap)
}
time.Sleep(100 * time.Millisecond)
}
return nil
}
// populatePodNamesToNodeNames starts a routine that keeps the pod list updated
func (hsh *HostSensorHandler) populatePodNamesToNodeNames(ctx context.Context) {
func (hsh *HostSensorHandler) populatePodNamesToNodeNames(ctx context.Context, log *LogsMap) {
go func() {
var watchRes watch.Interface
var err error
watchRes, err = hsh.k8sObj.KubernetesClient.CoreV1().Pods(hsh.DaemonSet.Namespace).Watch(hsh.k8sObj.Context, metav1.ListOptions{
watchRes, err = hsh.k8sObj.KubernetesClient.CoreV1().Pods(hsh.daemonSet.Namespace).Watch(hsh.k8sObj.Context, metav1.ListOptions{
Watch: true,
LabelSelector: fmt.Sprintf("name=%s", hsh.DaemonSet.Spec.Template.Labels["name"]),
LabelSelector: fmt.Sprintf("name=%s", hsh.daemonSet.Spec.Template.Labels["name"]),
})
if err != nil {
logger.L().Ctx(ctx).Error("failed to watch over daemonset pods - are we missing watch pods permissions?", helpers.Error(err))
logger.L().Ctx(ctx).Warning(failedToWatchOverDaemonSetPods, helpers.Error(err))
}
if watchRes == nil {
logger.L().Ctx(ctx).Error("failed to watch over DaemonSet pods, will not be able to get host-scanner data")
return
}
for eve := range watchRes.ResultChan() {
pod, ok := eve.Object.(*corev1.Pod)
if !ok {
continue
}
go hsh.updatePodInListAtomic(ctx, eve.Type, pod)
go hsh.updatePodInListAtomic(ctx, eve.Type, pod, log)
}
}()
}
func (hsh *HostSensorHandler) updatePodInListAtomic(ctx context.Context, eventType watch.EventType, podObj *corev1.Pod) {
func (hsh *HostSensorHandler) updatePodInListAtomic(ctx context.Context, eventType watch.EventType, podObj *corev1.Pod, log *LogsMap) {
hsh.podListLock.Lock()
defer hsh.podListLock.Unlock()
@@ -260,8 +277,8 @@ func (hsh *HostSensorHandler) updatePodInListAtomic(ctx context.Context, eventTy
case watch.Added, watch.Modified:
if podObj.Status.Phase == corev1.PodRunning && len(podObj.Status.ContainerStatuses) > 0 &&
podObj.Status.ContainerStatuses[0].Ready {
hsh.HostSensorPodNames[podObj.ObjectMeta.Name] = podObj.Spec.NodeName
delete(hsh.HostSensorUnscheduledPodNames, podObj.ObjectMeta.Name)
hsh.hostSensorPodNames[podObj.ObjectMeta.Name] = podObj.Spec.NodeName
delete(hsh.hostSensorUnscheduledPodNames, podObj.ObjectMeta.Name)
} else {
if podObj.Status.Phase == corev1.PodPending && len(podObj.Status.Conditions) > 0 &&
podObj.Status.Conditions[0].Reason == corev1.PodReasonUnschedulable {
@@ -273,54 +290,125 @@ func (hsh *HostSensorHandler) updatePodInListAtomic(ctx context.Context, eventTy
len(podObj.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchFields[0].Values) > 0 {
nodeName = podObj.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchFields[0].Values[0]
}
logger.L().Ctx(ctx).Warning("One host-sensor pod is unable to schedule on node. We will fail to collect the data from this node",
helpers.String("message", podObj.Status.Conditions[0].Message),
helpers.String("nodeName", nodeName),
helpers.String("podName", podObj.ObjectMeta.Name))
if !log.isDuplicated(oneHostSensorPodIsUnabledToSchedule) {
logger.L().Ctx(ctx).Warning(oneHostSensorPodIsUnabledToSchedule,
helpers.String("message", podObj.Status.Conditions[0].Message))
log.update(oneHostSensorPodIsUnabledToSchedule)
}
if nodeName != "" {
hsh.HostSensorUnscheduledPodNames[podObj.ObjectMeta.Name] = nodeName
hsh.hostSensorUnscheduledPodNames[podObj.ObjectMeta.Name] = nodeName
}
} else {
delete(hsh.HostSensorPodNames, podObj.ObjectMeta.Name)
delete(hsh.hostSensorPodNames, podObj.ObjectMeta.Name)
}
}
default:
delete(hsh.HostSensorPodNames, podObj.ObjectMeta.Name)
delete(hsh.hostSensorPodNames, podObj.ObjectMeta.Name)
}
}
// tearDownHostScanner manages the host-scanner deletion.
func (hsh *HostSensorHandler) tearDownHostScanner(namespace string) error {
client := hsh.k8sObj.KubernetesClient
// delete host-scanner DaemonSet
err := client.AppsV1().
DaemonSets(namespace).
Delete(
hsh.k8sObj.Context,
hsh.daemonSet.Name,
metav1.DeleteOptions{
GracePeriodSeconds: &hsh.gracePeriod,
},
)
if err != nil {
return fmt.Errorf("failed to delete host-scanner DaemonSet: %v", err)
}
// wait for DaemonSet to be deleted
err = hsh.waitHostScannerDeleted(hsh.k8sObj.Context)
if err != nil {
return fmt.Errorf("failed to delete host-scanner DaemonSet: %v", err)
}
return nil
}
// tearDownNamespace manages the deletion of the given namespace.
// First, it checks whether the namespace was already present before the host-scanner was installed;
// in that case, it skips the deletion.
// Otherwise, it patches the namespace to remove its finalizers,
// and finally deletes it.
func (hsh *HostSensorHandler) tearDownNamespace(namespace string) error {
// if namespace was already present on kubernetes (before installing host-scanner),
// then we shouldn't delete it.
if hsh.namespaceWasPresent() {
return nil
}
if err := hsh.k8sObj.KubernetesClient.CoreV1().Namespaces().Delete(hsh.k8sObj.Context, namespace, metav1.DeleteOptions{GracePeriodSeconds: &hsh.gracePeriod}); err != nil {
return fmt.Errorf("failed to delete host-sensor namespace: %v", err)
// store the client in a variable for readability
client := hsh.k8sObj.KubernetesClient
// prepare patch json to remove finalizers from namespace
patchData := `
[
{
"op": "replace",
"path": "/metadata/finalizers",
"value": []
}
]
`
// patch namespace object removing finalizers
_, err := client.CoreV1().
Namespaces().
Patch(
hsh.k8sObj.Context,
namespace,
types.JSONPatchType,
[]byte(patchData),
metav1.PatchOptions{},
)
if err != nil {
return fmt.Errorf("failed to remove finalizers from Namespace: %v", err)
}
// delete namespace object
err = client.CoreV1().
Namespaces().
Delete(
hsh.k8sObj.Context,
namespace,
metav1.DeleteOptions{
GracePeriodSeconds: &hsh.gracePeriod,
},
)
if err != nil {
return fmt.Errorf("failed to delete %s Namespace: %v", namespace, err)
}
return nil
}
func (hsh *HostSensorHandler) TearDown() error {
namespace := hsh.GetNamespace()
// delete DaemonSet
if err := hsh.k8sObj.KubernetesClient.AppsV1().DaemonSets(hsh.GetNamespace()).Delete(hsh.k8sObj.Context, hsh.DaemonSet.Name, metav1.DeleteOptions{GracePeriodSeconds: &hsh.gracePeriod}); err != nil {
return fmt.Errorf("failed to delete host-sensor daemonset: %v", err)
if err := hsh.tearDownHostScanner(namespace); err != nil {
return fmt.Errorf("failed to delete host-scanner DaemonSet: %v", err)
}
// delete Namespace
if err := hsh.tearDownNamespace(namespace); err != nil {
return fmt.Errorf("failed to delete host-sensor daemonset: %v", err)
return fmt.Errorf("failed to delete host-scanner Namespace: %v", err)
}
// TODO: wait for termination? may take up to 120 seconds!!!
return nil
}
func (hsh *HostSensorHandler) GetNamespace() string {
if hsh.DaemonSet == nil {
if hsh.daemonSet == nil {
return ""
}
return hsh.DaemonSet.Namespace
return hsh.daemonSet.Namespace
}
func loadHostSensorFromFile(hostSensorYAMLFile string) (string, error) {
@@ -331,3 +419,32 @@ func loadHostSensorFromFile(hostSensorYAMLFile string) (string, error) {
// TODO - Add file validation
return string(dat), err
}
// waitHostScannerDeleted watches for the host-scanner pods to be deleted.
// It returns an error if the watch cannot be established.
func (hsh *HostSensorHandler) waitHostScannerDeleted(ctx context.Context) error {
labelSelector := fmt.Sprintf("name=%s", hsh.daemonSet.Name)
opts := metav1.ListOptions{
TypeMeta: metav1.TypeMeta{},
LabelSelector: labelSelector,
FieldSelector: "",
}
watcher, err := hsh.k8sObj.KubernetesClient.CoreV1().
Pods(hsh.daemonSet.Namespace).
Watch(ctx, opts)
if err != nil {
return err
}
defer watcher.Stop()
for {
select {
case event := <-watcher.ResultChan():
if event.Type == watch.Deleted {
return nil
}
case <-ctx.Done():
return nil
}
}
}

View File

@@ -0,0 +1,211 @@
package hostsensorutils
import (
"context"
"os"
"path/filepath"
"testing"
"github.com/kubescape/kubescape/v2/internal/testutils"
"github.com/kubescape/opa-utils/objectsenvelopes/hostsensor"
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func TestHostSensorHandler(t *testing.T) {
t.Parallel()
ctx := context.Background()
t.Run("with default manifest", func(t *testing.T) {
t.Run("should build host sensor", func(t *testing.T) {
k8s := NewKubernetesApiMock(WithNode(mockNode1()), WithPod(mockPod1()), WithPod(mockPod2()), WithResponses(mockResponses()))
h, err := NewHostSensorHandler(k8s, "")
require.NoError(t, err)
require.NotNil(t, h)
t.Run("should initialize host sensor", func(t *testing.T) {
require.NoError(t, h.Init(ctx))
w, err := k8s.KubernetesClient.CoreV1().Pods(h.daemonSet.Namespace).Watch(ctx, metav1.ListOptions{})
require.NoError(t, err)
w.Stop()
require.Len(t, h.hostSensorPodNames, 2)
})
t.Run("should return namespace", func(t *testing.T) {
require.Equal(t, "kubescape", h.GetNamespace())
})
t.Run("should collect resources from pods - happy path", func(t *testing.T) {
envelope, status, err := h.CollectResources(ctx)
require.NoError(t, err)
require.Len(t, envelope, 9*2) // has cloud provider, no control plane requested
require.Len(t, status, 0)
foundControl, foundProvider := false, false
for _, sensed := range envelope {
if sensed.Kind == ControlPlaneInfo.String() {
foundControl = true
}
if sensed.Kind == CloudProviderInfo.String() {
foundProvider = hasCloudProviderInfo([]hostsensor.HostSensorDataEnvelope{sensed})
}
}
require.False(t, foundControl)
require.True(t, foundProvider)
})
})
t.Run("should build host sensor without cloud provider", func(t *testing.T) {
k8s := NewKubernetesApiMock(WithNode(mockNode1()), WithPod(mockPod1()), WithPod(mockPod2()), WithResponses(mockResponsesNoCloudProvider()))
h, err := NewHostSensorHandler(k8s, "")
require.NoError(t, err)
require.NotNil(t, h)
t.Run("should initialize host sensor", func(t *testing.T) {
require.NoError(t, h.Init(ctx))
w, err := k8s.KubernetesClient.CoreV1().Pods(h.daemonSet.Namespace).Watch(ctx, metav1.ListOptions{})
require.NoError(t, err)
w.Stop()
require.Len(t, h.hostSensorPodNames, 2)
})
t.Run("should get version", func(t *testing.T) {
version, err := h.getVersion()
require.NoError(t, err)
require.Equal(t, "v1.0.45", version)
})
t.Run("ForwardToPod is a stub, not implemented", func(t *testing.T) {
resp, err := h.forwardToPod("pod1", "/version")
require.Contains(t, err.Error(), "not implemented")
require.Nil(t, resp)
})
t.Run("should collect resources from pods", func(t *testing.T) {
envelope, status, err := h.CollectResources(ctx)
require.NoError(t, err)
require.Len(t, envelope, 10*2) // has empty cloud provider, has control plane info
require.Len(t, status, 0)
foundControl, foundProvider := false, false
for _, sensed := range envelope {
if sensed.Kind == ControlPlaneInfo.String() {
foundControl = true
}
if sensed.Kind == CloudProviderInfo.String() {
foundProvider = hasCloudProviderInfo([]hostsensor.HostSensorDataEnvelope{sensed})
}
}
require.True(t, foundControl)
require.False(t, foundProvider)
})
})
t.Run("should build host sensor with error in response from /version", func(t *testing.T) {
k8s := NewKubernetesApiMock(WithNode(mockNode1()),
WithPod(mockPod1()),
WithPod(mockPod2()),
WithResponses(mockResponsesNoCloudProvider()),
WithErrorResponse(RestURL{"http", "pod1", "7888", "/version"}), // this endpoint will return an error from this pod
WithErrorResponse(RestURL{"http", "pod2", "7888", "/version"}), // this endpoint will return an error from this pod
)
h, err := NewHostSensorHandler(k8s, "")
require.NoError(t, err)
require.NotNil(t, h)
t.Run("should initialize host sensor", func(t *testing.T) {
require.NoError(t, h.Init(ctx))
w, err := k8s.KubernetesClient.CoreV1().Pods(h.daemonSet.Namespace).Watch(ctx, metav1.ListOptions{})
require.NoError(t, err)
w.Stop()
require.Len(t, h.hostSensorPodNames, 2)
})
t.Run("should NOT be able to get version", func(t *testing.T) {
// NOTE: GetVersion might be successful if only one pod responds successfully.
// In order to ensure an error, we need ALL pods to error.
_, err := h.getVersion()
require.Error(t, err)
require.Contains(t, err.Error(), "mock")
})
})
t.Run("should FAIL to build host sensor because there are no nodes", func(t *testing.T) {
h, err := NewHostSensorHandler(NewKubernetesApiMock(), "")
require.Error(t, err)
require.NotNil(t, h)
require.Contains(t, err.Error(), "no nodes to scan")
})
})
t.Run("should NOT build host sensor with nil k8s API", func(t *testing.T) {
h, err := NewHostSensorHandler(nil, "")
require.Error(t, err)
require.Nil(t, h)
})
t.Run("with manifest from YAML file", func(t *testing.T) {
t.Run("should build host sensor", func(t *testing.T) {
k8s := NewKubernetesApiMock(WithNode(mockNode1()), WithPod(mockPod1()), WithPod(mockPod2()), WithResponses(mockResponses()))
h, err := NewHostSensorHandler(k8s, filepath.Join(testutils.CurrentDir(), "hostsensor.yaml"))
require.NoError(t, err)
require.NotNil(t, h)
t.Run("should initialize host sensor", func(t *testing.T) {
require.NoError(t, h.Init(ctx))
w, err := k8s.KubernetesClient.CoreV1().Pods(h.daemonSet.Namespace).Watch(ctx, metav1.ListOptions{})
require.NoError(t, err)
w.Stop()
require.Len(t, h.hostSensorPodNames, 2)
})
})
})
t.Run("with manifest from invalid YAML file", func(t *testing.T) {
t.Run("should NOT build host sensor", func(t *testing.T) {
var invalid string
t.Run("should create temp file", func(t *testing.T) {
file, err := os.CreateTemp("", "*.yaml")
require.NoError(t, err)
t.Cleanup(func() {
_ = os.Remove(file.Name())
})
_, err = file.Write([]byte(" x: 1"))
require.NoError(t, err)
invalid = file.Name()
require.NoError(t, file.Close())
})
k8s := NewKubernetesApiMock(WithNode(mockNode1()), WithPod(mockPod1()), WithPod(mockPod2()), WithResponses(mockResponses()))
_, err := NewHostSensorHandler(k8s, filepath.Join(testutils.CurrentDir(), invalid))
require.Error(t, err)
})
})
// TODO(test coverage): the following cases are not covered by tests yet.
//
// * applyYAML fails
// * checkPodForEachNode fails, or times out
// * non-active namespace
// * getPodList fails when GetVersion
// * getPodList fails when CollectResources
// * error cases that trigger a namespace tear-down
// * watch pods with a Delete event
// * explicit TearDown()
//
// Notice that the package doesn't currently pass tests with the race detector enabled.
}

View File

@@ -2,60 +2,56 @@ package hostsensorutils
import (
"context"
"encoding/json"
stdjson "encoding/json"
"errors"
"fmt"
"reflect"
"strings"
"sync"
logger "github.com/kubescape/go-logger"
"github.com/kubescape/go-logger/helpers"
"github.com/kubescape/k8s-interface/k8sinterface"
"github.com/kubescape/opa-utils/objectsenvelopes/hostsensor"
"github.com/kubescape/opa-utils/reporthandling/apis"
"sigs.k8s.io/yaml"
)
func (hsh *HostSensorHandler) getPodList() (res map[string]string, err error) {
// getPodList clones the internal map of watched pods (pod name to node name).
func (hsh *HostSensorHandler) getPodList() map[string]string {
hsh.podListLock.RLock()
jsonBytes, err := json.Marshal(hsh.HostSensorPodNames)
res := make(map[string]string, len(hsh.hostSensorPodNames))
for k, v := range hsh.hostSensorPodNames {
res[k] = v
}
hsh.podListLock.RUnlock()
if err != nil {
return res, fmt.Errorf("failed to marshal pod list: %v", err)
}
err = json.Unmarshal(jsonBytes, &res)
if err != nil {
return res, fmt.Errorf("failed to unmarshal pod list: %v", err)
}
return res, nil
return res
}
func (hsh *HostSensorHandler) HTTPGetToPod(podName, path string) ([]byte, error) {
// send the request to the port
restProxy := hsh.k8sObj.KubernetesClient.CoreV1().Pods(hsh.DaemonSet.Namespace).ProxyGet("http", podName, fmt.Sprintf("%d", hsh.HostSensorPort), path, map[string]string{})
// httpGetToPod sends the request to a pod on the configured host-sensor port.
func (hsh *HostSensorHandler) httpGetToPod(podName, path string) ([]byte, error) {
restProxy := hsh.k8sObj.KubernetesClient.CoreV1().Pods(hsh.daemonSet.Namespace).ProxyGet("http", podName, fmt.Sprintf("%d", hsh.hostSensorPort), path, map[string]string{})
return restProxy.DoRaw(hsh.k8sObj.Context)
}
func (hsh *HostSensorHandler) getResourcesFromPod(podName, nodeName, resourceKind, path string) (hostsensor.HostSensorDataEnvelope, error) {
func (hsh *HostSensorHandler) getResourcesFromPod(podName, nodeName string, resourceKind scannerResource, path string) (hostsensor.HostSensorDataEnvelope, error) {
// send the request and pack the response as a hostSensorDataEnvelope
resBytes, err := hsh.HTTPGetToPod(podName, path)
resBytes, err := hsh.httpGetToPod(podName, path)
if err != nil {
return hostsensor.HostSensorDataEnvelope{}, err
}
hostSensorDataEnvelope := hostsensor.HostSensorDataEnvelope{}
hostSensorDataEnvelope.SetApiVersion(k8sinterface.JoinGroupVersion(hostsensor.GroupHostSensor, hostsensor.Version))
hostSensorDataEnvelope.SetKind(resourceKind)
hostSensorDataEnvelope.SetKind(resourceKind.String())
hostSensorDataEnvelope.SetName(nodeName)
hostSensorDataEnvelope.SetData(resBytes)
return hostSensorDataEnvelope, nil
}
func (hsh *HostSensorHandler) ForwardToPod(podName, path string) ([]byte, error) {
// forwardToPod is currently not implemented.
func (hsh *HostSensorHandler) forwardToPod(podName, path string) ([]byte, error) {
// NOT IN USE:
// ---
// spawn port forwarding
@@ -74,300 +70,224 @@ func (hsh *HostSensorHandler) ForwardToPod(podName, path string) ([]byte, error)
// }
// hostIP := strings.TrimLeft(req.RestConfig.Host, "htps:/")
// dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, http.MethodPost, &url.URL{Scheme: "http", Path: path, Host: hostIP})
return nil, nil
return nil, errors.New("not implemented")
}
// sendAllPodsHTTPGETRequest fills the raw byte response in the envelope and the node name, but not the GroupVersionKind
// sendAllPodsHTTPGETRequest fills the raw response bytes and the node name into the envelope, but not the GroupVersionKind
// so the caller is responsible for converting the raw data to some structured data and adding the GroupVersionKind details
//
// The function produces a worker-pool with a fixed number of workers.
//
// For each node, a request is pushed to the jobs channel; a worker sends the request and pushes the result to the results channel.
// When all workers have finished, the function returns the list of results.
func (hsh *HostSensorHandler) sendAllPodsHTTPGETRequest(ctx context.Context, path, requestKind string) ([]hostsensor.HostSensorDataEnvelope, error) {
podList, err := hsh.getPodList()
if err != nil {
return nil, fmt.Errorf("failed to sendAllPodsHTTPGETRequest: %v", err)
}
func (hsh *HostSensorHandler) sendAllPodsHTTPGETRequest(ctx context.Context, path string, requestKind scannerResource) ([]hostsensor.HostSensorDataEnvelope, error) {
podList := hsh.getPodList()
res := make([]hostsensor.HostSensorDataEnvelope, 0, len(podList))
var wg sync.WaitGroup
// initialization of the channels
hsh.workerPool.init(len(podList))
// log is used to avoid log duplication
// coming from the different host-scanner instances
log := NewLogCoupling()
hsh.workerPool.hostSensorApplyJobs(podList, path, requestKind)
hsh.workerPool.hostSensorGetResults(&res)
hsh.workerPool.createWorkerPool(ctx, hsh, &wg)
hsh.workerPool.createWorkerPool(ctx, hsh, &wg, log)
hsh.workerPool.waitForDone(&wg)
return res, nil
}
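
The fan-out described in the comment above is a standard Go worker pool. Stripped of the host-scanner specifics, the pattern looks roughly like this (a generic, self-contained sketch; none of these names come from the diff):

package main

import (
	"fmt"
	"sync"
)

func main() {
	// pod name -> node name, as in getPodList()
	pods := map[string]string{"pod-a": "node-1", "pod-b": "node-2", "pod-c": "node-3"}

	jobs := make(chan string, len(pods))
	results := make(chan string, len(pods))

	const workers = 10 // mirrors noOfWorkers in the worker pool below
	var wg sync.WaitGroup
	for i := 0; i < workers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for pod := range jobs {
				// stand-in for httpGetToPod(pod, path)
				results <- fmt.Sprintf("response from %s", pod)
			}
		}()
	}

	for pod := range pods {
		jobs <- pod
	}
	close(jobs)

	// close results once every worker has drained the jobs channel
	go func() { wg.Wait(); close(results) }()
	for r := range results {
		fmt.Println(r)
	}
}
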
// return host-scanner version
func (hsh *HostSensorHandler) GetVersion() (string, error) {
// getVersion returns the version of the deployed host scanner.
//
// NOTE: we pick the version from the first responding pod.
func (hsh *HostSensorHandler) getVersion() (string, error) {
// loop over pods and port-forward it to each of them
podList, err := hsh.getPodList()
if err != nil {
return "", fmt.Errorf("failed to sendAllPodsHTTPGETRequest: %v", err)
}
podList := hsh.getPodList()
// initialization of the channels
hsh.workerPool.init(len(podList))
hsh.workerPool.hostSensorApplyJobs(podList, "/version", "version")
for job := range hsh.workerPool.jobs {
resBytes, err := hsh.HTTPGetToPod(job.podName, job.path)
resBytes, err := hsh.httpGetToPod(job.podName, job.path)
if err != nil {
return "", err
} else {
version := strings.ReplaceAll(string(resBytes), "\"", "")
version = strings.ReplaceAll(version, "\n", "")
return version, nil
}
}
return "", nil
}
// return list of LinuxKernelVariables
func (hsh *HostSensorHandler) GetKernelVariables(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
// getKernelVariables returns the list of Linux Kernel variables.
func (hsh *HostSensorHandler) getKernelVariables(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
// loop over pods and port-forward it to each of them
return hsh.sendAllPodsHTTPGETRequest(ctx, "/LinuxKernelVariables", LinuxKernelVariables)
}
// return list of OpenPortsList
func (hsh *HostSensorHandler) GetOpenPortsList(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
// getOpenPortsList returns the list of open ports.
func (hsh *HostSensorHandler) getOpenPortsList(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
// loop over pods and port-forward it to each of them
return hsh.sendAllPodsHTTPGETRequest(ctx, "/openedPorts", OpenPortsList)
}
// return list of LinuxSecurityHardeningStatus
func (hsh *HostSensorHandler) GetLinuxSecurityHardeningStatus(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
// getLinuxSecurityHardeningStatus returns the list of LinuxSecurityHardeningStatus metadata.
func (hsh *HostSensorHandler) getLinuxSecurityHardeningStatus(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
// loop over pods and port-forward it to each of them
return hsh.sendAllPodsHTTPGETRequest(ctx, "/linuxSecurityHardening", LinuxSecurityHardeningStatus)
}
// return list of KubeletInfo
func (hsh *HostSensorHandler) GetKubeletInfo(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
// getKubeletInfo returns the list of kubelet metadata.
func (hsh *HostSensorHandler) getKubeletInfo(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
// loop over pods and port-forward it to each of them
return hsh.sendAllPodsHTTPGETRequest(ctx, "/kubeletInfo", KubeletInfo)
}
// return list of kubeProxyInfo
func (hsh *HostSensorHandler) GetKubeProxyInfo(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
// getKubeProxyInfo returns the list of kubeProxy metadata.
func (hsh *HostSensorHandler) getKubeProxyInfo(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
// loop over pods and port-forward it to each of them
return hsh.sendAllPodsHTTPGETRequest(ctx, "/kubeProxyInfo", KubeProxyInfo)
}
// return list of controlPlaneInfo
func (hsh *HostSensorHandler) GetControlPlaneInfo(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
// getControlPlaneInfo returns the list of controlPlaneInfo metadata.
func (hsh *HostSensorHandler) getControlPlaneInfo(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
// loop over pods and port-forward it to each of them
return hsh.sendAllPodsHTTPGETRequest(ctx, "/controlPlaneInfo", ControlPlaneInfo)
}
// return list of cloudProviderInfo
func (hsh *HostSensorHandler) GetCloudProviderInfo(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
// getCloudProviderInfo returns the list of cloudProviderInfo metadata.
func (hsh *HostSensorHandler) getCloudProviderInfo(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
// loop over pods and port-forward it to each of them
return hsh.sendAllPodsHTTPGETRequest(ctx, "/cloudProviderInfo", CloudProviderInfo)
}
// return list of KubeletCommandLine
func (hsh *HostSensorHandler) GetKubeletCommandLine(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
// loop over pods and port-forward it to each of them
resps, err := hsh.sendAllPodsHTTPGETRequest(ctx, "/kubeletCommandLine", KubeletCommandLine)
if err != nil {
return resps, err
}
for resp := range resps {
var data = make(map[string]interface{})
data["fullCommand"] = string(resps[resp].Data)
resBytesMarshal, err := json.Marshal(data)
// TODO catch error
if err == nil {
resps[resp].Data = json.RawMessage(resBytesMarshal)
}
}
return resps, nil
}
// return list of CNIInfo
func (hsh *HostSensorHandler) GetCNIInfo(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
// getCNIInfo returns the list of CNI metadata.
func (hsh *HostSensorHandler) getCNIInfo(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
// loop over pods and port-forward it to each of them
return hsh.sendAllPodsHTTPGETRequest(ctx, "/CNIInfo", CNIInfo)
}
// return list of kernelVersion
func (hsh *HostSensorHandler) GetKernelVersion(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
// getKernelVersion returns the list of kernelVersion metadata.
func (hsh *HostSensorHandler) getKernelVersion(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
// loop over pods and port-forward it to each of them
return hsh.sendAllPodsHTTPGETRequest(ctx, "/kernelVersion", "KernelVersion")
}
// return list of osRelease
func (hsh *HostSensorHandler) GetOsReleaseFile(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
// getOsReleaseFile returns the list of osRelease metadata.
func (hsh *HostSensorHandler) getOsReleaseFile(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
// loop over pods and port-forward it to each of them
return hsh.sendAllPodsHTTPGETRequest(ctx, "/osRelease", "OsReleaseFile")
}
// return list of kubeletConfigurations
func (hsh *HostSensorHandler) GetKubeletConfigurations(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
// loop over pods and port-forward it to each of them
res, err := hsh.sendAllPodsHTTPGETRequest(ctx, "/kubeletConfigurations", "KubeletConfiguration") // empty kind, will be overridden
for resIdx := range res {
jsonBytes, ery := yaml.YAMLToJSON(res[resIdx].Data)
if ery != nil {
logger.L().Ctx(ctx).Error("failed to convert kubelet configurations from yaml to json", helpers.Error(ery))
continue
}
res[resIdx].SetData(jsonBytes)
}
return res, err
}
// hasCloudProviderInfo iterate over the []hostsensor.HostSensorDataEnvelope list to find info about cloud provider.
// If information are found, ther return true. Return false otherwise.
// hasCloudProviderInfo iterates over the []hostsensor.HostSensorDataEnvelope list to find info about the cloud provider.
//
// If such information is found, it returns true; otherwise it returns false.
func hasCloudProviderInfo(cpi []hostsensor.HostSensorDataEnvelope) bool {
for index := range cpi {
if !reflect.DeepEqual(cpi[index].GetData(), json.RawMessage("{}\n")) {
if !reflect.DeepEqual(cpi[index].GetData(), stdjson.RawMessage("{}\n")) {
return true
}
}
return false
}
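
hasCloudProviderInfo relies on the host-scanner returning a literal empty JSON object ("{}\n") when no cloud provider is detected. A quick standalone check of that comparison, assuming the same convention (the populated payload here is hypothetical):

package main

import (
	"encoding/json"
	"fmt"
	"reflect"
)

func main() {
	// what the scanner is expected to return when no provider is detected
	empty := json.RawMessage("{}\n")
	// a hypothetical populated payload, for contrast
	populated := json.RawMessage(`{"providerName":"gke"}`)

	fmt.Println(reflect.DeepEqual(empty, json.RawMessage("{}\n")))     // true  -> no provider info
	fmt.Println(reflect.DeepEqual(populated, json.RawMessage("{}\n"))) // false -> provider info present
}
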
// CollectResources collects all required information about all the pods for this host.
func (hsh *HostSensorHandler) CollectResources(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, map[string]apis.StatusInfo, error) {
res := make([]hostsensor.HostSensorDataEnvelope, 0)
infoMap := make(map[string]apis.StatusInfo)
if hsh.DaemonSet == nil {
if hsh.daemonSet == nil {
return res, nil, nil
}
var kcData []hostsensor.HostSensorDataEnvelope
var err error
logger.L().Debug("Accessing host scanner")
version, err := hsh.GetVersion()
version, err := hsh.getVersion()
if err != nil {
logger.L().Ctx(ctx).Warning(err.Error())
}
if len(version) > 0 {
logger.L().Info("Host scanner version : " + version)
} else {
logger.L().Info("Unknown host scanner version")
}
//
kcData, err = hsh.GetKubeletConfigurations(ctx)
if err != nil {
addInfoToMap(KubeletConfiguration, infoMap, err)
logger.L().Ctx(ctx).Warning(err.Error())
}
if len(kcData) > 0 {
res = append(res, kcData...)
}
//
kcData, err = hsh.GetKubeletCommandLine(ctx)
if err != nil {
addInfoToMap(KubeletCommandLine, infoMap, err)
logger.L().Ctx(ctx).Warning(err.Error())
}
if len(kcData) > 0 {
res = append(res, kcData...)
}
//
kcData, err = hsh.GetOsReleaseFile(ctx)
if err != nil {
addInfoToMap(OsReleaseFile, infoMap, err)
logger.L().Ctx(ctx).Warning(err.Error())
}
if len(kcData) > 0 {
res = append(res, kcData...)
}
//
kcData, err = hsh.GetKernelVersion(ctx)
if err != nil {
addInfoToMap(KernelVersion, infoMap, err)
logger.L().Ctx(ctx).Warning(err.Error())
}
if len(kcData) > 0 {
res = append(res, kcData...)
}
//
kcData, err = hsh.GetLinuxSecurityHardeningStatus(ctx)
if err != nil {
addInfoToMap(LinuxSecurityHardeningStatus, infoMap, err)
logger.L().Ctx(ctx).Warning(err.Error())
}
if len(kcData) > 0 {
res = append(res, kcData...)
}
//
kcData, err = hsh.GetOpenPortsList(ctx)
if err != nil {
addInfoToMap(OpenPortsList, infoMap, err)
logger.L().Ctx(ctx).Warning(err.Error())
}
if len(kcData) > 0 {
res = append(res, kcData...)
}
// GetKernelVariables
kcData, err = hsh.GetKernelVariables(ctx)
if err != nil {
addInfoToMap(LinuxKernelVariables, infoMap, err)
logger.L().Ctx(ctx).Warning(err.Error())
}
if len(kcData) > 0 {
res = append(res, kcData...)
}
// GetKubeletInfo
kcData, err = hsh.GetKubeletInfo(ctx)
if err != nil {
addInfoToMap(KubeletInfo, infoMap, err)
logger.L().Ctx(ctx).Warning(err.Error())
}
if len(kcData) > 0 {
res = append(res, kcData...)
}
var hasCloudProvider bool
for _, toPin := range []struct {
Resource scannerResource
Query func(context.Context) ([]hostsensor.HostSensorDataEnvelope, error)
}{
// queries to the deployed host-scanner
{
Resource: OsReleaseFile,
Query: hsh.getOsReleaseFile,
},
{
Resource: KernelVersion,
Query: hsh.getKernelVersion,
},
{
Resource: LinuxSecurityHardeningStatus,
Query: hsh.getLinuxSecurityHardeningStatus,
},
{
Resource: OpenPortsList,
Query: hsh.getOpenPortsList,
},
{
Resource: LinuxKernelVariables,
Query: hsh.getKernelVariables,
},
{
Resource: KubeletInfo,
Query: hsh.getKubeletInfo,
},
{
Resource: KubeProxyInfo,
Query: hsh.getKubeProxyInfo,
},
{
Resource: CloudProviderInfo,
Query: hsh.getCloudProviderInfo,
},
{
Resource: CNIInfo,
Query: hsh.getCNIInfo,
},
{
// ControlPlaneInfo is queried _after_ CloudProviderInfo.
Resource: ControlPlaneInfo,
Query: hsh.getControlPlaneInfo,
},
} {
k8sInfo := toPin
// GetKubeProxyInfo
kcData, err = hsh.GetKubeProxyInfo(ctx)
if err != nil {
addInfoToMap(KubeProxyInfo, infoMap, err)
logger.L().Ctx(ctx).Warning(err.Error())
}
if len(kcData) > 0 {
res = append(res, kcData...)
}
if k8sInfo.Resource == ControlPlaneInfo && hasCloudProvider {
// we retrieve control plane info only if we are not using a cloud provider
continue
}
// GetCloudProviderInfo
kcData, err = hsh.GetCloudProviderInfo(ctx)
isCloudProvider := hasCloudProviderInfo(kcData)
if err != nil {
addInfoToMap(CloudProviderInfo, infoMap, err)
logger.L().Ctx(ctx).Warning(err.Error())
}
if len(kcData) > 0 {
res = append(res, kcData...)
}
// GetControlPlaneInfo
if !isCloudProvider { // we retrieve control plane info only if we are not using a cloud provider
kcData, err = hsh.GetControlPlaneInfo(ctx)
kcData, err := k8sInfo.Query(ctx)
if err != nil {
addInfoToMap(ControlPlaneInfo, infoMap, err)
addInfoToMap(k8sInfo.Resource, infoMap, err)
logger.L().Ctx(ctx).Warning(err.Error())
}
if k8sInfo.Resource == CloudProviderInfo {
hasCloudProvider = hasCloudProviderInfo(kcData)
}
if len(kcData) > 0 {
res = append(res, kcData...)
}
}
// GetCNIInfo
kcData, err = hsh.GetCNIInfo(ctx)
if err != nil {
addInfoToMap(CNIInfo, infoMap, err)
logger.L().Warning(err.Error())
}
if len(kcData) > 0 {
res = append(res, kcData...)
}
logger.L().Debug("Done reading information from host scanner")
return res, infoMap, nil
}

View File

@@ -7,9 +7,15 @@ import (
"github.com/kubescape/opa-utils/reporthandling/apis"
)
// HostSensorHandlerMock is a noop sensor when the host scanner is disabled.
type HostSensorHandlerMock struct {
}
// NewHostSensorHandlerMock yields a dummy host sensor.
func NewHostSensorHandlerMock() *HostSensorHandlerMock {
return &HostSensorHandlerMock{}
}
func (hshm *HostSensorHandlerMock) Init(_ context.Context) error {
return nil
}

View File

@@ -14,7 +14,7 @@ const noOfWorkers int = 10
type job struct {
podName string
nodeName string
requestKind string
requestKind scannerResource
path string
}
@@ -25,7 +25,7 @@ type workerPool struct {
noOfWorkers int
}
func NewWorkerPool() workerPool {
func newWorkerPool() workerPool {
wp := workerPool{}
wp.noOfWorkers = noOfWorkers
wp.init()
@@ -43,22 +43,23 @@ func (wp *workerPool) init(noOfPods ...int) {
}
// The worker takes a job out of the chan, executes the request, and pushes the result to the results chan
func (wp *workerPool) hostSensorWorker(ctx context.Context, hsh *HostSensorHandler, wg *sync.WaitGroup) {
func (wp *workerPool) hostSensorWorker(ctx context.Context, hsh *HostSensorHandler, wg *sync.WaitGroup, log *LogsMap) {
defer wg.Done()
for job := range wp.jobs {
hostSensorDataEnvelope, err := hsh.getResourcesFromPod(job.podName, job.nodeName, job.requestKind, job.path)
if err != nil {
logger.L().Ctx(ctx).Error("failed to get data", helpers.String("path", job.path), helpers.String("podName", job.podName), helpers.Error(err))
} else {
wp.results <- hostSensorDataEnvelope
if err != nil && !log.isDuplicated(failedToGetData) {
logger.L().Ctx(ctx).Warning(failedToGetData, helpers.String("path", job.path), helpers.Error(err))
log.update(failedToGetData)
continue
}
wp.results <- hostSensorDataEnvelope
}
}
func (wp *workerPool) createWorkerPool(ctx context.Context, hsh *HostSensorHandler, wg *sync.WaitGroup) {
func (wp *workerPool) createWorkerPool(ctx context.Context, hsh *HostSensorHandler, wg *sync.WaitGroup, log *LogsMap) {
for i := 0; i < noOfWorkers; i++ {
wg.Add(1)
go wp.hostSensorWorker(ctx, hsh, wg)
go wp.hostSensorWorker(ctx, hsh, wg, log)
}
}
@@ -80,7 +81,7 @@ func (wp *workerPool) hostSensorGetResults(result *[]hostsensor.HostSensorDataEn
}()
}
func (wp *workerPool) hostSensorApplyJobs(podList map[string]string, path, requestKind string) {
func (wp *workerPool) hostSensorApplyJobs(podList map[string]string, path string, requestKind scannerResource) {
go func() {
for podName, nodeName := range podList {
thisJob := job{

View File

@@ -0,0 +1,15 @@
package hostsensorutils
import (
jsoniter "github.com/json-iterator/go"
)
var (
json jsoniter.API
)
func init() {
// NOTE(fredbi): attention, this configuration rounds floats down to 6 digits
// For finer-grained config, see: https://pkg.go.dev/github.com/json-iterator/go#section-readme
json = jsoniter.ConfigFastest
}
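
The rounding behaviour the NOTE warns about is easy to observe in isolation. A small sketch comparing jsoniter.ConfigFastest (which enables MarshalFloatWith6Digits) with the standard-library-compatible configuration:

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	fast := jsoniter.ConfigFastest                      // MarshalFloatWith6Digits enabled
	std := jsoniter.ConfigCompatibleWithStandardLibrary // full float precision

	v := 3.14159265358979
	a, _ := fast.Marshal(v)
	b, _ := std.Marshal(v)
	fmt.Println(string(a)) // 3.141593 (rounded)
	fmt.Println(string(b)) // 3.14159265358979
}
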

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,51 @@
package hostsensorutils
import "sync"
type LogsMap struct {
// sync.Mutex guards concurrent read/write access to usedLogs.
sync.Mutex
usedLogs map[string]int
}
// NewLogCoupling returns an empty LogsMap, ready to be used.
func NewLogCoupling() *LogsMap {
return &LogsMap{
usedLogs: make(map[string]int),
}
}
// update adds logContent to the internal map,
// setting its occurrence count to 1 if it has never been seen before,
// and incrementing it otherwise.
func (lm *LogsMap) update(logContent string) {
lm.Lock()
_, ok := lm.usedLogs[logContent]
if !ok {
lm.usedLogs[logContent] = 1
} else {
lm.usedLogs[logContent]++
}
lm.Unlock()
}
// isDuplicated checks whether logContent is already present in the internal map.
// It returns true if logContent already exists, false otherwise.
func (lm *LogsMap) isDuplicated(logContent string) bool {
lm.Lock()
_, ok := lm.usedLogs[logContent]
lm.Unlock()
return ok
}
// getOccurrence retrieves the number of times logContent has been recorded.
func (lm *LogsMap) getOccurrence(logContent string) int {
lm.Lock()
occurrence, ok := lm.usedLogs[logContent]
lm.Unlock()
if !ok {
return 0
}
return occurrence
}
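
The intended usage pattern, visible in updatePodInListAtomic and hostSensorWorker above, is check-log-record. A minimal sketch of that pattern, written as an example test since the helpers are unexported (warnOnce is a hypothetical helper, not part of the diff):

package hostsensorutils

import "fmt"

// warnOnce is a hypothetical helper showing the pattern:
// emit a log line only the first time a given message is seen.
func warnOnce(lm *LogsMap, msg string) {
	if lm.isDuplicated(msg) {
		return
	}
	fmt.Println("WARN:", msg)
	lm.update(msg)
}

func ExampleLogsMap() {
	lm := NewLogCoupling()
	for i := 0; i < 3; i++ {
		warnOnce(lm, "failed to get data")
	}
	fmt.Println(lm.getOccurrence("failed to get data"))
	// Output:
	// WARN: failed to get data
	// 1
}
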

View File

@@ -0,0 +1,100 @@
package hostsensorutils
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestLogsMap_Update(t *testing.T) {
t.Parallel()
tests := []struct {
name string
logs []string
expectedLog string
expected int
}{
{
name: "test_1",
logs: []string{
"log_1",
"log_1",
"log_1",
},
expectedLog: "log_1",
expected: 3,
},
{
name: "test_2",
logs: []string{},
expectedLog: "log_2",
expected: 0,
},
{
name: "test_3",
logs: []string{
"log_3",
},
expectedLog: "log_3",
expected: 1,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
lm := NewLogCoupling()
for _, log := range tt.logs {
lm.update(log)
}
if !assert.Equal(t, lm.getOccurrence(tt.expectedLog), tt.expected) {
t.Log("log occurrences are different")
}
})
}
}
func TestLogsMap_IsDuplicated(t *testing.T) {
t.Parallel()
tests := []struct {
name string
logs []string
expectedLog string
expected bool
}{
{
name: "test_1",
logs: []string{
"log_1",
"log_1",
"log_1",
},
expectedLog: "log_1",
expected: true,
},
{
name: "test_2",
logs: []string{
"log_1",
"log_1",
},
expectedLog: "log_2",
expected: false,
},
{
name: "test_3",
logs: []string{},
expectedLog: "log_3",
expected: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
lm := NewLogCoupling()
for _, log := range tt.logs {
lm.update(log)
}
if !assert.Equal(t, lm.isDuplicated(tt.expectedLog), tt.expected) {
t.Log("duplication value differ from expected")
}
})
}
}

View File

@@ -0,0 +1,10 @@
package hostsensorutils
// messages used for warnings
var (
failedToGetData = "failed to get data"
failedToTeardownNamespace = "failed to teardown Namespace"
oneHostSensorPodIsUnabledToSchedule = "One host-sensor pod is unable to schedule on a node. We will fail to collect the data from this node"
failedToWatchOverDaemonSetPods = "failed to watch over DaemonSet pods"
failedToValidateHostSensorPodStatus = "failed to validate host-scanner pods status"
)

View File

@@ -5,39 +5,54 @@ import (
"github.com/kubescape/opa-utils/reporthandling/apis"
)
var (
KubeletConfiguration = "KubeletConfiguration"
OsReleaseFile = "OsReleaseFile"
KernelVersion = "KernelVersion"
LinuxSecurityHardeningStatus = "LinuxSecurityHardeningStatus"
OpenPortsList = "OpenPortsList"
LinuxKernelVariables = "LinuxKernelVariables"
KubeletCommandLine = "KubeletCommandLine"
KubeletInfo = "KubeletInfo"
KubeProxyInfo = "KubeProxyInfo"
ControlPlaneInfo = "ControlPlaneInfo"
CloudProviderInfo = "CloudProviderInfo"
CNIInfo = "CNIInfo"
// scannerResource is the enumerated type listing all resources from the host-scanner.
type scannerResource string
MapHostSensorResourceToApiGroup = map[string]string{
KubeletConfiguration: "hostdata.kubescape.cloud/v1beta0",
OsReleaseFile: "hostdata.kubescape.cloud/v1beta0",
KubeletCommandLine: "hostdata.kubescape.cloud/v1beta0",
KernelVersion: "hostdata.kubescape.cloud/v1beta0",
LinuxSecurityHardeningStatus: "hostdata.kubescape.cloud/v1beta0",
OpenPortsList: "hostdata.kubescape.cloud/v1beta0",
LinuxKernelVariables: "hostdata.kubescape.cloud/v1beta0",
KubeletInfo: "hostdata.kubescape.cloud/v1beta0",
KubeProxyInfo: "hostdata.kubescape.cloud/v1beta0",
ControlPlaneInfo: "hostdata.kubescape.cloud/v1beta0",
CloudProviderInfo: "hostdata.kubescape.cloud/v1beta0",
CNIInfo: "hostdata.kubescape.cloud/v1beta0",
}
const (
// host-scanner resources
KubeletConfiguration scannerResource = "KubeletConfiguration"
OsReleaseFile scannerResource = "OsReleaseFile"
KernelVersion scannerResource = "KernelVersion"
LinuxSecurityHardeningStatus scannerResource = "LinuxSecurityHardeningStatus"
OpenPortsList scannerResource = "OpenPortsList"
LinuxKernelVariables scannerResource = "LinuxKernelVariables"
KubeletCommandLine scannerResource = "KubeletCommandLine"
KubeletInfo scannerResource = "KubeletInfo"
KubeProxyInfo scannerResource = "KubeProxyInfo"
ControlPlaneInfo scannerResource = "ControlPlaneInfo"
CloudProviderInfo scannerResource = "CloudProviderInfo"
CNIInfo scannerResource = "CNIInfo"
)
func addInfoToMap(resource string, infoMap map[string]apis.StatusInfo, err error) {
group, version := k8sinterface.SplitApiVersion(MapHostSensorResourceToApiGroup[resource])
r := k8sinterface.JoinResourceTriplets(group, version, resource)
func mapHostSensorResourceToApiGroup(r scannerResource) string {
switch r {
case
KubeletConfiguration,
OsReleaseFile,
KubeletCommandLine,
KernelVersion,
LinuxSecurityHardeningStatus,
OpenPortsList,
LinuxKernelVariables,
KubeletInfo,
KubeProxyInfo,
ControlPlaneInfo,
CloudProviderInfo,
CNIInfo:
return "hostdata.kubescape.cloud/v1beta0"
default:
return ""
}
}
func (r scannerResource) String() string {
return string(r)
}
func addInfoToMap(resource scannerResource, infoMap map[string]apis.StatusInfo, err error) {
group, version := k8sinterface.SplitApiVersion(mapHostSensorResourceToApiGroup(resource))
r := k8sinterface.JoinResourceTriplets(group, version, resource.String())
infoMap[r] = apis.StatusInfo{
InnerStatus: apis.StatusSkipped,
InnerInfo: err.Error(),

View File

@@ -0,0 +1,68 @@
package hostsensorutils
import (
"errors"
"fmt"
"testing"
"github.com/kubescape/opa-utils/reporthandling/apis"
"github.com/stretchr/testify/require"
)
func TestAddInfoToMap(t *testing.T) {
t.Parallel()
// NOTE: the function being tested is hard to test, because
// the worker pool mutes most errors.
//
// Essentially, unless we hit some extreme edge case, we never get an error to be added to the map.
testErr := errors.New("test error")
for _, toPin := range []struct {
Resource scannerResource
Err error
Expected map[string]apis.StatusInfo
}{
{
Resource: KubeletConfiguration,
Err: testErr,
Expected: map[string]apis.StatusInfo{
"hostdata.kubescape.cloud/v1beta0/KubeletConfiguration": {
InnerStatus: apis.StatusSkipped,
InnerInfo: testErr.Error(),
},
},
},
{
Resource: CNIInfo,
Err: testErr,
Expected: map[string]apis.StatusInfo{
"hostdata.kubescape.cloud/v1beta0/CNIInfo": {
InnerStatus: apis.StatusSkipped,
InnerInfo: testErr.Error(),
},
},
},
{
Resource: scannerResource("invalid"),
Err: testErr,
Expected: map[string]apis.StatusInfo{
"//invalid": { // no group, no version
InnerStatus: apis.StatusSkipped,
InnerInfo: testErr.Error(),
},
},
},
} {
tc := toPin
t.Run(fmt.Sprintf("should expect a status for resource %s", tc.Resource), func(t *testing.T) {
t.Parallel()
result := make(map[string]apis.StatusInfo, 1)
addInfoToMap(tc.Resource, result, tc.Err)
require.EqualValues(t, tc.Expected, result)
})
}
}

View File

@@ -1,49 +1,42 @@
package opaprocessor
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
)
func Test_verify(t *testing.T) {
type args struct {
img string
key string
}
tests := []struct {
name string
args args
want bool
wantErr assert.ErrorAssertionFunc
}{
{
"valid signature",
args{
img: "hisu/cosign-tests:signed",
key: "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEGnMCUU0jGe6r4mPsPuyTXf61PE4e\nNwB/31SvUMmnoyd/1UxSqd+MRPXPU6pcub4k6E9G9SprVCuf6Sydcbyiqw==\n-----END PUBLIC KEY-----",
},
true,
assert.NoError,
},
{
"no signature",
args{
img: "hisu/cosign-tests:unsigned",
key: "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEGnMCUU0jGe6r4mPsPuyTXf61PE4e\nNwB/31SvUMmnoyd/1UxSqd+MRPXPU6pcub4k6E9G9SprVCuf6Sydcbyiqw==\n-----END PUBLIC KEY-----",
},
false,
assert.Error,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := verify(tt.args.img, tt.args.key)
if !tt.wantErr(t, err, fmt.Sprintf("verify(%v, %v)", tt.args.img, tt.args.key)) {
return
}
assert.Equalf(t, tt.want, got, "verify(%v, %v)", tt.args.img, tt.args.key)
})
}
}
// func Test_verify(t *testing.T) {
// type args struct {
// img string
// key string
// }
// tests := []struct {
// name string
// args args
// want bool
// wantErr assert.ErrorAssertionFunc
// }{
// {
// "valid signature",
// args{
// img: "hisu/cosign-tests:signed",
// key: "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEGnMCUU0jGe6r4mPsPuyTXf61PE4e\nNwB/31SvUMmnoyd/1UxSqd+MRPXPU6pcub4k6E9G9SprVCuf6Sydcbyiqw==\n-----END PUBLIC KEY-----",
// },
// true,
// assert.NoError,
// },
// {
// "no signature",
// args{
// img: "hisu/cosign-tests:unsigned",
// key: "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEGnMCUU0jGe6r4mPsPuyTXf61PE4e\nNwB/31SvUMmnoyd/1UxSqd+MRPXPU6pcub4k6E9G9SprVCuf6Sydcbyiqw==\n-----END PUBLIC KEY-----",
// },
// false,
// assert.Error,
// },
// }
// for _, tt := range tests {
// t.Run(tt.name, func(t *testing.T) {
// got, err := verify(tt.args.img, tt.args.key)
// if !tt.wantErr(t, err, fmt.Sprintf("verify(%v, %v)", tt.args.img, tt.args.key)) {
// return
// }
// assert.Equalf(t, tt.want, got, "verify(%v, %v)", tt.args.img, tt.args.key)
// })
// }
// }

View File

@@ -3,26 +3,27 @@ package opaprocessor
import (
"context"
"fmt"
"runtime"
"sync"
"time"
"github.com/armosec/armoapi-go/armotypes"
logger "github.com/kubescape/go-logger"
"github.com/kubescape/go-logger/helpers"
"github.com/kubescape/k8s-interface/workloadinterface"
"github.com/kubescape/kubescape/v2/core/cautils"
"github.com/kubescape/kubescape/v2/core/pkg/score"
"github.com/kubescape/opa-utils/objectsenvelopes"
"github.com/kubescape/opa-utils/reporthandling"
"github.com/kubescape/opa-utils/reporthandling/apis"
"github.com/kubescape/opa-utils/reporthandling/results/v1/resourcesresults"
"github.com/open-policy-agent/opa/storage"
"go.opentelemetry.io/otel"
"github.com/kubescape/k8s-interface/workloadinterface"
reporthandlingv2 "github.com/kubescape/opa-utils/reporthandling/v2"
"github.com/kubescape/opa-utils/resources"
"github.com/open-policy-agent/opa/ast"
"github.com/open-policy-agent/opa/rego"
"github.com/open-policy-agent/opa/storage"
"go.opentelemetry.io/otel"
"golang.org/x/sync/errgroup"
)
const ScoreConfigPath = "/resources/config"
@@ -33,9 +34,16 @@ type IJobProgressNotificationClient interface {
Stop()
}
const (
heuristicAllocResources = 100
heuristicAllocControls = 100
)
// OPAProcessor processes Open Policy Agent rules.
type OPAProcessor struct {
regoDependenciesData *resources.RegoDependenciesData
*cautils.OPASessionObj
opaRegisterOnce sync.Once
}
func NewOPAProcessor(sessionObj *cautils.OPASessionObj, regoDependenciesData *resources.RegoDependenciesData) *OPAProcessor {
@@ -43,20 +51,26 @@ func NewOPAProcessor(sessionObj *cautils.OPASessionObj, regoDependenciesData *re
regoDependenciesData.PostureControlInputs = sessionObj.RegoInputData.PostureControlInputs
regoDependenciesData.DataControlInputs = sessionObj.RegoInputData.DataControlInputs
}
return &OPAProcessor{
OPASessionObj: sessionObj,
regoDependenciesData: regoDependenciesData,
}
}
func (opap *OPAProcessor) ProcessRulesListenner(ctx context.Context, progressListener IJobProgressNotificationClient) error {
func (opap *OPAProcessor) ProcessRulesListener(ctx context.Context, progressListener IJobProgressNotificationClient) error {
opap.OPASessionObj.AllPolicies = ConvertFrameworksToPolicies(opap.Policies, cautils.BuildNumber)
ConvertFrameworksToSummaryDetails(&opap.Report.SummaryDetails, opap.Policies, opap.OPASessionObj.AllPolicies)
maxGoRoutines, err := cautils.ParseIntEnvVar("RULE_PROCESSING_GOMAXPROCS", 2*runtime.NumCPU())
if err != nil {
logger.L().Ctx(ctx).Warning(err.Error())
}
// process
if err := opap.Process(ctx, opap.OPASessionObj.AllPolicies, progressListener); err != nil {
logger.L().Ctx(ctx).Error(err.Error())
if err := opap.Process(ctx, opap.OPASessionObj.AllPolicies, progressListener, maxGoRoutines); err != nil {
logger.L().Ctx(ctx).Warning(err.Error())
// Return error?
}
@@ -65,16 +79,18 @@ func (opap *OPAProcessor) ProcessRulesListenner(ctx context.Context, progressLis
//TODO: review this location
scorewrapper := score.NewScoreWrapper(opap.OPASessionObj)
scorewrapper.Calculate(score.EPostureReportV2)
_ = scorewrapper.Calculate(score.EPostureReportV2)
return nil
}
func (opap *OPAProcessor) Process(ctx context.Context, policies *cautils.Policies, progressListener IJobProgressNotificationClient) error {
// Process OPA policies (rules) on all configured controls.
func (opap *OPAProcessor) Process(ctx context.Context, policies *cautils.Policies, progressListener IJobProgressNotificationClient, maxGoRoutines int) error {
ctx, span := otel.Tracer("").Start(ctx, "OPAProcessor.Process")
defer span.End()
opap.loggerStartScanning()
defer opap.loggerDoneScanning()
cautils.StartSpinner()
defer cautils.StopSpinner()
@@ -83,33 +99,102 @@ func (opap *OPAProcessor) Process(ctx context.Context, policies *cautils.Policie
defer progressListener.Stop()
}
for _, toPin := range policies.Controls {
if progressListener != nil {
progressListener.ProgressJob(1, fmt.Sprintf("Control %s", toPin.ControlID))
}
control := toPin
resourcesAssociatedControl, err := opap.processControl(ctx, &control)
if err != nil {
logger.L().Ctx(ctx).Error(err.Error())
}
if len(resourcesAssociatedControl) == 0 {
continue
}
// update resources with latest results
for resourceID, controlResult := range resourcesAssociatedControl {
if _, ok := opap.ResourcesResult[resourceID]; !ok {
opap.ResourcesResult[resourceID] = resourcesresults.Result{ResourceID: resourceID}
}
t := opap.ResourcesResult[resourceID]
t.AssociatedControls = append(t.AssociatedControls, controlResult)
opap.ResourcesResult[resourceID] = t
}
// results to collect from controls being processed in parallel
type results struct {
resourceAssociatedControl map[string]resourcesresults.ResourceAssociatedControl
allResources map[string]workloadinterface.IMetadata
}
resultsChan := make(chan results)
controlsGroup, groupCtx := errgroup.WithContext(ctx)
controlsGroup.SetLimit(maxGoRoutines)
allResources := make(map[string]workloadinterface.IMetadata, max(len(opap.AllResources), heuristicAllocResources))
for k, v := range opap.AllResources {
allResources[k] = v
}
var resultsCollector sync.WaitGroup
resultsCollector.Add(1)
go func() {
// collects the results from processing all rules for all controls.
//
// NOTE: since policies.Controls is a map, iterating over it doesn't guarantee any
// specific ordering. Therefore, if a conflict is possible on resources, e.g. two rules
// referencing the same resource, the eventual result of the merge is not guaranteed to be
// stable. This behavior is consistent with the previous (unparallelized) processing.
defer resultsCollector.Done()
for result := range resultsChan {
// merge both maps in parallel
var merger sync.WaitGroup
merger.Add(1)
go func() {
// merge all resources
defer merger.Done()
for k, v := range result.allResources {
allResources[k] = v
}
}()
merger.Add(1)
go func() {
defer merger.Done()
// update resources with latest results
for resourceID, controlResult := range result.resourceAssociatedControl {
result, found := opap.ResourcesResult[resourceID]
if !found {
result = resourcesresults.Result{ResourceID: resourceID}
}
result.AssociatedControls = append(result.AssociatedControls, controlResult)
opap.ResourcesResult[resourceID] = result
}
}()
merger.Wait()
}
}()
// processes rules for all controls in parallel
for _, controlToPin := range policies.Controls {
if progressListener != nil {
progressListener.ProgressJob(1, fmt.Sprintf("Control: %s", controlToPin.ControlID))
}
control := controlToPin
controlsGroup.Go(func() error {
resourceAssociatedControl, allResourcesFromControl, err := opap.processControl(groupCtx, &control)
if err != nil {
logger.L().Ctx(groupCtx).Warning(err.Error())
}
select {
case resultsChan <- results{
resourceAssociatedControl: resourceAssociatedControl,
allResources: allResourcesFromControl,
}:
case <-groupCtx.Done(): // interrupted (NOTE: at this moment, this never happens since errors are muted)
return groupCtx.Err()
}
return nil
})
}
// wait for all results from all rules to be collected
err := controlsGroup.Wait()
close(resultsChan)
resultsCollector.Wait()
if err != nil {
return err
}
// merge the final result in resources
for k, v := range allResources {
opap.AllResources[k] = v
}
opap.Report.ReportGenerationTime = time.Now().UTC()
return nil
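
Abstracted away from OPA, the concurrency pattern introduced here is: a size-limited errgroup fans out the work, a single collector goroutine owns the merged state, and the results channel is closed only after the group has been waited on. A generic sketch of that shape (the names and the squaring "work" are illustrative only):

package main

import (
	"context"
	"fmt"
	"sync"

	"golang.org/x/sync/errgroup"
)

func main() {
	ctx := context.Background()
	work := []int{1, 2, 3, 4, 5}

	results := make(chan int)
	g, gctx := errgroup.WithContext(ctx)
	g.SetLimit(4) // bounded parallelism, as RULE_PROCESSING_GOMAXPROCS does above

	merged := make(map[int]int)
	var collector sync.WaitGroup
	collector.Add(1)
	go func() {
		// single owner of `merged`: no locking needed on the map itself
		defer collector.Done()
		for r := range results {
			merged[r] = r * r
		}
	}()

	for _, w := range work {
		w := w // pin the loop variable, as the code above does with controlToPin
		g.Go(func() error {
			select {
			case results <- w:
				return nil
			case <-gctx.Done():
				return gctx.Err()
			}
		})
	}

	err := g.Wait()
	close(results) // safe: all producers have returned
	collector.Wait()
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(merged)
}
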
@@ -133,113 +218,135 @@ func (opap *OPAProcessor) loggerDoneScanning() {
}
}
func (opap *OPAProcessor) processControl(ctx context.Context, control *reporthandling.Control) (map[string]resourcesresults.ResourceAssociatedControl, error) {
var errs error
// processControl processes all the rules for a given control.
//
// NOTE: processControl no longer mutates the state of the current OPAProcessor instance,
// but returns maps instead, to be merged by the caller.
func (opap *OPAProcessor) processControl(ctx context.Context, control *reporthandling.Control) (map[string]resourcesresults.ResourceAssociatedControl, map[string]workloadinterface.IMetadata, error) {
resourcesAssociatedControl := make(map[string]resourcesresults.ResourceAssociatedControl, heuristicAllocControls)
allResources := make(map[string]workloadinterface.IMetadata, heuristicAllocResources)
resourcesAssociatedControl := make(map[string]resourcesresults.ResourceAssociatedControl)
// ruleResults := make(map[string][]resourcesresults.ResourceAssociatedRule)
for i := range control.Rules {
resourceAssociatedRule, err := opap.processRule(ctx, &control.Rules[i], control.FixedInput)
resourceAssociatedRule, allResourcesFromRule, err := opap.processRule(ctx, &control.Rules[i], control.FixedInput)
if err != nil {
logger.L().Ctx(ctx).Error(err.Error())
logger.L().Ctx(ctx).Warning(err.Error())
continue
}
// merge all resources for all processed rules in this control
for k, v := range allResourcesFromRule {
allResources[k] = v
}
// append failed rules to controls
if len(resourceAssociatedRule) != 0 {
for resourceID, ruleResponse := range resourceAssociatedRule {
for resourceID, ruleResponse := range resourceAssociatedRule {
var controlResult resourcesresults.ResourceAssociatedControl
controlResult.SetID(control.ControlID)
controlResult.SetName(control.Name)
controlResult := resourcesresults.ResourceAssociatedControl{}
controlResult.SetID(control.ControlID)
controlResult.SetName(control.Name)
if _, ok := resourcesAssociatedControl[resourceID]; ok {
controlResult.ResourceAssociatedRules = resourcesAssociatedControl[resourceID].ResourceAssociatedRules
}
if ruleResponse != nil {
controlResult.ResourceAssociatedRules = append(controlResult.ResourceAssociatedRules, *ruleResponse)
}
if control, ok := opap.AllPolicies.Controls[control.ControlID]; ok {
controlResult.SetStatus(control)
}
resourcesAssociatedControl[resourceID] = controlResult
if associatedControl, ok := resourcesAssociatedControl[resourceID]; ok {
controlResult.ResourceAssociatedRules = associatedControl.ResourceAssociatedRules
}
if ruleResponse != nil {
controlResult.ResourceAssociatedRules = append(controlResult.ResourceAssociatedRules, *ruleResponse)
}
if control, ok := opap.AllPolicies.Controls[control.ControlID]; ok {
controlResult.SetStatus(control)
}
resourcesAssociatedControl[resourceID] = controlResult
}
}
return resourcesAssociatedControl, errs
return resourcesAssociatedControl, allResources, nil
}
func (opap *OPAProcessor) processRule(ctx context.Context, rule *reporthandling.PolicyRule, fixedControlInputs map[string][]string) (map[string]*resourcesresults.ResourceAssociatedRule, error) {
// processRule processes a single policy rule, with some extra fixed control inputs.
//
// NOTE: processRule no longer mutates the state of the current OPAProcessor instance,
// and returns maps instead, to be merged by the caller.
func (opap *OPAProcessor) processRule(ctx context.Context, rule *reporthandling.PolicyRule, fixedControlInputs map[string][]string) (map[string]*resourcesresults.ResourceAssociatedRule, map[string]workloadinterface.IMetadata, error) {
ruleRegoDependenciesData := opap.makeRegoDeps(rule.ConfigInputs, fixedControlInputs)
postureControlInputs := opap.regoDependenciesData.GetFilteredPostureControlInputs(rule.ConfigInputs) // get store
dataControlInputs := map[string]string{"cloudProvider": opap.OPASessionObj.Report.ClusterCloudProvider}
// Merge configurable control input and fixed control input
for k, v := range fixedControlInputs {
postureControlInputs[k] = v
}
RuleRegoDependenciesData := resources.RegoDependenciesData{DataControlInputs: dataControlInputs,
PostureControlInputs: postureControlInputs}
inputResources, err := reporthandling.RegoResourcesAggregator(rule, getAllSupportedObjects(opap.K8SResources, opap.ArmoResource, opap.AllResources, rule))
inputResources, err := reporthandling.RegoResourcesAggregator(
rule,
getAllSupportedObjects(opap.K8SResources, opap.ArmoResource, opap.AllResources, rule), // NOTE: this uses the initial snapshot of AllResources
)
if err != nil {
return nil, fmt.Errorf("error getting aggregated k8sObjects: %s", err.Error())
return nil, nil, fmt.Errorf("error getting aggregated k8sObjects: %w", err)
}
if len(inputResources) == 0 {
return nil, nil // no resources found for testing
return nil, nil, nil // no resources found for testing
}
inputRawResources := workloadinterface.ListMetaToMap(inputResources)
resources := map[string]*resourcesresults.ResourceAssociatedRule{}
// the failed resources are a subset of the enumeratedData, so we store the enumeratedData as if it were the input data
enumeratedData, err := opap.enumerateData(ctx, rule, inputRawResources)
if err != nil {
return nil, err
return nil, nil, err
}
inputResources = objectsenvelopes.ListMapToMeta(enumeratedData)
for i := range inputResources {
resources[inputResources[i].GetID()] = &resourcesresults.ResourceAssociatedRule{
resources := make(map[string]*resourcesresults.ResourceAssociatedRule, len(inputResources))
allResources := make(map[string]workloadinterface.IMetadata, len(inputResources))
for i, inputResource := range inputResources {
resources[inputResource.GetID()] = &resourcesresults.ResourceAssociatedRule{
Name: rule.Name,
ControlConfigurations: postureControlInputs,
ControlConfigurations: ruleRegoDependenciesData.PostureControlInputs,
Status: apis.StatusPassed,
}
opap.AllResources[inputResources[i].GetID()] = inputResources[i]
allResources[inputResource.GetID()] = inputResources[i]
}
ruleResponses, err := opap.runOPAOnSingleRule(ctx, rule, inputRawResources, ruleData, RuleRegoDependenciesData)
ruleResponses, err := opap.runOPAOnSingleRule(ctx, rule, inputRawResources, ruleData, ruleRegoDependenciesData)
if err != nil {
// TODO - Handle error
logger.L().Ctx(ctx).Error(err.Error())
} else {
// ruleResponse to ruleResult
for i := range ruleResponses {
failedResources := objectsenvelopes.ListMapToMeta(ruleResponses[i].GetFailedResources())
for j := range failedResources {
ruleResult := &resourcesresults.ResourceAssociatedRule{}
if r, k := resources[failedResources[j].GetID()]; k {
ruleResult = r
}
return resources, allResources, err
}
ruleResult.SetStatus(apis.StatusFailed, nil)
for j := range ruleResponses[i].FailedPaths {
ruleResult.Paths = append(ruleResult.Paths, armotypes.PosturePaths{FailedPath: ruleResponses[i].FailedPaths[j]})
// ruleResponse to ruleResult
for _, ruleResponse := range ruleResponses {
failedResources := objectsenvelopes.ListMapToMeta(ruleResponse.GetFailedResources())
for _, failedResource := range failedResources {
var ruleResult *resourcesresults.ResourceAssociatedRule
if r, found := resources[failedResource.GetID()]; found {
ruleResult = r
} else {
ruleResult = &resourcesresults.ResourceAssociatedRule{
Paths: make([]armotypes.PosturePaths, 0, len(ruleResponse.FailedPaths)+len(ruleResponse.FixPaths)+1),
}
for j := range ruleResponses[i].FixPaths {
ruleResult.Paths = append(ruleResult.Paths, armotypes.PosturePaths{FixPath: ruleResponses[i].FixPaths[j]})
}
if ruleResponses[i].FixCommand != "" {
ruleResult.Paths = append(ruleResult.Paths, armotypes.PosturePaths{FixCommand: ruleResponses[i].FixCommand})
}
resources[failedResources[j].GetID()] = ruleResult
}
ruleResult.SetStatus(apis.StatusFailed, nil)
for _, failedPath := range ruleResponse.FailedPaths {
ruleResult.Paths = append(ruleResult.Paths, armotypes.PosturePaths{FailedPath: failedPath})
}
for _, fixPath := range ruleResponse.FixPaths {
ruleResult.Paths = append(ruleResult.Paths, armotypes.PosturePaths{FixPath: fixPath})
}
if ruleResponse.FixCommand != "" {
ruleResult.Paths = append(ruleResult.Paths, armotypes.PosturePaths{FixCommand: ruleResponse.FixCommand})
}
// if ruleResponse has relatedObjects, add it to ruleResult
if len(ruleResponse.RelatedObjects) > 0 {
for _, relatedObject := range ruleResponse.RelatedObjects {
wl := objectsenvelopes.NewObject(relatedObject.Object)
if wl != nil {
ruleResult.RelatedResourcesIDs = append(ruleResult.RelatedResourcesIDs, wl.GetID())
}
}
}
resources[failedResource.GetID()] = ruleResult
}
}
return resources, err
return resources, allResources, nil
}
func (opap *OPAProcessor) runOPAOnSingleRule(ctx context.Context, rule *reporthandling.PolicyRule, k8sObjects []map[string]interface{}, getRuleData func(*reporthandling.PolicyRule) string, ruleRegoDependenciesData resources.RegoDependenciesData) ([]reporthandling.RuleResponse, error) {
@@ -251,20 +358,25 @@ func (opap *OPAProcessor) runOPAOnSingleRule(ctx context.Context, rule *reportha
}
}
// runRegoOnK8s compiles an OPA PolicyRule and evaluates it against k8s objects
func (opap *OPAProcessor) runRegoOnK8s(ctx context.Context, rule *reporthandling.PolicyRule, k8sObjects []map[string]interface{}, getRuleData func(*reporthandling.PolicyRule) string, ruleRegoDependenciesData resources.RegoDependenciesData) ([]reporthandling.RuleResponse, error) {
// compile modules
modules, err := getRuleDependencies(ctx)
if err != nil {
return nil, fmt.Errorf("rule: '%s', %s", rule.Name, err.Error())
}
rego.RegisterBuiltin2(cosignVerifySignatureDeclaration, cosignVerifySignatureDefinition)
rego.RegisterBuiltin1(cosignHasSignatureDeclaration, cosignHasSignatureDefinition)
modules[rule.Name] = getRuleData(rule)
compiled, err := ast.CompileModules(modules)
opap.opaRegisterOnce.Do(func() {
// register signature verification methods for the OPA ast engine (since these are package-level symbols, we do it only once)
rego.RegisterBuiltin2(cosignVerifySignatureDeclaration, cosignVerifySignatureDefinition)
rego.RegisterBuiltin1(cosignHasSignatureDeclaration, cosignHasSignatureDefinition)
})
modules[rule.Name] = getRuleData(rule)
// NOTE: OPA module compilation is the most resource-intensive operation.
compiled, err := ast.CompileModules(modules)
if err != nil {
return nil, fmt.Errorf("in 'runRegoOnSingleRule', failed to compile rule, name: %s, reason: %s", rule.Name, err.Error())
return nil, fmt.Errorf("in 'runRegoOnK8s', failed to compile rule, name: %s, reason: %w", rule.Name, err)
}
store, err := ruleRegoDependenciesData.TOStorage()
@@ -273,17 +385,15 @@ func (opap *OPAProcessor) runRegoOnK8s(ctx context.Context, rule *reporthandling
}
// Eval
results, err := opap.regoEval(k8sObjects, compiled, &store)
results, err := opap.regoEval(ctx, k8sObjects, compiled, &store)
if err != nil {
logger.L().Ctx(ctx).Error(err.Error())
logger.L().Ctx(ctx).Warning(err.Error())
}
return results, nil
}
func (opap *OPAProcessor) regoEval(inputObj []map[string]interface{}, compiledRego *ast.Compiler, store *storage.Store) ([]reporthandling.RuleResponse, error) {
// opap.regoDependenciesData.PostureControlInputs
func (opap *OPAProcessor) regoEval(ctx context.Context, inputObj []map[string]interface{}, compiledRego *ast.Compiler, store *storage.Store) ([]reporthandling.RuleResponse, error) {
rego := rego.New(
rego.Query("data.armo_builtins"), // get package name from rule
rego.Compiler(compiledRego),
@@ -292,7 +402,7 @@ func (opap *OPAProcessor) regoEval(inputObj []map[string]interface{}, compiledRe
)
// Run evaluation
resultSet, err := rego.Eval(context.Background())
resultSet, err := rego.Eval(ctx)
if err != nil {
return nil, err
}
@@ -305,23 +415,49 @@ func (opap *OPAProcessor) regoEval(inputObj []map[string]interface{}, compiledRe
}
func (opap *OPAProcessor) enumerateData(ctx context.Context, rule *reporthandling.PolicyRule, k8sObjects []map[string]interface{}) ([]map[string]interface{}, error) {
if ruleEnumeratorData(rule) == "" {
return k8sObjects, nil
}
postureControlInputs := opap.regoDependenciesData.GetFilteredPostureControlInputs(rule.ConfigInputs)
dataControlInputs := map[string]string{"cloudProvider": opap.OPASessionObj.Report.ClusterCloudProvider}
RuleRegoDependenciesData := resources.RegoDependenciesData{DataControlInputs: dataControlInputs,
PostureControlInputs: postureControlInputs}
ruleResponse, err := opap.runOPAOnSingleRule(ctx, rule, k8sObjects, ruleEnumeratorData, RuleRegoDependenciesData)
ruleRegoDependenciesData := opap.makeRegoDeps(rule.ConfigInputs, nil)
ruleResponse, err := opap.runOPAOnSingleRule(ctx, rule, k8sObjects, ruleEnumeratorData, ruleRegoDependenciesData)
if err != nil {
return nil, err
}
failedResources := []map[string]interface{}{}
failedResources := make([]map[string]interface{}, 0, len(ruleResponse))
for _, ruleResponse := range ruleResponse {
failedResources = append(failedResources, ruleResponse.GetFailedResources()...)
}
return failedResources, nil
}
// makeRegoDeps builds a resources.RegoDependenciesData struct for the current cloud provider.
//
// If some extra fixedControlInputs are provided, they are merged into the "posture" control inputs.
func (opap *OPAProcessor) makeRegoDeps(configInputs []string, fixedControlInputs map[string][]string) resources.RegoDependenciesData {
postureControlInputs := opap.regoDependenciesData.GetFilteredPostureControlInputs(configInputs) // posture control inputs filtered for this rule
// merge configurable control input and fixed control input
for k, v := range fixedControlInputs {
postureControlInputs[k] = v
}
dataControlInputs := map[string]string{
"cloudProvider": opap.OPASessionObj.Report.ClusterCloudProvider,
}
return resources.RegoDependenciesData{
DataControlInputs: dataControlInputs,
PostureControlInputs: postureControlInputs,
}
}
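// max returns the larger of two ints.
// NOTE: Go 1.21 and later ship a built-in max, so a local helper like the one below
// is only needed on older toolchains.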
func max(a, b int) int {
if a > b {
return a
}
return b
}


@@ -1,23 +1,176 @@
package opaprocessor
import (
"archive/zip"
"context"
_ "embed"
"encoding/json"
"fmt"
"io"
"os"
"runtime"
"testing"
"time"
"github.com/armosec/armoapi-go/armotypes"
"github.com/kubescape/kubescape/v2/core/cautils"
"github.com/kubescape/kubescape/v2/core/mocks"
"github.com/kubescape/opa-utils/reporthandling"
"github.com/kubescape/opa-utils/reporthandling/results/v1/resourcesresults"
"github.com/kubescape/opa-utils/resources"
"github.com/stretchr/testify/assert"
"github.com/kubescape/k8s-interface/workloadinterface"
// _ "k8s.io/client-go/plugin/pkg/client/auth"
)
func NewOPAProcessorMock() *OPAProcessor {
return &OPAProcessor{}
var (
//go:embed testdata/opaSessionObjMock.json
opaSessionObjMockData string
//go:embed testdata/opaSessionObjMock1.json
opaSessionObjMockData1 string
//go:embed testdata/regoDependenciesDataMock.json
regoDependenciesData string
allResourcesMockData []byte
//go:embed testdata/resourcesMock1.json
resourcesMock1 []byte
)
func unzipAllResourcesTestDataAndSetVar(zipFilePath, destFilePath string) error {
archive, err := zip.OpenReader(zipFilePath)
if err != nil {
return err
}
os.RemoveAll(destFilePath)
f := archive.File[0] // the test archive is expected to contain exactly one file
dstFile, err := os.OpenFile(destFilePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode())
if err != nil {
return err
}
fileInArchive, err := f.Open()
if err != nil {
return err
}
_, err = io.Copy(dstFile, fileInArchive) //nolint:gosec
dstFile.Close()
fileInArchive.Close()
archive.Close()
if err != nil {
return err
}
file, err := os.Open(destFilePath)
if err != nil {
return err
}
allResourcesMockData, err = io.ReadAll(file)
file.Close()
if err != nil {
return err
}
return nil
}
func NewOPAProcessorMock(opaSessionObjMock string, resourcesMock []byte) *OPAProcessor {
opap := &OPAProcessor{}
if err := json.Unmarshal([]byte(regoDependenciesData), &opap.regoDependenciesData); err != nil {
panic(err)
}
// no err check because Unmarshal will fail on AllResources field (expected)
json.Unmarshal([]byte(opaSessionObjMock), &opap.OPASessionObj)
opap.AllResources = make(map[string]workloadinterface.IMetadata)
allResources := make(map[string]map[string]interface{})
if err := json.Unmarshal(resourcesMock, &allResources); err != nil {
panic(err)
}
for i := range allResources {
opap.AllResources[i] = workloadinterface.NewWorkloadObj(allResources[i])
}
return opap
}
func monitorHeapSpace(maxHeap *uint64, quitChan chan bool) {
for {
select {
case <-quitChan:
return
default:
var m runtime.MemStats
runtime.ReadMemStats(&m)
heapSpace := m.HeapAlloc
if heapSpace > *maxHeap {
*maxHeap = heapSpace
}
time.Sleep(100 * time.Millisecond)
}
}
}
/*
goarch: arm64
pkg: github.com/kubescape/kubescape/v2/core/pkg/opaprocessor
BenchmarkProcess/opaprocessor.Process_1-8 1 29714096083 ns/op 22309913416 B/op 498183685 allocs/op
--- BENCH: BenchmarkProcess/opaprocessor.Process_1-8
processorhandler_test.go:178: opaprocessor.Process_1_max_heap_space_gb: 2.85
processorhandler_test.go:179: opaprocessor.Process_1_execution_time_sec: 29.714054
BenchmarkProcess/opaprocessor.Process_4-8 1 25574892875 ns/op 22037035032 B/op 498167263 allocs/op
--- BENCH: BenchmarkProcess/opaprocessor.Process_4-8
processorhandler_test.go:178: opaprocessor.Process_4_max_heap_space_gb: 6.76
processorhandler_test.go:179: opaprocessor.Process_4_execution_time_sec: 25.574884
BenchmarkProcess/opaprocessor.Process_8-8 1 16534461291 ns/op 22308814384 B/op 498167171 allocs/op
--- BENCH: BenchmarkProcess/opaprocessor.Process_8-8
processorhandler_test.go:178: opaprocessor.Process_8_max_heap_space_gb: 9.47
processorhandler_test.go:179: opaprocessor.Process_8_execution_time_sec: 16.534455
BenchmarkProcess/opaprocessor.Process_16-8 1 18924050500 ns/op 22179562272 B/op 498167367 allocs/op
--- BENCH: BenchmarkProcess/opaprocessor.Process_16-8
processorhandler_test.go:178: opaprocessor.Process_16_max_heap_space_gb: 11.69
processorhandler_test.go:179: opaprocessor.Process_16_execution_time_sec: 16.321493
*/
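For context, output in this shape comes from the standard Go benchmark tooling; something like `go test -bench BenchmarkProcess -benchmem` run in core/pkg/opaprocessor would reproduce it (the exact flags are an assumption; only the package name and the numbers above come from the recorded run).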
func BenchmarkProcess(b *testing.B) {
b.SetParallelism(1)
// since the all-resources JSON is a large file, we unzip it and set the variable before running the benchmark
if err := unzipAllResourcesTestDataAndSetVar("testdata/allResourcesMock.json.zip", "testdata/allResourcesMock.json"); err != nil {
b.Fatal(err)
}
maxGoRoutinesArr := []int{1, 4, 8, 16}
for _, maxGoRoutines := range maxGoRoutinesArr {
testName := fmt.Sprintf("opaprocessor.Process_%d", maxGoRoutines)
b.Run(testName, func(b *testing.B) {
// setup
opap := NewOPAProcessorMock(opaSessionObjMockData, allResourcesMockData)
b.ResetTimer()
var maxHeap uint64
quitChan := make(chan bool)
go monitorHeapSpace(&maxHeap, quitChan)
// test
opap.Process(context.Background(), opap.OPASessionObj.AllPolicies, nil, maxGoRoutines)
// teardown
quitChan <- true
b.Log(fmt.Sprintf("%s_max_heap_space_gb: %.2f", testName, float64(maxHeap)/(1024*1024*1024)))
b.Log(fmt.Sprintf("%s_execution_time_sec: %f", testName, b.Elapsed().Seconds()))
})
}
}
func TestProcessResourcesResult(t *testing.T) {
// set k8s
@@ -40,22 +193,22 @@ func TestProcessResourcesResult(t *testing.T) {
opap := NewOPAProcessor(opaSessionObj, resources.NewRegoDependenciesDataMock())
opap.AllPolicies = policies
opap.Process(context.TODO(), policies, nil)
opap.Process(context.TODO(), policies, nil, 1)
assert.Equal(t, 1, len(opaSessionObj.ResourcesResult))
res := opaSessionObj.ResourcesResult[deployment.GetID()]
assert.Equal(t, 2, res.ListControlsIDs(nil).All().Len())
assert.Equal(t, 1, len(res.ListControlsIDs(nil).Failed()))
assert.Equal(t, 1, len(res.ListControlsIDs(nil).Passed()))
assert.Equal(t, 2, res.ListControlsIDs(nil).Len())
assert.Equal(t, 1, res.ListControlsIDs(nil).Failed())
assert.Equal(t, 1, res.ListControlsIDs(nil).Passed())
assert.True(t, res.GetStatus(nil).IsFailed())
assert.False(t, res.GetStatus(nil).IsPassed())
assert.Equal(t, deployment.GetID(), opaSessionObj.ResourcesResult[deployment.GetID()].ResourceID)
opap.updateResults(context.TODO())
res = opaSessionObj.ResourcesResult[deployment.GetID()]
assert.Equal(t, 2, res.ListControlsIDs(nil).All().Len())
assert.Equal(t, 1, len(res.ListControlsIDs(nil).Failed()))
assert.Equal(t, 1, len(res.ListControlsIDs(nil).Passed()))
assert.Equal(t, 2, res.ListControlsIDs(nil).Len())
assert.Equal(t, 1, res.ListControlsIDs(nil).Failed())
assert.Equal(t, 1, res.ListControlsIDs(nil).Passed())
assert.True(t, res.GetStatus(nil).IsFailed())
assert.False(t, res.GetStatus(nil).IsPassed())
assert.Equal(t, deployment.GetID(), opaSessionObj.ResourcesResult[deployment.GetID()].ResourceID)
@@ -68,32 +221,114 @@ func TestProcessResourcesResult(t *testing.T) {
assert.Equal(t, 0, summaryDetails.NumberOfResources().Skipped())
// test resource listing
assert.Equal(t, 1, summaryDetails.ListResourcesIDs().All().Len())
assert.Equal(t, 1, len(summaryDetails.ListResourcesIDs().Failed()))
assert.Equal(t, 0, len(summaryDetails.ListResourcesIDs().Passed()))
assert.Equal(t, 0, len(summaryDetails.ListResourcesIDs().Skipped()))
assert.Equal(t, 1, summaryDetails.ListResourcesIDs(nil).Len())
assert.Equal(t, 1, summaryDetails.ListResourcesIDs(nil).Failed())
assert.Equal(t, 0, summaryDetails.ListResourcesIDs(nil).Passed())
assert.Equal(t, 0, summaryDetails.ListResourcesIDs(nil).Skipped())
// test control listing
assert.Equal(t, res.ListControlsIDs(nil).All().Len(), summaryDetails.NumberOfControls().All())
assert.Equal(t, len(res.ListControlsIDs(nil).Passed()), summaryDetails.NumberOfControls().Passed())
assert.Equal(t, len(res.ListControlsIDs(nil).Skipped()), summaryDetails.NumberOfControls().Skipped())
assert.Equal(t, len(res.ListControlsIDs(nil).Failed()), summaryDetails.NumberOfControls().Failed())
assert.Equal(t, res.ListControlsIDs(nil).Len(), summaryDetails.NumberOfControls().All())
assert.Equal(t, res.ListControlsIDs(nil).Passed(), summaryDetails.NumberOfControls().Passed())
assert.Equal(t, res.ListControlsIDs(nil).Skipped(), summaryDetails.NumberOfControls().Skipped())
assert.Equal(t, res.ListControlsIDs(nil).Failed(), summaryDetails.NumberOfControls().Failed())
assert.True(t, summaryDetails.GetStatus().IsFailed())
opaSessionObj.Exceptions = []armotypes.PostureExceptionPolicy{*mocks.MockExceptionAllKinds(&armotypes.PosturePolicy{FrameworkName: frameworks[0].Name})}
opap.updateResults(context.TODO())
res = opaSessionObj.ResourcesResult[deployment.GetID()]
assert.Equal(t, 2, res.ListControlsIDs(nil).All().Len())
assert.Equal(t, 2, len(res.ListControlsIDs(nil).Passed()))
assert.Equal(t, 2, res.ListControlsIDs(nil).Len())
assert.Equal(t, 2, res.ListControlsIDs(nil).Passed())
assert.True(t, res.GetStatus(nil).IsPassed())
assert.False(t, res.GetStatus(nil).IsFailed())
assert.Equal(t, deployment.GetID(), opaSessionObj.ResourcesResult[deployment.GetID()].ResourceID)
// test resource listing
summaryDetails = opaSessionObj.Report.SummaryDetails
assert.Equal(t, 1, summaryDetails.ListResourcesIDs().All().Len())
assert.Equal(t, 1, len(summaryDetails.ListResourcesIDs().Failed()))
assert.Equal(t, 0, len(summaryDetails.ListResourcesIDs().Passed()))
assert.Equal(t, 0, len(summaryDetails.ListResourcesIDs().Skipped()))
assert.Equal(t, 1, summaryDetails.ListResourcesIDs(nil).Len())
assert.Equal(t, 1, summaryDetails.ListResourcesIDs(nil).Failed())
assert.Equal(t, 0, summaryDetails.ListResourcesIDs(nil).Passed())
assert.Equal(t, 0, summaryDetails.ListResourcesIDs(nil).Skipped())
}
// don't parallelize this test because it uses a global variable - allResourcesMockData
func TestProcessRule(t *testing.T) {
testCases := []struct {
name string
rule reporthandling.PolicyRule
resourcesMock []byte
opaSessionObjMock string
expectedResult map[string]*resourcesresults.ResourceAssociatedRule
}{
{
name: "TestRelatedResourcesIDs",
rule: reporthandling.PolicyRule{
PortalBase: armotypes.PortalBase{
Name: "exposure-to-internet",
Attributes: map[string]interface{}{
"armoBuiltin": true,
},
},
Rule: "package armo_builtins\n\n# Checks if NodePort or LoadBalancer is connected to a workload to expose something\ndeny[msga] {\n service := input[_]\n service.kind == \"Service\"\n is_exposed_service(service)\n \n wl := input[_]\n spec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Pod\", \"Job\", \"CronJob\"}\n spec_template_spec_patterns[wl.kind]\n wl_connected_to_service(wl, service)\n failPath := [\"spec.type\"]\n msga := {\n \"alertMessage\": sprintf(\"workload '%v' is exposed through service '%v'\", [wl.metadata.name, service.metadata.name]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 7,\n \"fixPaths\": [],\n \"failedPaths\": [],\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n \"relatedObjects\": [{\n \"object\": service,\n \"failedPaths\": failPath,\n }]\n }\n}\n\n# Checks if Ingress is connected to a service and a workload to expose something\ndeny[msga] {\n ingress := input[_]\n ingress.kind == \"Ingress\"\n \n svc := input[_]\n svc.kind == \"Service\"\n # avoid duplicate alerts\n # if service is already exposed through NodePort or LoadBalancer workload will fail on that\n not is_exposed_service(svc)\n\n wl := input[_]\n spec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Pod\", \"Job\", \"CronJob\"}\n spec_template_spec_patterns[wl.kind]\n wl_connected_to_service(wl, svc)\n\n result := svc_connected_to_ingress(svc, ingress)\n \n msga := {\n \"alertMessage\": sprintf(\"workload '%v' is exposed through ingress '%v'\", [wl.metadata.name, ingress.metadata.name]),\n \"packagename\": \"armo_builtins\",\n \"failedPaths\": [],\n \"fixPaths\": [],\n \"alertScore\": 7,\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n \"relatedObjects\": [{\n \"object\": ingress,\n \"failedPaths\": result,\n }]\n }\n} \n\n# ====================================================================================\n\nis_exposed_service(svc) {\n svc.spec.type == \"NodePort\"\n}\n\nis_exposed_service(svc) {\n svc.spec.type == \"LoadBalancer\"\n}\n\nwl_connected_to_service(wl, svc) {\n count({x | svc.spec.selector[x] == wl.metadata.labels[x]}) == count(svc.spec.selector)\n}\n\nwl_connected_to_service(wl, svc) {\n wl.spec.selector.matchLabels == svc.spec.selector\n}\n\n# check if service is connected to ingress\nsvc_connected_to_ingress(svc, ingress) = result {\n rule := ingress.spec.rules[i]\n paths := rule.http.paths[j]\n svc.metadata.name == paths.backend.service.name\n result := [sprintf(\"ingress.spec.rules[%d].http.paths[%d].backend.service.name\", [i,j])]\n}\n\n",
Match: []reporthandling.RuleMatchObjects{
{
APIGroups: []string{""},
APIVersions: []string{"v1"},
Resources: []string{"Pod", "Service"},
},
{
APIGroups: []string{"apps"},
APIVersions: []string{"v1"},
Resources: []string{"Deployment", "ReplicaSet", "DaemonSet", "StatefulSet"},
},
{
APIGroups: []string{"batch"},
APIVersions: []string{"*"},
Resources: []string{"Job", "CronJob"},
},
{
APIGroups: []string{"extensions", "networking.k8s.io"},
APIVersions: []string{"v1beta1", "v1"},
Resources: []string{"Ingress"},
},
},
Description: "fails in case the running workload has binded Service or Ingress that are exposing it on Internet.",
Remediation: "",
RuleQuery: "armo_builtins",
RuleLanguage: reporthandling.RegoLanguage,
},
resourcesMock: resourcesMock1,
opaSessionObjMock: opaSessionObjMockData1,
expectedResult: map[string]*resourcesresults.ResourceAssociatedRule{
"/v1/default/Pod/fake-pod-1-22gck": {
Name: "exposure-to-internet",
ControlConfigurations: map[string][]string{},
Status: "failed",
SubStatus: "",
Paths: nil,
Exception: nil,
RelatedResourcesIDs: []string{
"/v1/default/Service/fake-service-1",
},
},
"/v1/default/Service/fake-service-1": {
Name: "exposure-to-internet",
ControlConfigurations: map[string][]string{},
Status: "passed",
SubStatus: "",
Paths: nil,
Exception: nil,
RelatedResourcesIDs: nil,
},
},
},
}
for _, tc := range testCases {
// since the all-resources JSON is a large file, we unzip it and set the variable before running the test
if err := unzipAllResourcesTestDataAndSetVar("testdata/allResourcesMock.json.zip", "testdata/allResourcesMock.json"); err != nil {
t.Fatal(err)
}
opap := NewOPAProcessorMock(tc.opaSessionObjMock, tc.resourcesMock)
resources, _, err := opap.processRule(context.Background(), &tc.rule, nil)
assert.NoError(t, err)
assert.Equal(t, tc.expectedResult, resources)
}
}


@@ -23,7 +23,7 @@ import (
// - adds exceptions (and updates controls status)
// - summarizes results
func (opap *OPAProcessor) updateResults(ctx context.Context) {
ctx, span := otel.Tracer("").Start(ctx, "OPAProcessor.updateResults")
_, span := otel.Tracer("").Start(ctx, "OPAProcessor.updateResults")
defer span.End()
// remove data from all objects
@@ -150,10 +150,9 @@ func filterOutChildResources(objects []workloadinterface.IMetadata, match []repo
response := []workloadinterface.IMetadata{}
owners := []string{}
for m := range match {
for i := range match[m].Resources {
owners = append(owners, match[m].Resources[i])
}
owners = append(owners, match[m].Resources...)
}
for i := range objects {
if !k8sinterface.IsTypeWorkload(objects[i].GetObject()) {
response = append(response, objects[i])
@@ -167,8 +166,10 @@ func filterOutChildResources(objects []workloadinterface.IMetadata, match []repo
response = append(response, w)
}
}
return response
}
func getRuleDependencies(ctx context.Context) (map[string]string, error) {
modules := resources.LoadRegoModules()
if len(modules) == 0 {

Binary file not shown.

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long


@@ -0,0 +1 @@
{"clusterName":"kwok-kwok-cluster","postureControlInputs":{"cpu_limit_max":[],"cpu_limit_min":[],"cpu_request_max":[],"cpu_request_min":[],"imageRepositoryAllowList":[],"insecureCapabilities":["SETPCAP","NET_ADMIN","NET_RAW","SYS_MODULE","SYS_RAWIO","SYS_PTRACE","SYS_ADMIN","SYS_BOOT","MAC_OVERRIDE","MAC_ADMIN","PERFMON","ALL","BPF"],"k8sRecommendedLabels":["app.kubernetes.io/name","app.kubernetes.io/instance","app.kubernetes.io/version","app.kubernetes.io/component","app.kubernetes.io/part-of","app.kubernetes.io/managed-by","app.kubernetes.io/created-by"],"listOfDangerousArtifacts":["bin/bash","sbin/sh","bin/ksh","bin/tcsh","bin/zsh","usr/bin/scsh","bin/csh","bin/busybox","usr/bin/busybox"],"max_critical_vulnerabilities":["5"],"max_high_vulnerabilities":["10"],"memory_limit_max":[],"memory_limit_min":[],"memory_request_max":[],"memory_request_min":[],"publicRegistries":[],"recommendedLabels":["app","tier","phase","version","owner","env"],"sensitiveInterfaces":["nifi","argo-server","weave-scope-app","kubeflow","kubernetes-dashboard","jenkins","prometheus-deployment"],"sensitiveKeyNames":["aws_access_key_id","aws_secret_access_key","azure_batchai_storage_account","azure_batchai_storage_key","azure_batch_account","azure_batch_key","secret","key","password","pwd","token","jwt","bearer","credential"],"sensitiveValues":["BEGIN \\w+ PRIVATE KEY","PRIVATE KEY","eyJhbGciO","JWT","Bearer","_key_","_secret_"],"sensitiveValuesAllowed":[],"servicesNames":["nifi-service","argo-server","minio","postgres","workflow-controller-metrics","weave-scope-app","kubernetes-dashboard"],"trustedCosignPublicKeys":[],"untrustedRegistries":[],"wlKnownNames":["coredns","kube-proxy","event-exporter-gke","kube-dns","17-default-backend","metrics-server","ca-audit","ca-dashboard-aggregator","ca-notification-server","ca-ocimage","ca-oracle","ca-posture","ca-rbac","ca-vuln-scan","ca-webhook","ca-websocket","clair-clair"]},"dataControlInputs":null,"k8sconfig":{"token":"","ip":"","host":"https://127.0.0.1:32766","port":"","crtfile":"","clientcrtfile":"/Users/amirmalka/.kwok/clusters/kwok-cluster/pki/admin.crt","clientkeyfile":"/Users/amirmalka/.kwok/clusters/kwok-cluster/pki/admin.key"}}


@@ -0,0 +1,220 @@
{
"/v1/default/Pod/fake-pod-1-22gck": {
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"annotations": {
"kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"kind\":\"Pod\",\"metadata\":{\"annotations\":{},\"name\":\"fake-pod-1-22gck\",\"namespace\":\"default\"},\"spec\":{\"containers\":[{\"image\":\"redis\",\"name\":\"fake-pod-1-22gck\",\"volumeMounts\":[{\"mountPath\":\"/etc/foo\",\"name\":\"foo\",\"readOnly\":true}]}],\"volumes\":[{\"name\":\"foo\",\"secret\":{\"optional\":true,\"secretName\":\"mysecret\"}}]}}\n"
},
"creationTimestamp": "2023-06-22T07:47:38Z",
"name": "fake-pod-1-22gck",
"namespace": "default",
"resourceVersion": "1087189",
"uid": "046753fa-c7b6-46dd-ae18-dd68b8b20cd3",
"labels": {"app": "argo-server"}
},
"spec": {
"containers": [
{
"image": "redis",
"imagePullPolicy": "Always",
"name": "fake-pod-1-22gck",
"resources": {},
"terminationMessagePath": "/dev/termination-log",
"terminationMessagePolicy": "File",
"volumeMounts": [
{
"mountPath": "/etc/foo",
"name": "foo",
"readOnly": true
},
{
"mountPath": "/var/run/secrets/kubernetes.io/serviceaccount",
"name": "kube-api-access-lrpxm",
"readOnly": true
}
]
}
],
"dnsPolicy": "ClusterFirst",
"enableServiceLinks": true,
"nodeName": "minikube-yiscah",
"preemptionPolicy": "PreemptLowerPriority",
"priority": 0,
"restartPolicy": "Always",
"schedulerName": "default-scheduler",
"securityContext": {},
"serviceAccount": "default",
"serviceAccountName": "default",
"terminationGracePeriodSeconds": 30,
"tolerations": [
{
"effect": "NoExecute",
"key": "node.kubernetes.io/not-ready",
"operator": "Exists",
"tolerationSeconds": 300
},
{
"effect": "NoExecute",
"key": "node.kubernetes.io/unreachable",
"operator": "Exists",
"tolerationSeconds": 300
}
],
"volumes": [
{
"name": "foo",
"secret": {
"defaultMode": 420,
"optional": true,
"secretName": "mysecret"
}
},
{
"name": "kube-api-access-lrpxm",
"projected": {
"defaultMode": 420,
"sources": [
{
"serviceAccountToken": {
"expirationSeconds": 3607,
"path": "token"
}
},
{
"configMap": {
"items": [
{
"key": "ca.crt",
"path": "ca.crt"
}
],
"name": "kube-root-ca.crt"
}
},
{
"downwardAPI": {
"items": [
{
"fieldRef": {
"apiVersion": "v1",
"fieldPath": "metadata.namespace"
},
"path": "namespace"
}
]
}
}
]
}
}
]
},
"status": {
"conditions": [
{
"lastProbeTime": null,
"lastTransitionTime": "2023-06-22T07:47:38Z",
"status": "True",
"type": "Initialized"
},
{
"lastProbeTime": null,
"lastTransitionTime": "2023-07-18T05:07:57Z",
"status": "True",
"type": "Ready"
},
{
"lastProbeTime": null,
"lastTransitionTime": "2023-07-18T05:07:57Z",
"status": "True",
"type": "ContainersReady"
},
{
"lastProbeTime": null,
"lastTransitionTime": "2023-06-22T07:47:38Z",
"status": "True",
"type": "PodScheduled"
}
],
"containerStatuses": [
{
"containerID": "docker://a3a1aac00031c6ab85f75cfa17d14ebd71ab15f1fc5c82a262449621a77d7a7e",
"image": "redis:latest",
"imageID": "docker-pullable://redis@sha256:08a82d4bf8a8b4dd94e8f5408cdbad9dd184c1cf311d34176cd3e9972c43f872",
"lastState": {
"terminated": {
"containerID": "docker://1ae623f4faf8cda5dabdc65c342752dfdf1675cb173b46875596c2eb0dae472f",
"exitCode": 255,
"finishedAt": "2023-07-18T05:03:55Z",
"reason": "Error",
"startedAt": "2023-07-17T16:32:35Z"
}
},
"name": "fake-pod-1-22gck",
"ready": true,
"restartCount": 9,
"started": true,
"state": {
"running": {
"startedAt": "2023-07-18T05:07:56Z"
}
}
}
],
"hostIP": "192.168.85.2",
"phase": "Running",
"podIP": "10.244.1.131",
"podIPs": [
{
"ip": "10.244.1.131"
}
],
"qosClass": "BestEffort",
"startTime": "2023-06-22T07:47:38Z"
}
},
"/v1/default/Service/fake-service-1": {
"apiVersion": "v1",
"kind": "Service",
"metadata": {
"annotations": {
"kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"kind\":\"Service\",\"metadata\":{\"annotations\":{},\"name\":\"fake-service-1\",\"namespace\":\"default\"},\"spec\":{\"clusterIP\":\"10.96.0.11\",\"ports\":[{\"port\":80,\"protocol\":\"TCP\",\"targetPort\":9376}],\"selector\":{\"app\":\"argo-server\"},\"type\":\"LoadBalancer\"},\"status\":{\"loadBalancer\":{\"ingress\":[{\"ip\":\"192.0.2.127\"}]}}}\n"
},
"creationTimestamp": "2023-07-09T06:22:27Z",
"name": "fake-service-1",
"namespace": "default",
"resourceVersion": "981856",
"uid": "dd629eb1-6779-4298-a70f-0bdbd046d409"
},
"spec": {
"allocateLoadBalancerNodePorts": true,
"clusterIP": "10.96.0.11",
"clusterIPs": [
"10.96.0.11"
],
"externalTrafficPolicy": "Cluster",
"internalTrafficPolicy": "Cluster",
"ipFamilies": [
"IPv4"
],
"ipFamilyPolicy": "SingleStack",
"ports": [
{
"nodePort": 30706,
"port": 80,
"protocol": "TCP",
"targetPort": 9376
}
],
"selector": {
"app": "argo-server"
},
"sessionAffinity": "None",
"type": "LoadBalancer"
},
"status": {
"loadBalancer": {}
}
}
}

File diff suppressed because one or more lines are too long


@@ -7,6 +7,7 @@ import (
"github.com/kubescape/go-logger/helpers"
"github.com/kubescape/kubescape/v2/core/cautils"
"github.com/kubescape/opa-utils/reporthandling"
"github.com/kubescape/opa-utils/reporthandling/apis"
"github.com/kubescape/opa-utils/reporthandling/results/v1/reportsummary"
"github.com/open-policy-agent/opa/ast"
"github.com/open-policy-agent/opa/rego"
@@ -38,6 +39,12 @@ func ConvertFrameworksToSummaryDetails(summaryDetails *reportsummary.SummaryDeta
Description: frameworks[i].Controls[j].Description,
Remediation: frameworks[i].Controls[j].Remediation,
}
if frameworks[i].Controls[j].GetActionRequiredAttribute() == string(apis.SubStatusManualReview) {
c.Status = apis.StatusSkipped
c.StatusInfo.InnerStatus = apis.StatusSkipped
c.StatusInfo.SubStatus = apis.SubStatusManualReview
c.StatusInfo.InnerInfo = string(apis.SubStatusManualReviewInfo)
}
controls[frameworks[i].Controls[j].ControlID] = c
summaryDetails.Controls[id] = c
}


@@ -0,0 +1,73 @@
package policyhandler
import (
"sync"
"time"
)
// TimedCache provides functionality for managing a timed cache.
// The timed cache holds a value for a specified time duration (TTL).
// After the TTL has passed, the value is invalidated.
//
// The cache is thread safe.
type TimedCache[T any] struct {
value T
isSet bool
ttl time.Duration
expiration int64
mutex sync.RWMutex
}
func NewTimedCache[T any](ttl time.Duration) *TimedCache[T] {
cache := &TimedCache[T]{
ttl: ttl,
isSet: false,
}
// start the invalidate task only when the ttl is greater than 0 (cache is enabled)
if ttl > 0 {
go cache.invalidateTask()
}
return cache
}
func (c *TimedCache[T]) Set(value T) {
c.mutex.Lock()
defer c.mutex.Unlock()
// cache is disabled
if c.ttl == 0 {
return
}
c.isSet = true
c.value = value
c.expiration = time.Now().Add(c.ttl).UnixNano()
}
func (c *TimedCache[T]) Get() (T, bool) {
c.mutex.RLock()
defer c.mutex.RUnlock()
if !c.isSet || time.Now().UnixNano() > c.expiration {
return c.value, false
}
return c.value, true
}
func (c *TimedCache[T]) invalidateTask() {
for {
<-time.After(c.ttl)
// read the expiration under the lock to avoid racing with Set
c.mutex.RLock()
expired := time.Now().UnixNano() > c.expiration
c.mutex.RUnlock()
if expired {
c.Invalidate()
}
}
}
func (c *TimedCache[T]) Invalidate() {
c.mutex.Lock()
defer c.mutex.Unlock()
c.isSet = false
}
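As a usage sketch (the values are hypothetical; TimedCache is the type defined above, and the snippet assumes the usual fmt and time imports):

	// cache a list of framework names for five minutes
	cache := NewTimedCache[[]string](5 * time.Minute)
	cache.Set([]string{"nsa", "mitre"})

	if v, ok := cache.Get(); ok {
		fmt.Println("cache hit:", v) // within the TTL
	} else {
		fmt.Println("cache miss") // TTL expired, nothing set, or ttl == 0 (caching disabled)
	}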


@@ -0,0 +1,75 @@
package policyhandler
import (
"testing"
"time"
)
func TestTimedCache(t *testing.T) {
tests := []struct {
name string
// TTL for the cached value
ttl time.Duration
// value to set
value int
// time to wait before checking if value exists
wait time.Duration
// number of times to check if value exists (with wait in between)
checks int
// should the value exist in cache
exists bool
// expected cache value
wantVal int
}{
{
name: "value exists before ttl",
ttl: time.Second * 5,
value: 42,
wait: time.Second * 1,
checks: 2,
exists: true,
wantVal: 42,
},
{
name: "value does not exist after ttl",
ttl: time.Second * 3,
value: 55,
wait: time.Second * 4,
checks: 1,
exists: false,
},
{
name: "cache is disabled (ttl = 0) always returns false",
ttl: 0,
value: 55,
wait: 0,
checks: 1,
exists: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
cache := NewTimedCache[int](tt.ttl)
cache.Set(tt.value)
for i := 0; i < tt.checks; i++ {
// Wait for the specified duration
time.Sleep(tt.wait)
// Get the value from the cache
value, exists := cache.Get()
// Check if value exists
if exists != tt.exists {
t.Errorf("Expected exists to be %v, got %v", tt.exists, exists)
}
// Check value
if exists && value != tt.wantVal {
t.Errorf("Expected value to be %d, got %d", tt.wantVal, value)
}
}
})
}
}


@@ -5,6 +5,7 @@ import (
"fmt"
"strings"
"github.com/armosec/armoapi-go/armotypes"
logger "github.com/kubescape/go-logger"
"github.com/kubescape/go-logger/helpers"
"github.com/kubescape/kubescape/v2/core/cautils"
@@ -14,7 +15,55 @@ import (
"go.opentelemetry.io/otel"
)
func (policyHandler *PolicyHandler) getPolicies(ctx context.Context, policyIdentifier []cautils.PolicyIdentifier, policiesAndResources *cautils.OPASessionObj) error {
const (
PoliciesCacheTtlEnvVar = "POLICIES_CACHE_TTL"
)
var policyHandlerInstance *PolicyHandler
// PolicyHandler
type PolicyHandler struct {
getters *cautils.Getters
cachedPolicyIdentifiers *TimedCache[[]string]
cachedFrameworks *TimedCache[[]reporthandling.Framework]
cachedExceptions *TimedCache[[]armotypes.PostureExceptionPolicy]
cachedControlInputs *TimedCache[map[string][]string]
}
// NewPolicyHandler returns the singleton `PolicyHandler` instance, creating it on first use.
// The PolicyHandler supports caching of downloaded policies and exceptions: set the `POLICIES_CACHE_TTL` environment variable to enable it (the default is no caching).
func NewPolicyHandler() *PolicyHandler {
if policyHandlerInstance == nil {
cacheTtl := getPoliciesCacheTtl()
policyHandlerInstance = &PolicyHandler{
cachedPolicyIdentifiers: NewTimedCache[[]string](cacheTtl),
cachedFrameworks: NewTimedCache[[]reporthandling.Framework](cacheTtl),
cachedExceptions: NewTimedCache[[]armotypes.PostureExceptionPolicy](cacheTtl),
cachedControlInputs: NewTimedCache[map[string][]string](cacheTtl),
}
}
return policyHandlerInstance
}
func (policyHandler *PolicyHandler) CollectPolicies(ctx context.Context, policyIdentifier []cautils.PolicyIdentifier, scanInfo *cautils.ScanInfo) (*cautils.OPASessionObj, error) {
opaSessionObj := cautils.NewOPASessionObj(ctx, nil, nil, scanInfo)
policyHandler.getters = &scanInfo.Getters
// get policies, exceptions and controls inputs
policies, exceptions, controlInputs, err := policyHandler.getPolicies(ctx, policyIdentifier)
if err != nil {
return opaSessionObj, err
}
opaSessionObj.Policies = policies
opaSessionObj.Exceptions = exceptions
opaSessionObj.RegoInputData.PostureControlInputs = controlInputs
return opaSessionObj, nil
}
func (policyHandler *PolicyHandler) getPolicies(ctx context.Context, policyIdentifier []cautils.PolicyIdentifier) (policies []reporthandling.Framework, exceptions []armotypes.PostureExceptionPolicy, controlInputs map[string][]string, err error) {
ctx, span := otel.Tracer("").Start(ctx, "policyHandler.getPolicies")
defer span.End()
logger.L().Info("Downloading/Loading policy definitions")
@@ -22,38 +71,57 @@ func (policyHandler *PolicyHandler) getPolicies(ctx context.Context, policyIdent
cautils.StartSpinner()
defer cautils.StopSpinner()
policies, err := policyHandler.getScanPolicies(ctx, policyIdentifier)
// get policies
policies, err = policyHandler.getScanPolicies(ctx, policyIdentifier)
if err != nil {
return err
return nil, nil, nil, err
}
if len(policies) == 0 {
return fmt.Errorf("failed to download policies: '%s'. Make sure the policy exist and you spelled it correctly. For more information, please feel free to contact ARMO team", strings.Join(policyIdentifierToSlice(policyIdentifier), ", "))
return nil, nil, nil, fmt.Errorf("failed to download policies: '%s'. Make sure the policy exist and you spelled it correctly. For more information, please feel free to contact ARMO team", strings.Join(policyIdentifierToSlice(policyIdentifier), ", "))
}
policiesAndResources.Policies = policies
// get exceptions
exceptionPolicies, err := policyHandler.getters.ExceptionsGetter.GetExceptions(cautils.ClusterName)
if err == nil {
policiesAndResources.Exceptions = exceptionPolicies
} else {
logger.L().Ctx(ctx).Error("failed to load exceptions", helpers.Error(err))
if exceptions, err = policyHandler.getExceptions(); err != nil {
logger.L().Ctx(ctx).Warning("failed to load exceptions", helpers.Error(err))
}
// get account configuration
controlsInputs, err := policyHandler.getters.ControlsInputsGetter.GetControlsInputs(cautils.ClusterName)
if err == nil {
policiesAndResources.RegoInputData.PostureControlInputs = controlsInputs
} else {
logger.L().Ctx(ctx).Error(err.Error())
if controlInputs, err = policyHandler.getControlInputs(); err != nil {
logger.L().Ctx(ctx).Warning(err.Error())
}
cautils.StopSpinner()
cautils.StopSpinner()
logger.L().Success("Downloaded/Loaded policy")
return nil
return policies, exceptions, controlInputs, nil
}
// getScanPolicies gets policies from the cache or downloads them; it returns an error if the policies could not be downloaded.
func (policyHandler *PolicyHandler) getScanPolicies(ctx context.Context, policyIdentifier []cautils.PolicyIdentifier) ([]reporthandling.Framework, error) {
policyIdentifiersSlice := policyIdentifierToSlice(policyIdentifier)
// check if policies are cached
if cachedPolicies, policiesExist := policyHandler.cachedFrameworks.Get(); policiesExist {
// check if the cached policies are the same as the requested policies, otherwise download the policies
if cachedIdentifiers, identifiersExist := policyHandler.cachedPolicyIdentifiers.Get(); identifiersExist && cautils.StringSlicesAreEqual(cachedIdentifiers, policyIdentifiersSlice) {
logger.L().Info("Using cached policies")
return cachedPolicies, nil
}
logger.L().Debug("Cached policies are not the same as the requested policies")
policyHandler.cachedPolicyIdentifiers.Invalidate()
policyHandler.cachedFrameworks.Invalidate()
}
policies, err := policyHandler.downloadScanPolicies(ctx, policyIdentifier)
if err == nil {
policyHandler.cachedFrameworks.Set(policies)
policyHandler.cachedPolicyIdentifiers.Set(policyIdentifiersSlice)
}
return policies, err
}
func (policyHandler *PolicyHandler) downloadScanPolicies(ctx context.Context, policyIdentifier []cautils.PolicyIdentifier) ([]reporthandling.Framework, error) {
frameworks := []reporthandling.Framework{}
switch getScanKind(policyIdentifier) {
@@ -102,10 +170,30 @@ func (policyHandler *PolicyHandler) getScanPolicies(ctx context.Context, policyI
return frameworks, nil
}
func policyIdentifierToSlice(rules []cautils.PolicyIdentifier) []string {
s := []string{}
for i := range rules {
s = append(s, fmt.Sprintf("%s: %s", rules[i].Kind, rules[i].Identifier))
func (policyHandler *PolicyHandler) getExceptions() ([]armotypes.PostureExceptionPolicy, error) {
if cachedExceptions, exist := policyHandler.cachedExceptions.Get(); exist {
logger.L().Info("Using cached exceptions")
return cachedExceptions, nil
}
return s
exceptions, err := policyHandler.getters.ExceptionsGetter.GetExceptions(cautils.ClusterName)
if err == nil {
policyHandler.cachedExceptions.Set(exceptions)
}
return exceptions, err
}
func (policyHandler *PolicyHandler) getControlInputs() (map[string][]string, error) {
if cachedControlInputs, exist := policyHandler.cachedControlInputs.Get(); exist {
logger.L().Info("Using cached control inputs")
return cachedControlInputs, nil
}
controlInputs, err := policyHandler.getters.ControlsInputsGetter.GetControlsInputs(cautils.ClusterName)
if err == nil {
policyHandler.cachedControlInputs.Set(controlInputs)
}
return controlInputs, err
}
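getExceptions and getControlInputs above share the same read-through shape; a hypothetical generic helper (not part of this change) could factor it out, assuming Go 1.18+ generics and the TimedCache type introduced earlier:

// getOrFetch returns the cached value when present; otherwise it calls fetch
// and caches the result only on success, mirroring getExceptions/getControlInputs.
func getOrFetch[T any](cache *TimedCache[T], fetch func() (T, error)) (T, error) {
	if v, ok := cache.Get(); ok {
		return v, nil
	}
	v, err := fetch()
	if err == nil {
		cache.Set(v)
	}
	return v, err
}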


@@ -3,6 +3,7 @@ package policyhandler
import (
"fmt"
"strings"
"time"
apisv1 "github.com/kubescape/opa-utils/httpserver/apis/v1"
"github.com/kubescape/opa-utils/reporthandling"
@@ -35,3 +36,20 @@ func validateFramework(framework *reporthandling.Framework) error {
}
return nil
}
// getPoliciesCacheTtl - get policies cache TTL from environment variable or return 0 if not set
func getPoliciesCacheTtl() time.Duration {
if val, err := cautils.ParseIntEnvVar(PoliciesCacheTtlEnvVar, 0); err == nil {
return time.Duration(val) * time.Minute
}
return 0
}
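To illustrate the knob (a sketch; the variable is read once, when the PolicyHandler singleton is first constructed, and its integer value is interpreted as minutes):

	// hypothetical wiring, e.g. in a test setup
	os.Setenv(PoliciesCacheTtlEnvVar, "10") // cache downloaded policies for 10 minutes
	handler := NewPolicyHandler()           // later scans reuse cached frameworks,
	                                        // exceptions and control inputs until the TTL expires
	_ = handler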
func policyIdentifierToSlice(rules []cautils.PolicyIdentifier) []string {
s := []string{}
for i := range rules {
s = append(s, fmt.Sprintf("%s: %s", rules[i].Kind, rules[i].Identifier))
}
return s
}

Some files were not shown because too many files have changed in this diff.