Compare commits

..

186 Commits

Author SHA1 Message Date
Rotem Refael
42929dac58 Merge pull request #171 from armosec/dev
Fix workflow for building image
2021-10-18 11:59:29 +03:00
Rotem Refael
74449c64a2 Merge pull request #170 from Daniel-GrunbergerCA/master
Fix env var name
2021-10-18 11:26:18 +03:00
Daniel-GrunbergerCA
0bd164c69e fix workflow for dev 2021-10-18 11:15:55 +03:00
Daniel-GrunbergerCA
d44c082134 fix env var name 2021-10-18 11:11:32 +03:00
Daniel-GrunbergerCA
d756b9bfe4 check repo name 2021-10-18 11:04:57 +03:00
Daniel-GrunbergerCA
6144050212 echo repo 2021-10-18 11:02:54 +03:00
Daniel-GrunbergerCA
b5fe456b0d fix test 2021-10-18 10:59:41 +03:00
Daniel-GrunbergerCA
37791ff391 testing workflow output 2021-10-18 10:57:04 +03:00
Daniel-GrunbergerCA
c2d99163a6 test output for workflow 2021-10-18 10:55:13 +03:00
Daniel-GrunbergerCA
d948353b99 test env var output for build 2021-10-18 10:51:50 +03:00
Daniel-GrunbergerCA
2649cb75f6 Revert "check env var output"
This reverts commit 7c8da4a4b9.
2021-10-18 10:51:28 +03:00
Daniel-GrunbergerCA
7c8da4a4b9 check env var output 2021-10-18 10:49:53 +03:00
Rotem Refael
b72f5d75f7 Merge pull request #167 from armosec/dev
Update resource count
2021-10-18 10:34:45 +03:00
Rotem Refael
947826a764 Merge pull request #168 from Daniel-GrunbergerCA/master
Build docker image only for original repo (not for forks)
2021-10-18 10:31:22 +03:00
David Wertenteil
906c69a86d Merge pull request #169 from armosec/rotemamsa-patch-1
Delete backendconnectormethods.go
2021-10-18 10:28:19 +03:00
Rotem Refael
9c65aadcc7 Delete backendconnectormethods.go 2021-10-18 10:26:59 +03:00
Daniel-GrunbergerCA
09879e00ba build image only for original repo 2021-10-18 10:14:42 +03:00
Daniel-GrunbergerCA
5cbb7d940d Revert "fix build again"
This reverts commit 050976d7a3.
2021-10-18 10:12:42 +03:00
Daniel-GrunbergerCA
050976d7a3 fix build again 2021-10-18 10:05:58 +03:00
Daniel-GrunbergerCA
6a96d9f8b5 fix build 2021-10-18 09:59:03 +03:00
Daniel-GrunbergerCA
8c55dfbcf6 fix workflow 2021-10-18 09:52:26 +03:00
Daniel-GrunbergerCA
78c5b49b5a create image only for original repo 2021-10-18 09:49:50 +03:00
dwertent
c33ca04a4f update dev build workflow 2021-10-18 09:06:14 +03:00
David Wertenteil
f12e66d315 Merge pull request #166 from dwertent/master
update package version
2021-10-18 09:02:25 +03:00
David Wertenteil
8e4bb36df8 Merge pull request #165 from AvnerTzurArmo/dev
Add Docker image builds
2021-10-18 09:00:27 +03:00
dwertent
034412f6fe update package version 2021-10-18 08:59:41 +03:00
Avner Tzur
88d83ba72d fix typo 2021-10-18 08:26:15 +03:00
Avner Tzur
829e7a33aa fix typo 2021-10-18 08:25:25 +03:00
Avner Tzur
1dcde1538e fix typo 2021-10-17 16:50:59 +03:00
Avner Tzur
b876cd0975 use armosec repo name 2021-10-17 16:49:01 +03:00
Avner Tzur
a06d11dc17 fix repo env usage 2021-10-17 16:38:00 +03:00
Avner Tzur
b1f5cd45c4 use repo as secret 2021-10-17 16:13:54 +03:00
Avner Tzur
2c44c0e1f0 fix Docker build and tags 2021-10-17 15:59:47 +03:00
Avner Tzur
37c429b264 add Docker container build 2021-10-17 15:55:06 +03:00
Avner Tzur
82994ce754 fix typo 2021-10-17 15:43:38 +03:00
Avner Tzur
621f64c363 fix image name 2021-10-17 15:25:50 +03:00
David Wertenteil
419a77f144 Merge pull request #164 from dwertent/master
using public packages
2021-10-14 17:36:37 +03:00
dwertent
f043358a59 merging to dev 2021-10-14 17:32:56 +03:00
dwertent
24b17a8c27 update counters 2021-10-14 17:12:10 +03:00
Rotem Refael
26a64b788b Merge pull request #163 from AvnerTzurArmo/dev
update Dockerfile using Python build script
2021-10-14 15:54:56 +03:00
Rotem Refael
89a05d247b Merge pull request #159 from armosec/dev
test help message after build
2021-10-14 15:43:28 +03:00
Avner Tzur
718d549bb3 update Dockerfile using Python build script 2021-10-14 15:23:35 +03:00
Bezbran
7faf24cf88 build python3 --version && python3 build.py 2021-10-14 14:48:47 +03:00
Bezbran
7bebc7a814 master PR python 3 for build 2021-10-14 14:48:16 +03:00
Bezbran
8647a087dd Merge pull request #162 from Bezbran/dev
python3 for build
2021-10-14 14:40:25 +03:00
Bezalel Brandwine
78670665c4 build.py support only pthon 3.7 and above 2021-10-14 14:39:43 +03:00
Bezalel Brandwine
1dd587cd83 try python3 2021-10-14 14:30:58 +03:00
Bezalel Brandwine
d69cccf821 print python version 2021-10-14 14:24:49 +03:00
Bezbran
8ae3b9c28f Merge pull request #161 from Bezbran/dev
adapt test_cli_prints to windows
2021-10-14 14:17:42 +03:00
Bezalel Brandwine
9b6ad102b1 adapt test_cli_prints to windows 2021-10-14 14:16:12 +03:00
Bezbran
e6787b77fb Merge pull request #160 from Bezbran/dev
files tests passed locally on windows
2021-10-14 14:10:35 +03:00
Bezalel Brandwine
46449045a6 files tests passed locally on windows 2021-10-14 14:04:55 +03:00
Bezalel Brandwine
d81984b4c6 try to adapt path to windows 2021-10-14 13:28:10 +03:00
Bezbran
22b463f306 Merge pull request #158 from Bezbran/dev
test help message after build
2021-10-14 12:13:32 +03:00
Bezalel Brandwine
475e45b848 add CLI prints checks skeleton 2021-10-14 12:07:51 +03:00
Rotem Refael
32fac97d21 Merge pull request #157 from armosec/dev
fix  --help flag
2021-10-14 11:47:22 +03:00
Bezalel Brandwine
f9f32a1062 comment in file loading tests 2021-10-14 11:43:11 +03:00
Bezbran
c90c3bbd05 test help message 2021-10-14 11:32:59 +03:00
dwertent
ea42d9a061 remove exceptions from pkg 2021-10-14 11:25:53 +03:00
Bezbran
cbae9a087b Merge pull request #6 from armosec/dev
Dev
2021-10-14 10:56:39 +03:00
Bezbran
4da199c43e Merge pull request #156 from Bezbran/dev
fix  --help flag
2021-10-14 10:55:43 +03:00
Bezalel Brandwine
00d8660a91 fix --help flag 2021-10-14 10:46:42 +03:00
Rotem Refael
76c2f6afe0 Merge pull request #155 from armosec/dev
Fixed #148
2021-10-14 10:20:35 +03:00
dwertent
efa53bd83c fixed loop 2021-10-14 09:13:07 +03:00
dwertent
01f6a1e1c0 fixed counters 2021-10-13 20:41:30 +03:00
dwertent
1d1344ebc1 fixed warning counter 2021-10-13 20:33:26 +03:00
Rotem Refael
9f78703dee Merge pull request #154 from armosec/dev
Support in --environment
2021-10-13 17:05:52 +03:00
Bezbran
1a0e96338e Merge pull request #5 from armosec/dev
Merge back pull request #153 from Bezbran/dev
2021-10-13 16:58:31 +03:00
Bezbran
eff4690e0e Merge pull request #153 from Bezbran/dev
add support in --environment=dev or customized URLs
2021-10-13 16:57:39 +03:00
Bezbran
266480c234 Merge branch 'dev' into dev 2021-10-13 16:57:14 +03:00
Bezalel Brandwine
a922d01005 add support in --environment=dev or customized URLs 2021-10-13 16:45:06 +03:00
dwertent
b053b84197 use packets 2021-10-13 15:56:43 +03:00
David Wertenteil
0d9711c8bb Merge pull request #151 from armosec/dev
Fixed issues
2021-10-13 13:02:52 +03:00
David Wertenteil
edab68f4fb Merge pull request #152 from Bezbran/dev
fix download framework command
2021-10-13 12:35:26 +03:00
Bezalel Brandwine
08f04e19ef fix download framework (create dir if necessary, lower case "download") 2021-10-13 12:24:21 +03:00
David Wertenteil
e62234a6ac Merge pull request #150 from dwertent/master
Fixed issues #149 #76
2021-10-13 12:07:12 +03:00
dwertent
5499c7a96f fixed #149 2021-10-13 12:03:57 +03:00
dwertent
7f9c5c25ae fixed #76 2021-10-13 11:58:22 +03:00
Bezbran
b1276d56f7 Merge pull request #4 from armosec/dev
Dev
2021-10-13 09:11:24 +03:00
Rotem Refael
b53bf320a6 Merge pull request #141 from armosec/dev
Update default upload of results to be opt-in
2021-10-12 18:31:16 +03:00
Daniel Grunberger
81a4c168ed Merge pull request #147 from Daniel-GrunbergerCA/master
fix warning numbers of resources
2021-10-12 18:13:51 +03:00
Daniel-GrunbergerCA
512a1a806e fix warning numbers of resources 2021-10-12 18:12:49 +03:00
David Wertenteil
c95ef05177 Merge pull request #146 from dwertent/master
update flag to keep-local
2021-10-12 18:06:02 +03:00
Daniel Grunberger
563bd8a6a3 Merge pull request #145 from Daniel-GrunbergerCA/master
fix number of resources
2021-10-12 17:41:48 +03:00
dwertent
b444542f4d update flag to keep-local 2021-10-12 17:40:56 +03:00
Daniel-GrunbergerCA
6eded41eee fix number of resources 2021-10-12 17:08:03 +03:00
YiscahLevySilas1
de91ce182d Merge pull request #144 from YiscahLevySilas1/dev
add controlID field, 'id' to be deprecated
2021-10-12 15:07:10 +03:00
dwertent
afc7f85460 adding demo link to readme 2021-10-12 12:39:26 +03:00
yiscah
c1b4d7de39 add controlID field, 'id' to be deprecated 2021-10-12 10:48:46 +03:00
dwertent
cde5b83bca update summary 2021-10-12 10:26:30 +03:00
Bezbran
f00106a502 Merge pull request #143 from Bezbran/dev
Run release workflow just for merged PRs to master
2021-10-12 10:17:26 +03:00
Bezalel Brandwine
3ae2742717 rmove workflow dependency 2021-10-12 10:05:55 +03:00
David Wertenteil
8deac19945 Merge pull request #142 from dwertent/master
Update default unregistered behavior
2021-10-12 10:00:46 +03:00
dwertent
2ad469a5f4 update summary image 2021-10-12 09:57:46 +03:00
dwertent
c67b111c77 do not submit unregisred user 2021-10-12 09:51:14 +03:00
Bezalel Brandwine
ee770e7429 add github actions workflow for opened PRs to master 2021-10-12 09:49:26 +03:00
Bezalel Brandwine
197a3adf6a trigger release only for push to master to avoid release on closed PRs without merging 2021-10-12 09:38:52 +03:00
Bezbran
269d39497b Merge pull request #1 from armosec/dev
Dev
2021-10-12 09:29:28 +03:00
David Wertenteil
0c3a7ac02b Merge pull request #140 from dwertent/master
split setCustomer func
2021-10-12 08:10:02 +03:00
dwertent
07443548c9 split setCustomer func 2021-10-12 07:59:41 +03:00
Ben Hirschberg
e561f78ada Merge pull request #133 from clfs/clfs/bugfixes
Add missing return
2021-10-11 23:22:29 +03:00
Ben Hirschberg
bb88272251 Merge pull request #129 from ferhaty/dev
removed references to go mod tidy
2021-10-11 23:21:08 +03:00
Ferhat Yildiz
ce2f3dafae removed references to go mod tidy since there is a go.sum file now 2021-10-11 17:02:21 +02:00
David Wertenteil
488c67056c Merge pull request #138 from dwertent/master
Update readme
2021-10-11 16:32:25 +03:00
dwertent
351e51208d Merge remote-tracking branch 'upstream/dev' 2021-10-11 16:29:43 +03:00
dwertent
0c79df7299 restore id after local run, update readme 2021-10-11 16:29:26 +03:00
David Wertenteil
b9a5c5af35 Merge pull request #137 from dwertent/master
Update default upload of results to be opt-in
2021-10-11 15:04:07 +03:00
dwertent
59741b84d1 Update default upload of results to be opt-in 2021-10-11 15:00:23 +03:00
David Wertenteil
25efcdf750 Merge pull request #135 from Daniel-GrunbergerCA/master
Improve help msgs
2021-10-10 18:50:46 +03:00
Daniel-GrunbergerCA
49dfa6c2a6 Merge remote-tracking branch 'upstream/dev' 2021-10-10 15:50:44 +03:00
Daniel-GrunbergerCA
659647353e fix return code in help msg 2021-10-10 15:43:08 +03:00
Daniel-GrunbergerCA
0eba51c80b beautify help msgs 2021-10-10 14:38:14 +03:00
David Wertenteil
35996d11f4 Merge pull request #134 from Daniel-GrunbergerCA/master
fix TestConvertLabelsToString test
2021-10-10 14:06:03 +03:00
Daniel-GrunbergerCA
e0e0a811eb Add supported frameworks to help msg 2021-10-10 14:01:35 +03:00
Daniel-GrunbergerCA
76f400d0aa fix TestConvertLabelsToString test 2021-10-10 13:06:51 +03:00
Calvin Figuereo-Supraner
8e950e0f54 Add missing return 2021-10-09 21:25:01 -07:00
Rotem Refael
0adb9dd540 Merge pull request #126 from armosec/dev 2021-10-07 19:18:10 +03:00
David Wertenteil
5825555534 Merge pull request #127 from dwertent/master
Adding unittest to workflow
2021-10-07 18:24:56 +03:00
dwertent
306a8c6201 Merge remote-tracking branch 'upstream/dev' 2021-10-07 18:19:00 +03:00
dwertent
123b620085 Merge branch 'master' of github.com:armosec/kubescape into dev 2021-10-07 18:14:29 +03:00
dwertent
831e7814be ignore files 2021-10-07 18:12:21 +03:00
Ben Hirschberg
7c85199ac2 Merge pull request #106 from Juneezee/go1.17
build: upgrade to Go 1.17
2021-10-07 18:05:42 +03:00
dwertent
efec8e4f2f adding unittest to workflow 2021-10-07 18:02:44 +03:00
Avner Tzur
af4faef9cf Add macOS brew installation 2021-10-07 17:30:12 +03:00
dwertent
90052ad9e3 Merge branch 'master' of github.com:armosec/kubescape into dev 2021-10-07 15:46:50 +03:00
Rotem Refael
35c7b16e4a Update build.yaml
change report link
2021-10-07 15:18:37 +03:00
David Wertenteil
7ac75acfc2 Merge pull request #125 from dwertent/master
Update display color
2021-10-07 15:11:43 +03:00
dwertent
22662fddcd update display color 2021-10-07 15:03:48 +03:00
David Wertenteil
6df1cdf5d8 Merge pull request #124 from dwertent/master
Changing warning to excluded in results
2021-10-07 14:00:19 +03:00
dwertent
2287c51d73 changing warning to excluded 2021-10-07 13:59:20 +03:00
David Wertenteil
372942a14f Merge pull request #122 from dwertent/master
Adding go sum
2021-10-07 10:18:23 +03:00
dwertent
6362246da4 Merge remote-tracking branch 'upstream/dev' 2021-10-07 10:11:41 +03:00
dwertent
9986d69215 adding go sum 2021-10-07 10:11:14 +03:00
Eng Zer Jun
21644e5cba refactor: move from io/ioutil to io and os package
The io/ioutil package has been deprecated as of Go 1.16, see
https://golang.org/doc/go1.16#ioutil. This commit replaces the existing
io/ioutil functions with their new definitions in io and os packages.

Signed-off-by: Eng Zer Jun <engzerjun@gmail.com>
2021-10-07 00:23:09 +08:00
Eng Zer Jun
ad93217bf6 build: upgrade to Go 1.17
Signed-off-by: Eng Zer Jun <engzerjun@gmail.com>
2021-10-07 00:23:04 +08:00
Ben Hirschberg
ca49bc1ddd Merge pull request #121 from armosec/dev
Fixed issues, update windows support
2021-10-06 18:20:37 +03:00
David Wertenteil
1229c73ddc Merge pull request #120 from dwertent/master
Fixing exception cluster name support
2021-10-06 18:16:51 +03:00
dwertent
7416202555 adding mitre example 2021-10-06 18:14:24 +03:00
dwertent
a0ba683eea Merge remote-tracking branch 'upstream/dev' 2021-10-06 18:12:48 +03:00
dwertent
89654eb26f update exception cluster name compare 2021-10-06 18:12:33 +03:00
Bezbran
9d1736a141 Typo in readme built.py -->> build.py 2021-10-06 15:22:38 +03:00
David Wertenteil
eaa4ed3da5 Merge pull request #118 from xdavidel/master
Update windows support in build
2021-10-06 15:10:02 +03:00
David Delarosa
0db3f65312 Merge remote-tracking branch 'upstream/dev' 2021-10-06 15:05:07 +03:00
David Wertenteil
1ea0a3ccc5 Merge pull request #117 from dwertent/master
load configMap configuration before file configuration
2021-10-06 14:24:30 +03:00
dwertent
16cd30bea8 load configMap before file 2021-10-06 14:06:36 +03:00
David Delarosa
075ba4c603 Comment out this windows exe
The build workflow relay on the same binary name - so we won't change
that right now.
2021-10-06 11:23:56 +03:00
David Delarosa
2d898822df Merge branch 'dev-win' 2021-10-06 10:36:11 +03:00
David Delarosa
25b8ec82e8 Cannot use both 'uses' and 'run' 2021-10-06 10:21:29 +03:00
David Delarosa
44b74e2681 Change workflow to use build.py script 2021-10-05 17:28:38 +03:00
David Wertenteil
485e171008 Merge pull request #116 from dwertent/master
Revert python script build
2021-10-05 17:17:13 +03:00
dwertent
c12eb83b4b remove comment 2021-10-05 17:12:43 +03:00
dwertent
84060e7823 revert python build 2021-10-05 17:07:30 +03:00
David Wertenteil
d80d50b59d Merge pull request #115 from dwertent/master
fixed in cluster crash - #114
2021-10-05 16:56:00 +03:00
dwertent
f11f054fea offline not new feature 2021-10-05 16:51:03 +03:00
dwertent
4c1d491d5a Merge remote-tracking branch 'upstream/dev' 2021-10-05 16:43:54 +03:00
dwertent
2b67cc520c windows install support 2021-10-05 16:43:05 +03:00
dwertent
2a5712bd3c fixed in cluster crash 2021-10-05 16:11:41 +03:00
Ben Hirschberg
ccbc11408b Merge pull request #109 from armosec/dev
Update master with fixed issues #95 #96
2021-10-05 09:25:35 +03:00
Bezbran
22bae315be Merge pull request #112 from Bezbran/dev
Add discord using github pages
2021-10-05 09:23:17 +03:00
Bezalel Brandwine
740ab7cb46 point readme discord to github pages 2021-10-05 09:21:58 +03:00
Bezalel Brandwine
cdaa9aa1b0 discord UI beautify 2021-10-05 09:18:39 +03:00
Bezbran
875f82dcbb Merge pull request #4 from armosec/dev
Dev from org
2021-10-05 09:18:03 +03:00
Ben Hirschberg
e8f6bdd64a Merge pull request #108 from YiscahLevySilas1/master
added mitre to supported frameworks
2021-10-04 17:06:00 +03:00
yiscah
25247491ee add mitre to supportedFrameworks, accept upper/lowercase "MITRE" 2021-10-04 16:56:18 +03:00
Ben Hirschberg
57ae3dc3a7 Merge pull request #107 from dwertent/master
Fixed #95, #96
2021-10-04 16:53:48 +03:00
dwertent
27d00b58d7 Adding star to readme, support wild labels 2021-10-04 15:15:18 +03:00
Bezalel Brandwine
263821ce67 landing page in docs dir 2021-10-04 14:57:11 +03:00
Bezalel Brandwine
3c12247b00 add index html as GitHub pages landing page 2021-10-04 14:54:04 +03:00
Bezbran
56d41596f6 Merge pull request #3 from armosec/dev
Dev merge
2021-10-04 14:32:51 +03:00
Avner Tzur
f0cd1965b4 update git repo URL using https 2021-10-04 13:51:47 +03:00
dwertent
3a4c06a818 fixed issue #95 2021-10-03 17:22:50 +03:00
David Wertenteil
fd8dd7ab8a Merge pull request #100 from slashben/master
Adding discord links to readme
2021-10-03 15:26:39 +03:00
Ben Hirschberg
0434f6a935 Merge pull request #99 from taylor/patch-1
Fix link to hardening guide in README
2021-10-03 13:39:05 +03:00
Benyamin Hirschberg
b51a26442c returning examples to the main readme 2021-10-03 09:16:18 +03:00
Benyamin Hirschberg
6462ac0f0e moving options 2021-10-03 09:08:03 +03:00
Benyamin Hirschberg
fc01dbbac9 discord banner 2021-10-03 08:10:29 +03:00
Benyamin Hirschberg
df8576c066 Use a better discord logo 2021-10-03 08:07:42 +03:00
Benyamin Hirschberg
ca8adf28cf Discord invitation 2021-10-03 08:03:17 +03:00
Taylor Carpenter
f70c6c566e Fix link to hardening guide
Signed-off-by: Taylor Carpenter <taylor@vulk.coop>

Updating the link to the Kubernetes Hardening Guidance by NSA and CISA

Ref: https://github.com/armosec/kubescape/issues/97
2021-09-30 10:10:51 -05:00
Benyamin Hirschberg
056d4411b7 Merge pull request #92 from BenHirschbergCa/master
adding CONTRIBUTING.md
2021-09-27 13:55:13 +03:00
Ben Hirschberg
602b83b0e5 adding CONTRIBUTING.md 2021-09-27 13:53:54 +03:00
Benyamin Hirschberg
1cfcc6d930 Merge pull request #90 from BenHirschbergCa/master
installing kubescape in home directory if possible
2021-09-23 21:05:31 +03:00
Ben Hirschberg
8c6f618743 installing kubescape in home directory if possible 2021-09-23 17:54:05 +00:00
Benyamin Hirschberg
6adf1c3162 Merge pull request #88 from BenHirschbergCa/master
Fixing sudo missing issue and returning build without C runtime dependency
2021-09-21 23:59:30 +03:00
Ben Hirschberg
9f5f4f1832 removing useless command in install 2021-09-21 23:48:26 +03:00
Ben Hirschberg
9f49cc83e9 Fixing install.sh to work in environments where there is no sudo && enabling static executable build 2021-09-21 23:45:35 +03:00
126 changed files with 2431 additions and 12150 deletions


@@ -2,10 +2,7 @@ name: build
on:
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]
    types: [ closed ]
    branches: [ master ]
jobs:
  once:
    name: Create release
@@ -32,18 +29,22 @@ jobs:
        os: [ubuntu-latest, macos-latest, windows-latest]
    steps:
    - uses: actions/checkout@v1
    - name: Set up Go
      uses: actions/setup-go@v2
      with:
        go-version: 1.16
        go-version: 1.17
    - name: Test
      run: go test -v ./...
    - name: Build
      env:
        RELEASE: v1.0.${{ github.run_number }}
        ArmoBEServer: api.armo.cloud
        ArmoERServer: report.euprod1.cyberarmorsoft.com
        ArmoERServer: report.armo.cloud
        ArmoWebsite: portal.armo.cloud
      run: mkdir -p build/${{ matrix.os }} && go mod tidy && go build -ldflags "-w -s -X github.com/armosec/kubescape/cmd.BuildNumber=$RELEASE -X github.com/armosec/kubescape/cautils/getter.ArmoBEURL=$ArmoBEServer -X github.com/armosec/kubescape/cautils/getter.ArmoERURL=$ArmoERServer -X github.com/armosec/kubescape/cautils/getter.ArmoFEURL=$ArmoWebsite" -o build/${{ matrix.os }}/kubescape # && md5sum build/${{ matrix.os }}/kubescape > build/${{ matrix.os }}/kubescape.md5
        CGO_ENABLED: 0
      run: python3 --version && python3 build.py
    - name: Upload Release binaries
      id: upload-release-asset
@@ -55,3 +56,33 @@ jobs:
        asset_path: build/${{ matrix.os }}/kubescape
        asset_name: kubescape-${{ matrix.os }}
        asset_content_type: application/octet-stream
  build-docker:
    name: Build docker container, tag and upload to registry
    needs: build
    if: ${{ github.repository == 'armosec/kubescape' }}
    runs-on: ubuntu-latest
    steps:
    - uses: actions/checkout@v2
    - name: Set name
      run: echo quay.io/armosec/kubescape:v1.0.${{ github.run_number }} > build_tag.txt
    - name: Build the Docker image
      run: docker build . --file build/Dockerfile --tag $(cat build_tag.txt)
    - name: Re-Tag Image to latest
      run: docker tag $(cat build_tag.txt) quay.io/armosec/kubescape:latest
    - name: Login to Quay.io
      env: # Or as an environment variable
        QUAY_PASSWORD: ${{ secrets.QUAYIO_REGISTRY_PASSWORD }}
        QUAY_USERNAME: ${{ secrets.QUAYIO_REGISTRY_USERNAME }}
      run: docker login -u="${QUAY_USERNAME}" -p="${QUAY_PASSWORD}" quay.io
    - name: Push Docker image
      run: |
        docker push $(cat build_tag.txt)
        docker push quay.io/armosec/kubescape:latest
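The `build-docker` job above boils down to four docker commands. For anyone who wants to reproduce the release-image flow outside CI, here is a minimal Python sketch of the same build/tag/login/push sequence; the run number and Quay credentials are hypothetical placeholders, not values taken from this repository.

```python
import subprocess

def build_and_push(run_number: str, quay_user: str, quay_password: str) -> None:
    versioned = f"quay.io/armosec/kubescape:v1.0.{run_number}"  # same tag convention as the workflow
    latest = "quay.io/armosec/kubescape:latest"

    # Build from the repository root using the checked-in Dockerfile
    subprocess.run(["docker", "build", ".", "--file", "build/Dockerfile", "--tag", versioned], check=True)
    # Re-tag the versioned image as :latest
    subprocess.run(["docker", "tag", versioned, latest], check=True)
    # Log in to Quay and push both tags
    subprocess.run(["docker", "login", "-u", quay_user, "-p", quay_password, "quay.io"], check=True)
    subprocess.run(["docker", "push", versioned], check=True)
    subprocess.run(["docker", "push", latest], check=True)

if __name__ == "__main__":
    # Hypothetical values -- substitute your own run number and Quay credentials
    build_and_push("123", "my-quay-user", "my-quay-password")
```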


@@ -1,11 +1,8 @@
name: build
name: build-dev
on:
  push:
    branches: [ dev ]
  pull_request:
    branches: [ dev ]
    types: [ closed ]
jobs:
  build:
    name: Create cross-platform dev build
@@ -19,17 +16,50 @@ jobs:
    - name: Set up Go
      uses: actions/setup-go@v2
      with:
        go-version: 1.16
        go-version: 1.17
    - name: Test
      run: go test -v ./...
    - name: Build
      env:
        RELEASE: v1.0.${{ github.run_number }}
        ArmoBEServer: api.armo.cloud
        ArmoERServer: report.euprod1.cyberarmorsoft.com
        ArmoWebsite: portal.armo.cloud
      run: mkdir -p build/${{ matrix.os }} && go mod tidy && go build -ldflags "-w -s -X github.com/armosec/kubescape/cmd.BuildNumber=$RELEASE -X github.com/armosec/kubescape/cautils/getter.ArmoBEURL=$ArmoBEServer -X github.com/armosec/kubescape/cautils/getter.ArmoERURL=$ArmoERServer -X github.com/armosec/kubescape/cautils/getter.ArmoFEURL=$ArmoWebsite" -o build/${{ matrix.os }}/kubescape # && md5sum build/${{ matrix.os }}/kubescape > build/${{ matrix.os }}/kubescape.md5
        CGO_ENABLED: 0
      run: python3 --version && python3 build.py
    - name: Upload build artifacts
      uses: actions/upload-artifact@v2
      with:
        name: kubescape-${{ matrix.os }}
        path: build/${{ matrix.os }}/kubescape
  build-docker:
    name: Build docker container, tag and upload to registry
    needs: build
    if: ${{ github.repository == 'armosec/kubescape' }}
    runs-on: ubuntu-latest
    steps:
    - uses: actions/checkout@v2
    - name: Set name
      run: echo quay.io/armosec/kubescape:dev-v1.0.${{ github.run_number }} > build_tag.txt
    - name: Build the Docker image
      run: docker build . --file build/Dockerfile --tag $(cat build_tag.txt)
    - name: Login to Quay.io
      env: # Or as an environment variable
        QUAY_PASSWORD: ${{ secrets.QUAYIO_REGISTRY_PASSWORD }}
        QUAY_USERNAME: ${{ secrets.QUAYIO_REGISTRY_USERNAME }}
      run: docker login -u="${QUAY_USERNAME}" -p="${QUAY_PASSWORD}" quay.io
    - name: Push Docker image
      run: |
        docker push $(cat build_tag.txt)

.github/workflows/master_pr_checks.yaml (new file, 38 lines)

@@ -0,0 +1,38 @@
name: master-pr
on:
  pull_request:
    branches: [ master ]
    types: [ edited, opened, synchronize, reopened ]
jobs:
  build:
    name: Create cross-platform build
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-latest, macos-latest, windows-latest]
    steps:
    - uses: actions/checkout@v1
    - name: Set up Go
      uses: actions/setup-go@v2
      with:
        go-version: 1.17
    - name: Test
      run: go test -v ./...
    - name: Build
      env:
        RELEASE: v1.0.${{ github.run_number }}
        ArmoBEServer: api.armo.cloud
        ArmoERServer: report.armo.cloud
        ArmoWebsite: portal.armo.cloud
        CGO_ENABLED: 0
      run: python3 --version && python3 build.py
    - name: Upload build artifacts
      uses: actions/upload-artifact@v2
      with:
        name: kubescape-${{ matrix.os }}
        path: build/${{ matrix.os }}/kubescape

.gitignore (1 line changed)

@@ -1,5 +1,4 @@
*.vs*
*go.sum*
*kubescape*
*debug*
.idea

CONTRIBUTING.md (new file, 99 lines)

@@ -0,0 +1,99 @@
# Contributing
First, it is awesome that you are considering contributing to Kubescape! Contributing is important and fun and we welcome your efforts.
When contributing, we categorize contributions into two types:
* Small code changes or fixes, whose scope is limited to one or two files
* Complex features and improvements, whose scope is not limited
If you have a small change, feel free to fire up a Pull Request.
When planning a bigger change, please first discuss the change you wish to make via an issue,
email, or any other method with the owners of this repository. Most likely your changes or features are great, but sometimes we might already be going in this direction (or the exact opposite ;-) ) and we don't want to waste your time.
Please note we have a code of conduct; please follow it in all your interactions with the project.
## Pull Request Process
1. Ensure any install or build dependencies are removed before the end of the layer when doing a
build.
2. Update the README.md with details of changes to the interface, this includes new environment
variables, exposed ports, useful file locations and container parameters.
3. We will merge the Pull Request in once you have the sign-off.
## Code of Conduct
### Our Pledge
In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to making participation in our project and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, gender identity and expression, level of experience,
nationality, personal appearance, race, religion, or sexual identity and
orientation.
### Our Standards
Examples of behavior that contributes to creating a positive environment
include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or
advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic
address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
We will distance ourselves from those who persistently engage in unacceptable behavior.
### Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.
### Scope
This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community. Examples of
representing a project or community include using an official project e-mail
address, posting via an official social media account, or acting as an appointed
representative at an online or offline event. Representation of a project may be
further defined and clarified by project maintainers.
### Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the project team at [INSERT EMAIL ADDRESS]. All
complaints will be reviewed and investigated and will result in a response that
is deemed necessary and appropriate to the circumstances. The project team is
obligated to maintain confidentiality with regard to the reporter of an incident.
Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership.
### Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
available at [http://contributor-covenant.org/version/1/4][version]
[homepage]: http://contributor-covenant.org
[version]: http://contributor-covenant.org/version/1/4/

README.md (132 lines changed)

@@ -3,21 +3,23 @@
[![build](https://github.com/armosec/kubescape/actions/workflows/build.yaml/badge.svg)](https://github.com/armosec/kubescape/actions/workflows/build.yaml)
[![Go Report Card](https://goreportcard.com/badge/github.com/armosec/kubescape)](https://goreportcard.com/report/github.com/armosec/kubescape)
Kubescape is the first tool for testing if Kubernetes is deployed securely as defined in [Kubernetes Hardening Guidance by NSA and CISA](https://www.nsa.gov/News-Features/Feature-Stories/Article-View/Article/2716980/nsa-cisa-release-kubernetes-hardening-guidance/)
Kubescape is the first tool for testing if Kubernetes is deployed securely as defined in [Kubernetes Hardening Guidance by NSA and CISA](https://www.nsa.gov/Press-Room/News-Highlights/Article/Article/2716980/nsa-cisa-release-kubernetes-hardening-guidance/)
Use Kubescape to test clusters or scan single YAML files and integrate it into your processes.
<img src="docs/demo.gif">
# TL;DR
## Install & Run
### Install:
## Install:
```
curl -s https://raw.githubusercontent.com/armosec/kubescape/master/install.sh | /bin/bash
```
### Run:
[Install on windows](#install-on-windows)
[Install on macOS](#install-on-macos)
## Run:
```
kubescape scan framework nsa --exclude-namespaces kube-system,kube-public
```
@@ -26,47 +28,92 @@ If you wish to scan all namespaces in your cluster, remove the `--exclude-namesp
<img src="docs/summary.png">
### Click [👍](https://github.com/armosec/kubescape/stargazers) if you want us to continue to develop and improve Kubescape 😀
### Flags
# Being part of the team
We invite you to our team! We are excited about this project and want to return the love we get.
Want to contribute? Want to discuss something? Have an issue?
* Open an issue; we try to respond within 48 hours
* [Join us](https://armosec.github.io/kubescape/) in a discussion on our discord server!
[<img src="docs/discord-banner.png" width="100" alt="logo" align="center">](https://armosec.github.io/kubescape/)
# Options and examples
## Install on Windows
**Requires powershell v5.0+**
``` powershell
iwr -useb https://raw.githubusercontent.com/armosec/kubescape/master/install.ps1 | iex
```
Note: if you get an error you might need to change the execution policy (i.e. enable Powershell) with
``` powershell
Set-ExecutionPolicy RemoteSigned -scope CurrentUser
```
## Install on macOS
1. ```
brew tap armosec/kubescape
```
2. ```
brew install kubescape
```
## Flags
| flag | default | description | options |
| --- | --- | --- | --- |
| `-e`/`--exclude-namespaces` | Scan all namespaces | Namespaces to exclude from scanning. Recommended to exclude `kube-system` and `kube-public` namespaces |
| `-s`/`--silent` | Display progress messages | Silent progress messages |
| `-t`/`--fail-threshold` | `0` (do not fail) | Fail the command (return exit code 1) if the result is below the threshold | `0` -> `100` |
| `-f`/`--format` | `pretty-printer` | Output format | `pretty-printer`/`json`/`junit` |
| `-o`/`--output` | print to stdout | Save scan result in file |
| `--use-from` | | Load local framework object from specified path. If not used will download latest |
| `--use-default` | `false` | Load local framework object from default path. If not used will download latest | `true`/`false` |
| `--exceptions` | | Path to an [exceptions obj](examples/exceptions.json). If not set will download exceptions from Armo management portal |
| `--results-locally` | `false` | Kubescape sends scan results to Armo management portal to allow users to control exceptions and maintain chronological scan results. Use this flag if you do not wish to use these features | `true`/`false`|
| `--submit` | `false` | If set, Kubescape will send the scan results to Armo management portal where you can see the results in a user-friendly UI, choose your preferred compliance framework, check risk results history and trends, manage exceptions, get remediation recommendations and much more. By default the results are not sent | `true`/`false`|
| `--keep-local` | `false` | Kubescape will not send scan results to Armo management portal. Use this flag if you ran with the `--submit` flag in the past and you do not want to submit your current scan results | `true`/`false`|
| `--account` | | Armo portal account ID. Default will load account ID from configMap or config file | |
## Usage & Examples
### Examples
* Scan a running Kubernetes cluster with [`nsa`](https://www.nsa.gov/News-Features/Feature-Stories/Article-View/Article/2716980/nsa-cisa-release-kubernetes-hardening-guidance/) framework
* Scan a running Kubernetes cluster with [`nsa`](https://www.nsa.gov/News-Features/Feature-Stories/Article-View/Article/2716980/nsa-cisa-release-kubernetes-hardening-guidance/) framework and submit results to [Armo portal](https://portal.armo.cloud/)
```
kubescape scan framework nsa --exclude-namespaces kube-system,kube-public
kubescape scan framework nsa --exclude-namespaces kube-system,kube-public --submit
```
* Scan local `yaml`/`json` files before deploying
* Scan a running Kubernetes cluster with [`mitre`](https://www.microsoft.com/security/blog/2020/04/02/attack-matrix-kubernetes/) framework and submit results to [Armo portal](https://portal.armo.cloud/)
```
kubescape scan framework mitre --exclude-namespaces kube-system,kube-public --submit
```
* Scan local `yaml`/`json` files before deploying. [Take a look at the demonstration](https://youtu.be/Ox6DaR7_4ZI)
```
kubescape scan framework nsa *.yaml
```
* Scan `yaml`/`json` files from a URL
```
kubescape scan framework nsa https://raw.githubusercontent.com/GoogleCloudPlatform/microservices-demo/master/release/kubernetes-manifests.yaml
```
* Output in `json` format
```
kubescape scan framework nsa --exclude-namespaces kube-system,kube-public --format json --output results.json
```
* Output in `junit xml` format
```
kubescape scan framework nsa --exclude-namespaces kube-system,kube-public --format junit --output results.xml
```
@@ -78,7 +125,7 @@ kubescape scan framework nsa --exceptions examples/exceptions.json
### Helm Support
* Render the helm chart using [`helm template`](https://helm.sh/docs/helm/helm_template/) and pass to stdout
```
helm template [NAME] [CHART] [flags] --dry-run | kubescape scan framework nsa -
```
@@ -87,36 +134,55 @@ for example:
```
helm template bitnami/mysql --generate-name --dry-run | kubescape scan framework nsa -
```
### Offline Support <img src="docs/new-feature.svg">
### Offline Support
It is possible to run Kubescape offline!
First download the framework and then scan with `--use-from` flag
* Download the framework and save it to a file; if no file name is specified, it will be saved to `~/.kubescape/<framework name>.json`
1. Download the framework and save it to a file; if no file name is specified, it will be saved to `~/.kubescape/<framework name>.json`
```
kubescape download framework nsa --output nsa.json
```
* Scan using the downloaded framework
2. Scan using the downloaded framework
```
kubescape scan framework nsa --use-from nsa.json
```
Kubescape is an open source project; we welcome your feedback and ideas for improvement. We're also aiming to collaborate with the Kubernetes community to help make the tests themselves more robust and complete as Kubernetes develops.
# How to build
Note: development (and the release process) is done with Go `1.16`
## Build using python (3.7^) script
Kubescape can be built using:
``` sh
python build.py
```
Note: In order to build using the above script, one must set the following environment
variables, which the script reads:
+ RELEASE
+ ArmoBEServer
+ ArmoERServer
+ ArmoWebsite
## Build using go
Note: development (and the release process) is done with Go `1.17`
1. Clone Project
```
git clone git@github.com:armosec/kubescape.git kubescape && cd "$_"
git clone https://github.com/armosec/kubescape.git kubescape && cd "$_"
```
2. Build
```
go mod tidy && go build -o kubescape .
go build -o kubescape .
```
3. Run
@@ -126,11 +192,11 @@ go mod tidy && go build -o kubescape .
4. Enjoy :zany_face:
# How to build in Docker
## How to build in Docker
1. Clone Project
```
git clone git@github.com:armosec/kubescape.git kubescape && cd "$_"
git clone https://github.com/armosec/kubescape.git kubescape && cd "$_"
```
2. Build
@@ -143,14 +209,14 @@ docker build -t kubescape -f build/Dockerfile .
## Tests
Kubescape is running the following tests according to what is defined by [Kubernetes Hardening Guidance by NSA and CISA](https://www.nsa.gov/News-Features/Feature-Stories/Article-View/Article/2716980/nsa-cisa-release-kubernetes-hardening-guidance/)
* Non-root containers
* Immutable container filesystem
* Privileged containers
* hostPID, hostIPC privileges
* hostNetwork access
* allowedHostPaths field
* Protecting pod service account tokens
* Resource policies
* Control plane hardening
* Exposed dashboard
* Allow privilege escalation
* Applications credentials in configuration files
@@ -167,12 +233,10 @@ Kubescape is running the following tests according to what is defined by [Kubern
## Technology
Kubescape is based on the OPA engine: https://github.com/open-policy-agent/opa and ARMO's posture controls.
The tool retrieves Kubernetes objects from the API server and runs a set of [rego snippets](https://www.openpolicyagent.org/docs/latest/policy-language/) developed by [ARMO](https://www.armosec.io/).
By default the results are printed in a pretty, console-friendly manner, but they can be retrieved in JSON format for further processing.
Kubescape is an open source project; we welcome your feedback and ideas for improvement. We're also aiming to collaborate with the Kubernetes community to help make the tests themselves more robust and complete as Kubernetes develops.

build.py (new file, 82 lines)

@@ -0,0 +1,82 @@
import os
import sys
import hashlib
import platform
import subprocess

BASE_GETTER_CONST = "github.com/armosec/kubescape/cautils/getter"
BE_SERVER_CONST = BASE_GETTER_CONST + ".ArmoBEURL"
ER_SERVER_CONST = BASE_GETTER_CONST + ".ArmoERURL"
WEBSITE_CONST = BASE_GETTER_CONST + ".ArmoFEURL"

def checkStatus(status, msg):
    if status != 0:
        sys.stderr.write(msg)
        exit(status)

def getBuildDir():
    currentPlatform = platform.system()
    buildDir = "build/"
    if currentPlatform == "Windows": buildDir += "windows-latest"
    elif currentPlatform == "Linux": buildDir += "ubuntu-latest"
    elif currentPlatform == "Darwin": buildDir += "macos-latest"
    else: raise OSError("Platform %s is not supported!" % (currentPlatform))
    return buildDir

def getPackageName():
    packageName = "kubescape"
    # if platform.system() == "Windows": packageName += ".exe"
    return packageName

def main():
    print("Building Kubescape")

    # print environment variables
    print(os.environ)

    # Set some variables
    packageName = getPackageName()
    buildUrl = "github.com/armosec/kubescape/cmd.BuildNumber"
    releaseVersion = os.getenv("RELEASE")
    ArmoBEServer = os.getenv("ArmoBEServer")
    ArmoERServer = os.getenv("ArmoERServer")
    ArmoWebsite = os.getenv("ArmoWebsite")

    # Create build directory
    buildDir = getBuildDir()
    if not os.path.isdir(buildDir):
        os.makedirs(buildDir)

    # Build kubescape
    ldflags = "-w -s -X %s=%s -X %s=%s -X %s=%s -X %s=%s" \
        % (buildUrl, releaseVersion, BE_SERVER_CONST, ArmoBEServer,
           ER_SERVER_CONST, ArmoERServer, WEBSITE_CONST, ArmoWebsite)
    status = subprocess.call(["go", "build", "-o", "%s/%s" % (buildDir, packageName), "-ldflags", ldflags])
    checkStatus(status, "Failed to build kubescape")

    test_cli_prints(buildDir, packageName)

    sha1 = hashlib.sha1()
    with open(buildDir + "/" + packageName, "rb") as kube:
        sha1.update(kube.read())
    with open(buildDir + "/" + packageName + ".sha1", "w") as kube_sha:
        kube_sha.write(sha1.hexdigest())

    print("Build Done")

def test_cli_prints(buildDir, packageName):
    bin_cli = os.path.abspath(os.path.join(buildDir, packageName))
    print(f"testing CLI prints on {bin_cli}")
    status = str(subprocess.check_output([bin_cli, "-h"]))
    assert "download" in status, "download is missing: " + status

if __name__ == "__main__":
    main()
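The script reads all of its configuration from environment variables. A minimal sketch of a local invocation follows; the `RELEASE` value is a placeholder, and the server values are simply copied from the workflow env blocks above, not authoritative defaults.

```python
import os
import subprocess

env = dict(os.environ)
env.update({
    "RELEASE": "v1.0.0-local",            # placeholder; injected into cmd.BuildNumber
    "ArmoBEServer": "api.armo.cloud",     # -> cautils/getter.ArmoBEURL
    "ArmoERServer": "report.armo.cloud",  # -> cautils/getter.ArmoERURL
    "ArmoWebsite": "portal.armo.cloud",   # -> cautils/getter.ArmoFEURL
    "CGO_ENABLED": "0",                   # matches the workflows' static build
})
subprocess.run(["python3", "build.py"], env=env, check=True)
```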


@@ -1,13 +1,27 @@
FROM golang:1.16-alpine as builder
ENV GOPROXY=https://goproxy.io,direct
ENV GO111MODULE=on
FROM golang:1.17-alpine as builder
#ENV GOPROXY=https://goproxy.io,direct
ENV GO111MODULE=
ENV CGO_ENABLED=0
# Install required python/pip
ENV PYTHONUNBUFFERED=1
RUN apk add --update --no-cache python3 && ln -sf python3 /usr/bin/python
RUN python3 -m ensurepip
RUN pip3 install --no-cache --upgrade pip setuptools
WORKDIR /work
ADD . .
RUN go mod tidy
RUN GOOS=linux CGO_ENABLED=0 go build -ldflags="-s -w " -installsuffix cgo -o kubescape .
RUN python build.py
RUN ls -ltr build/ubuntu-latest
RUN cat /work/build/ubuntu-latest/kubescape.sha1
FROM alpine
COPY --from=builder /work/kubescape /usr/bin/kubescape
COPY --from=builder /work/build/ubuntu-latest/kubescape /usr/bin/kubescape
# # Download the frameworks. Use the "--use-default" flag when running kubescape
# RUN kubescape download framework nsa && kubescape download framework mitre
CMD ["kubescape"]
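The builder stage above prints the `kubescape.sha1` file that build.py writes next to the binary. If you want to verify a binary against that checksum yourself, a small sketch (paths assumed from build.py's Linux output layout) could be:

```python
import hashlib
import sys

def verify(binary_path: str, sha1_path: str) -> bool:
    # Hash the binary and compare with the hex digest stored by build.py
    digest = hashlib.sha1()
    with open(binary_path, "rb") as binary:
        digest.update(binary.read())
    with open(sha1_path) as checksum:
        expected = checksum.read().strip()
    return digest.hexdigest() == expected

if __name__ == "__main__":
    ok = verify("build/ubuntu-latest/kubescape", "build/ubuntu-latest/kubescape.sha1")
    print("checksum OK" if ok else "checksum mismatch")
    sys.exit(0 if ok else 1)
```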


@@ -1,101 +0,0 @@
package apis
import (
"bytes"
"fmt"
"io/ioutil"
"net/http"
)
// HTTPReqFunc allows you to insert query params and more to aggregation message while using update aggregator
type HTTPReqFunc func(req *http.Request, qryData interface{})
func BasicBEQuery(req *http.Request, qryData interface{}) {
q := req.URL.Query()
if notificationData, isok := qryData.(*LoginObject); isok {
q.Add("customerGUID", notificationData.GUID)
}
req.URL.RawQuery = q.Encode()
}
func EmptyQuery(req *http.Request, qryData interface{}) {
q := req.URL.Query()
req.URL.RawQuery = q.Encode()
}
func MapQuery(req *http.Request, qryData interface{}) {
q := req.URL.Query()
if qryMap, isok := qryData.(map[string]string); isok {
for k, v := range qryMap {
q.Add(k, v)
}
}
req.URL.RawQuery = q.Encode()
}
func BEHttpRequest(loginobj *LoginObject, beURL,
httpverb string,
endpoint string,
payload []byte,
f HTTPReqFunc,
qryData interface{}) ([]byte, error) {
client := &http.Client{}
beURL = fmt.Sprintf("%v/%v", beURL, endpoint)
req, err := http.NewRequest(httpverb, beURL, bytes.NewReader(payload))
if err != nil {
return nil, err
}
req.Header.Set("Authorization", loginobj.Authorization)
f(req, qryData)
for _, cookie := range loginobj.Cookies {
req.AddCookie(cookie)
}
resp, err := client.Do(req)
if err != nil {
return nil, err
}
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
fmt.Printf("req:\n%v\nresp:%v\n", req, resp)
return nil, fmt.Errorf("Error #%v Due to: %v", resp.StatusCode, resp.Status)
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
return body, nil
}
type BELoginResponse struct {
Name string `json:"name"`
PreferredUsername string `json:"preferred_username"`
Email string `json:"email"`
CustomerGuid string `json:"customerGuid"`
Expires string `json:"expires"`
Authorization string `json:"authorization"`
Cookies []*http.Cookie
}
func (r *BELoginResponse) ToLoginObject() *LoginObject {
l := &LoginObject{}
l.Authorization = r.Authorization
l.Cookies = r.Cookies
l.Expires = r.Expires
l.GUID = r.CustomerGuid
return l
}
type BackendConnector struct {
BaseURL string
BELoginResponse *BELoginResponse
Credentials *CustomerLoginDetails
HTTPClient *http.Client
}


@@ -1,128 +0,0 @@
package apis
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"strings"
)
func MakeBackendConnector(client *http.Client, baseURL string, loginDetails *CustomerLoginDetails) (*BackendConnector, error) {
if err := ValidateBEConnectorMakerInput(client, baseURL, loginDetails); err != nil {
return nil, err
}
conn := &BackendConnector{BaseURL: baseURL, Credentials: loginDetails, HTTPClient: client}
err := conn.Login()
return conn, err
}
func ValidateBEConnectorMakerInput(client *http.Client, baseURL string, loginDetails *CustomerLoginDetails) error {
if client == nil {
fmt.Errorf("You must provide an initialized httpclient")
}
if len(baseURL) == 0 {
return fmt.Errorf("you must provide a valid backend url")
}
if loginDetails == nil || (len(loginDetails.Email) == 0 && len(loginDetails.Password) == 0) {
return fmt.Errorf("you must provide valid login details")
}
return nil
}
func (r *BackendConnector) Login() error {
if !r.IsExpired() {
return nil
}
loginInfoBytes, err := json.Marshal(r.Credentials)
if err != nil {
return fmt.Errorf("unable to marshal credentials properly")
}
beURL := fmt.Sprintf("%v/%v", r.BaseURL, "login")
req, err := http.NewRequest("POST", beURL, bytes.NewReader(loginInfoBytes))
if err != nil {
return err
}
req.Header.Set("Referer", strings.Replace(beURL, "dashbe", "cpanel", 1))
resp, err := r.HTTPClient.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return fmt.Errorf("unable to read login response")
}
loginS := &BELoginResponse{}
json.Unmarshal(body, &loginS)
loginS.Cookies = resp.Cookies()
r.BELoginResponse = loginS
return nil
}
func (r *BackendConnector) IsExpired() bool {
return r.BELoginResponse == nil || r.BELoginResponse.ToLoginObject().IsExpired()
}
func (r *BackendConnector) GetBaseURL() string {
return r.BaseURL
}
func (r *BackendConnector) GetLoginObj() *LoginObject {
return r.BELoginResponse.ToLoginObject()
}
func (r *BackendConnector) GetClient() *http.Client {
return r.HTTPClient
}
func (r *BackendConnector) HTTPSend(httpverb string,
endpoint string,
payload []byte,
f HTTPReqFunc,
qryData interface{}) ([]byte, error) {
beURL := fmt.Sprintf("%v/%v", r.GetBaseURL(), endpoint)
req, err := http.NewRequest(httpverb, beURL, bytes.NewReader(payload))
if err != nil {
return nil, err
}
if r.IsExpired() {
r.Login()
}
loginobj := r.GetLoginObj()
req.Header.Set("Authorization", loginobj.Authorization)
f(req, qryData)
q := req.URL.Query()
q.Set("customerGUID", loginobj.GUID)
req.URL.RawQuery = q.Encode()
for _, cookie := range loginobj.Cookies {
req.AddCookie(cookie)
}
resp, err := r.GetClient().Do(req)
if err != nil {
return nil, err
}
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
fmt.Printf("req:\n%v\nresp:%v\n", req, resp)
return nil, fmt.Errorf("Error #%v Due to: %v", resp.StatusCode, resp.Status)
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
return body, nil
}


@@ -1,25 +0,0 @@
package apis

// WebsocketScanCommand api
const (
	WebsocketScanCommandVersion string = "v1"
	WebsocketScanCommandPath    string = "scanImage"
)

// commands send via websocket
const (
	UPDATE            string = "update"
	ATTACH            string = "Attach"
	REMOVE            string = "remove"
	DETACH            string = "Detach"
	INCOMPATIBLE      string = "Incompatible"
	REPLACE_HEADERS   string = "ReplaceHeaders"
	IMAGE_UNREACHABLE string = "ImageUnreachable"
	SIGN              string = "sign"
	UNREGISTERED      string = "unregistered"
	INJECT            string = "inject"
	RESTART           string = "restart"
	ENCRYPT           string = "encryptSecret"
	DECRYPT           string = "decryptSecret"
	SCAN              string = "scan"
)


@@ -1,78 +0,0 @@
package apis
import (
"encoding/json"
"fmt"
"net/http"
"github.com/docker/docker/api/types"
)
// WebsocketScanCommand trigger scan thru the websocket
type WebsocketScanCommand struct {
// CustomerGUID string `json:"customerGUID"`
ImageTag string `json:"imageTag"`
Wlid string `json:"wlid"`
IsScanned bool `json:"isScanned"`
ContainerName string `json:"containerName"`
JobID string `json:"jobID,omitempty"`
LastAction int `json:"actionIDN"`
// ImageHash string `json:"imageHash"`
Credentials *types.AuthConfig `json:"credentials,omitempty"`
}
//taken from BE
// ElasticRespTotal holds the total struct in Elastic array response
type ElasticRespTotal struct {
Value int `json:"value"`
Relation string `json:"relation"`
}
// V2ListResponse holds the response of some list request with some metadata
type V2ListResponse struct {
Total ElasticRespTotal `json:"total"`
Response interface{} `json:"response"`
// Cursor for quick access to the next page. Not supported yet
Cursor string `json:"cursor"`
}
// Oauth2Customer returns inside the "ca_groups" field in claims section of
// Oauth2 verification process
type Oauth2Customer struct {
CustomerName string `json:"customerName"`
CustomerGUID string `json:"customerGUID"`
}
type LoginObject struct {
Authorization string `json:"authorization"`
GUID string
Cookies []*http.Cookie
Expires string
}
type SafeMode struct {
Reporter string `json:"reporter"` // "Agent"
Action string `json:"action,omitempty"` // "action"
Wlid string `json:"wlid"` // CAA_WLID
PodName string `json:"podName"` // CAA_POD_NAME
InstanceID string `json:"instanceID"` // CAA_POD_NAME
ContainerName string `json:"containerName,omitempty"` // CAA_CONTAINER_NAME
ProcessName string `json:"processName,omitempty"`
ProcessID int `json:"processID,omitempty"`
ProcessCMD string `json:"processCMD,omitempty"`
ComponentGUID string `json:"componentGUID,omitempty"` // CAA_GUID
StatusCode int `json:"statusCode"` // 0/1/2
ProcessExitCode int `json:"processExitCode"` // 0 +
Timestamp int64 `json:"timestamp"`
Message string `json:"message,omitempty"` // any string
JobID string `json:"jobID,omitempty"` // any string
Compatible *bool `json:"compatible,omitempty"`
}
func (safeMode *SafeMode) Json() string {
b, err := json.Marshal(*safeMode)
if err != nil {
return ""
}
return fmt.Sprintf("%s", b)
}


@@ -1,26 +0,0 @@
package apis
// import (
// "fmt"
// "net/http"
// "testing"
// )
// func TestAuditStructure(t *testing.T) {
// c := http.Client{}
// be, err := MakeBackendConnector(&c, "https://dashbe.eudev3.cyberarmorsoft.com", &CustomerLoginDetails{Email: "lalafi@cyberarmor.io", Password: "*", CustomerName: "CyberArmorTests"})
// if err != nil {
// t.Errorf("sad1")
// }
// b, err := be.HTTPSend("GET", "v1/microservicesOverview", nil, MapQuery, map[string]string{"wlid": "wlid://cluster-childrenofbodom/namespace-default/deployment-pos"})
// if err != nil {
// t.Errorf("sad2")
// }
// fmt.Printf("%v", string(b))
// t.Errorf("sad")
// }


@@ -1,27 +0,0 @@
package apis

import (
	"net/http"
)

// type Dashboard interface {
// 	OPAFRAMEWORKGet(string, bool) ([]opapolicy.Framework, error)
// }

// Connector - interface for any connector (BE/Portal and so on)
type Connector interface {
	//may used for a more generic httpsend interface based method
	GetBaseURL() string
	GetLoginObj() *LoginObject
	GetClient() *http.Client
	Login() error
	IsExpired() bool
	HTTPSend(httpverb string,
		endpoint string,
		payload []byte,
		f HTTPReqFunc,
		qryData interface{}) ([]byte, error)
}


@@ -1,256 +0,0 @@
package apis
import (
"bytes"
"net/http"
"time"
"io/ioutil"
oidc "github.com/coreos/go-oidc"
uuid "github.com/satori/go.uuid"
// "go.uber.org/zap"
"context"
"encoding/json"
"fmt"
"strings"
"golang.org/x/oauth2"
)
func GetOauth2TokenURL() string {
return "https://idens.eudev3.cyberarmorsoft.com/auth/realms/CyberArmorSites"
}
func GetLoginStruct() (LoginAux, error) {
return LoginAux{Referer: "https://cpanel.eudev3.cyberarmorsoft.com/login", Url: "https://cpanel.eudev3.cyberarmorsoft.com/login"}, nil
}
func LoginWithKeycloak(loginDetails CustomerLoginDetails) ([]uuid.UUID, *oidc.IDToken, error) {
// var custGUID uuid.UUID
// config.Oauth2TokenURL
if GetOauth2TokenURL() == "" {
return nil, nil, fmt.Errorf("missing oauth2 token URL")
}
urlaux, _ := GetLoginStruct()
conf, err := getOauth2Config(urlaux)
if err != nil {
return nil, nil, err
}
ctx := context.Background()
provider, err := oidc.NewProvider(ctx, GetOauth2TokenURL())
if err != nil {
return nil, nil, err
}
// "Oauth2ClientID": "golang-client"
oidcConfig := &oidc.Config{
ClientID: "golang-client",
SkipClientIDCheck: true,
}
verifier := provider.Verifier(oidcConfig)
ouToken, err := conf.PasswordCredentialsToken(ctx, loginDetails.Email, loginDetails.Password)
if err != nil {
return nil, nil, err
}
// "Authorization",
authorization := fmt.Sprintf("%s %s", ouToken.Type(), ouToken.AccessToken)
// oidc.IDTokenVerifier
tkn, err := verifier.Verify(ctx, ouToken.AccessToken)
if err != nil {
return nil, tkn, err
}
tkn.Nonce = authorization
if loginDetails.CustomerName == "" {
customers, err := getCustomersNames(tkn)
if err != nil {
return nil, tkn, err
}
if len(customers) == 1 {
loginDetails.CustomerName = customers[0]
} else {
return nil, tkn, fmt.Errorf("login with one of the following customers: %v", customers)
}
}
custGUID, err := getCustomerGUID(tkn, &loginDetails)
if err != nil {
return nil, tkn, err
}
return []uuid.UUID{custGUID}, tkn, nil
}
func getOauth2Config(urlaux LoginAux) (*oauth2.Config, error) {
reURLSlices := strings.Split(urlaux.Referer, "/")
if len(reURLSlices) == 0 {
reURLSlices = strings.Split(urlaux.Url, "/")
}
// zapLogger.With(zap.Strings("referer", reURLSlices)).Info("Searching oauth2Config for")
if len(reURLSlices) < 3 {
reURLSlices = []string{reURLSlices[0], reURLSlices[0], reURLSlices[0]}
}
lg, _ := GetLoginStruct()
provider, _ := oidc.NewProvider(context.Background(), GetOauth2TokenURL())
//provider.Endpoint {"AuthURL":"https://idens.eudev3.cyberarmorsoft.com/auth/realms/CyberArmorSites/protocol/openid-connect/auth","TokenURL":"https://idens.eudev3.cyberarmorsoft.com/auth/realms/CyberArmorSites/protocol/openid-connect/token","AuthStyle":0}
conf := oauth2.Config{
ClientID: "golang-client",
ClientSecret: "4e33bad2-3491-41a6-b486-93c492cfb4a2",
RedirectURL: lg.Referer,
// Discovery returns the OAuth2 endpoints.
Endpoint: provider.Endpoint(),
// "openid" is a required scope for OpenID Connect flows.
Scopes: []string{oidc.ScopeOpenID, "profile", "email"},
}
return &conf, nil
// return nil, fmt.Errorf("canno't find oauth2Config for referer '%+v'.\nPlease set referer or origin headers", reURLSlices)
}
func getCustomersNames(oauth2Details *oidc.IDToken) ([]string, error) {
var claimsJSON Oauth2Claims
if err := oauth2Details.Claims(&claimsJSON); err != nil {
return nil, err
}
customersList := make([]string, 0, len(claimsJSON.CAGroups))
for _, v := range claimsJSON.CAGroups {
var caCustomer Oauth2Customer
if err := json.Unmarshal([]byte(v), &caCustomer); err == nil {
customersList = append(customersList, caCustomer.CustomerName)
}
}
return customersList, nil
}
func getCustomerGUID(tkn *oidc.IDToken, loginDetails *CustomerLoginDetails) (uuid.UUID, error) {
customers, err := getCustomersList(tkn)
if err != nil {
return uuid.UUID{}, err
}
// if customer name not provided - use default customer
if loginDetails.CustomerName == "" && len(customers) > 0 {
return uuid.FromString(customers[0].CustomerGUID)
}
for _, i := range customers {
if i.CustomerName == loginDetails.CustomerName {
return uuid.FromString(i.CustomerGUID)
}
}
return uuid.UUID{}, fmt.Errorf("customer name not found in customer list")
}
func getCustomersList(oauth2Details *oidc.IDToken) ([]Oauth2Customer, error) {
var claimsJSON Oauth2Claims
if err := oauth2Details.Claims(&claimsJSON); err != nil {
return nil, err
}
customersList := make([]Oauth2Customer, 0, len(claimsJSON.CAGroups))
for _, v := range claimsJSON.CAGroups {
var caCustomer Oauth2Customer
if err := json.Unmarshal([]byte(v), &caCustomer); err == nil {
customersList = append(customersList, caCustomer)
}
}
return customersList, nil
}
// func MakeAuthCookies(custGUID uuid.UUID, ouToken *oidc.IDToken) (*http.Cookie, error) {
// var ccc http.Cookie
// var responseData AuthenticationCookie
// expireDate := time.Now().UTC().Add(time.Duration(config.CookieExpirationHours) * time.Hour)
// if ouToken != nil {
// expireDate = ouToken.Expiry
// }
// ccc.Expires = expireDate
// responseData.CustomerGUID = custGUID
// responseData.Expires = ccc.Expires
// responseData.Version = 0
// authorizationStr := ""
// if ouToken != nil {
// authorizationStr = ouToken.Nonce
// if err := ouToken.Claims(&responseData.Oauth2Claims); err != nil {
// errStr := fmt.Sprintf("failed to get claims from JWT")
// return nil, fmt.Errorf("%v", errStr)
// }
// }
// jsonBytes, err := json.Marshal(responseData)
// if err != nil {
// errStr := fmt.Sprintf("failed to get claims from JWT")
// return nil, fmt.Errorf("%v", errStr)
// }
// ccc.Name = "auth"
// ccc.Value = hex.EncodeToString(jsonBytes) + "." + cacheaccess.CalcHmac256(jsonBytes)
// // TODO: HttpOnly for security...
// ccc.HttpOnly = false
// ccc.Path = "/"
// ccc.Secure = true
// ccc.SameSite = http.SameSiteNoneMode
// http.SetCookie(w, &ccc)
// responseData.Authorization = authorizationStr
// jsonBytes, err = json.Marshal(responseData)
// if err != nil {
// w.WriteHeader(http.StatusInternalServerError)
// fmt.Fprintf(w, "error while marshaling response(2) %s", err)
// return
// }
// w.Write(jsonBytes)
// }
func Login(loginDetails CustomerLoginDetails) (*LoginObject, error) {
return nil, nil
}
func GetBEInfo(cfgFile string) string {
return "https://dashbe.eudev3.cyberarmorsoft.com"
}
func BELogin(loginDetails *CustomerLoginDetails, login string, cfg string) (*BELoginResponse, error) {
client := &http.Client{}
basebeURL := GetBEInfo(cfg)
beURL := fmt.Sprintf("%v/%v", basebeURL, login)
loginInfoBytes, err := json.Marshal(loginDetails)
if err != nil {
return nil, err
}
req, err := http.NewRequest("POST", beURL, bytes.NewReader(loginInfoBytes))
if err != nil {
return nil, err
}
req.Header.Set("Referer", strings.Replace(beURL, "dashbe", "cpanel", 1))
resp, err := client.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
loginS := &BELoginResponse{}
json.Unmarshal(body, &loginS)
loginS.Cookies = resp.Cookies()
return loginS, nil
}
func (r *LoginObject) IsExpired() bool {
if r == nil {
return true
}
t, err := time.Parse(time.RFC3339, r.Expires)
if err != nil {
return true
}
return t.UTC().Before(time.Now().UTC())
}
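A minimal usage sketch of the login helpers above (illustrative only - the credentials are placeholders, demoBackendLogin is not part of the package, and error handling is abbreviated):

package apis

import "log"

// demoBackendLogin logs in against the backend returned by GetBEInfo and
// reports whether a cached LoginObject is still usable.
func demoBackendLogin() {
	details := &CustomerLoginDetails{Email: "user@example.com", Password: "placeholder"}
	resp, err := BELogin(details, "login", "")
	if err != nil {
		log.Fatalf("backend login failed: %v", err)
	}
	log.Printf("received %d cookies from the backend", len(resp.Cookies))

	var cached *LoginObject // a nil LoginObject is treated as expired
	if cached.IsExpired() {
		log.Println("no valid cached login - a fresh login is required")
	}
}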

View File

@@ -1,41 +0,0 @@
package apis
// func TestLogin2BE(t *testing.T) {
// loginDetails := CustomerLoginDetails{Email: "lalafi@cyberarmor.io", Password: "***", CustomerName: "CyberArmorTests"}
// res, err := BELogin(loginDetails, "login")
// if err != nil {
// t.Errorf("failed to get raw audit is different ")
// }
// k := res.ToLoginObject()
// fmt.Printf("%v\n", k)
// }
// func TestGetMicroserviceOverview(t *testing.T) {
// // client := &http.Client{}
// loginDetails := CustomerLoginDetails{Email: "lalafi@cyberarmor.io", Password: "***", CustomerName: "CyberArmorTests"}
// loginobj, err := BELogin(loginDetails, "login")
// if err != nil {
// t.Errorf("failed to get raw audit is different ")
// }
// k := loginobj.ToLoginObject()
// beURL := GetBEInfo("")
// res, err := BEHttpRequest(k, beURL,
// "GET",
// "v1/microservicesOverview",
// nil,
// BasicBEQuery,
// k)
// if err != nil {
// t.Errorf("failed to get raw audit is different ")
// }
// s := string(res)
// fmt.Printf("%v\n", s)
// }

View File

@@ -1,38 +0,0 @@
package apis
import (
"time"
"github.com/gofrs/uuid"
)
// AuthenticationCookie is the payload encoded into the "auth" cookie after a successful authentication
type AuthenticationCookie struct {
Oauth2Claims `json:",inline"`
CustomerGUID uuid.UUID `json:"customerGuid"`
Expires time.Time `json:"expires"`
Version int `json:"version"`
Authorization string `json:"authorization,omitempty"`
}
type LoginAux struct {
Referer string
Url string
}
// CustomerLoginDetails holds the credentials and optional customer identifiers sent on login
type CustomerLoginDetails struct {
Email string `json:"email"`
Password string `json:"password"`
CustomerName string `json:"customer,omitempty"`
CustomerGUID uuid.UUID `json:"customerGuid,omitempty"`
}
// Oauth2Claims is the claims section returned by the OAuth2 verification process
type Oauth2Claims struct {
Sub string `json:"sub"`
Name string `json:"name"`
PreferredUserName string `json:"preferred_username"`
CAGroups []string `json:"ca_groups"`
Email string `json:"email"`
}

View File

@@ -1,132 +0,0 @@
package apis
import (
"encoding/json"
"fmt"
)
// Commands is a list of commands received from the websocket
type Commands struct {
Commands []Command `json:"commands"`
}
// Command is the structure of a single command received from the websocket
type Command struct {
CommandName string `json:"commandName"`
ResponseID string `json:"responseID"`
Wlid string `json:"wlid,omitempty"`
WildWlid string `json:"wildWlid,omitempty"`
Sid string `json:"sid,omitempty"`
WildSid string `json:"wildSid,omitempty"`
JobTracking JobTracking `json:"jobTracking"`
Args map[string]interface{} `json:"args,omitempty"`
}
type JobTracking struct {
JobID string `json:"jobID,omitempty"`
ParentID string `json:"parentAction,omitempty"`
LastActionNumber int `json:"numSeq,omitempty"`
}
func (c *Command) DeepCopy() *Command {
newCommand := &Command{}
newCommand.CommandName = c.CommandName
newCommand.ResponseID = c.ResponseID
newCommand.Wlid = c.Wlid
newCommand.WildWlid = c.WildWlid
newCommand.Sid = c.Sid
newCommand.WildSid = c.WildSid
newCommand.JobTracking = c.JobTracking
if c.Args != nil {
newCommand.Args = make(map[string]interface{})
for i, j := range c.Args {
newCommand.Args[i] = j
}
}
return newCommand
}
func (c *Command) GetLabels() map[string]string {
if c.Args != nil {
if ilabels, ok := c.Args["labels"]; ok {
labels := map[string]string{}
if b, e := json.Marshal(ilabels); e == nil {
if e = json.Unmarshal(b, &labels); e == nil {
return labels
}
}
}
}
return map[string]string{}
}
func (c *Command) SetLabels(labels map[string]string) {
if c.Args == nil {
c.Args = make(map[string]interface{})
}
c.Args["labels"] = labels
}
func (c *Command) GetFieldSelector() map[string]string {
if c.Args != nil {
if ilabels, ok := c.Args["fieldSelector"]; ok {
labels := map[string]string{}
if b, e := json.Marshal(ilabels); e == nil {
if e = json.Unmarshal(b, &labels); e == nil {
return labels
}
}
}
}
return map[string]string{}
}
func (c *Command) SetFieldSelector(labels map[string]string) {
if c.Args == nil {
c.Args = make(map[string]interface{})
}
c.Args["fieldSelector"] = labels
}
func (c *Command) GetID() string {
if c.WildWlid != "" {
return c.WildWlid
}
if c.WildSid != "" {
return c.WildSid
}
if c.Wlid != "" {
return c.Wlid
}
if c.Sid != "" {
return c.Sid
}
return ""
}
func (c *Command) Json() string {
b, _ := json.Marshal(*c)
return fmt.Sprintf("%s", b)
}
func SIDFallback(c *Command) {
if c.GetID() == "" {
sid, err := getSIDFromArgs(c.Args)
if err != nil || sid == "" {
return
}
c.Sid = sid
}
}
func getSIDFromArgs(args map[string]interface{}) (string, error) {
sidInterface, ok := args["sid"]
if !ok {
return "", nil
}
sid, ok := sidInterface.(string)
if !ok || sid == "" {
return "", fmt.Errorf("sid found in args but empty")
}
// if _, err := secrethandling.SplitSecretID(sid); err != nil {
// return "", err
// }
return sid, nil
}
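An illustrative sketch of the Command helpers above (the wlid, sid and labels are made-up values; demoCommand is not part of the package):

package apis

import "fmt"

// demoCommand shows how labels are attached and how GetID / SIDFallback
// resolve a command's identifier.
func demoCommand() {
	cmd := &Command{
		CommandName: "scan",
		Wlid:        "wlid://cluster-dev/namespace-prod/deployment-web",
	}
	cmd.SetLabels(map[string]string{"app": "web"})
	fmt.Println(cmd.GetID())     // the wlid, since no wild or secret IDs are set
	fmt.Println(cmd.GetLabels()) // map[app:web]

	// SIDFallback promotes Args["sid"] only when no other identifier is set.
	bare := &Command{Args: map[string]interface{}{"sid": "sid://cluster-dev/namespace-prod/secret-regcred"}}
	SIDFallback(bare)
	fmt.Println(bare.GetID()) // the sid taken from Args
}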

View File

@@ -1,16 +0,0 @@
package armotypes
type EnforcementsRule struct {
MonitoredObject []string `json:"monitoredObject"`
MonitoredObjectExistence []string `json:"objectExistence"`
MonitoredObjectEvent []string `json:"event"`
Action []string `json:"action"`
}
type ExecutionPolicy struct {
PortalBase `json:",inline"`
Designators []PortalDesignator `json:"designators"`
PolicyType string `json:"policyType"`
CreationTime string `json:"creation_time"`
ExecutionEnforcementsRule []EnforcementsRule `json:"enforcementRules"`
}

View File

@@ -1,66 +0,0 @@
package armotypes
import "strings"
const (
CostumerGuidQuery = "costumerGUID"
ClusterNameQuery = "cluster"
DatacenterNameQuery = "datacenter"
NamespaceQuery = "namespace"
ProjectQuery = "project"
WlidQuery = "wlid"
SidQuery = "sid"
)
// PortalBase holds the basic item data coming from the portal BE
type PortalBase struct {
GUID string `json:"guid"`
Name string `json:"name"`
Attributes map[string]interface{} `json:"attributes,omitempty"` // could be string
}
type DesignatorType string
// Supported designators
const (
DesignatorAttributes DesignatorType = "Attributes"
DesignatorAttribute DesignatorType = "Attribute" // Deprecated
/*
WorkloadID format.
k8s format: wlid://cluster-<cluster>/namespace-<namespace>/<kind>-<name>
native format: wlid://datacenter-<datacenter>/project-<project>/native-<name>
*/
DesignatorWlid DesignatorType = "Wlid"
/*
Wild card - subset of wlid. e.g.
1. Include cluster:
wlid://cluster-<cluster>/
2. Include cluster and namespace (filter out all other namespaces):
wlid://cluster-<cluster>/namespace-<namespace>/
*/
DesignatorWildWlid DesignatorType = "WildWlid"
DesignatorWlidContainer DesignatorType = "WlidContainer"
DesignatorWlidProcess DesignatorType = "WlidProcess"
DesignatorSid DesignatorType = "Sid" // secret id
)
func (dt DesignatorType) ToLower() DesignatorType {
return DesignatorType(strings.ToLower(string(dt)))
}
// attributes
const (
AttributeCluster = "cluster"
AttributeNamespace = "namespace"
AttributeKind = "kind"
AttributeName = "name"
)
// PortalDesignator represents a single designation option
type PortalDesignator struct {
DesignatorType DesignatorType `json:"designatorType"`
WLID string `json:"wlid"`
WildWLID string `json:"wildwlid"`
SID string `json:"sid"`
Attributes map[string]string `json:"attributes"`
}

View File

@@ -1,18 +0,0 @@
package armotypes
func MockPortalBase(customerGUID, name string, attributes map[string]interface{}) *PortalBase {
if customerGUID == "" {
customerGUID = "36b6f9e1-3b63-4628-994d-cbe16f81e9c7"
}
if name == "" {
name = "portalbase-a"
}
if attributes == nil {
attributes = make(map[string]interface{})
}
return &PortalBase{
GUID: customerGUID,
Name: name,
Attributes: attributes,
}
}

View File

@@ -1,113 +0,0 @@
package armotypes
import (
"github.com/armosec/kubescape/cautils/cautils"
"github.com/golang/glog"
)
var IgnoreLabels = []string{AttributeCluster, AttributeNamespace}
func (designator *PortalDesignator) GetCluster() string {
cluster, _, _, _, _ := designator.DigestPortalDesignator()
return cluster
}
func (designator *PortalDesignator) GetNamespace() string {
_, namespace, _, _, _ := designator.DigestPortalDesignator()
return namespace
}
func (designator *PortalDesignator) GetKind() string {
_, _, kind, _, _ := designator.DigestPortalDesignator()
return kind
}
func (designator *PortalDesignator) GetName() string {
_, _, _, name, _ := designator.DigestPortalDesignator()
return name
}
func (designator *PortalDesignator) GetLabels() map[string]string {
_, _, _, _, labels := designator.DigestPortalDesignator()
return labels
}
// DigestPortalDesignator - get the cluster, namespace, kind, name and labels from the designator
func (designator *PortalDesignator) DigestPortalDesignator() (string, string, string, string, map[string]string) {
switch designator.DesignatorType.ToLower() {
case DesignatorAttributes.ToLower(), DesignatorAttribute.ToLower():
return designator.DigestAttributesDesignator()
case DesignatorWlid.ToLower(), DesignatorWildWlid.ToLower():
return cautils.GetClusterFromWlid(designator.WLID), cautils.GetNamespaceFromWlid(designator.WLID), cautils.GetKindFromWlid(designator.WLID), cautils.GetNameFromWlid(designator.WLID), map[string]string{}
// case DesignatorSid: // TODO
default:
glog.Warningf("in 'digestPortalDesignator' designator type: '%v' not yet supported. please contact Armo team", designator.DesignatorType)
}
return "", "", "", "", nil
}
func (designator *PortalDesignator) DigestAttributesDesignator() (string, string, string, string, map[string]string) {
cluster := ""
namespace := ""
kind := ""
name := ""
labels := map[string]string{}
attributes := designator.Attributes
if attributes == nil {
return cluster, namespace, kind, name, labels
}
for k, v := range attributes {
labels[k] = v
}
if v, ok := attributes[AttributeNamespace]; ok {
namespace = v
delete(labels, AttributeNamespace)
}
if v, ok := attributes[AttributeCluster]; ok {
cluster = v
delete(labels, AttributeCluster)
}
if v, ok := attributes[AttributeKind]; ok {
kind = v
delete(labels, AttributeKind)
}
if v, ok := attributes[AttributeName]; ok {
name = v
delete(labels, AttributeName)
}
return cluster, namespace, kind, name, labels
}
// DigestPortalDesignator is DEPRECATED - use designator.DigestPortalDesignator() instead; gets the cluster, namespace and labels from the designator
func DigestPortalDesignator(designator *PortalDesignator) (string, string, map[string]string) {
switch designator.DesignatorType {
case DesignatorAttributes, DesignatorAttribute:
return DigestAttributesDesignator(designator.Attributes)
case DesignatorWlid, DesignatorWildWlid:
return cautils.GetClusterFromWlid(designator.WLID), cautils.GetNamespaceFromWlid(designator.WLID), map[string]string{}
// case DesignatorSid: // TODO
default:
glog.Warningf("in 'digestPortalDesignator' designator type: '%v' not yet supported. please contact Armo team", designator.DesignatorType)
}
return "", "", nil
}
func DigestAttributesDesignator(attributes map[string]string) (string, string, map[string]string) {
cluster := ""
namespace := ""
labels := map[string]string{}
if attributes == nil {
return cluster, namespace, labels
}
for k, v := range attributes {
labels[k] = v
}
if v, ok := attributes[AttributeNamespace]; ok {
namespace = v
delete(labels, AttributeNamespace)
}
if v, ok := attributes[AttributeCluster]; ok {
cluster = v
delete(labels, AttributeCluster)
}
return cluster, namespace, labels
}
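A hedged sketch of how an attributes designator is digested (the attribute values are made up; demoDigestDesignator is not part of the package):

package armotypes

import "fmt"

// demoDigestDesignator digests an attributes-based designator: well-known keys
// become cluster/namespace/kind/name and everything else is returned as labels.
func demoDigestDesignator() {
	d := &PortalDesignator{
		DesignatorType: DesignatorAttributes,
		Attributes: map[string]string{
			AttributeCluster:   "dev-cluster",
			AttributeNamespace: "prod",
			"app":              "web",
		},
	}
	cluster, namespace, kind, name, labels := d.DigestPortalDesignator()
	fmt.Println(cluster, namespace, kind, name) // cluster="dev-cluster", namespace="prod", kind and name empty
	fmt.Println(labels)                         // map[app:web] - the well-known keys are stripped out
}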

View File

@@ -1,42 +0,0 @@
package armotypes
type PostureExceptionPolicyActions string
const AlertOnly PostureExceptionPolicyActions = "alertOnly"
const Disable PostureExceptionPolicyActions = "disable"
type PostureExceptionPolicy struct {
PortalBase `json:",inline"`
PolicyType string `json:"policyType"`
CreationTime string `json:"creationTime"`
Actions []PostureExceptionPolicyActions `json:"actions"`
Resources []PortalDesignator `json:"resources"`
PosturePolicies []PosturePolicy `json:"posturePolicies"`
}
type PosturePolicy struct {
FrameworkName string `json:"frameworkName"`
ControlName string `json:"controlName"`
RuleName string `json:"ruleName"`
}
func (exceptionPolicy *PostureExceptionPolicy) IsAlertOnly() bool {
if exceptionPolicy.IsDisable() {
return false
}
for i := range exceptionPolicy.Actions {
if exceptionPolicy.Actions[i] == AlertOnly {
return true
}
}
return false
}
func (exceptionPolicy *PostureExceptionPolicy) IsDisable() bool {
for i := range exceptionPolicy.Actions {
if exceptionPolicy.Actions[i] == Disable {
return true
}
}
return false
}
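A short illustrative sketch of the action helpers above (demoExceptionActions is not part of the package):

package armotypes

import "fmt"

// demoExceptionActions: "disable" wins over "alertOnly", because IsAlertOnly
// first checks IsDisable.
func demoExceptionActions() {
	p := &PostureExceptionPolicy{Actions: []PostureExceptionPolicyActions{AlertOnly, Disable}}
	fmt.Println(p.IsDisable())   // true
	fmt.Println(p.IsAlertOnly()) // false
}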

View File

@@ -1 +0,0 @@
package armotypes

View File

@@ -1,197 +0,0 @@
package cautils
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"strings"
"github.com/golang/glog"
)
// labels added to the workload
const (
ArmoPrefix string = "armo"
ArmoAttach string = ArmoPrefix + ".attach"
ArmoInitialSecret string = ArmoPrefix + ".initial"
ArmoSecretStatus string = ArmoPrefix + ".secret"
ArmoCompatibleLabel string = ArmoPrefix + ".compatible"
ArmoSecretProtectStatus string = "protect"
ArmoSecretClearStatus string = "clear"
)
// annotations added to the workload
const (
ArmoUpdate string = ArmoPrefix + ".last-update"
ArmoWlid string = ArmoPrefix + ".wlid"
ArmoSid string = ArmoPrefix + ".sid"
ArmoJobID string = ArmoPrefix + ".job"
ArmoJobIDPath string = ArmoJobID + "/id"
ArmoJobParentPath string = ArmoJobID + "/parent"
ArmoJobActionPath string = ArmoJobID + "/action"
ArmoCompatibleAnnotation string = ArmoAttach + "/compatible"
ArmoReplaceheaders string = ArmoAttach + "/replaceheaders"
)
const ( // DEPRECATED
CAAttachLabel string = "cyberarmor"
Patched string = "Patched"
Done string = "Done"
Encrypted string = "Protected"
CAInjectOld = "injectCyberArmor"
CAPrefix string = "cyberarmor"
CAProtectedSecret string = CAPrefix + ".secret"
CAInitialSecret string = CAPrefix + ".initial"
CAInject string = CAPrefix + ".inject"
CAIgnore string = CAPrefix + ".ignore"
CAReplaceHeaders string = CAPrefix + ".removeSecurityHeaders"
)
const ( // DEPRECATED
CAUpdate string = CAPrefix + ".last-update"
CAStatus string = CAPrefix + ".status"
CAWlid string = CAPrefix + ".wlid"
)
type ClusterConfig struct {
EventReceiverREST string `json:"eventReceiverREST"`
EventReceiverWS string `json:"eventReceiverWS"`
MaserNotificationServer string `json:"maserNotificationServer"`
Postman string `json:"postman"`
Dashboard string `json:"dashboard"`
Portal string `json:"portal"`
CustomerGUID string `json:"customerGUID"`
ClusterGUID string `json:"clusterGUID"`
ClusterName string `json:"clusterName"`
OciImageURL string `json:"ociImageURL"`
NotificationWSURL string `json:"notificationWSURL"`
NotificationRestURL string `json:"notificationRestURL"`
VulnScanURL string `json:"vulnScanURL"`
OracleURL string `json:"oracleURL"`
ClairURL string `json:"clairURL"`
}
// SpiffeBasicInfo represents basic workload identity info
type SpiffeBasicInfo struct {
//cluster/datacenter
Level0 string `json:"level0"`
Level0Type string `json:"level0Type"`
//namespace/project
Level1 string `json:"level1"`
Level1Type string `json:"level1Type"`
Kind string `json:"kind"`
Name string `json:"name"`
}
type ImageInfo struct {
Registry string `json:"registry"`
VersionImage string `json:"versionImage"`
}
func IsAttached(labels map[string]string) *bool {
attach := false
if labels == nil {
return nil
}
if attached, ok := labels[ArmoAttach]; ok {
if strings.ToLower(attached) == "true" {
attach = true
return &attach
} else {
return &attach
}
}
// deprecated
if _, ok := labels[CAAttachLabel]; ok {
attach = true
return &attach
}
// deprecated
if inject, ok := labels[CAInject]; ok {
if strings.ToLower(inject) == "true" {
attach = true
return &attach
}
}
// deprecated
if ignore, ok := labels[CAIgnore]; ok {
if strings.ToLower(ignore) == "true" {
return &attach
}
}
return nil
}
func IsSecretProtected(labels map[string]string) *bool {
protect := false
if labels == nil {
return nil
}
if protected, ok := labels[ArmoSecretStatus]; ok {
if strings.ToLower(protected) == ArmoSecretProtectStatus {
protect = true
return &protect
} else {
return &protect
}
}
return nil
}
func LoadConfig(configPath string, loadToEnv bool) (*ClusterConfig, error) {
if configPath == "" {
configPath = "/etc/config/clusterData.json"
}
dat, err := ioutil.ReadFile(configPath)
if err != nil || len(dat) == 0 {
return nil, fmt.Errorf("Config empty or not found. path: %s", configPath)
}
componentConfig := &ClusterConfig{}
if err := json.Unmarshal(dat, componentConfig); err != nil {
return componentConfig, fmt.Errorf("Failed to read component config, path: %s, reason: %s", configPath, err.Error())
}
if loadToEnv {
componentConfig.LoadConfigToEnv()
}
return componentConfig, nil
}
func (clusterConfig *ClusterConfig) LoadConfigToEnv() {
SetEnv("CA_CLUSTER_NAME", clusterConfig.ClusterName)
SetEnv("CA_CLUSTER_GUID", clusterConfig.ClusterGUID)
SetEnv("CA_ORACLE_SERVER", clusterConfig.OracleURL)
SetEnv("CA_CUSTOMER_GUID", clusterConfig.CustomerGUID)
SetEnv("CA_DASHBOARD_BACKEND", clusterConfig.Dashboard)
SetEnv("CA_NOTIFICATION_SERVER_REST", clusterConfig.NotificationWSURL)
SetEnv("CA_NOTIFICATION_SERVER_WS", clusterConfig.NotificationWSURL)
SetEnv("CA_NOTIFICATION_SERVER_REST", clusterConfig.NotificationRestURL)
SetEnv("CA_OCIMAGE_URL", clusterConfig.OciImageURL)
SetEnv("CA_K8S_REPORT_URL", clusterConfig.EventReceiverWS)
SetEnv("CA_EVENT_RECEIVER_HTTP", clusterConfig.EventReceiverREST)
SetEnv("CA_VULNSCAN", clusterConfig.VulnScanURL)
SetEnv("CA_POSTMAN", clusterConfig.Postman)
SetEnv("MASTER_NOTIFICATION_SERVER_HOST", clusterConfig.MaserNotificationServer)
SetEnv("CLAIR_URL", clusterConfig.ClairURL)
}
func SetEnv(key, value string) {
if e := os.Getenv(key); e == "" {
if err := os.Setenv(key, value); err != nil {
glog.Warning("%s: %s", key, err.Error())
}
}
}
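An illustrative sketch of loading the cluster config (the path shown is the same default LoadConfig falls back to; demoLoadClusterConfig is not part of the package):

package cautils

import "log"

// demoLoadClusterConfig loads the cluster config and, because loadToEnv is
// true, also exports its values via SetEnv (already-set env vars are kept).
func demoLoadClusterConfig() {
	cfg, err := LoadConfig("/etc/config/clusterData.json", true)
	if err != nil {
		log.Printf("no cluster config: %v", err)
		return
	}
	log.Printf("loaded config for cluster %q (customer %s)", cfg.ClusterName, cfg.CustomerGUID)
}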

View File

@@ -1,29 +0,0 @@
package cautils
import (
"testing"
)
// tests wlid parse
func TestSpiffeWLIDToInfoSuccess(t *testing.T) {
WLID := "wlid://cluster-HipsterShopCluster2/namespace-prod/deployment-cartservice"
ms, er := SpiffeToSpiffeInfo(WLID)
if er != nil || ms.Level0 != "HipsterShopCluster2" || ms.Level0Type != "cluster" || ms.Level1 != "prod" || ms.Level1Type != "namespace" ||
ms.Kind != "deployment" || ms.Name != "cartservice" {
t.Errorf("TestSpiffeWLIDToInfoSuccess failed to parse %v", WLID)
}
}
func TestSpiffeSIDInfoSuccess(t *testing.T) {
SID := "sid://cluster-HipsterShopCluster2/namespace-dev/secret-caregcred"
ms, er := SpiffeToSpiffeInfo(SID)
if er != nil || ms.Level0 != "HipsterShopCluster2" || ms.Level0Type != "cluster" || ms.Level1 != "dev" || ms.Level1Type != "namespace" ||
ms.Kind != "secret" || ms.Name != "caregcred" {
t.Errorf("TestSpiffeSIDInfoSuccess failed to parse %v", SID)
}
}

View File

@@ -1,118 +0,0 @@
package cautils
import (
"crypto/sha256"
"fmt"
"strings"
)
// wlid/ sid utils
const (
SpiffePrefix = "://"
)
// wlid/ sid utils
const (
PackagePath = "vendor/github.com/armosec/capacketsgo"
)
// AsSHA256 hashes any value into a hex-encoded SHA-256 string (see https://blog.8bitzen.com/posts/22-08-2019-how-to-hash-a-struct-in-go)
func AsSHA256(v interface{}) string {
h := sha256.New()
h.Write([]byte(fmt.Sprintf("%v", v)))
return fmt.Sprintf("%x", h.Sum(nil))
}
func SpiffeToSpiffeInfo(spiffe string) (*SpiffeBasicInfo, error) {
basicInfo := &SpiffeBasicInfo{}
pos := strings.Index(spiffe, SpiffePrefix)
if pos < 0 {
return nil, fmt.Errorf("invalid spiffe %s", spiffe)
}
pos += len(SpiffePrefix)
spiffeNoPrefix := spiffe[pos:]
splits := strings.Split(spiffeNoPrefix, "/")
if len(splits) < 3 {
return nil, fmt.Errorf("invalid spiffe %s", spiffe)
}
p0 := strings.Index(splits[0], "-")
p1 := strings.Index(splits[1], "-")
p2 := strings.Index(splits[2], "-")
if p0 == -1 || p1 == -1 || p2 == -1 {
return nil, fmt.Errorf("invalid spiffe %s", spiffe)
}
basicInfo.Level0Type = splits[0][:p0]
basicInfo.Level0 = splits[0][p0+1:]
basicInfo.Level1Type = splits[1][:p1]
basicInfo.Level1 = splits[1][p1+1:]
basicInfo.Kind = splits[2][:p2]
basicInfo.Name = splits[2][p2+1:]
return basicInfo, nil
}
func ImageTagToImageInfo(imageTag string) (*ImageInfo, error) {
ImageInfo := &ImageInfo{}
spDelimiter := "/"
pos := strings.Index(imageTag, spDelimiter)
if pos < 0 {
ImageInfo.Registry = ""
ImageInfo.VersionImage = imageTag
return ImageInfo, nil
}
splits := strings.Split(imageTag, spDelimiter)
if len(splits) == 0 {
return nil, fmt.Errorf("Invalid image info %s", imageTag)
}
ImageInfo.Registry = splits[0]
if len(splits) > 1 {
ImageInfo.VersionImage = splits[len(splits)-1]
} else {
ImageInfo.VersionImage = ""
}
return ImageInfo, nil
}
func BoolPointer(b bool) *bool { return &b }
func BoolToString(b bool) string {
if b {
return "true"
}
return "false"
}
func BoolPointerToString(b *bool) string {
if b == nil {
return ""
}
if *b {
return "true"
}
return "false"
}
func StringToBool(s string) bool {
if strings.ToLower(s) == "true" || strings.ToLower(s) == "1" {
return true
}
return false
}
func StringToBoolPointer(s string) *bool {
if strings.ToLower(s) == "true" || strings.ToLower(s) == "1" {
return BoolPointer(true)
}
if strings.ToLower(s) == "false" || strings.ToLower(s) == "0" {
return BoolPointer(false)
}
return nil
}
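An illustrative round trip through the parsers above (the wlid and image tag are made-up values; demoSpiffeParsing is not part of the package):

package cautils

import "fmt"

// demoSpiffeParsing parses a wlid into its SpiffeBasicInfo parts and splits an
// image tag into registry and image:version.
func demoSpiffeParsing() {
	info, err := SpiffeToSpiffeInfo("wlid://cluster-dev/namespace-prod/deployment-web")
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(info.Level0, info.Level1, info.Kind, info.Name) // dev prod deployment web

	img, _ := ImageTagToImageInfo("quay.io/armosec/kubescape:v1")
	fmt.Println(img.Registry, img.VersionImage) // quay.io kubescape:v1
}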

View File

@@ -1,52 +0,0 @@
package cautils
import (
"fmt"
"hash/fnv"
"strings"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
var NamespacesListToIgnore = make([]string, 0)
var KubeNamespaces = []string{metav1.NamespaceSystem, metav1.NamespacePublic}
// InitNamespacesListToIgnore initializes the list of namespaces to ignore when handling pods
func InitNamespacesListToIgnore(caNamespace string) {
if len(NamespacesListToIgnore) > 0 {
return
}
NamespacesListToIgnore = append(NamespacesListToIgnore, KubeNamespaces...)
NamespacesListToIgnore = append(NamespacesListToIgnore, caNamespace)
}
func IfIgnoreNamespace(ns string) bool {
for i := range NamespacesListToIgnore {
if NamespacesListToIgnore[i] == ns {
return true
}
}
return false
}
func IfKubeNamespace(ns string) bool {
for i := range KubeNamespaces {
if KubeNamespaces[i] == ns {
return true
}
}
return false
}
func hash(s string) string {
h := fnv.New32a()
h.Write([]byte(s))
return fmt.Sprintf("%d", h.Sum32())
}
func GenarateConfigMapName(wlid string) string {
name := strings.ToLower(fmt.Sprintf("ca-%s-%s-%s", GetNamespaceFromWlid(wlid), GetKindFromWlid(wlid), GetNameFromWlid(wlid)))
if len(name) >= 63 {
name = hash(name)
}
return name
}
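An illustrative sketch of ConfigMap name generation (the wlid is a made-up value; demoConfigMapName is not part of the package):

package cautils

import "fmt"

// demoConfigMapName builds a ConfigMap name from a wlid; names of 63 or more
// characters are replaced by an FNV-32a hash to stay within k8s name limits.
func demoConfigMapName() {
	fmt.Println(GenarateConfigMapName("wlid://cluster-dev/namespace-prod/deployment-web")) // ca-prod-deployment-web
}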

View File

@@ -1,238 +0,0 @@
package cautils
import (
"fmt"
"strings"
)
// API fields
var (
WlidPrefix = "wlid://"
SidPrefix = "sid://"
ClusterWlidPrefix = "cluster-"
NamespaceWlidPrefix = "namespace-"
DataCenterWlidPrefix = "datacenter-"
ProjectWlidPrefix = "project-"
SecretSIDPrefix = "secret-"
SubSecretSIDPrefix = "subsecret-"
K8SKindsList = []string{"ComponentStatus", "ConfigMap", "ControllerRevision", "CronJob",
"CustomResourceDefinition", "DaemonSet", "Deployment", "Endpoints", "Event", "HorizontalPodAutoscaler",
"Ingress", "Job", "Lease", "LimitRange", "LocalSubjectAccessReview", "MutatingWebhookConfiguration",
"Namespace", "NetworkPolicy", "Node", "PersistentVolume", "PersistentVolumeClaim", "Pod",
"PodDisruptionBudget", "PodSecurityPolicy", "PodTemplate", "PriorityClass", "ReplicaSet",
"ReplicationController", "ResourceQuota", "Role", "RoleBinding", "Secret", "SelfSubjectAccessReview",
"SelfSubjectRulesReview", "Service", "ServiceAccount", "StatefulSet", "StorageClass",
"SubjectAccessReview", "TokenReview", "ValidatingWebhookConfiguration", "VolumeAttachment"}
NativeKindsList = []string{"Dockerized", "Native"}
KindReverseMap = map[string]string{}
dataImagesList = []string{}
)
func IsWlid(id string) bool {
return strings.HasPrefix(id, WlidPrefix)
}
func IsSid(id string) bool {
return strings.HasPrefix(id, SidPrefix)
}
// GetK8SKindFronList returns the canonical casing of a known k8s kind
func GetK8SKindFronList(kind string) string { // TODO GetK8SKindFromList
for i := range K8SKindsList {
if strings.EqualFold(kind, K8SKindsList[i]) {
return K8SKindsList[i]
}
}
return kind
}
// IsK8SKindInList Check if the kind is a known kind
func IsK8SKindInList(kind string) bool {
for i := range K8SKindsList {
if strings.EqualFold(kind, K8SKindsList[i]) {
return true
}
}
return false
}
// generateWLID
func generateWLID(pLevel0, level0, pLevel1, level1, k, name string) string {
kind := strings.ToLower(k)
kind = strings.Replace(kind, "-", "", -1)
wlid := WlidPrefix
wlid += fmt.Sprintf("%s%s", pLevel0, level0)
if level1 == "" {
return wlid
}
wlid += fmt.Sprintf("/%s%s", pLevel1, level1)
if kind == "" {
return wlid
}
wlid += fmt.Sprintf("/%s", kind)
if name == "" {
return wlid
}
wlid += fmt.Sprintf("-%s", name)
return wlid
}
// GetWLID get the calculated wlid
func GetWLID(level0, level1, k, name string) string {
return generateWLID(ClusterWlidPrefix, level0, NamespaceWlidPrefix, level1, k, name)
}
// GetK8sWLID get the k8s calculated wlid
func GetK8sWLID(level0, level1, k, name string) string {
return generateWLID(ClusterWlidPrefix, level0, NamespaceWlidPrefix, level1, k, name)
}
// GetNativeWLID get the native calculated wlid
func GetNativeWLID(level0, level1, k, name string) string {
return generateWLID(DataCenterWlidPrefix, level0, ProjectWlidPrefix, level1, k, name)
}
// WildWlidContainsWlid does WildWlid contains Wlid
func WildWlidContainsWlid(wildWlid, wlid string) bool { // TODO- test
if wildWlid == wlid {
return true
}
wildWlidR, _ := RestoreMicroserviceIDsFromSpiffe(wildWlid)
wlidR, _ := RestoreMicroserviceIDsFromSpiffe(wlid)
if len(wildWlidR) > len(wlidR) {
// invalid wlid
return false
}
for i := range wildWlidR {
if wildWlidR[i] != wlidR[i] {
return false
}
}
return true
}
func restoreInnerIdentifiersFromID(spiffeSlices []string) []string {
if len(spiffeSlices) >= 1 && strings.HasPrefix(spiffeSlices[0], ClusterWlidPrefix) {
spiffeSlices[0] = spiffeSlices[0][len(ClusterWlidPrefix):]
}
if len(spiffeSlices) >= 2 && strings.HasPrefix(spiffeSlices[1], NamespaceWlidPrefix) {
spiffeSlices[1] = spiffeSlices[1][len(NamespaceWlidPrefix):]
}
if len(spiffeSlices) >= 3 && strings.Contains(spiffeSlices[2], "-") {
dashIdx := strings.Index(spiffeSlices[2], "-")
spiffeSlices = append(spiffeSlices, spiffeSlices[2][dashIdx+1:])
spiffeSlices[2] = spiffeSlices[2][:dashIdx]
if val, ok := KindReverseMap[spiffeSlices[2]]; ok {
spiffeSlices[2] = val
}
}
return spiffeSlices
}
// RestoreMicroserviceIDsFromSpiffe -
func RestoreMicroserviceIDsFromSpiffe(spiffe string) ([]string, error) {
if spiffe == "" {
return nil, fmt.Errorf("in RestoreMicroserviceIDsFromSpiffe, expecting valid wlid recieved empty string")
}
if StringHasWhitespace(spiffe) {
return nil, fmt.Errorf("wlid %s invalid. whitespace found", spiffe)
}
if strings.HasPrefix(spiffe, WlidPrefix) {
spiffe = spiffe[len(WlidPrefix):]
} else if strings.HasPrefix(spiffe, SidPrefix) {
spiffe = spiffe[len(SidPrefix):]
}
spiffeSlices := strings.Split(spiffe, "/")
// The documented WLID format (https://cyberarmorio.sharepoint.com/sites/development2/Shared%20Documents/kubernetes_design1.docx?web=1)
if len(spiffeSlices) <= 3 {
spiffeSlices = restoreInnerIdentifiersFromID(spiffeSlices)
}
if len(spiffeSlices) != 4 { // first used WLID, deprecated since 24.10.2019
return spiffeSlices, fmt.Errorf("invalid WLID format. format received: %v", spiffeSlices)
}
for i := range spiffeSlices {
if spiffeSlices[i] == "" {
return spiffeSlices, fmt.Errorf("one or more entities are empty, spiffeSlices: %v", spiffeSlices)
}
}
return spiffeSlices, nil
}
// RestoreMicroserviceIDs restores the wlid/sid identifiers, returning an empty slice on invalid input
func RestoreMicroserviceIDs(spiffe string) []string {
if spiffe == "" {
return []string{}
}
if StringHasWhitespace(spiffe) {
return []string{}
}
if strings.HasPrefix(spiffe, WlidPrefix) {
spiffe = spiffe[len(WlidPrefix):]
} else if strings.HasPrefix(spiffe, SidPrefix) {
spiffe = spiffe[len(SidPrefix):]
}
spiffeSlices := strings.Split(spiffe, "/")
return restoreInnerIdentifiersFromID(spiffeSlices)
}
// GetClusterFromWlid parse wlid and get cluster
func GetClusterFromWlid(wlid string) string {
r := RestoreMicroserviceIDs(wlid)
if len(r) >= 1 {
return r[0]
}
return ""
}
// GetNamespaceFromWlid parse wlid and get Namespace
func GetNamespaceFromWlid(wlid string) string {
r := RestoreMicroserviceIDs(wlid)
if len(r) >= 2 {
return r[1]
}
return ""
}
// GetKindFromWlid parse wlid and get kind
func GetKindFromWlid(wlid string) string {
r := RestoreMicroserviceIDs(wlid)
if len(r) >= 3 {
return GetK8SKindFronList(r[2])
}
return ""
}
// GetNameFromWlid parse wlid and get name
func GetNameFromWlid(wlid string) string {
r := RestoreMicroserviceIDs(wlid)
if len(r) >= 4 {
return r[3]
}
return ""
}
// IsWlidValid test if wlid is a valid wlid
func IsWlidValid(wlid string) error {
_, err := RestoreMicroserviceIDsFromSpiffe(wlid)
return err
}
// StringHasWhitespace check if a string has whitespace
func StringHasWhitespace(str string) bool {
if whitespace := strings.Index(str, " "); whitespace != -1 {
return true
}
return false
}
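An illustrative wlid round trip using the helpers above (demoWlidRoundTrip is not part of the package):

package cautils

import "fmt"

// demoWlidRoundTrip generates a k8s wlid from its parts and parses it back.
func demoWlidRoundTrip() {
	wlid := GetK8sWLID("dev", "prod", "Deployment", "web")
	fmt.Println(wlid)                     // wlid://cluster-dev/namespace-prod/deployment-web
	fmt.Println(IsWlidValid(wlid) == nil) // true
	fmt.Println(GetClusterFromWlid(wlid), GetNamespaceFromWlid(wlid), GetKindFromWlid(wlid), GetNameFromWlid(wlid)) // dev prod Deployment web
}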

View File

@@ -4,7 +4,6 @@ import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net/url"
"os"
"strings"
@@ -12,15 +11,21 @@ import (
"github.com/armosec/kubescape/cautils/getter"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/armosec/kubescape/cautils/k8sinterface"
"github.com/armosec/k8s-interface/k8sinterface"
corev1 "k8s.io/api/core/v1"
)
const (
configMapName = "kubescape"
ConfigFileName = "config"
configFileName = "config"
)
func ConfigFileFullPath() string { return getter.GetDefaultPath(configFileName + ".json") }
// ======================================================================================
// =============================== Config structure =====================================
// ======================================================================================
type ConfigObj struct {
CustomerGUID string `json:"customerGUID"`
Token string `json:"invitationParam"`
@@ -34,62 +39,117 @@ func (co *ConfigObj) Json() []byte {
return []byte{}
}
// ======================================================================================
// =============================== interface ============================================
// ======================================================================================
type IClusterConfig interface {
SetCustomerGUID() error
// setters
SetCustomerGUID(customerGUID string) error
// getters
GetCustomerGUID() string
GetConfigObj() *ConfigObj
GetK8sAPI() *k8sinterface.KubernetesApi
GetBackendAPI() getter.IBackend
GetDefaultNS() string
GenerateURL()
}
type ClusterConfig struct {
k8s *k8sinterface.KubernetesApi
defaultNS string
armoAPI *getter.ArmoAPI
configObj *ConfigObj
// ClusterConfigSetup - sets up the desired cluster behavior regarding submission to the Armo BE
func ClusterConfigSetup(scanInfo *ScanInfo, k8s *k8sinterface.KubernetesApi, beAPI getter.IBackend) IClusterConfig {
/*
If "First run (local config not found)" -
Default - Do not send report (local)
Local - Do not send report
Submit - Create tenant & Submit report
If "Submitted but not signed up" -
Default - Delete local config & Do not send report (local)
Local - Delete local config & Do not send report
Submit - Submit report
If "Signed up user" -
Default - Submit report (submit)
Local - Do not send report
Submit - Submit report
*/
clusterConfig := NewClusterConfig(k8s, beAPI)
clusterConfig.LoadConfig()
if !IsSubmitted(clusterConfig) {
if scanInfo.Submit {
return clusterConfig // submit - Create tenant & Submit report
}
return NewEmptyConfig() // local/default - Do not send report
}
if !IsRegistered(clusterConfig) {
if scanInfo.Submit {
return clusterConfig // submit/default - Submit report
}
DeleteConfig(k8s)
return NewEmptyConfig() // local - Delete local config & Do not send report
}
if scanInfo.Local {
return NewEmptyConfig() // local - Do not send report
}
return clusterConfig // submit/default - Submit report
}
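// Illustrative sketch only (not part of this change): how the decision matrix
// documented above plays out for the ScanInfo flags that ClusterConfigSetup reads.
func demoClusterConfigSetup(scanInfo *ScanInfo, k8s *k8sinterface.KubernetesApi, be getter.IBackend) {
	scanInfo.Local = true
	local := ClusterConfigSetup(scanInfo, k8s, be) // Local never submits - an EmptyConfig is returned
	scanInfo.Local, scanInfo.Submit = false, true
	submit := ClusterConfigSetup(scanInfo, k8s, be) // Submit creates a tenant on first run and submits the report
	_, _ = local, submit
}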
// ======================================================================================
// ============================= Mock Config ============================================
// ======================================================================================
type EmptyConfig struct {
}
func NewEmptyConfig() *EmptyConfig { return &EmptyConfig{} }
func (c *EmptyConfig) GetConfigObj() *ConfigObj { return &ConfigObj{} }
func (c *EmptyConfig) SetCustomerGUID(customerGUID string) error { return nil }
func (c *EmptyConfig) GetCustomerGUID() string { return "" }
func (c *EmptyConfig) GetK8sAPI() *k8sinterface.KubernetesApi { return nil } // TODO: return mock obj
func (c *EmptyConfig) GetDefaultNS() string { return k8sinterface.GetDefaultNamespace() }
func (c *EmptyConfig) GetBackendAPI() getter.IBackend { return nil } // TODO: return mock obj
func (c *EmptyConfig) GenerateURL() {
message := fmt.Sprintf("You can see the results in a user-friendly UI, choose your preferred compliance framework, check risk results history and trends, manage exceptions, get remediation recommendations and much more by registering here: https://%s", getter.GetArmoAPIConnector().GetFrontendURL())
InfoTextDisplay(os.Stdout, message+"\n")
}
func (c *EmptyConfig) SetCustomerGUID() error {
return nil
// ======================================================================================
// ========================== Cluster Config ============================================
// ======================================================================================
type ClusterConfig struct {
k8s *k8sinterface.KubernetesApi
defaultNS string
backendAPI getter.IBackend
configObj *ConfigObj
}
func (c *EmptyConfig) GetCustomerGUID() string {
return ""
}
func NewEmptyConfig() *EmptyConfig {
return &EmptyConfig{}
}
func NewClusterConfig(k8s *k8sinterface.KubernetesApi, armoAPI *getter.ArmoAPI) *ClusterConfig {
func NewClusterConfig(k8s *k8sinterface.KubernetesApi, backendAPI getter.IBackend) *ClusterConfig {
return &ClusterConfig{
k8s: k8s,
armoAPI: armoAPI,
defaultNS: k8sinterface.GetDefaultNamespace(),
k8s: k8s,
backendAPI: backendAPI,
configObj: &ConfigObj{},
defaultNS: k8sinterface.GetDefaultNamespace(),
}
}
func createConfigJson() {
ioutil.WriteFile(getter.GetDefaultPath(ConfigFileName+".json"), nil, 0664)
func (c *ClusterConfig) GetConfigObj() *ConfigObj { return c.configObj }
func (c *ClusterConfig) GetK8sAPI() *k8sinterface.KubernetesApi { return c.k8s }
func (c *ClusterConfig) GetDefaultNS() string { return c.defaultNS }
func (c *ClusterConfig) GetBackendAPI() getter.IBackend { return c.backendAPI }
}
func update(configObj *ConfigObj) {
ioutil.WriteFile(getter.GetDefaultPath(ConfigFileName+".json"), configObj.Json(), 0664)
}
func (c *ClusterConfig) GenerateURL() {
u := url.URL{}
u.Scheme = "https"
u.Host = getter.ArmoFEURL
u.Host = getter.GetArmoAPIConnector().GetFrontendURL()
if c.configObj == nil {
return
}
message := fmt.Sprintf("You can see the results in a user-friendly UI, choose your preferred compliance framework, check risk results history and trends, manage exceptions, get remediation recommendations and much more by registering here: %s", u.String())
if c.configObj.CustomerAdminEMail != "" {
msgStr := fmt.Sprintf("To view all controls and get remediations ask access permissions to %s from %s", u.String(), c.configObj.CustomerAdminEMail)
InfoTextDisplay(os.Stdout, msgStr+"\n")
InfoTextDisplay(os.Stdout, message+"\n")
return
}
u.Path = "account/sign-up"
@@ -98,8 +158,7 @@ func (c *ClusterConfig) GenerateURL() {
q.Add("customerGUID", c.configObj.CustomerGUID)
u.RawQuery = q.Encode()
fmt.Println("To view all controls and get remediations visit:")
InfoTextDisplay(os.Stdout, u.String()+"\n")
InfoTextDisplay(os.Stdout, message+"\n")
}
@@ -110,6 +169,82 @@ func (c *ClusterConfig) GetCustomerGUID() string {
return ""
}
func (c *ClusterConfig) SetCustomerGUID(customerGUID string) error {
if customerGUID != "" && c.GetCustomerGUID() != customerGUID {
c.configObj.CustomerGUID = customerGUID // override config customerGUID
}
customerGUID = c.GetCustomerGUID()
// get from armoBE
tenantResponse, err := c.backendAPI.GetCustomerGUID(customerGUID)
if err == nil && tenantResponse != nil {
if tenantResponse.AdminMail != "" { // this customer already belongs to some user
c.configObj.CustomerAdminEMail = tenantResponse.AdminMail
} else {
c.configObj.Token = tenantResponse.Token
c.configObj.CustomerGUID = tenantResponse.TenantID
}
} else {
if err != nil && !strings.Contains(err.Error(), "already exists") {
return err
}
}
// update/create config
if c.existsConfigMap() {
c.updateConfigMap()
} else {
c.createConfigMap()
}
if existsConfigFile() {
c.updateConfigFile()
} else {
c.createConfigFile()
}
return nil
}
func (c *ClusterConfig) LoadConfig() {
// get from configMap
if c.existsConfigMap() {
c.configObj, _ = c.loadConfigFromConfigMap()
} else if existsConfigFile() { // get from file
c.configObj, _ = loadConfigFromFile()
} else {
c.configObj = &ConfigObj{}
}
}
func (c *ClusterConfig) ToMapString() map[string]interface{} {
m := map[string]interface{}{}
bc, _ := json.Marshal(c.configObj)
json.Unmarshal(bc, &m)
return m
}
func (c *ClusterConfig) loadConfigFromConfigMap() (*ConfigObj, error) {
if c.k8s == nil {
return nil, nil
}
configMap, err := c.k8s.KubernetesClient.CoreV1().ConfigMaps(c.defaultNS).Get(context.Background(), configMapName, metav1.GetOptions{})
if err != nil {
return nil, err
}
if bData, err := json.Marshal(configMap.Data); err == nil {
return readConfig(bData)
}
return nil, nil
}
func (c *ClusterConfig) existsConfigMap() bool {
_, err := c.k8s.KubernetesClient.CoreV1().ConfigMaps(c.defaultNS).Get(context.Background(), configMapName, metav1.GetOptions{})
// TODO - check if has customerGUID
return err == nil
}
func (c *ClusterConfig) GetValueByKeyFromConfigMap(key string) (string, error) {
configMap, err := c.k8s.KubernetesClient.CoreV1().ConfigMaps(c.defaultNS).Get(context.Background(), configMapName, metav1.GetOptions{})
@@ -122,16 +257,17 @@ func (c *ClusterConfig) GetValueByKeyFromConfigMap(key string) (string, error) {
} else {
return "", fmt.Errorf("value does not exist")
}
}
func GetValueFromConfigJson(key string) (string, error) {
data, err := ioutil.ReadFile(getter.GetDefaultPath(ConfigFileName + ".json"))
data, err := os.ReadFile(ConfigFileFullPath())
if err != nil {
return "", err
}
var obj map[string]interface{}
err = json.Unmarshal(data, &obj)
if err := json.Unmarshal(data, &obj); err != nil {
return "", err
}
if val, ok := obj[key]; ok {
return fmt.Sprint(val), nil
} else {
@@ -141,7 +277,7 @@ func GetValueFromConfigJson(key string) (string, error) {
}
func SetKeyValueInConfigJson(key string, value string) error {
data, err := ioutil.ReadFile(getter.GetDefaultPath(ConfigFileName + ".json"))
data, err := os.ReadFile(ConfigFileFullPath())
if err != nil {
return err
}
@@ -157,7 +293,7 @@ func SetKeyValueInConfigJson(key string, value string) error {
return err
}
return ioutil.WriteFile(getter.GetDefaultPath(ConfigFileName+".json"), newData, 0664)
return os.WriteFile(ConfigFileFullPath(), newData, 0664)
}
@@ -187,76 +323,11 @@ func (c *ClusterConfig) SetKeyValueInConfigmap(key string, value string) error {
return err
}
func (c *ClusterConfig) SetCustomerGUID() error {
// get from file
if existsConfigJson() {
c.configObj, _ = loadConfigFromFile()
} else if c.existsConfigMap() {
c.configObj, _ = c.loadConfigFromConfigMap()
} else {
c.createConfigMap()
createConfigJson()
}
customerGUID := c.GetCustomerGUID()
// get from armoBE
tenantResponse, err := c.armoAPI.GetCustomerGUID(customerGUID)
if err == nil && tenantResponse != nil {
if tenantResponse.AdminMail != "" { // this customer already belongs to some user
if existsConfigJson() {
update(&ConfigObj{CustomerGUID: customerGUID, CustomerAdminEMail: tenantResponse.AdminMail})
}
if c.existsConfigMap() {
c.configObj.CustomerAdminEMail = tenantResponse.AdminMail
c.updateConfigMap()
}
} else {
if existsConfigJson() {
update(&ConfigObj{CustomerGUID: tenantResponse.TenantID, Token: tenantResponse.Token})
}
if c.existsConfigMap() {
c.configObj = &ConfigObj{CustomerGUID: tenantResponse.TenantID, Token: tenantResponse.Token}
c.updateConfigMap()
}
}
} else {
if err != nil && strings.Contains(err.Error(), "Invitation for tenant already exists") {
return nil
}
return err
}
return nil
}
func (c *ClusterConfig) loadConfigFromConfigMap() (*ConfigObj, error) {
if c.k8s == nil {
return nil, nil
}
configMap, err := c.k8s.KubernetesClient.CoreV1().ConfigMaps(c.defaultNS).Get(context.Background(), configMapName, metav1.GetOptions{})
if err != nil {
return nil, err
}
if bData, err := json.Marshal(configMap.Data); err == nil {
return readConfig(bData)
}
return nil, nil
}
func (c *ClusterConfig) existsConfigMap() bool {
_, err := c.k8s.KubernetesClient.CoreV1().ConfigMaps(c.defaultNS).Get(context.Background(), configMapName, metav1.GetOptions{})
func existsConfigFile() bool {
_, err := os.ReadFile(ConfigFileFullPath())
return err == nil
}
func existsConfigJson() bool {
_, err := ioutil.ReadFile(getter.GetDefaultPath(ConfigFileName + ".json"))
return err == nil
}
func (c *ClusterConfig) createConfigMap() error {
if c.k8s == nil {
return nil
@@ -288,6 +359,20 @@ func (c *ClusterConfig) updateConfigMap() error {
return err
}
func (c *ClusterConfig) updateConfigFile() error {
if err := os.WriteFile(ConfigFileFullPath(), c.configObj.Json(), 0664); err != nil {
return err
}
return nil
}
func (c *ClusterConfig) createConfigFile() error {
if err := os.WriteFile(ConfigFileFullPath(), c.configObj.Json(), 0664); err != nil {
return err
}
return nil
}
func (c *ClusterConfig) updateConfigData(configMap *corev1.ConfigMap) {
if len(configMap.Data) == 0 {
configMap.Data = make(map[string]string)
@@ -300,7 +385,7 @@ func (c *ClusterConfig) updateConfigData(configMap *corev1.ConfigMap) {
}
}
func loadConfigFromFile() (*ConfigObj, error) {
dat, err := ioutil.ReadFile(getter.GetDefaultPath(ConfigFileName + ".json"))
dat, err := os.ReadFile(ConfigFileFullPath())
if err != nil {
return nil, err
}
@@ -317,9 +402,38 @@ func readConfig(dat []byte) (*ConfigObj, error) {
return configObj, err
}
func (c *ClusterConfig) ToMapString() map[string]interface{} {
m := map[string]interface{}{}
bc, _ := json.Marshal(c.configObj)
json.Unmarshal(bc, &m)
return m
// Check if the customer is submitted
func IsSubmitted(clusterConfig *ClusterConfig) bool {
return clusterConfig.existsConfigMap() || existsConfigFile()
}
// Check if the customer is registered
func IsRegistered(clusterConfig *ClusterConfig) bool {
// get from armoBE
tenantResponse, err := clusterConfig.backendAPI.GetCustomerGUID(clusterConfig.GetCustomerGUID())
if err == nil && tenantResponse != nil {
if tenantResponse.AdminMail != "" { // this customer already belongs to some user
return true
}
}
return false
}
func DeleteConfig(k8s *k8sinterface.KubernetesApi) error {
if err := DeleteConfigMap(k8s); err != nil {
return err
}
if err := DeleteConfigFile(); err != nil {
return err
}
return nil
}
func DeleteConfigMap(k8s *k8sinterface.KubernetesApi) error {
return k8s.KubernetesClient.CoreV1().ConfigMaps(k8sinterface.GetDefaultNamespace()).Delete(context.Background(), configMapName, metav1.DeleteOptions{})
}
func DeleteConfigFile() error {
return os.Remove(ConfigFileFullPath())
}

View File

@@ -1,25 +1,25 @@
package cautils
import (
"github.com/armosec/kubescape/cautils/armotypes"
"github.com/armosec/kubescape/cautils/opapolicy"
"github.com/armosec/armoapi-go/armotypes"
"github.com/armosec/opa-utils/reporthandling"
)
// K8SResources map[<api group>/<api version>/<resource>]<resource object>
type K8SResources map[string]interface{}
type OPASessionObj struct {
Frameworks []opapolicy.Framework
Frameworks []reporthandling.Framework
K8SResources *K8SResources
Exceptions []armotypes.PostureExceptionPolicy
PostureReport *opapolicy.PostureReport
PostureReport *reporthandling.PostureReport
}
func NewOPASessionObj(frameworks []opapolicy.Framework, k8sResources *K8SResources) *OPASessionObj {
func NewOPASessionObj(frameworks []reporthandling.Framework, k8sResources *K8SResources) *OPASessionObj {
return &OPASessionObj{
Frameworks: frameworks,
K8SResources: k8sResources,
PostureReport: &opapolicy.PostureReport{
PostureReport: &reporthandling.PostureReport{
ClusterName: ClusterName,
CustomerGUID: CustomerGUID,
},
@@ -30,7 +30,7 @@ func NewOPASessionObjMock() *OPASessionObj {
return &OPASessionObj{
Frameworks: nil,
K8SResources: nil,
PostureReport: &opapolicy.PostureReport{
PostureReport: &reporthandling.PostureReport{
ClusterName: "",
CustomerGUID: "",
ReportID: "",
@@ -44,8 +44,8 @@ type ComponentConfig struct {
}
type Exception struct {
Ignore *bool `json:"ignore"` // ignore test results
MultipleScore *opapolicy.AlertScore `json:"multipleScore"` // MultipleScore number - float32
Namespaces []string `json:"namespaces"`
Regex string `json:"regex"` // not supported
Ignore *bool `json:"ignore"` // ignore test results
MultipleScore *reporthandling.AlertScore `json:"multipleScore"` // MultipleScore number - float32
Namespaces []string `json:"namespaces"`
Regex string `json:"regex"` // not supported
}

View File

@@ -24,8 +24,8 @@ var FailureDisplay = color.New(color.Bold, color.FgHiRed).FprintfFunc()
var WarningDisplay = color.New(color.Bold, color.FgCyan).FprintfFunc()
var FailureTextDisplay = color.New(color.Faint, color.FgHiRed).FprintfFunc()
var InfoDisplay = color.New(color.Bold, color.FgHiYellow).FprintfFunc()
var InfoTextDisplay = color.New(color.Faint, color.FgHiYellow).FprintfFunc()
var SimpleDisplay = color.New(color.Bold, color.FgHiWhite).FprintfFunc()
var InfoTextDisplay = color.New(color.Bold, color.FgHiYellow).FprintfFunc()
var SimpleDisplay = color.New().FprintfFunc()
var SuccessDisplay = color.New(color.Bold, color.FgHiGreen).FprintfFunc()
var DescriptionDisplay = color.New(color.Faint, color.FgWhite).FprintfFunc()

View File

@@ -3,9 +3,11 @@ package getter
import (
"fmt"
"net/http"
"time"
"github.com/armosec/kubescape/cautils/armotypes"
"github.com/armosec/kubescape/cautils/opapolicy"
"github.com/armosec/armoapi-go/armotypes"
"github.com/armosec/opa-utils/reporthandling"
"github.com/golang/glog"
)
// =======================================================================================================================
@@ -15,29 +17,87 @@ import (
var (
// ATTENTION!!!
// Changes to these URL variable names, or to how they are used, affect the build process! BE CAREFUL
ArmoBEURL = "eggdashbe.eudev3.cyberarmorsoft.com"
ArmoERURL = "report.eudev3.cyberarmorsoft.com"
ArmoFEURL = "armoui.eudev3.cyberarmorsoft.com"
// ArmoURL = "https://dashbe.euprod1.cyberarmorsoft.com"
armoERURL = "report.armo.cloud"
armoBEURL = "api.armo.cloud"
armoFEURL = "portal.armo.cloud"
armoDevERURL = "report.eudev3.cyberarmorsoft.com"
armoDevBEURL = "eggdashbe.eudev3.cyberarmorsoft.com"
armoDevFEURL = "armoui.eudev3.cyberarmorsoft.com"
)
// Armo API for downloading policies
type ArmoAPI struct {
httpClient *http.Client
apiURL string
erURL string
feURL string
}
func NewArmoAPI() *ArmoAPI {
var globalArmoAPIConnector *ArmoAPI
func SetARMOAPIConnector(armoAPI *ArmoAPI) {
globalArmoAPIConnector = armoAPI
}
func GetArmoAPIConnector() *ArmoAPI {
if globalArmoAPIConnector == nil {
glog.Error("returning nil API connector")
}
return globalArmoAPIConnector
}
func NewARMOAPIDev() *ArmoAPI {
apiObj := newArmoAPI()
apiObj.apiURL = armoDevBEURL
apiObj.erURL = armoDevERURL
apiObj.feURL = armoDevFEURL
return apiObj
}
func NewARMOAPIProd() *ArmoAPI {
apiObj := newArmoAPI()
apiObj.apiURL = armoBEURL
apiObj.erURL = armoERURL
apiObj.feURL = armoFEURL
return apiObj
}
func NewARMOAPICustomized(armoERURL, armoBEURL, armoFEURL string) *ArmoAPI {
apiObj := newArmoAPI()
apiObj.erURL = armoERURL
apiObj.apiURL = armoBEURL
apiObj.feURL = armoFEURL
return apiObj
}
func newArmoAPI() *ArmoAPI {
return &ArmoAPI{
httpClient: &http.Client{},
httpClient: &http.Client{Timeout: time.Duration(61) * time.Second},
}
}
func (armoAPI *ArmoAPI) GetFramework(name string) (*opapolicy.Framework, error) {
func (armoAPI *ArmoAPI) GetFrontendURL() string {
return armoAPI.feURL
}
func (armoAPI *ArmoAPI) GetReportReceiverURL() string {
return armoAPI.erURL
}
func (armoAPI *ArmoAPI) GetFramework(name string) (*reporthandling.Framework, error) {
respStr, err := HttpGetter(armoAPI.httpClient, armoAPI.getFrameworkURL(name))
if err != nil {
return nil, err
}
framework := &opapolicy.Framework{}
framework := &reporthandling.Framework{}
if err = JSONDecoder(respStr).Decode(framework); err != nil {
return nil, err
}

View File

@@ -8,7 +8,7 @@ import (
func (armoAPI *ArmoAPI) getFrameworkURL(frameworkName string) string {
u := url.URL{}
u.Scheme = "https"
u.Host = ArmoBEURL
u.Host = armoAPI.apiURL
u.Path = "v1/armoFrameworks"
q := u.Query()
q.Add("customerGUID", "11111111-1111-1111-1111-111111111111")
@@ -22,7 +22,7 @@ func (armoAPI *ArmoAPI) getFrameworkURL(frameworkName string) string {
func (armoAPI *ArmoAPI) getExceptionsURL(customerGUID, clusterName string) string {
u := url.URL{}
u.Scheme = "https"
u.Host = ArmoBEURL
u.Host = armoAPI.apiURL
u.Path = "api/v1/armoPostureExceptions"
q := u.Query()
@@ -38,7 +38,7 @@ func (armoAPI *ArmoAPI) getExceptionsURL(customerGUID, clusterName string) strin
func (armoAPI *ArmoAPI) getCustomerURL() string {
u := url.URL{}
u.Scheme = "https"
u.Host = ArmoBEURL
u.Host = armoAPI.apiURL
u.Path = "api/v1/createTenant"
return u.String()
}

View File

@@ -3,10 +3,11 @@ package getter
import (
"encoding/json"
"fmt"
"io/ioutil"
"io"
"net/http"
"time"
"github.com/armosec/kubescape/cautils/opapolicy"
"github.com/armosec/opa-utils/reporthandling"
)
// =======================================================================================================================
@@ -22,11 +23,11 @@ type DownloadReleasedPolicy struct {
func NewDownloadReleasedPolicy() *DownloadReleasedPolicy {
return &DownloadReleasedPolicy{
hostURL: "",
httpClient: &http.Client{},
httpClient: &http.Client{Timeout: 61 * time.Second},
}
}
func (drp *DownloadReleasedPolicy) GetFramework(name string) (*opapolicy.Framework, error) {
func (drp *DownloadReleasedPolicy) GetFramework(name string) (*reporthandling.Framework, error) {
if err := drp.setURL(name); err != nil {
return nil, err
}
@@ -35,7 +36,7 @@ func (drp *DownloadReleasedPolicy) GetFramework(name string) (*opapolicy.Framewo
return nil, err
}
framework := &opapolicy.Framework{}
framework := &reporthandling.Framework{}
if err = JSONDecoder(respStr).Decode(framework); err != nil {
return framework, err
}
@@ -56,7 +57,7 @@ func (drp *DownloadReleasedPolicy) setURL(frameworkName string) error {
return fmt.Errorf("failed to download file, status code: %s", resp.Status)
}
body, err := ioutil.ReadAll(resp.Body)
body, err := io.ReadAll(resp.Body)
if err != nil {
return fmt.Errorf("failed to read response body from '%s', reason: %s", latestReleases, err.Error())
}

View File

@@ -1,14 +1,17 @@
package getter
import (
"github.com/armosec/kubescape/cautils/armotypes"
"github.com/armosec/kubescape/cautils/opapolicy"
"github.com/armosec/armoapi-go/armotypes"
"github.com/armosec/opa-utils/reporthandling"
)
type IPolicyGetter interface {
GetFramework(name string) (*opapolicy.Framework, error)
GetFramework(name string) (*reporthandling.Framework, error)
}
type IExceptionsGetter interface {
GetExceptions(customerGUID, clusterName string) ([]armotypes.PostureExceptionPolicy, error)
}
type IBackend interface {
GetCustomerGUID(customerGUID string) (*TenantResponse, error)
}

View File

@@ -5,12 +5,12 @@ import (
"fmt"
"io"
"net/http"
"net/url"
"os"
"path"
"path/filepath"
"strings"
"github.com/armosec/kubescape/cautils/opapolicy"
"github.com/armosec/opa-utils/reporthandling"
)
func GetDefaultPath(name string) string {
@@ -21,14 +21,26 @@ func GetDefaultPath(name string) string {
return defaultfilePath
}
func SaveFrameworkInFile(framework *opapolicy.Framework, path string) error {
func SaveFrameworkInFile(framework *reporthandling.Framework, pathStr string) error {
encodedData, err := json.Marshal(framework)
if err != nil {
return err
}
err = os.WriteFile(path, []byte(fmt.Sprintf("%v", string(encodedData))), 0644)
err = os.WriteFile(pathStr, []byte(fmt.Sprintf("%v", string(encodedData))), 0644)
if err != nil {
return err
if os.IsNotExist(err) {
pathDir := path.Dir(pathStr)
if err := os.Mkdir(pathDir, 0744); err != nil {
return err
}
} else {
return err
}
err = os.WriteFile(pathStr, []byte(fmt.Sprintf("%v", string(encodedData))), 0644)
if err != nil {
return err
}
}
return nil
}
@@ -86,29 +98,3 @@ func httpRespToString(resp *http.Response) (string, error) {
return respStr, err
}
// URLEncoder encode url
func urlEncoder(oldURL string) string {
fullURL := strings.Split(oldURL, "?")
baseURL, err := url.Parse(fullURL[0])
if err != nil {
return ""
}
// Prepare Query Parameters
if len(fullURL) > 1 {
params := url.Values{}
queryParams := strings.Split(fullURL[1], "&")
for _, i := range queryParams {
queryParam := strings.Split(i, "=")
val := ""
if len(queryParam) > 1 {
val = queryParam[1]
}
params.Add(queryParam[0], val)
}
baseURL.RawQuery = params.Encode()
}
return baseURL.String()
}

View File

@@ -3,11 +3,11 @@ package getter
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"strings"
"github.com/armosec/kubescape/cautils/armotypes"
"github.com/armosec/kubescape/cautils/opapolicy"
"github.com/armosec/armoapi-go/armotypes"
"github.com/armosec/opa-utils/reporthandling"
)
// =======================================================================================================================
@@ -26,10 +26,10 @@ func NewLoadPolicy(filePath string) *LoadPolicy {
}
}
func (lp *LoadPolicy) GetFramework(frameworkName string) (*opapolicy.Framework, error) {
func (lp *LoadPolicy) GetFramework(frameworkName string) (*reporthandling.Framework, error) {
framework := &opapolicy.Framework{}
f, err := ioutil.ReadFile(lp.filePath)
framework := &reporthandling.Framework{}
f, err := os.ReadFile(lp.filePath)
if err != nil {
return nil, err
}
@@ -44,7 +44,7 @@ func (lp *LoadPolicy) GetFramework(frameworkName string) (*opapolicy.Framework,
func (lp *LoadPolicy) GetExceptions(customerGUID, clusterName string) ([]armotypes.PostureExceptionPolicy, error) {
exception := []armotypes.PostureExceptionPolicy{}
f, err := ioutil.ReadFile(lp.filePath)
f, err := os.ReadFile(lp.filePath)
if err != nil {
return nil, err
}

View File

@@ -1,265 +0,0 @@
package k8sinterface
import (
"bytes"
"encoding/base64"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ecr"
"github.com/docker/docker/api/types"
)
// For GCR there are some permissions one need to assign in order to allow ARMO to pull images:
// https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity
// gcloud iam service-accounts create armo-controller-sa
// gcloud projects add-iam-policy-binding <PROJECT_NAME> --role roles/storage.objectViewer --member "serviceAccount:armo-controller-sa@<PROJECT_NAME>.iam.gserviceaccount.com"
// gcloud iam service-accounts add-iam-policy-binding --role roles/iam.workloadIdentityUser --member "serviceAccount:<PROJECT_NAME>.svc.id.goog[cyberarmor-system/ca-controller-service-account]" armo-controller-sa@<PROJECT_NAME>.iam.gserviceaccount.com
// kubectl annotate serviceaccount --overwrite --namespace cyberarmor-system ca-controller-service-account iam.gke.io/gcp-service-account=armo-controller-sa@<PROJECT_NAME>.iam.gserviceaccount.com
const (
gcrDefaultServiceAccountName = "default"
// armoServiceAccountName = "ca-controller-service-account"
)
var (
httpClient = http.Client{Timeout: 5 * time.Second}
)
// CheckIsECRImage checks whether this image is suspected to be an ECR-hosted image
func CheckIsECRImage(imageTag string) bool {
return strings.Contains(imageTag, "dkr.ecr")
}
// GetLoginDetailsForECR returns the user name and password using the machine's default IAM role or ~/.aws/config
func GetLoginDetailsForECR(imageTag string) (string, string, error) {
// imageTag := "015253967648.dkr.ecr.eu-central-1.amazonaws.com/armo:1"
imageTagSlices := strings.Split(imageTag, ".")
repo := imageTagSlices[0]
region := imageTagSlices[3]
mySession := session.Must(session.NewSession())
ecrClient := ecr.New(mySession, aws.NewConfig().WithRegion(region))
input := &ecr.GetAuthorizationTokenInput{
RegistryIds: []*string{&repo},
}
res, err := ecrClient.GetAuthorizationToken(input)
if err != nil {
return "", "", fmt.Errorf("in PullFromECR, failed to GetAuthorizationToken: %v", err)
}
res64 := (*res.AuthorizationData[0].AuthorizationToken)
resB, err := base64.StdEncoding.DecodeString(res64)
if err != nil {
return "", "", fmt.Errorf("in PullFromECR, failed to DecodeString: %v", err)
}
delimiterIdx := bytes.IndexByte(resB, ':')
// userName := resB[:delimiterIdx]
// resB = resB[delimiterIdx+1:]
// resB, err = base64.StdEncoding.DecodeString(string(resB))
// if err != nil {
// t.Errorf("failed to DecodeString #2: %v\n\n", err)
// }
return string(resB[:delimiterIdx]), string(resB[delimiterIdx+1:]), nil
}
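A self-contained sketch of the two parsing steps this function relies on; the image tag and token below are fabricated, and the real credentials come from `ecr.GetAuthorizationToken`:

```go
package main

import (
	"bytes"
	"encoding/base64"
	"fmt"
	"strings"
)

func main() {
	// Layout assumed by GetLoginDetailsForECR:
	// <registry-id>.dkr.ecr.<region>.amazonaws.com/<repo>:<tag>
	imageTag := "015253967648.dkr.ecr.eu-central-1.amazonaws.com/armo:1"
	parts := strings.Split(imageTag, ".")
	registryID, region := parts[0], parts[3]
	fmt.Println(registryID, region) // 015253967648 eu-central-1

	// The authorization token is base64("<user>:<password>"); the code above
	// decodes it and splits on the first ':'.
	token := base64.StdEncoding.EncodeToString([]byte("AWS:s3cr3t")) // fabricated
	raw, _ := base64.StdEncoding.DecodeString(token)
	i := bytes.IndexByte(raw, ':')
	fmt.Println(string(raw[:i]), string(raw[i+1:])) // AWS s3cr3t
}
```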
func CheckIsACRImage(imageTag string) bool {
// atest1.azurecr.io/go-inf:1
return strings.Contains(imageTag, ".azurecr.io/")
}
type azureADDResponseJson struct {
AccessToken string `json:"access_token"`
RefreshToken string `json:"refresh_token"`
ExpiresIn string `json:"expires_in"`
ExpiresOn string `json:"expires_on"`
NotBefore string `json:"not_before"`
Resource string `json:"resource"`
TokenType string `json:"token_type"`
}
func getAzureAADAccessToken() (string, error) {
msi_endpoint, err := url.Parse("http://169.254.169.254/metadata/identity/oauth2/token?api-version=2018-02-01")
if err != nil {
return "", fmt.Errorf("creating URL : %v", err)
}
msi_parameters := url.Values{}
msi_parameters.Add("resource", "https://management.azure.com/")
msi_parameters.Add("api-version", "2018-02-01")
msi_endpoint.RawQuery = msi_parameters.Encode()
req, err := http.NewRequest("GET", msi_endpoint.String(), nil)
if err != nil {
return "", fmt.Errorf("creating HTTP request : %v", err)
}
req.Header.Add("Metadata", "true")
// Call managed services for Azure resources token endpoint
resp, err := httpClient.Do(req)
if err != nil {
return "", fmt.Errorf("calling token endpoint : %v", err)
}
// Pull out response body
responseBytes, err := ioutil.ReadAll(resp.Body)
defer resp.Body.Close()
if err != nil {
return "", fmt.Errorf("reading response body : %v", err)
}
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
return "", fmt.Errorf("azure ActiveDirectory AT resp: %v, %v", resp.Status, string(responseBytes))
}
// Unmarshall response body into struct
var r azureADDResponseJson
err = json.Unmarshal(responseBytes, &r)
if err != nil {
return "", fmt.Errorf("unmarshalling the response: %v", err)
}
return r.AccessToken, nil
}
// GetLoginDetailsForAzurCR return user name + password to use
func GetLoginDetailsForAzurCR(imageTag string) (string, string, error) {
// imageTag := "atest1.azurecr.io/go-inf:1"
imageTagSlices := strings.Split(imageTag, "/")
azureIdensAT, err := getAzureAADAccessToken()
if err != nil {
return "", "", err
}
atMap := make(map[string]interface{})
azureIdensATSlices := strings.Split(azureIdensAT, ".")
if len(azureIdensATSlices) < 2 {
return "", "", fmt.Errorf("len(azureIdensATSlices) < 2")
}
resB, err := base64.RawStdEncoding.DecodeString(azureIdensATSlices[1])
if err != nil {
return "", "", fmt.Errorf("in GetLoginDetailsForAzurCR, failed to DecodeString: %v, %s", err, azureIdensATSlices[1])
}
if err := json.Unmarshal(resB, &atMap); err != nil {
return "", "", fmt.Errorf("failed to unmarshal azureIdensAT: %v, %s", err, string(resB))
}
// exchanging the AAD access token for an ACR refresh token
refreshToken, err := excahngeAzureAADAccessTokenForACRRefreshToken(imageTagSlices[0], fmt.Sprintf("%v", atMap["tid"]), azureIdensAT)
if err != nil {
return "", "", fmt.Errorf("failed to excahngeAzureAADAccessTokenForACRRefreshToken: %v, registry: %s, tenantID: %s, azureAADAT: %s", err, imageTagSlices[0], fmt.Sprintf("%v", atMap["tid"]), azureIdensAT)
}
return "00000000-0000-0000-0000-000000000000", refreshToken, nil
}
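A hedged sketch of the claim-extraction step: the AAD token's payload segment is decoded with `base64.RawStdEncoding` and the `tid` (tenant ID) claim is read before the `/oauth2/exchange` call. The token and claims below are fabricated:

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"strings"
)

func main() {
	// Fabricated AAD-style token: header.payload.signature
	payload, _ := json.Marshal(map[string]string{"tid": "11111111-2222-3333-4444-555555555555"})
	token := "header." + base64.RawStdEncoding.EncodeToString(payload) + ".signature"

	seg := strings.Split(token, ".")[1]
	raw, err := base64.RawStdEncoding.DecodeString(seg)
	if err != nil {
		panic(err)
	}
	claims := map[string]interface{}{}
	if err := json.Unmarshal(raw, &claims); err != nil {
		panic(err)
	}
	// GetLoginDetailsForAzurCR then POSTs service=<registry>, tenant=<tid> and
	// the AAD token to https://<registry>/oauth2/exchange for a refresh token.
	fmt.Println("tenant:", claims["tid"])
}
```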
func excahngeAzureAADAccessTokenForACRRefreshToken(registry, tenantID, azureAADAT string) (string, error) {
msi_parameters := url.Values{}
msi_parameters.Add("service", registry)
msi_parameters.Add("grant_type", "access_token")
msi_parameters.Add("tenant", tenantID)
msi_parameters.Add("access_token", azureAADAT)
postBodyStr := msi_parameters.Encode()
req, err := http.NewRequest("POST", fmt.Sprintf("https://%v/oauth2/exchange", registry), strings.NewReader(postBodyStr))
if err != nil {
return "", fmt.Errorf("creating HTTP request : %v", err)
}
req.Header.Add("Metadata", "true")
req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
// Call managed services for Azure resources token endpoint
resp, err := httpClient.Do(req)
if err != nil {
return "", fmt.Errorf("calling token endpoint : %v", err)
}
// Pull out response body
responseBytes, err := ioutil.ReadAll(resp.Body)
defer resp.Body.Close()
if err != nil {
return "", fmt.Errorf("reading response body : %v", err)
}
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
return "", fmt.Errorf("azure exchange AT resp: %v, %v", resp.Status, string(responseBytes))
}
resultMap := make(map[string]string)
err = json.Unmarshal(responseBytes, &resultMap)
if err != nil {
return "", fmt.Errorf("unmarshalling the response: %v", err)
}
return resultMap["refresh_token"], nil
}
func CheckIsGCRImage(imageTag string) bool {
// gcr.io/elated-pottery-310110/golang-inf:2
return strings.Contains(imageTag, "gcr.io/")
}
// GetLoginDetailsForGCR return user name + password to use
func GetLoginDetailsForGCR(imageTag string) (string, string, error) {
msi_endpoint, err := url.Parse(fmt.Sprintf("http://169.254.169.254/computeMetadata/v1/instance/service-accounts/%s/token", gcrDefaultServiceAccountName))
if err != nil {
return "", "", fmt.Errorf("creating URL : %v", err)
}
req, err := http.NewRequest("GET", msi_endpoint.String(), nil)
if err != nil {
return "", "", fmt.Errorf("creating HTTP request : %v", err)
}
req.Header.Add("Metadata-Flavor", "Google")
// Call the GCE instance metadata token endpoint
resp, err := httpClient.Do(req)
if err != nil {
return "", "", fmt.Errorf("calling token endpoint : %v", err)
}
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
return "", "", fmt.Errorf("HTTP Status : %v, make sure the '%s' service account is configured for ARMO pod", resp.Status, gcrDefaultServiceAccountName)
}
defer resp.Body.Close()
respMap := make(map[string]interface{})
if err := json.NewDecoder(resp.Body).Decode(&respMap); err != nil {
return "", "", fmt.Errorf("json Decode : %v", err)
}
return "oauth2accesstoken", fmt.Sprintf("%v", respMap["access_token"]), nil
}
func GetCloudVendorRegistryCredentials(imageTag string) (map[string]types.AuthConfig, error) {
secrets := map[string]types.AuthConfig{}
var errRes error
if CheckIsACRImage(imageTag) {
userName, password, err := GetLoginDetailsForAzurCR(imageTag)
if err != nil {
errRes = fmt.Errorf("failed to GetLoginDetailsForACR(%s): %v", imageTag, err)
} else {
secrets[imageTag] = types.AuthConfig{
Username: userName,
Password: password,
}
}
}
if CheckIsECRImage(imageTag) {
userName, password, err := GetLoginDetailsForECR(imageTag)
if err != nil {
errRes = fmt.Errorf("failed to GetLoginDetailsForECR(%s): %v", imageTag, err)
} else {
secrets[imageTag] = types.AuthConfig{
Username: userName,
Password: password,
}
}
}
if CheckIsGCRImage(imageTag) {
userName, password, err := GetLoginDetailsForGCR(imageTag)
if err != nil {
errRes = fmt.Errorf("failed to GetLoginDetailsForGCR(%s): %v", imageTag, err)
} else {
secrets[imageTag] = types.AuthConfig{
Username: userName,
Password: password,
}
}
}
return secrets, errRes
}
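The vendor dispatch itself is just substring matching on the image reference. A simplified sketch; the original runs all three checks independently and merges the results into one `AuthConfig` map, whereas the switch below stops at the first match:

```go
package main

import (
	"fmt"
	"strings"
)

// registryVendor condenses CheckIsACRImage / CheckIsECRImage / CheckIsGCRImage
// into a single switch, for illustration only.
func registryVendor(imageTag string) string {
	switch {
	case strings.Contains(imageTag, ".azurecr.io/"):
		return "ACR"
	case strings.Contains(imageTag, "dkr.ecr"):
		return "ECR"
	case strings.Contains(imageTag, "gcr.io/"):
		return "GCR"
	default:
		return "unknown"
	}
}

func main() {
	for _, tag := range []string{
		"atest1.azurecr.io/go-inf:1",
		"015253967648.dkr.ecr.eu-central-1.amazonaws.com/armo:1",
		"gcr.io/elated-pottery-310110/golang-inf:2",
		"quay.io/armosec/demoservice:v25",
	} {
		fmt.Println(tag, "->", registryVendor(tag))
	}
}
```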


@@ -1,116 +0,0 @@
package k8sinterface
import (
"context"
"fmt"
"os"
"strings"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
// DO NOT REMOVE - load cloud providers auth
_ "k8s.io/client-go/plugin/pkg/client/auth"
"sigs.k8s.io/controller-runtime/pkg/client/config"
)
var ConnectedToCluster = true
// K8SConfig pointer to k8s config
var K8SConfig *restclient.Config
// KubernetesApi -
type KubernetesApi struct {
KubernetesClient kubernetes.Interface
DynamicClient dynamic.Interface
Context context.Context
}
// NewKubernetesApi -
func NewKubernetesApi() *KubernetesApi {
var kubernetesClient *kubernetes.Clientset
var err error
if !IsConnectedToCluster() {
fmt.Println(fmt.Errorf("failed to load kubernetes config: no configuration has been provided, try setting KUBECONFIG environment variable"))
os.Exit(1)
}
kubernetesClient, err = kubernetes.NewForConfig(GetK8sConfig())
if err != nil {
fmt.Printf("Failed to load config file, reason: %s", err.Error())
os.Exit(1)
}
dynamicClient, err := dynamic.NewForConfig(K8SConfig)
if err != nil {
fmt.Printf("Failed to load config file, reason: %s", err.Error())
os.Exit(1)
}
return &KubernetesApi{
KubernetesClient: kubernetesClient,
DynamicClient: dynamicClient,
Context: context.Background(),
}
}
// RunningIncluster whether running in cluster
var RunningIncluster bool
// LoadK8sConfig load config from local file or from cluster
func LoadK8sConfig() error {
kubeconfig, err := config.GetConfig()
if err != nil {
return fmt.Errorf("failed to load kubernetes config: %s", strings.ReplaceAll(err.Error(), "KUBERNETES_MASTER", "KUBECONFIG"))
}
if _, err := restclient.InClusterConfig(); err == nil {
RunningIncluster = true
}
K8SConfig = kubeconfig
return nil
}
// GetK8sConfig get config. load if not loaded yet
func GetK8sConfig() *restclient.Config {
if !IsConnectedToCluster() {
return nil
}
return K8SConfig
}
func IsConnectedToCluster() bool {
if K8SConfig == nil {
if err := LoadK8sConfig(); err != nil {
ConnectedToCluster = false
}
}
return ConnectedToCluster
}
func GetClusterName() string {
if !ConnectedToCluster {
return ""
}
kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(clientcmd.NewDefaultClientConfigLoadingRules(), &clientcmd.ConfigOverrides{})
config, err := kubeConfig.RawConfig()
if err != nil {
return ""
}
// TODO - Handle if empty
return config.CurrentContext
}
func GetDefaultNamespace() string {
clientCfg, err := clientcmd.NewDefaultClientConfigLoadingRules().Load()
if err != nil {
return "default"
}
namespace := clientCfg.Contexts[clientCfg.CurrentContext].Namespace
if namespace == "" {
namespace = "default"
}
return namespace
}
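For comparison, a small usage sketch of the same kubeconfig lookup with an extra guard for a current context that is missing from the contexts map (assumes `k8s.io/client-go` is on the module path, as it is for this package):

```go
package main

import (
	"fmt"

	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Same lookup as GetDefaultNamespace above, guarded against a kubeconfig
	// whose currentContext entry is absent from the contexts map.
	cfg, err := clientcmd.NewDefaultClientConfigLoadingRules().Load()
	namespace := "default"
	if err == nil {
		if ctx, ok := cfg.Contexts[cfg.CurrentContext]; ok && ctx.Namespace != "" {
			namespace = ctx.Namespace
		}
	}
	fmt.Println(namespace)
}
```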


@@ -1,34 +0,0 @@
package k8sinterface
import (
"testing"
"github.com/armosec/kubescape/cautils/cautils"
)
func TestGetGroupVersionResource(t *testing.T) {
wlid := "wlid://cluster-david-v1/namespace-default/deployment-nginx-deployment"
r, err := GetGroupVersionResource(cautils.GetKindFromWlid(wlid))
if err != nil {
t.Error(err)
return
}
if r.Group != "apps" {
t.Errorf("wrong group")
}
if r.Version != "v1" {
t.Errorf("wrong Version")
}
if r.Resource != "deployments" {
t.Errorf("wrong Resource")
}
r2, err := GetGroupVersionResource("NetworkPolicy")
if err != nil {
t.Error(err)
return
}
if r2.Resource != "networkpolicies" {
t.Errorf("wrong Resource")
}
}


@@ -1,145 +0,0 @@
package k8sinterface
import (
"fmt"
"strings"
"github.com/armosec/kubescape/cautils/cautils"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic"
//
// Uncomment to load all auth plugins
// _ "k8s.io/client-go/plugin/pkg/client/auth
//
// Or uncomment to load specific auth plugins
// _ "k8s.io/client-go/plugin/pkg/client/auth/azure"
// _ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
// _ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
// _ "k8s.io/client-go/plugin/pkg/client/auth/openstack"
)
func (k8sAPI *KubernetesApi) GetWorkloadByWlid(wlid string) (*Workload, error) {
return k8sAPI.GetWorkload(cautils.GetNamespaceFromWlid(wlid), cautils.GetKindFromWlid(wlid), cautils.GetNameFromWlid(wlid))
}
func (k8sAPI *KubernetesApi) GetWorkload(namespace, kind, name string) (*Workload, error) {
groupVersionResource, err := GetGroupVersionResource(kind)
if err != nil {
return nil, err
}
w, err := k8sAPI.ResourceInterface(&groupVersionResource, namespace).Get(k8sAPI.Context, name, metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("failed to GET resource, kind: '%s', namespace: '%s', name: '%s', reason: %s", kind, namespace, name, err.Error())
}
return NewWorkloadObj(w.Object), nil
}
func (k8sAPI *KubernetesApi) ListWorkloads(groupVersionResource *schema.GroupVersionResource, namespace string, podLabels, fieldSelector map[string]string) ([]Workload, error) {
listOptions := metav1.ListOptions{}
if podLabels != nil && len(podLabels) > 0 {
set := labels.Set(podLabels)
listOptions.LabelSelector = SelectorToString(set)
}
if fieldSelector != nil && len(fieldSelector) > 0 {
set := labels.Set(fieldSelector)
listOptions.FieldSelector = SelectorToString(set)
}
uList, err := k8sAPI.ResourceInterface(groupVersionResource, namespace).List(k8sAPI.Context, listOptions)
if err != nil {
return nil, fmt.Errorf("failed to LIST resources, reason: %s", err.Error())
}
workloads := make([]Workload, len(uList.Items))
for i := range uList.Items {
workloads[i] = *NewWorkloadObj(uList.Items[i].Object)
}
return workloads, nil
}
func (k8sAPI *KubernetesApi) DeleteWorkloadByWlid(wlid string) error {
groupVersionResource, err := GetGroupVersionResource(cautils.GetKindFromWlid(wlid))
if err != nil {
return err
}
err = k8sAPI.ResourceInterface(&groupVersionResource, cautils.GetNamespaceFromWlid(wlid)).Delete(k8sAPI.Context, cautils.GetNameFromWlid(wlid), metav1.DeleteOptions{})
if err != nil {
return fmt.Errorf("failed to DELETE resource, workloadID: '%s', reason: %s", wlid, err.Error())
}
return nil
}
func (k8sAPI *KubernetesApi) CreateWorkload(workload *Workload) (*Workload, error) {
groupVersionResource, err := GetGroupVersionResource(workload.GetKind())
if err != nil {
return nil, err
}
obj, err := workload.ToUnstructured()
if err != nil {
return nil, err
}
w, err := k8sAPI.ResourceInterface(&groupVersionResource, workload.GetNamespace()).Create(k8sAPI.Context, obj, metav1.CreateOptions{})
if err != nil {
return nil, fmt.Errorf("failed to CREATE resource, workload: '%s', reason: %s", workload.Json(), err.Error())
}
return NewWorkloadObj(w.Object), nil
}
func (k8sAPI *KubernetesApi) UpdateWorkload(workload *Workload) (*Workload, error) {
groupVersionResource, err := GetGroupVersionResource(workload.GetKind())
if err != nil {
return nil, err
}
obj, err := workload.ToUnstructured()
if err != nil {
return nil, err
}
w, err := k8sAPI.ResourceInterface(&groupVersionResource, workload.GetNamespace()).Update(k8sAPI.Context, obj, metav1.UpdateOptions{})
if err != nil {
return nil, fmt.Errorf("failed to UPDATE resource, workload: '%s', reason: %s", workload.Json(), err.Error())
}
return NewWorkloadObj(w.Object), nil
}
func (k8sAPI *KubernetesApi) GetNamespace(ns string) (*Workload, error) {
groupVersionResource, err := GetGroupVersionResource("namespace")
if err != nil {
return nil, err
}
w, err := k8sAPI.DynamicClient.Resource(groupVersionResource).Get(k8sAPI.Context, ns, metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("failed to get namespace: '%s', reason: %s", ns, err.Error())
}
return NewWorkloadObj(w.Object), nil
}
func (k8sAPI *KubernetesApi) ResourceInterface(resource *schema.GroupVersionResource, namespace string) dynamic.ResourceInterface {
if IsNamespaceScope(resource.Group, resource.Resource) {
return k8sAPI.DynamicClient.Resource(*resource).Namespace(namespace)
}
return k8sAPI.DynamicClient.Resource(*resource)
}
func (k8sAPI *KubernetesApi) CalculateWorkloadParentRecursive(workload *Workload) (string, string, error) {
ownerReferences, err := workload.GetOwnerReferences() // OwnerReferences in workload
if err != nil {
return workload.GetKind(), workload.GetName(), err
}
if len(ownerReferences) == 0 {
return workload.GetKind(), workload.GetName(), nil // parent found
}
ownerReference := ownerReferences[0]
parentWorkload, err := k8sAPI.GetWorkload(workload.GetNamespace(), ownerReference.Kind, ownerReference.Name)
if err != nil {
if strings.Contains(err.Error(), "not found in resourceMap") { // if parent is RCD
return workload.GetKind(), workload.GetName(), nil // parent found
}
return workload.GetKind(), workload.GetName(), err
}
return k8sAPI.CalculateWorkloadParentRecursive(parentWorkload)
}
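The recursion above keeps following the first ownerReference until it reaches a workload with no owner (or an owner of an unrecognized kind). A toy, in-memory illustration of that walk; names are fabricated, and the real code fetches each owner from the live API:

```go
package main

import "fmt"

// Toy ownership graph: object name -> owner name ("" means no owner). The real
// walk in CalculateWorkloadParentRecursive reads metadata.ownerReferences.
var owners = map[string]string{
	"nginx-7d478b6998-abcde": "nginx-7d478b6998", // Pod -> ReplicaSet
	"nginx-7d478b6998":       "nginx",            // ReplicaSet -> Deployment
	"nginx":                  "",                 // Deployment: no owner, recursion stops
}

func rootOwner(name string) string {
	if owner := owners[name]; owner != "" {
		return rootOwner(owner)
	}
	return name
}

func main() {
	fmt.Println(rootOwner("nginx-7d478b6998-abcde")) // nginx
}
```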


@@ -1,43 +0,0 @@
package k8sinterface
import (
"context"
"k8s.io/apimachinery/pkg/runtime"
dynamicfake "k8s.io/client-go/dynamic/fake"
kubernetesfake "k8s.io/client-go/kubernetes/fake"
//
// metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
// Uncomment to load all auth plugins
// _ "k8s.io/client-go/plugin/pkg/client/auth
//
// Or uncomment to load specific auth plugins
// _ "k8s.io/client-go/plugin/pkg/client/auth/azure"
// _ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
// _ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
// _ "k8s.io/client-go/plugin/pkg/client/auth/openstack"
)
// NewKubernetesApi -
func NewKubernetesApiMock() *KubernetesApi {
return &KubernetesApi{
KubernetesClient: kubernetesfake.NewSimpleClientset(),
DynamicClient: dynamicfake.NewSimpleDynamicClient(&runtime.Scheme{}),
Context: context.Background(),
}
}
// func TestListDynamic(t *testing.T) {
// k8s := NewKubernetesApi()
// resource := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"}
// clientResource, err := k8s.DynamicClient.Resource(resource).Namespace("default").List(k8s.Context, metav1.ListOptions{})
// if err != nil {
// t.Errorf("err: %v", err)
// } else {
// bla, _ := json.Marshal(clientResource)
// // t.Errorf("BearerToken: %v", *K8SConfig)
// // ioutil.WriteFile("bla.json", bla, 777)
// t.Errorf("clientResource: %s", string(bla))
// }
// }


@@ -1,66 +0,0 @@
package k8sinterface
import (
"sort"
"strings"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels"
)
//
// Uncomment to load all auth plugins
// _ "k8s.io/client-go/plugin/pkg/client/auth
//
// Or uncomment to load specific auth plugins
// _ "k8s.io/client-go/plugin/pkg/client/auth/azure"
// _ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
// _ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
// _ "k8s.io/client-go/plugin/pkg/client/auth/openstack"
func ConvertUnstructuredSliceToMap(unstructuredSlice []unstructured.Unstructured) []map[string]interface{} {
converted := make([]map[string]interface{}, len(unstructuredSlice))
for i := range unstructuredSlice {
converted[i] = unstructuredSlice[i].Object
}
return converted
}
func FilterOutOwneredResources(result []unstructured.Unstructured) []unstructured.Unstructured {
response := []unstructured.Unstructured{}
recognizedOwners := []string{"Deployment", "ReplicaSet", "DaemonSet", "StatefulSet", "Job", "CronJob"}
for i := range result {
ownerReferences := result[i].GetOwnerReferences()
if len(ownerReferences) == 0 {
response = append(response, result[i])
} else if !IsStringInSlice(recognizedOwners, ownerReferences[0].Kind) {
response = append(response, result[i])
}
}
return response
}
func IsStringInSlice(slice []string, val string) bool {
for _, item := range slice {
if item == val {
return true
}
}
return false
}
// String returns all labels listed as a human readable string.
// Conveniently, exactly the format that ParseSelector takes.
func SelectorToString(ls labels.Set) string {
selector := make([]string, 0, len(ls))
for key, value := range ls {
if value != "" {
selector = append(selector, key+"="+value)
} else {
selector = append(selector, key)
}
}
// Sort for determinism.
sort.StringSlice(selector).Sort()
return strings.Join(selector, ",")
}
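A minimal sketch of the same selector formatting applied to a plain map (no apimachinery needed for illustration; keys are illustrative):

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// selectorString mirrors SelectorToString above for a plain map: empty values
// become bare keys and entries are sorted for determinism.
func selectorString(ls map[string]string) string {
	selector := make([]string, 0, len(ls))
	for k, v := range ls {
		if v != "" {
			selector = append(selector, k+"="+v)
		} else {
			selector = append(selector, k)
		}
	}
	sort.Strings(selector)
	return strings.Join(selector, ",")
}

func main() {
	fmt.Println(selectorString(map[string]string{"app": "demoservice-server", "armo.attach": ""}))
	// Output: app=demoservice-server,armo.attach
}
```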


@@ -1,10 +0,0 @@
package k8sinterface
import "testing"
func TestConvertUnstructuredSliceToMap(t *testing.T) {
converted := ConvertUnstructuredSliceToMap(V1KubeSystemNamespaceMock().Items)
if len(converted) == 0 { // != 7
t.Errorf("len(converted) == 0")
}
}


@@ -1,71 +0,0 @@
package k8sinterface
import (
"context"
"github.com/armosec/kubescape/cautils/cautils"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
)
func IsAttached(labels map[string]string) *bool {
return IsLabel(labels, cautils.ArmoAttach)
}
func IsAgentCompatibleLabel(labels map[string]string) *bool {
return IsLabel(labels, cautils.ArmoCompatibleLabel)
}
func IsAgentCompatibleAnnotation(annotations map[string]string) *bool {
return IsLabel(annotations, cautils.ArmoCompatibleAnnotation)
}
func SetAgentCompatibleLabel(labels map[string]string, val bool) {
SetLabel(labels, cautils.ArmoCompatibleLabel, val)
}
func SetAgentCompatibleAnnotation(annotations map[string]string, val bool) {
SetLabel(annotations, cautils.ArmoCompatibleAnnotation, val)
}
func IsLabel(labels map[string]string, key string) *bool {
if labels == nil || len(labels) == 0 {
return nil
}
var k bool
if l, ok := labels[key]; ok {
if l == "true" {
k = true
} else if l == "false" {
k = false
}
return &k
}
return nil
}
func SetLabel(labels map[string]string, key string, val bool) {
if labels == nil {
return
}
v := ""
if val {
v = "true"
} else {
v = "false"
}
labels[key] = v
}
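The tri-state return of IsLabel is the detail worth noting: `nil` means the key is absent entirely, while a non-nil pointer reports the boolean value. A small self-contained sketch (label keys are illustrative):

```go
package main

import "fmt"

// isLabel mirrors IsLabel above: nil means the key is absent, otherwise the
// returned pointer reports whether the value is the string "true".
func isLabel(labels map[string]string, key string) *bool {
	if len(labels) == 0 {
		return nil
	}
	if v, ok := labels[key]; ok {
		b := v == "true"
		return &b
	}
	return nil
}

func main() {
	labels := map[string]string{"armo.attach": "true"} // illustrative key
	if attached := isLabel(labels, "armo.attach"); attached != nil {
		fmt.Println("attach label present:", *attached) // true
	}
	if isLabel(labels, "armo.ignore") == nil {
		fmt.Println("ignore label not set at all")
	}
}
```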
func (k8sAPI *KubernetesApi) ListAttachedPods(namespace string) ([]corev1.Pod, error) {
return k8sAPI.ListPods(namespace, map[string]string{cautils.ArmoAttach: cautils.BoolToString(true)})
}
func (k8sAPI *KubernetesApi) ListPods(namespace string, podLabels map[string]string) ([]corev1.Pod, error) {
listOptions := metav1.ListOptions{}
if podLabels != nil && len(podLabels) > 0 {
set := labels.Set(podLabels)
listOptions.LabelSelector = set.AsSelector().String()
}
pods, err := k8sAPI.KubernetesClient.CoreV1().Pods(namespace).List(context.Background(), listOptions)
if err != nil {
return []corev1.Pod{}, err
}
return pods.Items, nil
}

File diff suppressed because one or more lines are too long


@@ -1,142 +0,0 @@
package k8sinterface
import (
"fmt"
"strings"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/runtime/schema"
)
const ValueNotFound = -1
// https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#-strong-api-groups-strong-
var ResourceGroupMapping = map[string]string{
"services": "/v1",
"pods": "/v1",
"replicationcontrollers": "/v1",
"podtemplates": "/v1",
"namespaces": "/v1",
"nodes": "/v1",
"configmaps": "/v1",
"secrets": "/v1",
"serviceaccounts": "/v1",
"persistentvolumeclaims": "/v1",
"limitranges": "/v1",
"resourcequotas": "/v1",
"daemonsets": "apps/v1",
"deployments": "apps/v1",
"replicasets": "apps/v1",
"statefulsets": "apps/v1",
"controllerrevisions": "apps/v1",
"jobs": "batch/v1",
"cronjobs": "batch/v1beta1",
"horizontalpodautoscalers": "autoscaling/v1",
"ingresses": "extensions/v1beta1",
"networkpolicies": "networking.k8s.io/v1",
"clusterroles": "rbac.authorization.k8s.io/v1",
"clusterrolebindings": "rbac.authorization.k8s.io/v1",
"roles": "rbac.authorization.k8s.io/v1",
"rolebindings": "rbac.authorization.k8s.io/v1",
"mutatingwebhookconfigurations": "admissionregistration.k8s.io/v1",
"validatingwebhookconfigurations": "admissionregistration.k8s.io/v1",
}
var GroupsClusterScope = []string{}
var ResourceClusterScope = []string{"nodes", "namespaces", "clusterroles", "clusterrolebindings"}
func GetGroupVersionResource(resource string) (schema.GroupVersionResource, error) {
resource = updateResourceKind(resource)
if r, ok := ResourceGroupMapping[resource]; ok {
gv := strings.Split(r, "/")
return schema.GroupVersionResource{Group: gv[0], Version: gv[1], Resource: resource}, nil
}
return schema.GroupVersionResource{}, fmt.Errorf("resource '%s' not found in resourceMap", resource)
}
func IsNamespaceScope(apiGroup, resource string) bool {
return StringInSlice(GroupsClusterScope, apiGroup) == ValueNotFound &&
StringInSlice(ResourceClusterScope, resource) == ValueNotFound
}
func StringInSlice(strSlice []string, str string) int {
for i := range strSlice {
if strSlice[i] == str {
return i
}
}
return ValueNotFound
}
func JoinResourceTriplets(group, version, resource string) string {
return fmt.Sprintf("%s/%s/%s", group, version, resource)
}
func GetResourceTriplets(group, version, resource string) []string {
resourceTriplets := []string{}
if resource == "" {
// load full map
for k, v := range ResourceGroupMapping {
g := strings.Split(v, "/")
resourceTriplets = append(resourceTriplets, JoinResourceTriplets(g[0], g[1], k))
}
} else if version == "" {
// load by resource
if v, ok := ResourceGroupMapping[resource]; ok {
g := strings.Split(v, "/")
if group == "" {
group = g[0]
}
resourceTriplets = append(resourceTriplets, JoinResourceTriplets(group, g[1], resource))
} else {
glog.Errorf("Resource '%s' unknown", resource)
}
} else if group == "" {
// load by resource and version
if v, ok := ResourceGroupMapping[resource]; ok {
g := strings.Split(v, "/")
resourceTriplets = append(resourceTriplets, JoinResourceTriplets(g[0], version, resource))
} else {
glog.Errorf("Resource '%s' unknown", resource)
}
} else {
resourceTriplets = append(resourceTriplets, JoinResourceTriplets(group, version, resource))
}
return resourceTriplets
}
func ResourceGroupToString(group, version, resource string) []string {
if group == "*" {
group = ""
}
if version == "*" {
version = ""
}
if resource == "*" {
resource = ""
}
resource = updateResourceKind(resource)
return GetResourceTriplets(group, version, resource)
}
func StringToResourceGroup(str string) (string, string, string) {
splitted := strings.Split(str, "/")
for i := range splitted {
if splitted[i] == "*" {
splitted[i] = ""
}
}
return splitted[0], splitted[1], splitted[2]
}
func updateResourceKind(resource string) string {
resource = strings.ToLower(resource)
if resource != "" && !strings.HasSuffix(resource, "s") {
if strings.HasSuffix(resource, "y") {
return fmt.Sprintf("%sies", strings.TrimSuffix(resource, "y")) // e.g. NetworkPolicy -> networkpolicies
} else {
return fmt.Sprintf("%ss", resource) // add 's' at the end of a resource
}
}
return resource
}
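A standalone sketch of the kind-to-resource normalization used to key ResourceGroupMapping (the helper name is illustrative):

```go
package main

import (
	"fmt"
	"strings"
)

// pluralize mirrors updateResourceKind above: lower-case the kind, then apply
// the simple y->ies / +s pluralization used to key ResourceGroupMapping.
func pluralize(kind string) string {
	kind = strings.ToLower(kind)
	if kind != "" && !strings.HasSuffix(kind, "s") {
		if strings.HasSuffix(kind, "y") {
			return strings.TrimSuffix(kind, "y") + "ies"
		}
		return kind + "s"
	}
	return kind
}

func main() {
	fmt.Println(pluralize("NetworkPolicy")) // networkpolicies
	fmt.Println(pluralize("Deployment"))    // deployments
	fmt.Println(pluralize("pods"))          // pods (already plural)
}
```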


@@ -1,22 +0,0 @@
package k8sinterface
import "testing"
func TestResourceGroupToString(t *testing.T) {
allResources := ResourceGroupToString("*", "*", "*")
if len(allResources) != len(ResourceGroupMapping) {
t.Errorf("Expected len: %d, received: %d", len(ResourceGroupMapping), len(allResources))
}
pod := ResourceGroupToString("*", "*", "Pod")
if len(pod) == 0 || pod[0] != "/v1/pods" {
t.Errorf("pod: %v", pod)
}
deployments := ResourceGroupToString("*", "*", "Deployment")
if len(deployments) == 0 || deployments[0] != "apps/v1/deployments" {
t.Errorf("deployments: %v", deployments)
}
cronjobs := ResourceGroupToString("*", "*", "cronjobs")
if len(cronjobs) == 0 || cronjobs[0] != "batch/v1beta1/cronjobs" {
t.Errorf("cronjobs: %v", cronjobs)
}
}


@@ -1,161 +0,0 @@
package k8sinterface
import (
"encoding/json"
"github.com/armosec/kubescape/cautils/apis"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
type IWorkload interface {
IBasicWorkload
// Convert
ToUnstructured() (*unstructured.Unstructured, error)
ToString() string
Json() string // DEPRECATED
// GET
GetWlid() string
GetJobID() *apis.JobTracking
GetVersion() string
GetGroup() string
// SET
SetWlid(string)
SetInject()
SetIgnore()
SetUpdateTime()
SetJobID(apis.JobTracking)
SetCompatible()
SetIncompatible()
SetReplaceheaders()
// EXIST
IsIgnore() bool
IsInject() bool
IsAttached() bool
IsCompatible() bool
IsIncompatible() bool
// REMOVE
RemoveWlid()
RemoveSecretData()
RemoveInject()
RemoveIgnore()
RemoveUpdateTime()
RemoveJobID()
RemoveCompatible()
RemoveArmoMetadata()
RemoveArmoLabels()
RemoveArmoAnnotations()
}
type IBasicWorkload interface {
// Set
SetKind(string)
SetWorkload(map[string]interface{})
SetLabel(key, value string)
SetAnnotation(key, value string)
SetNamespace(string)
SetName(string)
// Get
GetNamespace() string
GetName() string
GetGenerateName() string
GetApiVersion() string
GetKind() string
GetInnerAnnotation(string) (string, bool)
GetPodAnnotation(string) (string, bool)
GetAnnotation(string) (string, bool)
GetLabel(string) (string, bool)
GetAnnotations() map[string]string
GetInnerAnnotations() map[string]string
GetPodAnnotations() map[string]string
GetLabels() map[string]string
GetInnerLabels() map[string]string
GetPodLabels() map[string]string
GetVolumes() ([]corev1.Volume, error)
GetReplicas() int
GetContainers() ([]corev1.Container, error)
GetInitContainers() ([]corev1.Container, error)
GetOwnerReferences() ([]metav1.OwnerReference, error)
GetImagePullSecret() ([]corev1.LocalObjectReference, error)
GetServiceAccountName() string
GetSelector() (*metav1.LabelSelector, error)
GetResourceVersion() string
GetUID() string
GetPodSpec() (*corev1.PodSpec, error)
GetWorkload() map[string]interface{}
// REMOVE
RemoveLabel(string)
RemoveAnnotation(string)
RemovePodStatus()
RemoveResourceVersion()
}
type Workload struct {
workload map[string]interface{}
}
func NewWorkload(bWorkload []byte) (*Workload, error) {
workload := make(map[string]interface{})
if bWorkload != nil {
if err := json.Unmarshal(bWorkload, &workload); err != nil {
return nil, err
}
}
return &Workload{
workload: workload,
}, nil
}
func NewWorkloadObj(workload map[string]interface{}) *Workload {
return &Workload{
workload: workload,
}
}
func (w *Workload) Json() string {
return w.ToString()
}
func (w *Workload) ToString() string {
if w.GetWorkload() == nil {
return ""
}
bWorkload, err := json.Marshal(w.GetWorkload())
if err != nil {
return err.Error()
}
return string(bWorkload)
}
func (workload *Workload) DeepCopy(w map[string]interface{}) {
workload.workload = make(map[string]interface{})
byt, _ := json.Marshal(w)
json.Unmarshal(byt, &workload.workload)
}
func (w *Workload) ToUnstructured() (*unstructured.Unstructured, error) {
obj := &unstructured.Unstructured{}
if w.workload == nil {
return obj, nil
}
bWorkload, err := json.Marshal(w.workload)
if err != nil {
return obj, err
}
if err := json.Unmarshal(bWorkload, obj); err != nil {
return obj, err
}
return obj, nil
}
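At its core the Workload type is a generic JSON map plus typed accessors; a minimal sketch of the same parse step that NewWorkload performs (the manifest below is illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// NewWorkload simply unmarshals the raw manifest into a generic map; every
	// getter then walks that map.
	manifest := []byte(`{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"name":"nginx","namespace":"default"}}`)
	workload := map[string]interface{}{}
	if err := json.Unmarshal(manifest, &workload); err != nil {
		panic(err)
	}
	meta := workload["metadata"].(map[string]interface{})
	fmt.Println(workload["kind"], meta["namespace"], meta["name"]) // Deployment default nginx
}
```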


@@ -1,642 +0,0 @@
package k8sinterface
import (
"encoding/json"
"fmt"
"strconv"
"strings"
"time"
"github.com/armosec/kubescape/cautils/apis"
"github.com/armosec/kubescape/cautils/cautils"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// ======================================= DELETE ========================================
func (w *Workload) RemoveInject() {
w.RemovePodLabel(cautils.CAInject) // DEPRECATED
w.RemovePodLabel(cautils.CAAttachLabel) // DEPRECATED
w.RemovePodLabel(cautils.ArmoAttach)
w.RemoveLabel(cautils.CAInject) // DEPRECATED
w.RemoveLabel(cautils.CAAttachLabel) // DEPRECATED
w.RemoveLabel(cautils.ArmoAttach)
}
func (w *Workload) RemoveIgnore() {
w.RemovePodLabel(cautils.CAIgnore) // DEPRECATED
w.RemovePodLabel(cautils.ArmoAttach)
w.RemoveLabel(cautils.CAIgnore) // DEPRECATED
w.RemoveLabel(cautils.ArmoAttach)
}
func (w *Workload) RemoveWlid() {
w.RemovePodAnnotation(cautils.CAWlid) // DEPRECATED
w.RemovePodAnnotation(cautils.ArmoWlid)
w.RemoveAnnotation(cautils.CAWlid) // DEPRECATED
w.RemoveAnnotation(cautils.ArmoWlid)
}
func (w *Workload) RemoveCompatible() {
w.RemovePodAnnotation(cautils.ArmoCompatibleAnnotation)
}
func (w *Workload) RemoveJobID() {
w.RemovePodAnnotation(cautils.ArmoJobIDPath)
w.RemovePodAnnotation(cautils.ArmoJobParentPath)
w.RemovePodAnnotation(cautils.ArmoJobActionPath)
w.RemoveAnnotation(cautils.ArmoJobIDPath)
w.RemoveAnnotation(cautils.ArmoJobParentPath)
w.RemoveAnnotation(cautils.ArmoJobActionPath)
}
func (w *Workload) RemoveArmoMetadata() {
w.RemoveArmoLabels()
w.RemoveArmoAnnotations()
}
func (w *Workload) RemoveArmoAnnotations() {
l := w.GetAnnotations()
if l != nil {
for k := range l {
if strings.HasPrefix(k, cautils.ArmoPrefix) {
w.RemoveAnnotation(k)
}
if strings.HasPrefix(k, cautils.CAPrefix) { // DEPRECATED
w.RemoveAnnotation(k)
}
}
}
lp := w.GetPodAnnotations()
if lp != nil {
for k := range lp {
if strings.HasPrefix(k, cautils.ArmoPrefix) {
w.RemovePodAnnotation(k)
}
if strings.HasPrefix(k, cautils.CAPrefix) { // DEPRECATED
w.RemovePodAnnotation(k)
}
}
}
}
func (w *Workload) RemoveArmoLabels() {
l := w.GetLabels()
if l != nil {
for k := range l {
if strings.HasPrefix(k, cautils.ArmoPrefix) {
w.RemoveLabel(k)
}
if strings.HasPrefix(k, cautils.CAPrefix) { // DEPRECATED
w.RemoveLabel(k)
}
}
}
lp := w.GetPodLabels()
if lp != nil {
for k := range lp {
if strings.HasPrefix(k, cautils.ArmoPrefix) {
w.RemovePodLabel(k)
}
if strings.HasPrefix(k, cautils.CAPrefix) { // DEPRECATED
w.RemovePodLabel(k)
}
}
}
}
func (w *Workload) RemoveUpdateTime() {
// remove from pod
w.RemovePodAnnotation(cautils.CAUpdate) // DEPRECATED
w.RemovePodAnnotation(cautils.ArmoUpdate)
// remove from workload
w.RemoveAnnotation(cautils.CAUpdate) // DEPRECATED
w.RemoveAnnotation(cautils.ArmoUpdate)
}
func (w *Workload) RemoveSecretData() {
w.RemoveAnnotation("kubectl.kubernetes.io/last-applied-configuration")
delete(w.workload, "data")
}
func (w *Workload) RemovePodStatus() {
delete(w.workload, "status")
}
func (w *Workload) RemoveResourceVersion() {
if _, ok := w.workload["metadata"]; !ok {
return
}
meta, _ := w.workload["metadata"].(map[string]interface{})
delete(meta, "resourceVersion")
}
func (w *Workload) RemoveLabel(key string) {
w.RemoveMetadata([]string{"metadata"}, "labels", key)
}
func (w *Workload) RemoveAnnotation(key string) {
w.RemoveMetadata([]string{"metadata"}, "annotations", key)
}
func (w *Workload) RemovePodAnnotation(key string) {
w.RemoveMetadata(PodMetadata(w.GetKind()), "annotations", key)
}
func (w *Workload) RemovePodLabel(key string) {
w.RemoveMetadata(PodMetadata(w.GetKind()), "labels", key)
}
func (w *Workload) RemoveMetadata(scope []string, metadata, key string) {
workload := w.workload
for i := range scope {
if _, ok := workload[scope[i]]; !ok {
return
}
workload, _ = workload[scope[i]].(map[string]interface{})
}
if _, ok := workload[metadata]; !ok {
return
}
labels, _ := workload[metadata].(map[string]interface{})
delete(labels, key)
}
// ========================================= SET =========================================
func (w *Workload) SetWorkload(workload map[string]interface{}) {
w.workload = workload
}
func (w *Workload) SetKind(kind string) {
w.workload["kind"] = kind
}
func (w *Workload) SetInject() {
w.SetPodLabel(cautils.ArmoAttach, cautils.BoolToString(true))
}
func (w *Workload) SetJobID(jobTracking apis.JobTracking) {
w.SetPodAnnotation(cautils.ArmoJobIDPath, jobTracking.JobID)
w.SetPodAnnotation(cautils.ArmoJobParentPath, jobTracking.ParentID)
w.SetPodAnnotation(cautils.ArmoJobActionPath, fmt.Sprintf("%d", jobTracking.LastActionNumber))
}
func (w *Workload) SetIgnore() {
w.SetPodLabel(cautils.ArmoAttach, cautils.BoolToString(false))
}
func (w *Workload) SetCompatible() {
w.SetPodAnnotation(cautils.ArmoCompatibleAnnotation, cautils.BoolToString(true))
}
func (w *Workload) SetIncompatible() {
w.SetPodAnnotation(cautils.ArmoCompatibleAnnotation, cautils.BoolToString(false))
}
func (w *Workload) SetReplaceheaders() {
w.SetPodAnnotation(cautils.ArmoReplaceheaders, cautils.BoolToString(true))
}
func (w *Workload) SetWlid(wlid string) {
w.SetPodAnnotation(cautils.ArmoWlid, wlid)
}
func (w *Workload) SetUpdateTime() {
w.SetPodAnnotation(cautils.ArmoUpdate, string(time.Now().UTC().Format("02-01-2006 15:04:05")))
}
func (w *Workload) SetNamespace(namespace string) {
w.SetMetadata([]string{"metadata"}, "namespace", namespace)
}
func (w *Workload) SetName(name string) {
w.SetMetadata([]string{"metadata"}, "name", name)
}
func (w *Workload) SetLabel(key, value string) {
w.SetMetadata([]string{"metadata", "labels"}, key, value)
}
func (w *Workload) SetPodLabel(key, value string) {
w.SetMetadata(append(PodMetadata(w.GetKind()), "labels"), key, value)
}
func (w *Workload) SetAnnotation(key, value string) {
w.SetMetadata([]string{"metadata", "annotations"}, key, value)
}
func (w *Workload) SetPodAnnotation(key, value string) {
w.SetMetadata(append(PodMetadata(w.GetKind()), "annotations"), key, value)
}
func (w *Workload) SetMetadata(scope []string, key string, val interface{}) {
workload := w.workload
for i := range scope {
if _, ok := workload[scope[i]]; !ok {
workload[scope[i]] = make(map[string]interface{})
}
workload, _ = workload[scope[i]].(map[string]interface{})
}
workload[key] = val
}
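SetMetadata is the single write path for all the setters above: it walks (and creates) the nested maps named by `scope`, then assigns the key. A self-contained sketch; the scope and annotation used here are what PodMetadata presumably yields for a Deployment and are shown for illustration only:

```go
package main

import (
	"encoding/json"
	"os"
)

// setMetadata mirrors SetMetadata above: descend through the scope keys,
// creating intermediate maps as needed, then set the final key.
func setMetadata(workload map[string]interface{}, scope []string, key string, val interface{}) {
	for _, s := range scope {
		if _, ok := workload[s]; !ok {
			workload[s] = make(map[string]interface{})
		}
		workload, _ = workload[s].(map[string]interface{})
	}
	workload[key] = val
}

func main() {
	w := map[string]interface{}{"kind": "Deployment"}
	// Illustrative equivalent of SetPodAnnotation on a Deployment, assuming
	// PodMetadata resolves to spec.template.metadata for this kind.
	setMetadata(w, []string{"spec", "template", "metadata", "annotations"},
		"armo.wlid", "wlid://cluster-test/namespace-default/deployment-nginx")
	json.NewEncoder(os.Stdout).Encode(w)
}
```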
// ========================================= GET =========================================
func (w *Workload) GetWorkload() map[string]interface{} {
return w.workload
}
func (w *Workload) GetNamespace() string {
if v, ok := InspectWorkload(w.workload, "metadata", "namespace"); ok {
return v.(string)
}
return ""
}
func (w *Workload) GetName() string {
if v, ok := InspectWorkload(w.workload, "metadata", "name"); ok {
return v.(string)
}
return ""
}
func (w *Workload) GetApiVersion() string {
if v, ok := InspectWorkload(w.workload, "apiVersion"); ok {
return v.(string)
}
return ""
}
func (w *Workload) GetVersion() string {
apiVersion := w.GetApiVersion()
splitted := strings.Split(apiVersion, "/")
if len(splitted) == 1 {
return splitted[0]
} else if len(splitted) == 2 {
return splitted[1]
}
return ""
}
func (w *Workload) GetGroup() string {
apiVersion := w.GetApiVersion()
splitted := strings.Split(apiVersion, "/")
if len(splitted) == 2 {
return splitted[0]
}
return ""
}
func (w *Workload) GetGenerateName() string {
if v, ok := InspectWorkload(w.workload, "metadata", "generateName"); ok {
return v.(string)
}
return ""
}
func (w *Workload) GetReplicas() int {
if v, ok := InspectWorkload(w.workload, "spec", "replicas"); ok {
replicas, isok := v.(float64)
if isok {
return int(replicas)
}
}
return 1
}
func (w *Workload) GetKind() string {
if v, ok := InspectWorkload(w.workload, "kind"); ok {
return v.(string)
}
return ""
}
func (w *Workload) GetSelector() (*metav1.LabelSelector, error) {
selector := &metav1.LabelSelector{}
if v, ok := InspectWorkload(w.workload, "spec", "selector", "matchLabels"); ok && v != nil {
b, err := json.Marshal(v)
if err != nil {
return selector, err
}
if err := json.Unmarshal(b, selector); err != nil {
return selector, err
}
return selector, nil
}
return selector, nil
}
func (w *Workload) GetAnnotation(annotation string) (string, bool) {
if v, ok := InspectWorkload(w.workload, "metadata", "annotations", annotation); ok {
return v.(string), ok
}
return "", false
}
func (w *Workload) GetLabel(label string) (string, bool) {
if v, ok := InspectWorkload(w.workload, "metadata", "labels", label); ok {
return v.(string), ok
}
return "", false
}
func (w *Workload) GetPodLabel(label string) (string, bool) {
if v, ok := InspectWorkload(w.workload, append(PodMetadata(w.GetKind()), "labels", label)...); ok && v != nil {
return v.(string), ok
}
return "", false
}
func (w *Workload) GetLabels() map[string]string {
if v, ok := InspectWorkload(w.workload, "metadata", "labels"); ok && v != nil {
labels := make(map[string]string)
for k, i := range v.(map[string]interface{}) {
labels[k] = i.(string)
}
return labels
}
return nil
}
// GetInnerLabels - DEPRECATED
func (w *Workload) GetInnerLabels() map[string]string {
return w.GetPodLabels()
}
func (w *Workload) GetPodLabels() map[string]string {
if v, ok := InspectWorkload(w.workload, append(PodMetadata(w.GetKind()), "labels")...); ok && v != nil {
labels := make(map[string]string)
for k, i := range v.(map[string]interface{}) {
labels[k] = i.(string)
}
return labels
}
return nil
}
// GetInnerAnnotations - DEPRECATED
func (w *Workload) GetInnerAnnotations() map[string]string {
return w.GetPodAnnotations()
}
// GetPodAnnotations
func (w *Workload) GetPodAnnotations() map[string]string {
if v, ok := InspectWorkload(w.workload, append(PodMetadata(w.GetKind()), "annotations")...); ok && v != nil {
annotations := make(map[string]string)
for k, i := range v.(map[string]interface{}) {
annotations[k] = fmt.Sprintf("%v", i)
}
return annotations
}
return nil
}
// GetInnerAnnotation DEPRECATED
func (w *Workload) GetInnerAnnotation(annotation string) (string, bool) {
return w.GetPodAnnotation(annotation)
}
func (w *Workload) GetPodAnnotation(annotation string) (string, bool) {
if v, ok := InspectWorkload(w.workload, append(PodMetadata(w.GetKind()), "annotations", annotation)...); ok && v != nil {
return v.(string), ok
}
return "", false
}
func (w *Workload) GetAnnotations() map[string]string {
if v, ok := InspectWorkload(w.workload, "metadata", "annotations"); ok && v != nil {
annotations := make(map[string]string)
for k, i := range v.(map[string]interface{}) {
annotations[k] = fmt.Sprintf("%v", i)
}
return annotations
}
return nil
}
// GetVolumes -
func (w *Workload) GetVolumes() ([]corev1.Volume, error) {
volumes := []corev1.Volume{}
interVolumes, _ := InspectWorkload(w.workload, append(PodSpec(w.GetKind()), "volumes")...)
if interVolumes == nil {
return volumes, nil
}
volumesBytes, err := json.Marshal(interVolumes)
if err != nil {
return volumes, err
}
err = json.Unmarshal(volumesBytes, &volumes)
return volumes, err
}
func (w *Workload) GetServiceAccountName() string {
if v, ok := InspectWorkload(w.workload, append(PodSpec(w.GetKind()), "serviceAccountName")...); ok && v != nil {
return v.(string)
}
return ""
}
func (w *Workload) GetPodSpec() (*corev1.PodSpec, error) {
podSpec := &corev1.PodSpec{}
podSepcRaw, _ := InspectWorkload(w.workload, PodSpec(w.GetKind())...)
if podSepcRaw == nil {
return podSpec, fmt.Errorf("no PodSpec for workload: %v", w)
}
b, err := json.Marshal(podSepcRaw)
if err != nil {
return podSpec, err
}
err = json.Unmarshal(b, podSpec)
return podSpec, err
}
func (w *Workload) GetImagePullSecret() ([]corev1.LocalObjectReference, error) {
imgPullSecrets := []corev1.LocalObjectReference{}
iImgPullSecrets, _ := InspectWorkload(w.workload, append(PodSpec(w.GetKind()), "imagePullSecrets")...)
b, err := json.Marshal(iImgPullSecrets)
if err != nil {
return imgPullSecrets, err
}
err = json.Unmarshal(b, &imgPullSecrets)
return imgPullSecrets, err
}
// GetContainers -
func (w *Workload) GetContainers() ([]corev1.Container, error) {
containers := []corev1.Container{}
interContainers, _ := InspectWorkload(w.workload, append(PodSpec(w.GetKind()), "containers")...)
if interContainers == nil {
return containers, nil
}
containersBytes, err := json.Marshal(interContainers)
if err != nil {
return containers, err
}
err = json.Unmarshal(containersBytes, &containers)
return containers, err
}
// GetInitContainers -
func (w *Workload) GetInitContainers() ([]corev1.Container, error) {
containers := []corev1.Container{}
interContainers, _ := InspectWorkload(w.workload, append(PodSpec(w.GetKind()), "initContainers")...)
if interContainers == nil {
return containers, nil
}
containersBytes, err := json.Marshal(interContainers)
if err != nil {
return containers, err
}
err = json.Unmarshal(containersBytes, &containers)
return containers, err
}
// GetOwnerReferences -
func (w *Workload) GetOwnerReferences() ([]metav1.OwnerReference, error) {
ownerReferences := []metav1.OwnerReference{}
interOwnerReferences, ok := InspectWorkload(w.workload, "metadata", "ownerReferences")
if !ok {
return ownerReferences, nil
}
ownerReferencesBytes, err := json.Marshal(interOwnerReferences)
if err != nil {
return ownerReferences, err
}
err = json.Unmarshal(ownerReferencesBytes, &ownerReferences)
if err != nil {
return ownerReferences, err
}
return ownerReferences, nil
}
func (w *Workload) GetResourceVersion() string {
if v, ok := InspectWorkload(w.workload, "metadata", "resourceVersion"); ok {
return v.(string)
}
return ""
}
func (w *Workload) GetUID() string {
if v, ok := InspectWorkload(w.workload, "metadata", "uid"); ok {
return v.(string)
}
return ""
}
func (w *Workload) GetWlid() string {
if wlid, ok := w.GetAnnotation(cautils.ArmoWlid); ok {
return wlid
}
return ""
}
func (w *Workload) GetJobID() *apis.JobTracking {
jobTracking := apis.JobTracking{}
if job, ok := w.GetPodAnnotation(cautils.ArmoJobIDPath); ok {
jobTracking.JobID = job
}
if parent, ok := w.GetPodAnnotation(cautils.ArmoJobParentPath); ok {
jobTracking.ParentID = parent
}
if action, ok := w.GetPodAnnotation(cautils.ArmoJobActionPath); ok {
if i, err := strconv.Atoi(action); err == nil {
jobTracking.LastActionNumber = i
}
}
if jobTracking.LastActionNumber == 0 { // start the counter at 1
jobTracking.LastActionNumber = 1
}
return &jobTracking
}
// func (w *Workload) GetJobID() string {
// if status, ok := w.GetAnnotation(cautils.ArmoJobID); ok {
// return status
// }
// return ""
// }
// ========================================= IS =========================================
func (w *Workload) IsInject() bool {
return w.IsAttached()
}
func (w *Workload) IsIgnore() bool {
if attach := cautils.IsAttached(w.GetPodLabels()); attach != nil {
return !(*attach)
}
if attach := cautils.IsAttached(w.GetLabels()); attach != nil {
return !(*attach)
}
return false
}
func (w *Workload) IsCompatible() bool {
if c, ok := w.GetPodAnnotation(cautils.ArmoCompatibleAnnotation); ok {
return cautils.StringToBool(c)
}
if c, ok := w.GetAnnotation(cautils.ArmoCompatibleAnnotation); ok {
return cautils.StringToBool(c)
}
return false
}
func (w *Workload) IsIncompatible() bool {
if c, ok := w.GetPodAnnotation(cautils.ArmoCompatibleAnnotation); ok {
return !cautils.StringToBool(c)
}
if c, ok := w.GetAnnotation(cautils.ArmoCompatibleAnnotation); ok {
return !cautils.StringToBool(c)
}
return false
}
func (w *Workload) IsAttached() bool {
if attach := cautils.IsAttached(w.GetPodLabels()); attach != nil {
return *attach
}
if attach := cautils.IsAttached(w.GetLabels()); attach != nil {
return *attach
}
return false
}
func (w *Workload) IsReplaceheaders() bool {
if c, ok := w.GetPodAnnotation(cautils.ArmoReplaceheaders); ok {
return cautils.StringToBool(c)
}
return false
}
// ======================================= UTILS =========================================
// InspectWorkload -
func InspectWorkload(workload interface{}, scopes ...string) (val interface{}, k bool) {
val, k = nil, false
if len(scopes) == 0 {
if workload != nil {
return workload, true
}
return nil, false
}
if data, ok := workload.(map[string]interface{}); ok {
val, k = InspectWorkload(data[scopes[0]], scopes[1:]...)
}
return val, k
}
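A standalone mirror of the lookup, useful for seeing how the getters above behave on missing keys (the workload content is fabricated):

```go
package main

import "fmt"

// inspect mirrors InspectWorkload above: walk a nested map one scope key at a
// time and report whether the leaf exists.
func inspect(workload interface{}, scopes ...string) (interface{}, bool) {
	if len(scopes) == 0 {
		return workload, workload != nil
	}
	if data, ok := workload.(map[string]interface{}); ok {
		return inspect(data[scopes[0]], scopes[1:]...)
	}
	return nil, false
}

func main() {
	w := map[string]interface{}{
		"metadata": map[string]interface{}{"name": "demoservice-server"},
	}
	if v, ok := inspect(w, "metadata", "name"); ok {
		fmt.Println("name:", v) // demoservice-server
	}
	if _, ok := inspect(w, "metadata", "labels", "app"); !ok {
		fmt.Println("label not set")
	}
}
```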


@@ -1,155 +0,0 @@
package k8sinterface
import (
"testing"
)
// ========================================= IS =========================================
func TestLabels(t *testing.T) {
w := `{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{"deployment.kubernetes.io/revision":"1"},"creationTimestamp":"2021-05-03T13:10:32Z","generation":1,"labels":{"app":"demoservice-server","cyberarmor.inject":"true"},"managedFields":[{"apiVersion":"apps/v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:labels":{".":{},"f:app":{},"f:cyberarmor.inject":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:app":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"demoservice\"}":{".":{},"f:env":{".":{},"k:{\"name\":\"ARMO_TEST_NAME\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"CAA_ENABLE_CRASH_REPORTER\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"DEMO_FOLDERS\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"SERVER_PORT\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"SLEEP_DURATION\"}":{".":{},"f:name":{},"f:value":{}}},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:ports":{".":{},"k:{\"containerPort\":8089,\"protocol\":\"TCP\"}":{".":{},"f:containerPort":{},"f:protocol":{}}},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}},"manager":"OpenAPI-Generator","operation":"Update","time":"2021-05-03T13:10:32Z"},{"apiVersion":"apps/v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:availableReplicas":{},"f:conditions":{".":{},"k:{\"type\":\"Available\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{},"f:updatedReplicas":{}}},"manager":"kube-controller-manager","operation":"Update","time":"2021-05-03T13:52:58Z"}],"name":"demoservice-server","namespace":"default","resourceVersion":"1016043","uid":"e9e8a3e9-6cb4-4301-ace1-2c0cef3bd61e"},"spec":{"progressDeadlineSeconds":600,"replicas":1,"revisionHistoryLimit":10,"selector":{"matchLabels":{"app":"demoservice-server"}},"strategy":{"rollingUpdate":{"maxSurge":"25%","maxUnavailable":"25%"},"type":"RollingUpdate"},"template":{"metadata":{"creationTimestamp":null,"labels":{"app":"demoservice-server"}},"spec":{"containers":[{"env":[{"name":"SERVER_PORT","value":"8089"},{"name":"SLEEP_DURATION","value":"1"},{"name":"DEMO_FOLDERS","value":"/app"},{"name":"ARMO_TEST_NAME","value":"auto_attach_deployment"},{"name":"CAA_ENABLE_CRASH_REPORTER","value":"1"}],"image":"quay.io/armosec/demoservice:v25","imagePullPolicy":"IfNotPresent","name":"demoservice","ports":[{"containerPort":8089,"protocol":"TCP"}],"resources":{},"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File"}],"dnsPolicy":"ClusterFirst","restartPolicy":"Always","schedulerName":"default-scheduler","securityContext":{},"terminationGracePeriodSeconds":30}}},"status":{"availableReplicas":1,"conditions":[{"lastTransitionTime":"2021-05-03T13:10:32Z","lastUpdateTime":"2021-05-03T13:10:37Z","message":"ReplicaSet \"demoservice-server-7d478b6998\" has successfully 
progressed.","reason":"NewReplicaSetAvailable","status":"True","type":"Progressing"},{"lastTransitionTime":"2021-05-03T13:52:58Z","lastUpdateTime":"2021-05-03T13:52:58Z","message":"Deployment has minimum availability.","reason":"MinimumReplicasAvailable","status":"True","type":"Available"}],"observedGeneration":1,"readyReplicas":1,"replicas":1,"updatedReplicas":1}}`
workload, err := NewWorkload([]byte(w))
if err != nil {
t.Errorf(err.Error())
}
if workload.GetKind() != "Deployment" {
t.Errorf("wrong kind")
}
if workload.GetNamespace() != "default" {
t.Errorf("wrong namespace")
}
if workload.GetName() != "demoservice-server" {
t.Errorf("wrong name")
}
if !workload.IsInject() {
t.Errorf("expect to find inject label")
}
if workload.IsIgnore() {
t.Errorf("expect to find ignore label")
}
}
func TestSetNamespace(t *testing.T) {
w := `{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"name":"demoservice-server"},"spec":{"replicas":1,"selector":{"matchLabels":{"app":"demoservice-server"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app":"demoservice-server"}},"spec":{"containers":[{"env":[{"name":"SERVER_PORT","value":"8089"},{"name":"SLEEP_DURATION","value":"1"},{"name":"DEMO_FOLDERS","value":"/app"},{"name":"ARMO_TEST_NAME","value":"auto_attach_deployment"},{"name":"CAA_ENABLE_CRASH_REPORTER","value":"1"}],"image":"quay.io/armosec/demoservice:v25","imagePullPolicy":"IfNotPresent","name":"demoservice","ports":[{"containerPort":8089,"protocol":"TCP"}],"resources":{},"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File"}],"dnsPolicy":"ClusterFirst","restartPolicy":"Always","schedulerName":"default-scheduler","securityContext":{},"terminationGracePeriodSeconds":30}}}}`
workload, err := NewWorkload([]byte(w))
if err != nil {
t.Errorf(err.Error())
}
workload.SetNamespace("default")
if workload.GetNamespace() != "default" {
t.Errorf("wrong namespace")
}
}
func TestSetLabels(t *testing.T) {
w := `{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{"deployment.kubernetes.io/revision":"1"},"creationTimestamp":"2021-05-03T13:10:32Z","generation":1,"managedFields":[{"apiVersion":"apps/v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:labels":{".":{},"f:app":{},"f:cyberarmor.inject":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:app":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"demoservice\"}":{".":{},"f:env":{".":{},"k:{\"name\":\"ARMO_TEST_NAME\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"CAA_ENABLE_CRASH_REPORTER\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"DEMO_FOLDERS\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"SERVER_PORT\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"SLEEP_DURATION\"}":{".":{},"f:name":{},"f:value":{}}},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:ports":{".":{},"k:{\"containerPort\":8089,\"protocol\":\"TCP\"}":{".":{},"f:containerPort":{},"f:protocol":{}}},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}},"manager":"OpenAPI-Generator","operation":"Update","time":"2021-05-03T13:10:32Z"},{"apiVersion":"apps/v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:availableReplicas":{},"f:conditions":{".":{},"k:{\"type\":\"Available\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{},"f:updatedReplicas":{}}},"manager":"kube-controller-manager","operation":"Update","time":"2021-05-03T13:52:58Z"}],"name":"demoservice-server","namespace":"default","resourceVersion":"1016043","uid":"e9e8a3e9-6cb4-4301-ace1-2c0cef3bd61e"},"spec":{"progressDeadlineSeconds":600,"replicas":1,"revisionHistoryLimit":10,"selector":{"matchLabels":{"app":"demoservice-server"}},"strategy":{"rollingUpdate":{"maxSurge":"25%","maxUnavailable":"25%"},"type":"RollingUpdate"},"template":{"metadata":{"creationTimestamp":null,"labels":{"app":"demoservice-server"}},"spec":{"containers":[{"env":[{"name":"SERVER_PORT","value":"8089"},{"name":"SLEEP_DURATION","value":"1"},{"name":"DEMO_FOLDERS","value":"/app"},{"name":"ARMO_TEST_NAME","value":"auto_attach_deployment"},{"name":"CAA_ENABLE_CRASH_REPORTER","value":"1"}],"image":"quay.io/armosec/demoservice:v25","imagePullPolicy":"IfNotPresent","name":"demoservice","ports":[{"containerPort":8089,"protocol":"TCP"}],"resources":{},"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File"}],"dnsPolicy":"ClusterFirst","restartPolicy":"Always","schedulerName":"default-scheduler","securityContext":{},"terminationGracePeriodSeconds":30}}},"status":{"availableReplicas":1,"conditions":[{"lastTransitionTime":"2021-05-03T13:10:32Z","lastUpdateTime":"2021-05-03T13:10:37Z","message":"ReplicaSet \"demoservice-server-7d478b6998\" has successfully 
progressed.","reason":"NewReplicaSetAvailable","status":"True","type":"Progressing"},{"lastTransitionTime":"2021-05-03T13:52:58Z","lastUpdateTime":"2021-05-03T13:52:58Z","message":"Deployment has minimum availability.","reason":"MinimumReplicasAvailable","status":"True","type":"Available"}],"observedGeneration":1,"readyReplicas":1,"replicas":1,"updatedReplicas":1}}`
workload, err := NewWorkload([]byte(w))
if err != nil {
t.Errorf(err.Error())
}
workload.SetLabel("bla", "daa")
v, ok := workload.GetLabel("bla")
if !ok || v != "daa" {
t.Errorf("expect to find label")
}
workload.RemoveLabel("bla")
v2, ok2 := workload.GetLabel("bla")
if ok2 || v2 == "daa" {
t.Errorf("label not deleted")
}
}
func TestSetAnnotations(t *testing.T) {
w := `{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{"deployment.kubernetes.io/revision":"1"},"creationTimestamp":"2021-05-03T13:10:32Z","generation":1,"managedFields":[{"apiVersion":"apps/v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:labels":{".":{},"f:app":{},"f:cyberarmor.inject":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:app":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"demoservice\"}":{".":{},"f:env":{".":{},"k:{\"name\":\"ARMO_TEST_NAME\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"CAA_ENABLE_CRASH_REPORTER\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"DEMO_FOLDERS\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"SERVER_PORT\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"SLEEP_DURATION\"}":{".":{},"f:name":{},"f:value":{}}},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:ports":{".":{},"k:{\"containerPort\":8089,\"protocol\":\"TCP\"}":{".":{},"f:containerPort":{},"f:protocol":{}}},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}},"manager":"OpenAPI-Generator","operation":"Update","time":"2021-05-03T13:10:32Z"},{"apiVersion":"apps/v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:availableReplicas":{},"f:conditions":{".":{},"k:{\"type\":\"Available\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{},"f:updatedReplicas":{}}},"manager":"kube-controller-manager","operation":"Update","time":"2021-05-03T13:52:58Z"}],"name":"demoservice-server","namespace":"default","resourceVersion":"1016043","uid":"e9e8a3e9-6cb4-4301-ace1-2c0cef3bd61e"},"spec":{"progressDeadlineSeconds":600,"replicas":1,"revisionHistoryLimit":10,"selector":{"matchLabels":{"app":"demoservice-server"}},"strategy":{"rollingUpdate":{"maxSurge":"25%","maxUnavailable":"25%"},"type":"RollingUpdate"},"template":{"metadata":{"creationTimestamp":null,"labels":{"app":"demoservice-server"}},"spec":{"containers":[{"env":[{"name":"SERVER_PORT","value":"8089"},{"name":"SLEEP_DURATION","value":"1"},{"name":"DEMO_FOLDERS","value":"/app"},{"name":"ARMO_TEST_NAME","value":"auto_attach_deployment"},{"name":"CAA_ENABLE_CRASH_REPORTER","value":"1"}],"image":"quay.io/armosec/demoservice:v25","imagePullPolicy":"IfNotPresent","name":"demoservice","ports":[{"containerPort":8089,"protocol":"TCP"}],"resources":{},"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File"}],"dnsPolicy":"ClusterFirst","restartPolicy":"Always","schedulerName":"default-scheduler","securityContext":{},"terminationGracePeriodSeconds":30}}},"status":{"availableReplicas":1,"conditions":[{"lastTransitionTime":"2021-05-03T13:10:32Z","lastUpdateTime":"2021-05-03T13:10:37Z","message":"ReplicaSet \"demoservice-server-7d478b6998\" has successfully 
progressed.","reason":"NewReplicaSetAvailable","status":"True","type":"Progressing"},{"lastTransitionTime":"2021-05-03T13:52:58Z","lastUpdateTime":"2021-05-03T13:52:58Z","message":"Deployment has minimum availability.","reason":"MinimumReplicasAvailable","status":"True","type":"Available"}],"observedGeneration":1,"readyReplicas":1,"replicas":1,"updatedReplicas":1}}`
workload, err := NewWorkload([]byte(w))
if err != nil {
t.Fatal(err)
}
workload.SetAnnotation("bla", "daa")
v, ok := workload.GetAnnotation("bla")
if !ok || v != "daa" {
t.Errorf("expect to find annotation")
}
workload.RemoveAnnotation("bla")
v2, ok2 := workload.GetAnnotation("bla")
if ok2 || v2 == "daa" {
t.Errorf("annotation not deleted")
}
}
func TestSetPodLabels(t *testing.T) {
w := `{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{"deployment.kubernetes.io/revision":"1"},"creationTimestamp":"2021-05-03T13:10:32Z","generation":1,"managedFields":[{"apiVersion":"apps/v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:labels":{".":{},"f:app":{},"f:cyberarmor.inject":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:app":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"demoservice\"}":{".":{},"f:env":{".":{},"k:{\"name\":\"ARMO_TEST_NAME\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"CAA_ENABLE_CRASH_REPORTER\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"DEMO_FOLDERS\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"SERVER_PORT\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"SLEEP_DURATION\"}":{".":{},"f:name":{},"f:value":{}}},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:ports":{".":{},"k:{\"containerPort\":8089,\"protocol\":\"TCP\"}":{".":{},"f:containerPort":{},"f:protocol":{}}},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}},"manager":"OpenAPI-Generator","operation":"Update","time":"2021-05-03T13:10:32Z"},{"apiVersion":"apps/v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:availableReplicas":{},"f:conditions":{".":{},"k:{\"type\":\"Available\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{},"f:updatedReplicas":{}}},"manager":"kube-controller-manager","operation":"Update","time":"2021-05-03T13:52:58Z"}],"name":"demoservice-server","namespace":"default","resourceVersion":"1016043","uid":"e9e8a3e9-6cb4-4301-ace1-2c0cef3bd61e"},"spec":{"progressDeadlineSeconds":600,"replicas":1,"revisionHistoryLimit":10,"selector":{"matchLabels":{"app":"demoservice-server"}},"strategy":{"rollingUpdate":{"maxSurge":"25%","maxUnavailable":"25%"},"type":"RollingUpdate"},"template":{"metadata":{"creationTimestamp":null,"labels":{"app":"demoservice-server"}},"spec":{"containers":[{"env":[{"name":"SERVER_PORT","value":"8089"},{"name":"SLEEP_DURATION","value":"1"},{"name":"DEMO_FOLDERS","value":"/app"},{"name":"ARMO_TEST_NAME","value":"auto_attach_deployment"},{"name":"CAA_ENABLE_CRASH_REPORTER","value":"1"}],"image":"quay.io/armosec/demoservice:v25","imagePullPolicy":"IfNotPresent","name":"demoservice","ports":[{"containerPort":8089,"protocol":"TCP"}],"resources":{},"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File"}],"dnsPolicy":"ClusterFirst","restartPolicy":"Always","schedulerName":"default-scheduler","securityContext":{},"terminationGracePeriodSeconds":30}}},"status":{"availableReplicas":1,"conditions":[{"lastTransitionTime":"2021-05-03T13:10:32Z","lastUpdateTime":"2021-05-03T13:10:37Z","message":"ReplicaSet \"demoservice-server-7d478b6998\" has successfully 
progressed.","reason":"NewReplicaSetAvailable","status":"True","type":"Progressing"},{"lastTransitionTime":"2021-05-03T13:52:58Z","lastUpdateTime":"2021-05-03T13:52:58Z","message":"Deployment has minimum availability.","reason":"MinimumReplicasAvailable","status":"True","type":"Available"}],"observedGeneration":1,"readyReplicas":1,"replicas":1,"updatedReplicas":1}}`
workload, err := NewWorkload([]byte(w))
if err != nil {
t.Fatal(err)
}
workload.SetPodLabel("bla", "daa")
v, ok := workload.GetPodLabel("bla")
if !ok || v != "daa" {
t.Errorf("expect to find label")
}
workload.RemovePodLabel("bla")
v2, ok2 := workload.GetPodLabel("bla")
if ok2 || v2 == "daa" {
t.Errorf("label not deleted")
}
}
func TestRemoveArmo(t *testing.T) {
w := `{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{"deployment.kubernetes.io/revision":"1"},"creationTimestamp":"2021-05-03T13:10:32Z","generation":1,"managedFields":[{"apiVersion":"apps/v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:labels":{".":{},"f:app":{},"f:cyberarmor.inject":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:app":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"demoservice\"}":{".":{},"f:env":{".":{},"k:{\"name\":\"ARMO_TEST_NAME\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"CAA_ENABLE_CRASH_REPORTER\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"DEMO_FOLDERS\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"SERVER_PORT\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"SLEEP_DURATION\"}":{".":{},"f:name":{},"f:value":{}}},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:ports":{".":{},"k:{\"containerPort\":8089,\"protocol\":\"TCP\"}":{".":{},"f:containerPort":{},"f:protocol":{}}},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}},"manager":"OpenAPI-Generator","operation":"Update","time":"2021-05-03T13:10:32Z"},{"apiVersion":"apps/v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:availableReplicas":{},"f:conditions":{".":{},"k:{\"type\":\"Available\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{},"f:updatedReplicas":{}}},"manager":"kube-controller-manager","operation":"Update","time":"2021-05-03T13:52:58Z"}],"name":"demoservice-server","namespace":"default","resourceVersion":"1016043","uid":"e9e8a3e9-6cb4-4301-ace1-2c0cef3bd61e"},"spec":{"progressDeadlineSeconds":600,"replicas":1,"revisionHistoryLimit":10,"selector":{"matchLabels":{"app":"demoservice-server"}},"strategy":{"rollingUpdate":{"maxSurge":"25%","maxUnavailable":"25%"},"type":"RollingUpdate"},"template":{"metadata":{"creationTimestamp":null,"labels":{"app":"demoservice-server", "armo.attach": "true"}},"spec":{"containers":[{"env":[{"name":"SERVER_PORT","value":"8089"},{"name":"SLEEP_DURATION","value":"1"},{"name":"DEMO_FOLDERS","value":"/app"},{"name":"ARMO_TEST_NAME","value":"auto_attach_deployment"},{"name":"CAA_ENABLE_CRASH_REPORTER","value":"1"}],"image":"quay.io/armosec/demoservice:v25","imagePullPolicy":"IfNotPresent","name":"demoservice","ports":[{"containerPort":8089,"protocol":"TCP"}],"resources":{},"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File"}],"dnsPolicy":"ClusterFirst","restartPolicy":"Always","schedulerName":"default-scheduler","securityContext":{},"terminationGracePeriodSeconds":30}}},"status":{"availableReplicas":1,"conditions":[{"lastTransitionTime":"2021-05-03T13:10:32Z","lastUpdateTime":"2021-05-03T13:10:37Z","message":"ReplicaSet \"demoservice-server-7d478b6998\" has successfully 
progressed.","reason":"NewReplicaSetAvailable","status":"True","type":"Progressing"},{"lastTransitionTime":"2021-05-03T13:52:58Z","lastUpdateTime":"2021-05-03T13:52:58Z","message":"Deployment has minimum availability.","reason":"MinimumReplicasAvailable","status":"True","type":"Available"}],"observedGeneration":1,"readyReplicas":1,"replicas":1,"updatedReplicas":1}}`
workload, err := NewWorkload([]byte(w))
if err != nil {
t.Fatal(err)
}
if !workload.IsAttached() {
t.Errorf("expected workload to be attached")
}
workload.RemoveArmoMetadata()
if workload.IsAttached() {
t.Errorf("expected Armo metadata to be removed")
}
}
func TestSetWlid(t *testing.T) {
w := `{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{"deployment.kubernetes.io/revision":"1"},"creationTimestamp":"2021-05-03T13:10:32Z","generation":1,"managedFields":[{"apiVersion":"apps/v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:labels":{".":{},"f:app":{},"f:cyberarmor.inject":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:app":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"demoservice\"}":{".":{},"f:env":{".":{},"k:{\"name\":\"ARMO_TEST_NAME\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"CAA_ENABLE_CRASH_REPORTER\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"DEMO_FOLDERS\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"SERVER_PORT\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"SLEEP_DURATION\"}":{".":{},"f:name":{},"f:value":{}}},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:ports":{".":{},"k:{\"containerPort\":8089,\"protocol\":\"TCP\"}":{".":{},"f:containerPort":{},"f:protocol":{}}},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}},"manager":"OpenAPI-Generator","operation":"Update","time":"2021-05-03T13:10:32Z"},{"apiVersion":"apps/v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:availableReplicas":{},"f:conditions":{".":{},"k:{\"type\":\"Available\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{},"f:updatedReplicas":{}}},"manager":"kube-controller-manager","operation":"Update","time":"2021-05-03T13:52:58Z"}],"name":"demoservice-server","namespace":"default","resourceVersion":"1016043","uid":"e9e8a3e9-6cb4-4301-ace1-2c0cef3bd61e"},"spec":{"progressDeadlineSeconds":600,"replicas":1,"revisionHistoryLimit":10,"selector":{"matchLabels":{"app":"demoservice-server"}},"strategy":{"rollingUpdate":{"maxSurge":"25%","maxUnavailable":"25%"},"type":"RollingUpdate"},"template":{"metadata":{"creationTimestamp":null,"labels":{"app":"demoservice-server"}},"spec":{"containers":[{"env":[{"name":"SERVER_PORT","value":"8089"},{"name":"SLEEP_DURATION","value":"1"},{"name":"DEMO_FOLDERS","value":"/app"},{"name":"ARMO_TEST_NAME","value":"auto_attach_deployment"},{"name":"CAA_ENABLE_CRASH_REPORTER","value":"1"}],"image":"quay.io/armosec/demoservice:v25","imagePullPolicy":"IfNotPresent","name":"demoservice","ports":[{"containerPort":8089,"protocol":"TCP"}],"resources":{},"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File"}],"dnsPolicy":"ClusterFirst","restartPolicy":"Always","schedulerName":"default-scheduler","securityContext":{},"terminationGracePeriodSeconds":30}}},"status":{"availableReplicas":1,"conditions":[{"lastTransitionTime":"2021-05-03T13:10:32Z","lastUpdateTime":"2021-05-03T13:10:37Z","message":"ReplicaSet \"demoservice-server-7d478b6998\" has successfully 
progressed.","reason":"NewReplicaSetAvailable","status":"True","type":"Progressing"},{"lastTransitionTime":"2021-05-03T13:52:58Z","lastUpdateTime":"2021-05-03T13:52:58Z","message":"Deployment has minimum availability.","reason":"MinimumReplicasAvailable","status":"True","type":"Available"}],"observedGeneration":1,"readyReplicas":1,"replicas":1,"updatedReplicas":1}}`
workload, err := NewWorkload([]byte(w))
if err != nil {
t.Fatal(err)
}
workload.SetWlid("wlid://bla")
// t.Errorf(workload.Json())
}
func TestGetResourceVersion(t *testing.T) {
w := `{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{"deployment.kubernetes.io/revision":"1"},"creationTimestamp":"2021-05-03T13:10:32Z","generation":1,"managedFields":[{"apiVersion":"apps/v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:labels":{".":{},"f:app":{},"f:cyberarmor.inject":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:app":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"demoservice\"}":{".":{},"f:env":{".":{},"k:{\"name\":\"ARMO_TEST_NAME\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"CAA_ENABLE_CRASH_REPORTER\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"DEMO_FOLDERS\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"SERVER_PORT\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"SLEEP_DURATION\"}":{".":{},"f:name":{},"f:value":{}}},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:ports":{".":{},"k:{\"containerPort\":8089,\"protocol\":\"TCP\"}":{".":{},"f:containerPort":{},"f:protocol":{}}},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}},"manager":"OpenAPI-Generator","operation":"Update","time":"2021-05-03T13:10:32Z"},{"apiVersion":"apps/v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:availableReplicas":{},"f:conditions":{".":{},"k:{\"type\":\"Available\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{},"f:updatedReplicas":{}}},"manager":"kube-controller-manager","operation":"Update","time":"2021-05-03T13:52:58Z"}],"name":"demoservice-server","namespace":"default","resourceVersion":"1016043","uid":"e9e8a3e9-6cb4-4301-ace1-2c0cef3bd61e"},"spec":{"progressDeadlineSeconds":600,"replicas":1,"revisionHistoryLimit":10,"selector":{"matchLabels":{"app":"demoservice-server"}},"strategy":{"rollingUpdate":{"maxSurge":"25%","maxUnavailable":"25%"},"type":"RollingUpdate"},"template":{"metadata":{"creationTimestamp":null,"labels":{"app":"demoservice-server"}},"spec":{"containers":[{"env":[{"name":"SERVER_PORT","value":"8089"},{"name":"SLEEP_DURATION","value":"1"},{"name":"DEMO_FOLDERS","value":"/app"},{"name":"ARMO_TEST_NAME","value":"auto_attach_deployment"},{"name":"CAA_ENABLE_CRASH_REPORTER","value":"1"}],"image":"quay.io/armosec/demoservice:v25","imagePullPolicy":"IfNotPresent","name":"demoservice","ports":[{"containerPort":8089,"protocol":"TCP"}],"resources":{},"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File"}],"dnsPolicy":"ClusterFirst","restartPolicy":"Always","schedulerName":"default-scheduler","securityContext":{},"terminationGracePeriodSeconds":30}}},"status":{"availableReplicas":1,"conditions":[{"lastTransitionTime":"2021-05-03T13:10:32Z","lastUpdateTime":"2021-05-03T13:10:37Z","message":"ReplicaSet \"demoservice-server-7d478b6998\" has successfully 
progressed.","reason":"NewReplicaSetAvailable","status":"True","type":"Progressing"},{"lastTransitionTime":"2021-05-03T13:52:58Z","lastUpdateTime":"2021-05-03T13:52:58Z","message":"Deployment has minimum availability.","reason":"MinimumReplicasAvailable","status":"True","type":"Available"}],"observedGeneration":1,"readyReplicas":1,"replicas":1,"updatedReplicas":1}}`
workload, err := NewWorkload([]byte(w))
if err != nil {
t.Fatal(err)
}
if workload.GetResourceVersion() != "1016043" {
t.Errorf("wrong resourceVersion")
}
}
func TestGetUID(t *testing.T) {
w := `{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{"deployment.kubernetes.io/revision":"1"},"creationTimestamp":"2021-05-03T13:10:32Z","generation":1,"managedFields":[{"apiVersion":"apps/v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:labels":{".":{},"f:app":{},"f:cyberarmor.inject":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:app":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"demoservice\"}":{".":{},"f:env":{".":{},"k:{\"name\":\"ARMO_TEST_NAME\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"CAA_ENABLE_CRASH_REPORTER\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"DEMO_FOLDERS\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"SERVER_PORT\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"SLEEP_DURATION\"}":{".":{},"f:name":{},"f:value":{}}},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:ports":{".":{},"k:{\"containerPort\":8089,\"protocol\":\"TCP\"}":{".":{},"f:containerPort":{},"f:protocol":{}}},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}},"manager":"OpenAPI-Generator","operation":"Update","time":"2021-05-03T13:10:32Z"},{"apiVersion":"apps/v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:availableReplicas":{},"f:conditions":{".":{},"k:{\"type\":\"Available\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{},"f:updatedReplicas":{}}},"manager":"kube-controller-manager","operation":"Update","time":"2021-05-03T13:52:58Z"}],"name":"demoservice-server","namespace":"default","resourceVersion":"1016043","uid":"e9e8a3e9-6cb4-4301-ace1-2c0cef3bd61e"},"spec":{"progressDeadlineSeconds":600,"replicas":1,"revisionHistoryLimit":10,"selector":{"matchLabels":{"app":"demoservice-server"}},"strategy":{"rollingUpdate":{"maxSurge":"25%","maxUnavailable":"25%"},"type":"RollingUpdate"},"template":{"metadata":{"creationTimestamp":null,"labels":{"app":"demoservice-server"}},"spec":{"containers":[{"env":[{"name":"SERVER_PORT","value":"8089"},{"name":"SLEEP_DURATION","value":"1"},{"name":"DEMO_FOLDERS","value":"/app"},{"name":"ARMO_TEST_NAME","value":"auto_attach_deployment"},{"name":"CAA_ENABLE_CRASH_REPORTER","value":"1"}],"image":"quay.io/armosec/demoservice:v25","imagePullPolicy":"IfNotPresent","name":"demoservice","ports":[{"containerPort":8089,"protocol":"TCP"}],"resources":{},"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File"}],"dnsPolicy":"ClusterFirst","restartPolicy":"Always","schedulerName":"default-scheduler","securityContext":{},"terminationGracePeriodSeconds":30}}},"status":{"availableReplicas":1,"conditions":[{"lastTransitionTime":"2021-05-03T13:10:32Z","lastUpdateTime":"2021-05-03T13:10:37Z","message":"ReplicaSet \"demoservice-server-7d478b6998\" has successfully 
progressed.","reason":"NewReplicaSetAvailable","status":"True","type":"Progressing"},{"lastTransitionTime":"2021-05-03T13:52:58Z","lastUpdateTime":"2021-05-03T13:52:58Z","message":"Deployment has minimum availability.","reason":"MinimumReplicasAvailable","status":"True","type":"Available"}],"observedGeneration":1,"readyReplicas":1,"replicas":1,"updatedReplicas":1}}`
workload, err := NewWorkload([]byte(w))
if err != nil {
t.Fatal(err)
}
if workload.GetUID() != "e9e8a3e9-6cb4-4301-ace1-2c0cef3bd61e" {
t.Errorf("wrong UID")
}
}
func TestIsAttached(t *testing.T) {
w := `{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{"deployment.kubernetes.io/revision":"3"},"creationTimestamp":"2021-06-21T04:52:05Z","generation":3,"name":"emailservice","namespace":"default"},"spec":{"progressDeadlineSeconds":600,"replicas":1,"revisionHistoryLimit":10,"selector":{"matchLabels":{"app":"emailservice"}},"strategy":{"rollingUpdate":{"maxSurge":"25%","maxUnavailable":"25%"},"type":"RollingUpdate"},"template":{"metadata":{"annotations":{"armo.last-update":"21-06-2021 06:40:42","armo.wlid":"wlid://cluster-david-demo/namespace-default/deployment-emailservice"},"creationTimestamp":null,"labels":{"app":"emailservice","armo.attach":"true"}},"spec":{"containers":[{"env":[{"name":"PORT","value":"8080"},{"name":"DISABLE_PROFILER","value":"1"}],"image":"gcr.io/google-samples/microservices-demo/emailservice:v0.2.3","imagePullPolicy":"IfNotPresent","livenessProbe":{"exec":{"command":["/bin/grpc_health_probe","-addr=:8080"]},"failureThreshold":3,"periodSeconds":5,"successThreshold":1,"timeoutSeconds":1},"name":"server","ports":[{"containerPort":8080,"protocol":"TCP"}],"readinessProbe":{"exec":{"command":["/bin/grpc_health_probe","-addr=:8080"]},"failureThreshold":3,"periodSeconds":5,"successThreshold":1,"timeoutSeconds":1},"resources":{"limits":{"cpu":"200m","memory":"128Mi"},"requests":{"cpu":"100m","memory":"64Mi"}},"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File"}],"dnsPolicy":"ClusterFirst","restartPolicy":"Always","schedulerName":"default-scheduler","securityContext":{},"serviceAccount":"default","serviceAccountName":"default","terminationGracePeriodSeconds":5}}}}`
workload, err := NewWorkload([]byte(w))
if err != nil {
t.Fatal(err)
}
if !workload.IsAttached() {
t.Errorf("expected attached")
}
}

View File

@@ -1,23 +0,0 @@
package k8sinterface
func PodSpec(kind string) []string {
switch kind {
case "Pod", "Namespace":
return []string{"spec"}
case "CronJob":
return []string{"spec", "jobTemplate", "spec", "template", "spec"}
default:
return []string{"spec", "template", "spec"}
}
}
func PodMetadata(kind string) []string {
switch kind {
case "Pod", "Namespace", "Secret":
return []string{"metadata"}
case "CronJob":
return []string{"spec", "jobTemplate", "spec", "template", "metadata"}
default:
return []string{"spec", "template", "metadata"}
}
}
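A minimal usage sketch for these path helpers, assuming the workload has already been unmarshaled into a map[string]interface{}; the getPodSpec helper below is illustrative and not part of this file:

func getPodSpec(workload map[string]interface{}, kind string) (map[string]interface{}, bool) {
	// Walk the unstructured object along the path returned by PodSpec,
	// e.g. ["spec", "template", "spec"] for a Deployment.
	current := workload
	for _, field := range PodSpec(kind) {
		next, ok := current[field].(map[string]interface{})
		if !ok {
			return nil, false
		}
		current = next
	}
	return current, true
}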

View File

@@ -1,7 +0,0 @@
package opapolicy
const (
PostureRestAPIPathV1 = "/v1/posture"
PostureRedisPrefix = "_postureReportv1"
K8sPostureNotification = "/k8srestapi/v1/newPostureReport"
)

View File

@@ -1,161 +0,0 @@
package opapolicy
import (
"time"
armotypes "github.com/armosec/kubescape/cautils/armotypes"
)
type AlertScore float32
type RuleLanguages string
const (
RegoLanguage RuleLanguages = "Rego"
RegoLanguage2 RuleLanguages = "rego"
)
// RuleResponse is the expected response of a single run of a rego policy
type RuleResponse struct {
AlertMessage string `json:"alertMessage"`
RuleStatus string `json:"ruleStatus"`
PackageName string `json:"packagename"`
AlertScore AlertScore `json:"alertScore"`
AlertObject AlertObject `json:"alertObject"`
Context []string `json:"context,omitempty"` // TODO - Remove
Rulename string `json:"rulename,omitempty"` // TODO - Remove
ExceptionName string `json:"exceptionName,omitempty"` // Not in use
Exception *armotypes.PostureExceptionPolicy `json:"exception,omitempty"`
}
type AlertObject struct {
K8SApiObjects []map[string]interface{} `json:"k8sApiObjects,omitempty"`
ExternalObjects map[string]interface{} `json:"externalObjects,omitempty"`
}
type FrameworkReport struct {
Name string `json:"name"`
ControlReports []ControlReport `json:"controlReports"`
Score float32 `json:"score,omitempty"`
ARMOImprovement float32 `json:"ARMOImprovement,omitempty"`
WCSScore float32 `json:"wcsScore,omitempty"`
}
type ControlReport struct {
armotypes.PortalBase `json:",inline"`
ControlID string `json:"id"`
Name string `json:"name"`
RuleReports []RuleReport `json:"ruleReports"`
Remediation string `json:"remediation"`
Description string `json:"description"`
Score float32 `json:"score"`
BaseScore float32 `json:"baseScore,omitempty"`
ARMOImprovement float32 `json:"ARMOImprovement,omitempty"`
}
type RuleReport struct {
Name string `json:"name"`
Remediation string `json:"remediation"`
RuleStatus RuleStatus `json:"ruleStatus"` // did we run the rule or not (if there were compile errors, the value will be failed)
RuleResponses []RuleResponse `json:"ruleResponses"`
ListInputResources []map[string]interface{} `json:"-"`
ListInputKinds []string `json:"-"`
}
type RuleStatus struct {
Status string `json:"status"`
Message string `json:"message"`
}
// PostureReport
type PostureReport struct {
CustomerGUID string `json:"customerGUID"`
ClusterName string `json:"clusterName"`
ReportID string `json:"reportID"`
JobID string `json:"jobID"`
ReportGenerationTime time.Time `json:"generationTime"`
FrameworkReports []FrameworkReport `json:"frameworks"`
}
// RuleMatchObjects defines which objects this rule applies to
type RuleMatchObjects struct {
APIGroups []string `json:"apiGroups"` // apps
APIVersions []string `json:"apiVersions"` // v1/ v1beta1 / *
Resources []string `json:"resources"` // dep.., pods,
}
// RuleDependency defines a package that this rule depends on
type RuleDependency struct {
PackageName string `json:"packageName"` // package name
}
// PolicyRule represents a single rule, the fundamental executable block of a policy
type PolicyRule struct {
armotypes.PortalBase `json:",inline"`
CreationTime string `json:"creationTime"`
Rule string `json:"rule"` // multiline string!
RuleLanguage RuleLanguages `json:"ruleLanguage"`
Match []RuleMatchObjects `json:"match"`
RuleDependencies []RuleDependency `json:"ruleDependencies"`
Description string `json:"description"`
Remediation string `json:"remediation"`
RuleQuery string `json:"ruleQuery"` // default "armo_builtins" - DEPRECATED
}
// Control represents a collection of rules that are combined to serve a single purpose
type Control struct {
armotypes.PortalBase `json:",inline"`
ControlID string `json:"id"`
CreationTime string `json:"creationTime"`
Description string `json:"description"`
Remediation string `json:"remediation"`
Rules []PolicyRule `json:"rules"`
// for a new list of rules in POST/UPDATE requests
RulesIDs *[]string `json:"rulesIDs,omitempty"`
}
type UpdatedControl struct {
Control `json:",inline"`
Rules []interface{} `json:"rules"`
}
// Framework represents a collection of controls that together describe comprehensive desired behavior
type Framework struct {
armotypes.PortalBase `json:",inline"`
CreationTime string `json:"creationTime"`
Description string `json:"description"`
Controls []Control `json:"controls"`
// for a new list of controls in POST/UPDATE requests
ControlsIDs *[]string `json:"controlsIDs,omitempty"`
}
type UpdatedFramework struct {
Framework `json:",inline"`
Controls []interface{} `json:"controls"`
}
type NotificationPolicyType string
type NotificationPolicyKind string
// Supported NotificationTypes
const (
TypeValidateRules NotificationPolicyType = "validateRules"
TypeExecPostureScan NotificationPolicyType = "execPostureScan"
TypeUpdateRules NotificationPolicyType = "updateRules"
)
// Supported NotificationKinds
const (
KindFramework NotificationPolicyKind = "Framework"
KindControl NotificationPolicyKind = "Control"
KindRule NotificationPolicyKind = "Rule"
)
type PolicyNotification struct {
NotificationType NotificationPolicyType `json:"notificationType"`
Rules []PolicyIdentifier `json:"rules"`
ReportID string `json:"reportID"`
JobID string `json:"jobID"`
Designators armotypes.PortalDesignator `json:"designators"`
}
type PolicyIdentifier struct {
Kind NotificationPolicyKind `json:"kind"`
Name string `json:"name"`
}

View File

@@ -1,301 +0,0 @@
package opapolicy
import (
"time"
armotypes "github.com/armosec/kubescape/cautils/armotypes"
)
// Mock A
var (
AMockCustomerGUID = "5d817063-096f-4d91-b39b-8665240080af"
AMockJobID = "36b6f9e1-3b63-4628-994d-cbe16f81e9c7"
AMockReportID = "2c31e4da-c6fe-440d-9b8a-785b80c8576a"
AMockClusterName = "clusterA"
AMockFrameworkName = "testFrameworkA"
AMockControlName = "testControlA"
AMockRuleName = "testRuleA"
AMockPortalBase = *armotypes.MockPortalBase(AMockCustomerGUID, "", nil)
)
func MockRuleResponseA() *RuleResponse {
return &RuleResponse{
AlertMessage: "test alert message A",
AlertScore: 0,
Rulename: AMockRuleName,
PackageName: "test.package.name.A",
Context: []string{},
}
}
func MockFrameworkReportA() *FrameworkReport {
return &FrameworkReport{
Name: AMockFrameworkName,
ControlReports: []ControlReport{
{
ControlID: "C-0010",
Name: AMockControlName,
RuleReports: []RuleReport{
{
Name: AMockRuleName,
Remediation: "remove privilegedContainer: True flag from your pod spec",
RuleResponses: []RuleResponse{
*MockRuleResponseA(),
},
},
},
},
},
}
}
func MockPostureReportA() *PostureReport {
return &PostureReport{
CustomerGUID: AMockCustomerGUID,
ClusterName: AMockClusterName,
ReportID: AMockReportID,
JobID: AMockJobID,
ReportGenerationTime: time.Now().UTC(),
FrameworkReports: []FrameworkReport{*MockFrameworkReportA()},
}
}
func MockFrameworkA() *Framework {
return &Framework{
PortalBase: *armotypes.MockPortalBase("aaaaaaaa-096f-4d91-b39b-8665240080af", AMockFrameworkName, nil),
CreationTime: "",
Description: "mock framework descryption",
Controls: []Control{
{
PortalBase: *armotypes.MockPortalBase("aaaaaaaa-aaaa-4d91-b39b-8665240080af", AMockControlName, nil),
Rules: []PolicyRule{
*MockRuleA(),
},
},
},
}
}
func MockRuleUntrustedRegistries() *PolicyRule {
return &PolicyRule{
PortalBase: *armotypes.MockPortalBase("aaaaaaaa-aaaa-aaaa-b39b-8665240080af", AMockControlName, nil),
Rule: `
package armo_builtins
# Check for images from blacklisted repos
untrusted_registries(z) = x {
x := ["015253967648.dkr.ecr.eu-central-1.amazonaws.com/"]
}
public_registries(z) = y{
y := ["quay.io/kiali/","quay.io/datawire/","quay.io/keycloak/","quay.io/bitnami/"]
}
untrustedImageRepo[msga] {
pod := input[_]
k := pod.kind
k == "Pod"
container := pod.spec.containers[_]
image := container.image
repo_prefix := untrusted_registries(image)[_]
startswith(image, repo_prefix)
selfLink := pod.metadata.selfLink
containerName := container.name
msga := {
"alertMessage": sprintf("image '%v' in container '%s' in [%s] comes from untrusted registry", [image, containerName, selfLink]),
"alert": true,
"prevent": false,
"alertScore": 2,
"alertObject": [{"pod":pod}]
}
}
untrustedImageRepo[msga] {
pod := input[_]
k := pod.kind
k == "Pod"
container := pod.spec.containers[_]
image := container.image
repo_prefix := public_registries(image)[_]
startswith(image, repo_prefix)
selfLink := pod.metadata.selfLink
containerName := container.name
msga := {
"alertMessage": sprintf("image '%v' in container '%s' in [%s] comes from public registry", [image, containerName, selfLink]),
"alert": true,
"prevent": false,
"alertScore": 1,
"alertObject": [{"pod":pod}]
}
}
`,
RuleLanguage: RegoLanguage,
Match: []RuleMatchObjects{
{
APIVersions: []string{"v1"},
APIGroups: []string{"*"},
Resources: []string{"pods"},
},
},
RuleDependencies: []RuleDependency{
{
PackageName: "kubernetes.api.client",
},
},
}
}
func MockRuleA() *PolicyRule {
return &PolicyRule{
PortalBase: *armotypes.MockPortalBase("aaaaaaaa-aaaa-aaaa-b39b-8665240080af", AMockControlName, nil),
Rule: MockRegoPrivilegedPods(), //
RuleLanguage: RegoLanguage,
Match: []RuleMatchObjects{
{
APIVersions: []string{"v1"},
APIGroups: []string{"*"},
Resources: []string{"pods"},
},
},
RuleDependencies: []RuleDependency{
{
PackageName: "kubernetes.api.client",
},
},
}
}
func MockRuleB() *PolicyRule {
return &PolicyRule{
PortalBase: *armotypes.MockPortalBase("bbbbbbbb-aaaa-aaaa-b39b-8665240080af", AMockControlName, nil),
Rule: MockExternalFacingService(), //
RuleLanguage: RegoLanguage,
Match: []RuleMatchObjects{
{
APIVersions: []string{"v1"},
APIGroups: []string{""},
Resources: []string{"pods"},
},
},
RuleDependencies: []RuleDependency{
{
PackageName: "kubernetes.api.client",
},
},
}
}
func MockPolicyNotificationA() *PolicyNotification {
return &PolicyNotification{
NotificationType: TypeExecPostureScan,
ReportID: AMockReportID,
JobID: AMockJobID,
Designators: armotypes.PortalDesignator{},
Rules: []PolicyIdentifier{
{
Kind: KindFramework,
Name: AMockFrameworkName,
}},
}
}
func MockTemp() string {
return `
package armo_builtins
import data.kubernetes.api.client as client
deny[msga] {
#object := input[_]
object := client.query_all("pods")
obj := object.body.items[_]
msga := {
"packagename": "armo_builtins",
"alertMessage": "found object",
"alertScore": 3,
"alertObject": {"object": obj},
}
}
`
}
func MockRegoPrivilegedPods() string {
return `package armo_builtins
import data.kubernetes.api.client as client
# Deny mutating action unless user is in group owning the resource
#privileged pods
deny[msga] {
pod := input[_]
containers := pod.spec.containers[_]
containers.securityContext.privileged == true
msga := {
"packagename": "armo_builtins",
"alertMessage": sprintf("the following pods are defined as privileged: %v", [pod]),
"alertScore": 3,
"alertObject": pod,
}
}
#handles majority of workload resources
deny[msga] {
wl := input[_]
spec_template_spec_patterns := {"Deployment","ReplicaSet","DaemonSet","StatefulSet","Job"}
spec_template_spec_patterns[wl.kind]
containers := wl.spec.template.spec.containers[_]
containers.securityContext.privileged == true
msga := {
"packagename": "armo_builtins",
"alertMessage": sprintf("the following workloads are defined as privileged: %v", [wl]),
"alertScore": 3,
"alertObject": wl,
}
}
#handles cronjob
deny[msga] {
wl := input[_]
wl.kind == "CronJob"
containers := wl.spec.jobTemplate.spec.template.spec.containers[_]
containers.securityContext.privileged == true
msga := {
"packagename": "armo_builtins",
"alertMessage": sprintf("the following cronjobs are defined as privileged: %v", [wl]),
"alertScore": 3,
"alertObject": wl,
}
}
`
}
func MockExternalFacingService() string {
return "\n\tpackage armo_builtins\n\n\timport data.kubernetes.api.client as client\n\timport data.cautils as cautils\n\ndeny[msga] {\n\n\twl := input[_]\n\tcluster_resource := client.query_all(\n\t\t\"services\"\n\t)\n\n\tlabels := wl.metadata.labels\n\tfiltered_labels := json.remove(labels, [\"pod-template-hash\"])\n \n#service := cluster_resource.body.items[i]\nservices := [svc | cluster_resource.body.items[i].metadata.namespace == wl.metadata.namespace; svc := cluster_resource.body.items[i]]\nservice := services[_]\nnp_or_lb := {\"NodePort\", \"LoadBalancer\"}\nnp_or_lb[service.spec.type]\ncautils.is_subobject(service.spec.selector,filtered_labels)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v pod %v expose external facing service: %v\",[wl.metadata.namespace, wl.metadata.name, service.metadata.name]),\n\t\t\"alertScore\": 2,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"srvc\":service}\n\t}\n}\n\t"
}
func GetRuntimePods() string {
return `
package armo_builtins
import data.kubernetes.api.client as client
deny[msga] {
cluster_resource := client.query_all(
"pods"
)
pod := cluster_resource.body.items[i]
msga := {
"alertMessage": "got something",
"alertScore": 2,
"packagename": "armo_builtins",
"alertObject": {"pod": pod}
}
}
`
}

View File

@@ -1,42 +0,0 @@
package opapolicy
import (
"encoding/json"
"testing"
)
func TestMockPolicyNotificationA(t *testing.T) {
policy := MockPolicyNotificationA()
bp, err := json.Marshal(policy)
if err != nil {
t.Error(err)
} else {
t.Logf("%s\n", string(bp))
// t.Errorf("%s\n", string(bp))
}
}
func TestMockFrameworkA(t *testing.T) {
policy := MockFrameworkA()
bp, err := json.Marshal(policy)
if err != nil {
t.Error(err)
} else {
t.Logf("%s\n", string(bp))
// t.Errorf("%s\n", string(bp))
}
}
func TestMockPostureReportA(t *testing.T) {
policy := MockPostureReportA()
bp, err := json.Marshal(policy)
if err != nil {
t.Error(err)
} else {
// t.Errorf("%s\n", string(bp))
t.Logf("%s\n", string(bp))
}
}

View File

@@ -1,236 +0,0 @@
package opapolicy
import (
"bytes"
"encoding/json"
"github.com/armosec/kubescape/cautils/k8sinterface"
)
func (pn *PolicyNotification) ToJSONBytesBuffer() (*bytes.Buffer, error) {
res, err := json.Marshal(pn)
if err != nil {
return nil, err
}
return bytes.NewBuffer(res), err
}
func (RuleResponse *RuleResponse) GetSingleResultStatus() string {
if RuleResponse.Exception != nil {
if RuleResponse.Exception.IsAlertOnly() {
return "warning"
}
if RuleResponse.Exception.IsDisable() {
return "ignore"
}
}
return "failed"
}
func (ruleReport *RuleReport) GetRuleStatus() (string, []RuleResponse, []RuleResponse) {
if len(ruleReport.RuleResponses) == 0 {
return "success", nil, nil
}
exceptions := make([]RuleResponse, 0)
failed := make([]RuleResponse, 0)
for _, rule := range ruleReport.RuleResponses {
if rule.ExceptionName != "" {
exceptions = append(exceptions, rule)
} else if rule.Exception != nil {
exceptions = append(exceptions, rule)
} else {
failed = append(failed, rule)
}
}
status := "failed"
if len(failed) == 0 && len(exceptions) > 0 {
status = "warning"
}
return status, failed, exceptions
}
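A minimal sketch of the status derivation above, written as if it lived in a _test.go file of this package (only the standard testing import is assumed); the exception name and alert message are placeholders:

func TestGetRuleStatusSketch(t *testing.T) {
	// No responses at all -> the rule passed.
	empty := RuleReport{}
	if status, _, _ := empty.GetRuleStatus(); status != "success" {
		t.Errorf("expected success, got %s", status)
	}
	// Only excepted responses -> warning.
	warned := RuleReport{RuleResponses: []RuleResponse{{ExceptionName: "allow-kube-system"}}}
	if status, _, _ := warned.GetRuleStatus(); status != "warning" {
		t.Errorf("expected warning, got %s", status)
	}
	// Any response without an exception -> failed.
	failed := RuleReport{RuleResponses: []RuleResponse{{AlertMessage: "privileged container"}}}
	if status, _, _ := failed.GetRuleStatus(); status != "failed" {
		t.Errorf("expected failed, got %s", status)
	}
}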
func (controlReport *ControlReport) GetNumberOfResources() int {
sum := 0
for i := range controlReport.RuleReports {
sum += controlReport.RuleReports[i].GetNumberOfResources()
}
return sum
}
func (controlReport *ControlReport) GetNumberOfFailedResources() int {
sum := 0
for i := range controlReport.RuleReports {
sum += controlReport.RuleReports[i].GetNumberOfFailedResources()
}
return sum
}
func (controlReport *ControlReport) GetNumberOfWarningResources() int {
sum := 0
for i := range controlReport.RuleReports {
sum += controlReport.RuleReports[i].GetNumberOfWarningResources()
}
return sum
}
func (controlReport *ControlReport) ListControlsInputKinds() []string {
listControlsInputKinds := []string{}
for i := range controlReport.RuleReports {
listControlsInputKinds = append(listControlsInputKinds, controlReport.RuleReports[i].ListInputKinds...)
}
return listControlsInputKinds
}
func (controlReport *ControlReport) Passed() bool {
for i := range controlReport.RuleReports {
if len(controlReport.RuleReports[i].RuleResponses) != 0 {
return false
}
}
return true
}
func (controlReport *ControlReport) Warning() bool {
if controlReport.Passed() || controlReport.Failed() {
return false
}
for i := range controlReport.RuleReports {
if status, _, _ := controlReport.RuleReports[i].GetRuleStatus(); status == "warning" {
return true
}
}
return false
}
func (controlReport *ControlReport) Failed() bool {
if controlReport.Passed() {
return false
}
for i := range controlReport.RuleReports {
if status, _, _ := controlReport.RuleReports[i].GetRuleStatus(); status == "failed" {
return true
}
}
return false
}
func (ruleReport *RuleReport) GetNumberOfResources() int {
return len(ruleReport.ListInputResources)
}
func (ruleReport *RuleReport) GetNumberOfFailedResources() int {
sum := 0
for i := len(ruleReport.RuleResponses) - 1; i >= 0; i-- {
if ruleReport.RuleResponses[i].GetSingleResultStatus() == "failed" {
if !ruleReport.DeleteIfRedundantResponse(&ruleReport.RuleResponses[i], i) {
sum++
}
}
}
return sum
}
func (ruleReport *RuleReport) DeleteIfRedundantResponse(RuleResponse *RuleResponse, index int) bool {
if b, rr := ruleReport.IsDuplicateResponseOfResource(RuleResponse, index); b {
rr.AddMessageToResponse(RuleResponse.AlertMessage)
ruleReport.RuleResponses = removeResponse(ruleReport.RuleResponses, index)
return true
}
return false
}
func (ruleResponse *RuleResponse) AddMessageToResponse(message string) {
ruleResponse.AlertMessage += message
}
func (ruleReport *RuleReport) IsDuplicateResponseOfResource(RuleResponse *RuleResponse, index int) (bool, *RuleResponse) {
for i := range ruleReport.RuleResponses {
if i != index {
for j := range ruleReport.RuleResponses[i].AlertObject.K8SApiObjects {
for k := range RuleResponse.AlertObject.K8SApiObjects {
w1 := k8sinterface.NewWorkloadObj(ruleReport.RuleResponses[i].AlertObject.K8SApiObjects[j])
w2 := k8sinterface.NewWorkloadObj(RuleResponse.AlertObject.K8SApiObjects[k])
if w1.GetName() == w2.GetName() && w1.GetNamespace() == w2.GetNamespace() && w1.GetKind() != "Role" && w1.GetKind() != "ClusterRole" {
return true, &ruleReport.RuleResponses[i]
}
}
}
}
}
return false, nil
}
func removeResponse(slice []RuleResponse, index int) []RuleResponse {
return append(slice[:index], slice[index+1:]...)
}
func (ruleReport *RuleReport) GetNumberOfWarningResources() int {
sum := 0
for i := range ruleReport.RuleResponses {
if ruleReport.RuleResponses[i].GetSingleResultStatus() == "warning" {
sum += 1
}
}
return sum
}
func (postureReport *PostureReport) RemoveData() {
for i := range postureReport.FrameworkReports {
postureReport.FrameworkReports[i].RemoveData()
}
}
func (frameworkReport *FrameworkReport) RemoveData() {
for i := range frameworkReport.ControlReports {
frameworkReport.ControlReports[i].RemoveData()
}
}
func (controlReport *ControlReport) RemoveData() {
for i := range controlReport.RuleReports {
controlReport.RuleReports[i].RemoveData()
}
}
func (ruleReport *RuleReport) RemoveData() {
for i := range ruleReport.RuleResponses {
ruleReport.RuleResponses[i].RemoveData()
}
}
func (r *RuleResponse) RemoveData() {
r.AlertObject.ExternalObjects = nil
keepFields := []string{"kind", "apiVersion", "metadata"}
keepMetadataFields := []string{"name", "namespace", "labels"}
for i := range r.AlertObject.K8SApiObjects {
deleteFromMap(r.AlertObject.K8SApiObjects[i], keepFields)
for k := range r.AlertObject.K8SApiObjects[i] {
if k == "metadata" {
if b, ok := r.AlertObject.K8SApiObjects[i][k].(map[string]interface{}); ok {
deleteFromMap(b, keepMetadataFields)
r.AlertObject.K8SApiObjects[i][k] = b
}
}
}
}
}
func deleteFromMap(m map[string]interface{}, keepFields []string) {
for k := range m {
if StringInSlice(keepFields, k) {
continue
}
delete(m, k)
}
}
func StringInSlice(strSlice []string, str string) bool {
for i := range strSlice {
if strSlice[i] == str {
return true
}
}
return false
}
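A minimal sketch of what RemoveData keeps on a reported Kubernetes object, again written as if in a _test.go file of this package; the object fields below are placeholders:

func TestRemoveDataSketch(t *testing.T) {
	resp := RuleResponse{
		AlertObject: AlertObject{
			ExternalObjects: map[string]interface{}{"cloud": "some-external-data"},
			K8SApiObjects: []map[string]interface{}{{
				"kind":       "Deployment",
				"apiVersion": "apps/v1",
				"metadata":   map[string]interface{}{"name": "demo", "namespace": "default", "uid": "1234"},
				"spec":       map[string]interface{}{"replicas": 3},
			}},
		},
	}
	resp.RemoveData()
	obj := resp.AlertObject.K8SApiObjects[0]
	if _, ok := obj["spec"]; ok {
		t.Errorf("spec should have been dropped; only kind/apiVersion/metadata are kept")
	}
	if meta, ok := obj["metadata"].(map[string]interface{}); ok {
		if _, found := meta["uid"]; found {
			t.Errorf("metadata.uid should have been dropped; only name/namespace/labels are kept")
		}
	}
	if resp.AlertObject.ExternalObjects != nil {
		t.Errorf("external objects should have been cleared")
	}
}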

View File

@@ -1,47 +0,0 @@
package opapolicy
import (
"github.com/francoispqt/gojay"
"time"
)
/*
Responsible for fast unmarshaling of the common posture report structures and substructures.
*/
// UnmarshalJSONObject implements the gojay.UnmarshalerJSONObject interface for PostureReport
func (r *PostureReport) UnmarshalJSONObject(dec *gojay.Decoder, key string) (err error) {
switch key {
case "customerGUID":
err = dec.String(&(r.CustomerGUID))
case "clusterName":
err = dec.String(&(r.ClusterName))
case "reportID":
err = dec.String(&(r.ReportID))
case "jobID":
err = dec.String(&(r.JobID))
case "generationTime":
err = dec.Time(&(r.ReportGenerationTime), time.RFC3339)
r.ReportGenerationTime = r.ReportGenerationTime.Local()
}
return err
}
// func (files *PkgFiles) UnmarshalJSONArray(dec *gojay.Decoder) error {
// lae := PackageFile{}
// if err := dec.Object(&lae); err != nil {
// return err
// }
// *files = append(*files, lae)
// return nil
// }
func (r *PostureReport) NKeys() int {
return 0
}
//------------------------
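A minimal sketch of how this decoder might be driven, assuming gojay's top-level UnmarshalJSONObject entry point; the wrapper function name is illustrative:

func decodePostureReportHeader(data []byte) (*PostureReport, error) {
	// NKeys() returning 0 tells gojay to keep feeding keys until the object ends,
	// so only the keys handled in UnmarshalJSONObject above are actually decoded.
	report := &PostureReport{}
	if err := gojay.UnmarshalJSONObject(data, report); err != nil {
		return nil, err
	}
	return report, nil
}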

View File

@@ -1,219 +0,0 @@
package resources
var RegoCAUtils = `
package cautils
list_contains(lista,element) {
some i
lista[i] == element
}
# getPodName(metadata) = name {
# name := metadata.generateName
#}
getPodName(metadata) = name {
name := metadata.name
}
# returns the subobject of sub1 that is contained in parent, e.g. parent = {a:a,b:b,c:c,d:d}
# sub1 = {b:b,c:c} - result is {b:b,c:c}, if sub1={b:b,e:f} returns {b:b}
object_intersection(parent,sub1) = r{
r := {k:p | p := sub1[k]
parent[k]== p
}
}
# returns true if parent contains sub (both are objects, not sets!)
is_subobject(sub,parent) {
object_intersection(sub,parent) == sub
}
`
var RegoDesignators = `
package designators
import data.cautils
# functions related to designators
#allowed_namespace
#@input@: receive as part of the input object "included_namespaces" list
#@input@: item's namespace as "namespace"
#returns true if namespace exists in that list
included_namespaces(namespace){
cautils.list_contains(["default"],namespace)
}
#forbidden_namespaces
#@input@: receive as part of the input object "forbidden_namespaces" list
#@input@: item's namespace as "namespace"
#returns true if namespace exists in that list
excluded_namespaces(namespace){
not cautils.list_contains(["excluded"],namespace)
}
forbidden_wlids(wlid){
input.forbidden_wlids[_] == wlid
}
filter_k8s_object(obj) = filtered {
#put
filtered := obj
#filtered := [ x | cautils.list_contains(["default"],obj[i].metadata.namespace) ; x := obj[i] ]
# filtered := [ x | not cautils.list_contains([],filter1Set[i].metadata.namespace); x := filter1Set[i]]
}
`
var RegoKubernetesApiClient = `
package kubernetes.api.client
# service account token
token := data.k8sconfig.token
# Cluster host
host := data.k8sconfig.host
# default certificate path
# crt_file := "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
crt_file := data.k8sconfig.crtfile
client_crt_file := data.k8sconfig.clientcrtfile
client_key_file := data.k8sconfig.clientkeyfile
# This information could be retrieved from the kubernetes API
# too, but would essentially require a request per API group,
# so for now use a lookup table for the most common resources.
resource_group_mapping := {
"services": "api/v1",
"pods": "api/v1",
"configmaps": "api/v1",
"secrets": "api/v1",
"persistentvolumeclaims": "api/v1",
"daemonsets": "apis/apps/v1",
"deployments": "apis/apps/v1",
"statefulsets": "apis/apps/v1",
"horizontalpodautoscalers": "api/autoscaling/v1",
"jobs": "apis/batch/v1",
"cronjobs": "apis/batch/v1beta1",
"ingresses": "api/extensions/v1beta1",
"replicasets": "apis/apps/v1",
"networkpolicies": "apis/networking.k8s.io/v1",
"clusterroles": "apis/rbac.authorization.k8s.io/v1",
"clusterrolebindings": "apis/rbac.authorization.k8s.io/v1",
"roles": "apis/rbac.authorization.k8s.io/v1",
"rolebindings": "apis/rbac.authorization.k8s.io/v1",
"serviceaccounts": "api/v1"
}
# Query for given resource/name in provided namespace
# Example: query_name_ns("deployments", "my-app", "default")
query_name_ns(resource, name, namespace) = http.send({
"url": sprintf("%v/%v/namespaces/%v/%v/%v", [
host,
resource_group_mapping[resource],
namespace,
resource,
name,
]),
"method": "get",
"headers": {"authorization": token},
"tls_client_cert_file": client_crt_file,
"tls_client_key_file": client_key_file,
"tls_ca_cert_file": crt_file,
"raise_error": true,
})
# Query for given resource type using label selectors
# https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#api
# Example: query_label_selector_ns("deployments", {"app": "opa-kubernetes-api-client"}, "default")
query_label_selector_ns(resource, selector, namespace) = http.send({
"url": sprintf("%v/%v/namespaces/%v/%v?labelSelector=%v", [
host,
resource_group_mapping[resource],
namespace,
resource,
label_map_to_query_string(selector),
]),
"method": "get",
"headers": {"authorization": token},
"tls_client_cert_file": client_crt_file,
"tls_client_key_file": client_key_file,
"tls_ca_cert_file": crt_file,
"raise_error": true,
})
# Query for given resource type using field selectors
# Example: field_transform_to_qry_param("spec.selector", {"app": "acmefit", "service": "catalog-db"})
# yields "spec.selector.app%3Dacmefit,spec.selector.service%3Dcatalog-db"
query_field_selector_ns(resource, field, selector, namespace) = http.send({
"url": sprintf("%v/%v/namespaces/%v/%v?fieldSelector=%v", [
host,
resource_group_mapping[resource],
namespace,
resource,
field_transform_to_qry_param(field,selector),
]),
"method": "get",
"headers": {"authorization": token},
"tls_client_cert_file": client_crt_file,
"tls_client_key_file": client_key_file,
"tls_ca_cert_file": crt_file,
"raise_error": true,
})
# # Query for all resources of type resource in all namespaces
# # Example: query_all("deployments")
# query_all(resource) = http.send({
# "url": sprintf("https://%v:%v/%v/%v", [
# ip,
# port,
# resource_group_mapping[resource],
# resource,
# ]),
# "method": "get",
# "headers": {"authorization": sprintf("Bearer %v", [token])},
# "tls_client_cert_file": crt_file,
# "raise_error": true,
# })
# Query for all resources of type resource in all namespaces
# Example: query_all("deployments")
query_all(resource) = http.send({
"url": sprintf("%v/%v/%v", [
host,
resource_group_mapping[resource],
resource,
]),
"method": "get",
"headers": {"authorization": token},
"tls_client_cert_file": client_crt_file,
"tls_client_key_file": client_key_file,
"tls_ca_cert_file": crt_file,
"raise_error": true,
})
# Query for all resources of type resource in all namespaces - without authentication
# Example: query_all("deployments")
query_all_no_auth(resource) = http.send({
"url": sprintf("%v/%v/namespaces/default/%v", [
host,
resource_group_mapping[resource],
resource,
]),
"method": "get",
"raise_error": true,
"tls_insecure_skip_verify" : true,
})
field_transform_to_qry_param(field,map) = finala {
mid := {concat(".",[field,key]): val | val := map[key]}
finala := label_map_to_query_string(mid)
}
label_map_to_query_string(map) = concat(",", [str | val := map[key]; str := concat("%3D", [key, val])])
`
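A minimal sketch of how one of these embedded modules might be compiled and queried from Go, assuming the github.com/open-policy-agent/opa/rego package; the query, module name, and wrapper function are illustrative:

import (
	"context"

	"github.com/open-policy-agent/opa/rego"
)

// isSubobject reports whether sub is contained in parent by evaluating the
// cautils.is_subobject helper from the RegoCAUtils module above.
func isSubobject(ctx context.Context, sub, parent map[string]interface{}) (bool, error) {
	pq, err := rego.New(
		rego.Query("data.cautils.is_subobject(input.sub, input.parent)"),
		rego.Module("cautils.rego", RegoCAUtils),
	).PrepareForEval(ctx)
	if err != nil {
		return false, err
	}
	rs, err := pq.Eval(ctx, rego.EvalInput(map[string]interface{}{"sub": sub, "parent": parent}))
	if err != nil {
		return false, err
	}
	return len(rs) > 0, nil
}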

View File

@@ -1,20 +0,0 @@
package armo_builtins
# import data.kubernetes.api.client as client
import data.cautils as cautils
# alert cronjobs
#handles cronjob
deny[msga] {
wl := input[_]
wl.kind == "CronJob"
msga := {
"alertMessage": sprintf("the following cronjobs are defined: %v", [wl]),
"alertScore": 2,
"packagename": "armo_builtins",
"alertObject": wl
}
}

View File

@@ -1,44 +0,0 @@
package armo_builtins
import data.kubernetes.api.client as client
# input: pod
# apiversion: v1
# does:
# returns the external facing services of that pod
#
#
deny[msga] {
pod := input[_]
podns := pod.metadata.namespace
podname := getName(pod.metadata)
# pod := client.query_name_ns("pods","frontend-86c5ffb485-kfp9d", "default")
labels := pod.metadata.labels
filtered_labels := json.remove(labels, ["pod-template-hash"])
cluster_resource := client.query_all(
"services"
)
services := [svc | cluster_resource.body.items[i].metadata.namespace == podns; svc := cluster_resource.body.items[i]]
service := services[_]
np_or_lb := {"NodePort", "LoadBalancer"}
np_or_lb[service.spec.type]
service.spec.selector == filtered_labels
msga := {
"packagename": "armo_builtins",
"alertMessage": sprintf("pod %v/%v exposed services: %v\n", [podns,podname,service]),
"alertScore": 7,
"alertObject": {"service":service,"labels":filtered_labels, "podname":podname,"namespace":podns}
}
}
getName(metadata) = name {
name := metadata.generateName
}
getName(metadata) = name {
name := metadata.name
}

View File

@@ -1,57 +0,0 @@
package armo_builtins
#import data.kubernetes.api.client as client
import data.cautils as cautils
# input: pod
# apiversion: v1
# does:
# returns hostPath volumes
#
#
deny[msga] {
pod := input[_]
pod.kind == "Pod"
volumes := pod.spec.volumes
volume := volumes[_]
# crsrcs.body.spec.containers[_].volumeMounts[_].name = volume.name
volume.hostPath
podname := cautils.getPodName(pod.metadata)
obj := {"volume":volume,"podname": podname}
msga := {
"packagename": "armo_builtins",
"alertMessage": sprintf("pod: %v has {%v,%v} ashostPath volume \n\n\n", [podname, volume]),
"alertScore": 7,
"alertObject": [obj]
}
}
isRWMount(mount) {
not mount.readOnly
}
isRWMount(mount) {
mount.readOnly == false
}
#handles majority of workload resources
deny[msga] {
wl := input[_]
spec_template_spec_patterns := {"Deployment","ReplicaSet","DaemonSet","StatefulSet","Job"}
spec_template_spec_patterns[wl.kind]
volumes := wl.spec.template.spec.volumes
volume := volumes[_]
volume.hostPath
wlname := cautils.getPodName(wl.metadata)
obj := {"volume":volume,"podname": wlname}
msga := {
"packagename": "armo_builtins",
"alertMessage": sprintf("%v: %v has {%v,%v} as hostPath volume\n\n\n", [wl.kind,wlname, volume]),
"alertScore": 7,
"alertObject": [obj]
}
}

View File

@@ -1,56 +0,0 @@
package armo_builtins
#import data.kubernetes.api.client as client
# Deny mutating action unless user is in group owning the resource
#privileged pods
deny[msga] {
pod := input[_]
containers := pod.spec.containers[_]
containers.securityContext.privileged == true
msga := {
"packagename": "armo_builtins",
"alertMessage": sprintf("the following pods are defined as privileged: %v", [pod]),
"alertScore": 3,
"alertObject": pod,
}
}
#handles majority of workload resources
deny[msga] {
wl := input[_]
spec_template_spec_patterns := {"Deployment","ReplicaSet","DaemonSet","StatefulSet","Job"}
spec_template_spec_patterns[wl.kind]
containers := wl.spec.template.spec.containers[_]
containers.securityContext.privileged == true
msga := {
"packagename": "armo_builtins",
"alertMessage": sprintf("the following workloads are defined as privileged: %v", [wl]),
"alertScore": 3,
"alertObject": wl,
}
}
#handles cronjob
deny[msga] {
wl := input[_]
wl.kind == "CronJob"
containers := wl.spec.jobTemplate.spec.template.spec.containers[_]
containers.securityContext.privileged == true
msga := {
"packagename": "armo_builtins",
"alertMessage": sprintf("the following cronjobs are defined as privileged: %v", [wl]),
"alertScore": 3,
"alertObject": wl,
}
}

View File

@@ -1,98 +0,0 @@
package armo_builtins
import data.kubernetes.api.client as client
import data.cautils as cautils
# input: None
# apiversion: v1
# does:
# returns roles+ related subjects in rolebinding
deny[msga] {
# rsrc := client.query_all("roles")
# role := rsrc.body.items[_]
role := input[_]
role.kind == "Role"
rule := role.rules[_]
cautils.list_contains(rule.resources,"secrets")
canViewSecrets(rule)
rbsrc := client.query_all("rolebindings")
rolebinding := rbsrc.body.items[_]
rolebinding.roleRef.kind == "Role"
rolebinding.roleRef.name == role.metadata.name
msga := {
"alertMessage": sprintf("the following users: %v , got read secret access roles", [rolebinding.subjects]),
"alertScore": 9,
"packagename": "armo_builtins",
"alertObject": {"role":role,"users":rolebinding.subjects}
}
}
# input: None
# apiversion: v1
# does:
# returns clusterroles+ related subjects in rolebinding
deny[msga] {
# rsrc := client.query_all("clusterroles")
# role := rsrc.body.items[_]
role := input[_]
role.kind == "ClusterRole"
rule := role.rules[_]
cautils.list_contains(rule.resources,"secrets")
canViewSecrets(rule)
rbsrc := client.query_all("rolebindings")
rolebinding := rbsrc.body.items[_]
rolebinding.roleRef.kind == "ClusterRole"
rolebinding.roleRef.name == role.metadata.name
msga := {
"alertMessage": sprintf("the following users: %v , got read secret access roles", [rolebinding.subjects]),
"alertScore": 9,
"packagename": "armo_builtins",
"alertObject": {"clusterrole":role,"users":rolebinding.subjects}
}
}
# input: None
# apiversion: v1
# does:
# returns clusterroles+ related subjects in clusterrolebinding
#
#
deny[msga] {
# rsrc := client.query_all("clusterroles")
# role := rsrc.body.items[_]
role := input[_]
role.kind == "ClusterRole"
rule := role.rules[_]
cautils.list_contains(rule.resources,"secrets")
canViewSecrets(rule)
rbsrc := client.query_all("clusterrolebindings")
rolebinding := rbsrc.body.items[_]
rolebinding.roleRef.kind == "ClusterRole"
rolebinding.roleRef.name == role.metadata.name
msga := {
"alertMessage": sprintf("the following users: %v , got read secret access roles", [rolebinding.subjects]),
"alertScore": 9,
"packagename": "armo_builtins",
"alertObject": {"clusterrole":role,"users":rolebinding.subjects}
}
}
canViewSecrets(rule) {
cautils.list_contains(rule.verbs,"get")
}
canViewSecrets(rule) {
cautils.list_contains(rule.verbs,"watch")
}

View File

@@ -1,64 +0,0 @@
package armo_builtins
#import data.kubernetes.api.client as client
import data.cautils as cautils
# input: pod
# apiversion: v1
# does:
# returns rw hostpath volumes of that pod
#
#
deny[msga] {
pod := input[_]
pod.kind == "Pod"
volumes := pod.spec.volumes
volume := volumes[_]
# crsrcs.body.spec.containers[_].volumeMounts[_].name = volume.name
mount := pod.spec.containers[_].volumeMounts[_]
mount.name == volume.name
volume.hostPath
isRWMount(mount)
podname := cautils.getPodName(pod.metadata)
obj := {"volume":volume,"mount":mount,"podname": podname}
msga := {
"packagename": "armo_builtins",
"alertMessage": sprintf("pod: %v has {%v,%v} as rw hostPath volume and volumemount pair\n\n\n", [podname, volume,mount]),
"alertScore": 7,
"alertObject": [obj],
}
}
isRWMount(mount) {
not mount.readOnly
}
isRWMount(mount) {
mount.readOnly == false
}
#handles majority of workload resources
deny[msga] {
wl := input[_]
spec_template_spec_patterns := {"Deployment","ReplicaSet","DaemonSet","StatefulSet","Job"}
spec_template_spec_patterns[wl.kind]
volumes := wl.spec.template.spec.volumes
volume := volumes[_]
mount := wl.spec.template.spec.containers[_].volumeMounts[_]
mount.name == volume.name
volume.hostPath
isRWMount(mount)
wlname := cautils.getPodName(wl.metadata)
obj := {"volume":volume,"mount":mount,"podname": wlname}
msga := {
"packagename": "armo_builtins",
"alertMessage": sprintf("%v: %v has {%v,%v} as rw hostPath volume and volumemount pair\n\n\n", [wl.kind,wlname, volume,mount]),
"alertScore": 7,
"alertObject": [obj],
}
}
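
The two isRWMount bodies above act as a logical OR: a mount counts as read-write when readOnly is absent or explicitly false. A small illustrative Go sketch of that check, run against hypothetical volumeMounts that reference a hostPath volume:

```
package main

import "fmt"

// Mirrors the isRWMount logic above: read-write when readOnly is absent or false.
func isRWMount(mount map[string]interface{}) bool {
	ro, ok := mount["readOnly"].(bool)
	return !ok || !ro
}

func main() {
	// Hypothetical volumeMounts referring to a hostPath volume named "host-data".
	mounts := []map[string]interface{}{
		{"name": "host-data"},                    // readOnly omitted: flagged
		{"name": "host-data", "readOnly": false}, // explicitly read-write: flagged
		{"name": "host-data", "readOnly": true},  // read-only: not flagged
	}
	for _, m := range mounts {
		fmt.Println(m["name"], "read-write:", isRWMount(m))
	}
}
```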

View File

@@ -1,57 +0,0 @@
package armo_builtins
import data.kubernetes.api.client as client
import data.cautils as cautils
# input: pod
# apiversion: v1
# does:
# returns the external facing services of that pod
#
#
deny[msga] {
pod := input[_]
podns := pod.metadata.namespace
podname := cautils.getPodName(pod.metadata)
# pod := client.query_name_ns("pods", "catalog-mongo-6f468d99b4-pn242", "default")
labels := pod.body.metadata.labels
filtered_labels := json.remove(labels, ["pod-template-hash"])
cluster_resource := client.query_all(
"services"
)
services := [svc | cluster_resource.body.items[i].metadata.namespace == podns; svc := cluster_resource.body.items[i]]
service := services[_]
service.spec.selector == filtered_labels
hasSSHPorts(service)
msga := {
"alertMessage": sprintf("pod %v/%v exposed by SSH services: %v\n", [podns,podname,service]),
"packagename": "armo_builtins",
"alertScore": 7,
"alertObject": [{"pod":pod,"service":{service}}]
}
}
hasSSHPorts(service) {
port := service.spec.ports[_]
port.port == 22
}
hasSSHPorts(service) {
port := service.spec.ports[_]
port.port == 2222
}
hasSSHPorts(service) {
port := service.spec.ports[_]
port.targetPort == 22
}
hasSSHPorts(service) {
port := service.spec.ports[_]
port.targetPort == 2222
}

View File

@@ -1,33 +0,0 @@
{
"guid": "3b0467c9-488d-c244-99d0-90fbf600aaff",
"name": "[Builtin] rule-deny-access-to-secrets",
"creationTime": "2019-09-04T12:04:58.461455",
"description": "determines which users can get/list/watch secrets",
"attributes": {
"m$K8sThreatMatrix": "Credential Access::List k8s Secrets"
},
"ruleDependencies": [
{
"packageName":"cautils"
},
{
"packageName":"kubernetes.api.client"
}
],
"remediation": "",
"match": [
{
"resources": [
"Role","ClusterRole"
],
"apiVersions": [
"v1"
],
"apiGroups": [
"rbac.authorization.k8s.io"
]
}
],
"ruleLanguage": "Rego",
"rule": "\npackage armo_builtins\nimport data.kubernetes.api.client as client\nimport data.cautils as cautils\n\n\n# input: None\n# apiversion: v1\n# does: \n#\treturns roles+ related subjects in rolebinding\n\n\ndeny[msga] {\n\t# rsrc := client.query_all(\"roles\")\n\t# role := rsrc.body.items[_]\n\trole := input[_]\n\trole.kind == \"Role\"\n\trule := role.rules[_]\n\tcautils.list_contains(rule.resources,\"secrets\")\n\tcanViewSecrets(rule)\n\trbsrc := client.query_all(\"rolebindings\")\n\trolebinding := rbsrc.body.items[_]\n\trolebinding.roleRef.kind == \"Role\"\n\trolebinding.roleRef.name == role.metadata.name\n\t\n \n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following users: %v , got read secret access roles\", [rolebinding.subjects]),\n\t\t\"alert\": true,\n\t\t\"prevent\": false,\n\t\t\"alertScore\": 9,\n\t\t\"alertObject\": {\"role\":role,\"users\":rolebinding.subjects}\n\t\n\t}\n}\n\n\n\n# input: None\n# apiversion: v1\n# does: \n#\treturns clusterroles+ related subjects in rolebinding\n\n\ndeny[msga] {\n\t# rsrc := client.query_all(\"clusterroles\")\n\t# role := rsrc.body.items[_]\n\trole := input[_]\n\trole.kind == \"ClusterRole\"\n\trule := role.rules[_]\n\tcautils.list_contains(rule.resources,\"secrets\")\n\tcanViewSecrets(rule)\n\trbsrc := client.query_all(\"rolebindings\")\n\trolebinding := rbsrc.body.items[_]\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\t\n \n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following users: %v , got read secret access roles\", [rolebinding.subjects]),\n\t\t\"alert\": true,\n\t\t\"prevent\": false,\n\t\t\"alertScore\": 9,\n\t\t\"alertObject\": {\"clusterrole\":role,\"users\":rolebinding.subjects}\n\t\n\t}\n}\n\n\n# input: None\n# apiversion: v1\n# does: \n#\treturns clusterroles+ related subjects in clusterrolebinding\n#\n#\ndeny[msga] {\n\t# rsrc := client.query_all(\"clusterroles\")\n\t# role := rsrc.body.items[_]\n\trole := input[_]\n\trole.kind == \"ClusterRole\"\n\trule := role.rules[_]\n\tcautils.list_contains(rule.resources,\"secrets\")\n\tcanViewSecrets(rule)\n\trbsrc := client.query_all(\"clusterrolebindings\")\n\trolebinding := rbsrc.body.items[_]\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\t\n \n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following users: %v , got read secret access roles\", [rolebinding.subjects]),\n\t\t\"alert\": true,\n\t\t\"prevent\": false,\n\t\t\"alertScore\": 9,\n\t\t\"alertObject\": {\"clusterrole\":role,\"users\":rolebinding.subjects}\n\t\n\t}\n}\n\ncanViewSecrets(rule) {\n\tcautils.list_contains(rule.verbs,\"get\")\n}\ncanViewSecrets(rule) {\n\tcautils.list_contains(rule.verbs,\"watch\")\n}\n"
}

View File

@@ -1,34 +0,0 @@
{
"guid": "3b0467c9-488d-c244-99d0-90fbf600aaff",
"name": "[Builtin] rule-can-ssh-to-pod",
"creationTime": "2019-09-04T12:04:58.461455",
"description": "denies pods with SSH ports opened(22/222)",
"attributes": {
"microsoftK8sThreatMatrix": "val1"
},
"ruleDependencies": [
{
"packageName":"cautils"
},
{
"packageName":"kubernetes.api.client"
}
],
"remediation": "create a network policy that protects SSH ports",
"match": [
{
"resources": [
"Pods"
],
"apiVersions": [
"v1"
],
"apiGroups": [
"*"
]
}
],
"ruleLanguage": "Rego",
"rule": "\npackage armo_builtins\nimport data.kubernetes.api.client as client\nimport data.cautils as cautils\n\n# input: pod\n# apiversion: v1\n# does: \n#\treturns the external facing services of that pod\n#\n#\ndeny[msga] {\n\tpod := input[_]\n\tpodns := pod.metadata.namespace\n\tpodname := cautils.getPodName(pod.metadata)\n\t# pod := client.query_name_ns(\"pods\", \"catalog-mongo-6f468d99b4-pn242\", \"default\")\n\tlabels := pod.body.metadata.labels\n\tfiltered_labels := json.remove(labels, [\"pod-template-hash\"])\n \n\t cluster_resource := client.query_all(\n\t \t\"services\"\n\t )\n\n\tservices := [svc | cluster_resource.body.items[i].metadata.namespace == podns; svc := cluster_resource.body.items[i]]\n\tservice := \tservices[_]\n\tservice.spec.selector == filtered_labels\n \n\thasSSHPorts(service)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod %v/%v exposed by SSH services: %v\n\", [podns,podname,service]),\n\t\t\"alert\": true,\n\t\t\"prevent\": false,\n\t\t\"alertScore\": 7,\n\t\t\"alertObject\": [{\"pod\":pod,\"service\":{service}}]\n\t\n\t}\n}\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.port == 22\n}\n\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.port == 2222\n}\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.targetPort == 22\n}\n\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.targetPort == 2222\n}\n"
}

View File

@@ -1,33 +0,0 @@
{
"guid": "",
"name": "[Builtin] rule-identify-blacklisted-image-registries",
"creationTime": "",
"description": "Identifying if pod container images are from unallowed registries",
"attributes": {
"m$K8sThreatMatrix": "Initial Access::Compromised images in registry"
},
"ruleDependencies": [
{
"packageName": "cautils"
},
{
"packageName": "kubernetes.api.client"
}
],
"remediation": "Use images from safe registry",
"match": [
{
"resources": [
"Pods"
],
"apiVersions": [
"v1"
],
"apiGroups": [
"*"
]
}
],
"ruleLanguage": "Rego",
"rule": "\npackage armo_builtins\n# Check for images from blacklisted repos\n\nuntrusted_registries(z) = x {\n\tx := [\"015253967648.dkr.ecr.eu-central-1.amazonaws.com/\"]\t\n}\n\npublic_registries(z) = y{\n\ty := [\"quay.io/kiali/\",\"quay.io/datawire/\",\"quay.io/keycloak/\",\"quay.io/bitnami/\"]\n}\n\nuntrustedImageRepo[msga] {\n\tpod := input[_]\n\tk := pod.kind\n\tk == \"Pod\"\n\tcontainer := pod.spec.containers[_]\n\timage := container.image\n repo_prefix := untrusted_registries(image)[_]\n\tstartswith(image, repo_prefix)\n\tselfLink := pod.metadata.selfLink\n\tcontainerName := container.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' in [%s] comes from untrusted registry\", [image, containerName, selfLink]),\n\t\t\"alert\": true,\n\t\t\"prevent\": false,\n\t\t\"alertScore\": 2,\n\t\t\"alertObject\": [{\"pod\":pod}]\n\t}\n}\n\nuntrustedImageRepo[msga] {\n pod := input[_]\n\tk := pod.kind\n\tk == \"Pod\"\n\tcontainer := pod.spec.containers[_]\n\timage := container.image\n repo_prefix := public_registries(image)[_]\n\tstartswith(pod, repo_prefix)\n\tselfLink := input.metadata.selfLink\n\tcontainerName := container.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' in [%s] comes from public registry\", [image, containerName, selfLink]),\n\t\t\"alert\": true,\n\t\t\"prevent\": false,\n\t\t\"alertScore\": 1,\n\t\t\"alertObject\": [{\"pod\":pod}]\n\t}\n}"
}

View File

@@ -1,31 +0,0 @@
{
"guid": "3b0467c9-488d-c244-99d0-90fbf600aaff",
"name": "[Builtin] rule-pod-external-facing",
"creationTime": "2019-09-04T12:04:58.461455",
"description": "denies pods with external facing services, grabs related services",
"attributes": {
"microsoftK8sThreatMatrix": "val1"
},
"ruleDependencies": [
{
"packageName":"kubernetes.api.client"
}
],
"remediation": "create a network policy that controls which protect your cluster from unwanted connections and the outside world",
"match": [
{
"resources": [
"Pods"
],
"apiVersions": [
"v1"
],
"apiGroups": [
"*"
]
}
],
"ruleLanguage": "Rego",
"rule": "\npackage armo_builtins\n\nimport data.kubernetes.api.client as client\n\n\n# input: pod\n# apiversion: v1\n# does: \n#\treturns the external facing services of that pod\n#\n#\ndeny[msga] {\n\tpod := input[_]\n\tpodns := pod.metadata.namespace\n\tpodname := getName(pod.metadata)\n\t# pod := client.query_name_ns(\"pods\",\"frontend-86c5ffb485-kfp9d\", \"default\")\n\tlabels := pod.body.metadata.labels\n\tfiltered_labels := json.remove(labels, [\"pod-template-hash\"])\n \n\t cluster_resource := client.query_all(\n\t \t\"services\"\n\t )\n\n\n\tservices := [svc | cluster_resource.body.items[i].metadata.namespace == podns; svc := cluster_resource.body.items[i]]\n\tservice := \tservices[_]\n\tnp_or_lb := {\"NodePort\", \"LoadBalancer\"}\n\tnp_or_lb[service.spec.type]\n\tservice.spec.selector == filtered_labels\n \n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod %v/%v exposed services: %v\n\", [podns,podname,service]),\n\t\t\"alert\": true,\n\t\t\"prevent\": false,\n\t\t\"alertScore\": 7,\n\t\t\"alertObject\": {\"service\":service,\"labels\":filtered_labels, \"podname\":podname,\"namespace\":podns}\n\t\n\t}\n}\n\ngetName(metadata) = name {\n\tname := metadata.generateName\n}\ngetName(metadata) = name {\n\tname := metadata.name\n}\n"
}

View File

@@ -1,31 +0,0 @@
{
"guid": "3b0467c9-488d-c244-99d0-90fbf600aaff",
"name": "[Builtin] alert-any-hostpath",
"creationTime": "2019-09-04T12:04:58.461455",
"description": "determines if any workload contains a hostPath volume",
"attributes": {
"m$K8sThreatMatrix": "Privilege Escalation::hostPath mount"
},
"ruleDependencies": [
{
"packageName":"cautils"
}
],
"remediation": "consider if hostPath is really necessary - reading sensitive data like hostPath credentials might endanger cluster, if so consider encrypting the data",
"match": [
{
"resources": [
"Deployment","ReplicaSet","DaemonSet","StatefulSet","Job","Pod"
],
"apiVersions": [
"v1"
],
"apiGroups": [
"*"
]
}
],
"ruleLanguage": "Rego",
"rule": "\npackage armo_builtins\nimport data.kubernetes.api.client as client\nimport data.cautils as cautils\n\n# input: pod\n# apiversion: v1\n# does: \n#\treturns hostPath volumes\n#\n#\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volumes := pod.spec.volumes\n volume := volumes[_]\n # crsrcs.body.spec.containers[_].volumeMounts[_].name = volume.name\n volume.hostPath\n podname := cautils.getPodName(pod.metadata)\n obj := {\"volume\":volume,\"podname\": podname}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod: %v has {%v,%v} ashostPath volume \n\n\n\", [podname, volume]),\n\t\t\"alert\": true,\n\t\t\"prevent\": false,\n\t\t\"alertScore\": 7,\n\t\t\"alertObject\": [obj],\n\t\n\t}\n}\n\nisRWMount(mount) {\n not mount.readOnly\n}\nisRWMount(mount) {\n mount.readOnly == false\n}\n\n\n#handles majority of workload resources\ndeny[msga] {\n\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volumes := wl.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n wlname := cautils.getPodName(wl.metadata)\n obj := {\"volume\":volume,\"podname\": wlname}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has {%v,%v} as hostPath volume\n\n\n\", [wl.kind,wlname, volume]),\n\t\t\"alert\": true,\n\t\t\"prevent\": false,\n\t\t\"alertScore\": 7,\n\t\t\"alertObject\": [obj],\n\t\n\t}\n}\n\n\n"
}

View File

@@ -1,27 +0,0 @@
{
"guid": "82f19070-2826-4fe4-a079-f5f7e7a1b04d",
"name": "[Builtin] instance-metadata-api-access",
"attributes": {
"m$K8sThreatMatrix": "Credential Access::Instance Metadata API"
},
"creationTime": "2021-04-25T10:48:48.861806",
"rule": "package armo_builtins\n# Check for images from blacklisted repos\n\nmetadata_azure(z) = http.send({\n\t\"url\": \"http://169.254.169.254/metadata/instance?api-version=2020-09-01\",\n\t\"method\": \"get\",\n\t\"headers\": {\"Metadata\": \"true\"},\n\t\"raise_error\": true,\t\n})\n\nmetadata_gcp(z) = http.send({\n\t\"url\": \"http://169.254.169.254/computeMetadata/v1/?alt=json&recursive=true\",\n\t\"method\": \"get\",\n\t\"headers\": {\"Metadata-Flavor\": \"Google\"},\n\t\"raise_error\": true,\t\n})\n\nmetadata_aws(z) = metadata_object { \n\thostname := http.send({\n\t\"url\": \"http://169.254.169.254/latest/meta-data/local-hostname\",\n\t\"method\": \"get\",\n\t\"raise_error\": true,\t\n })\n\tmetadata_object := {\n\t\t\"raw_body\": hostname.raw_body,\n\t\t\"hostname\" : hostname.raw_body,\n\t\t\"status_code\" : hostname.status_code\n\t}\n}\n\nazure_metadata[msga] {\t\n\tmetadata_object := metadata_azure(\"aaa\")\n\tmetadata_object.status_code == 200\n\tnode_name := metadata_object.body.compute.name\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Node '%s' has access to Instance Metadata Services of Azure.\", [node_name]),\n\t\t\"alert\": true,\n\t\t\"prevent\": false,\n\t\t\"alertScore\": 1,\n\t\t\"alertObject\": [{\"nodeMetadata\":metadata_object.body}]\n\t}\n}\n\ngcp_metadata[msga] {\t\n\tmetadata_object := metadata_gcp(\"aaa\")\n\tmetadata_object.status_code == 200\n\tnode_name := metadata_object.body.instance.hostname\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Node '%s' has access to Instance Metadata Services of GCP.\", [node_name]),\n\t\t\"alert\": true,\n\t\t\"prevent\": false,\n\t\t\"alertScore\": 1,\n\t\t\"alertObject\": [{\"nodeMetadata\": metadata_object.raw_body}]\n\t}\n}\n\naws_metadata[msga] {\t\n\tmetadata_object := metadata_aws(\"aaa\")\n\tmetadata_object.status_code == 200\n\tnode_name := metadata_object.hostname\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Node '%s' has access to Instance Metadata Services of AWS.\", [node_name]),\n\t\t\"alert\": true,\n\t\t\"prevent\": false,\n\t\t\"alertScore\": 1,\n\t\t\"alertObject\": [{\"nodeMetadata\": metadata_object.raw_body}]\n\t}\n}",
"ruleLanguage": "Rego",
"match": [
{
"apiGroups": [
"*"
],
"apiVersions": [
"*"
],
"resources": [
"nodes"
]
}
],
"ruleDependencies": [],
"description": "Checks if there is access from the nodes to cloud prividers instance metadata services",
"remediation": "From https://attack.mitre.org/techniques/T1552/005/ :Option A: Disable or Remove Feature or Program, Option B: Filter Network Traffic",
"ruleQuery": ""
}
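
The rule string above probes the cloud instance-metadata endpoints with Rego's http.send (Azure, GCP and AWS branches). A rough, illustrative Go equivalent of the AWS branch only, using the same endpoint as the rule, with a short timeout added so it fails fast when run outside a cloud VM:

```
package main

import (
	"fmt"
	"io"
	"net/http"
	"time"
)

func main() {
	client := &http.Client{Timeout: 2 * time.Second}
	// Same endpoint the rule's metadata_aws helper queries via http.send.
	resp, err := client.Get("http://169.254.169.254/latest/meta-data/local-hostname")
	if err != nil {
		fmt.Println("no access to the AWS instance metadata service:", err)
		return
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	if resp.StatusCode == 200 {
		// This is the condition the rule alerts on: the node can reach IMDS.
		fmt.Println("node has access to AWS instance metadata, hostname:", string(body))
	}
}
```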

View File

@@ -1,30 +0,0 @@
{
"guid": "[Builtin] 3b0467c9-488d-c244-99d0-90fbf600aaff",
"name": "rule-deny-cronjobs",
"creationTime": "2019-09-04T12:04:58.461455",
"description": "determines if it's cronjob",
"attributes": {
"m$K8sThreatMatrix": "Persistence::Cronjob"
},
"ruleDependencies": [
{
"packageName":"cautils"
}
],
"remediation": "",
"match": [
{
"resources": [
"CronJob"
],
"apiVersions": [
"v1beta1"
],
"apiGroups": [
"batch"
]
}
],
"ruleLanguage": "Rego",
"rule": "\npackage armo_builtins\n\n# import data.kubernetes.api.client as client\nimport data.cautils as cautils\n\n\n# alert cronjobs\n\n#handles cronjob\ndeny[msga] {\n\n\twl := input[_]\n\twl.kind == \"CronJob\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined: %v\", [wl]),\n\t\t\"alert\": true,\n\t\t\"prevent\": false,\n\t\t\"alertScore\": 2,\n\t\t\"alertObject\": wl\n\t\n\t}\n}\n"
}

View File

@@ -1,29 +0,0 @@
{
"guid": "",
"name": "[Builtin] rule-privilege-escalation",
"creationTime": "2019-09-04T12:04:58.461455",
"description": "determines if pods/deployments defined as privileged true",
"attributes": {
"mitre": "Privilege Escalation",
"mitreCode": "TA0004",
"m$K8sThreatMatrix": "Privilege Escalation::privileged container"
},
"ruleDependencies": [
],
"remediation": "avoid defining pods as privilleged",
"match": [
{
"resources": [
"Deployment","ReplicaSet","DaemonSet","StatefulSet","Job","Pod","CronJob"
],
"apiVersions": [
"v1"
],
"apiGroups": [
"*"
]
}
],
"ruleLanguage": "Rego",
"rule": "\npackage armo_builtins\n\nimport data.kubernetes.api.client as client\nimport data.designators as scope\nimport data.cautils as cautils\n\n\n# Deny mutating action unless user is in group owning the resource\n\n\n#privileged pods\ndeny[msga] {\n\n \n\tpod := input[_]\n\tcontainers := pod.spec.containers[_]\n\tcontainers.securityContext.privileged == true\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following pods are defined as privileged: %v\", [pod]),\n\t\t\"alert\": true,\n\t\t\"prevent\": false,\n\t\t\"alertScore\": 3,\n\t\t\"alertObject\": pod,\n\t\n\t}\n}\n\n\n#handles majority of workload resources\ndeny[msga] {\n\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainers := wl.spec.template.spec.containers[_]\n\tcontainers.securityContext.privileged == true\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following workloads are defined as privileged: %v\", [wl]),\n\t\t\"alert\": true,\n\t\t\"prevent\": false,\n\t\t\"alertScore\": 3,\n\t\t\"alertObject\": wl,\n\t\n\t}\n}\n\n\n\n#handles cronjob\ndeny[msga] {\n\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainers := wl.spec.jobTemplate.spec.template.spec.containers[_]\n\tcontainers.securityContext.privileged == true\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined as privileged: %v\", [wl]),\n\t\t\"alert\": true,\n\t\t\"prevent\": false,\n\t\t\"alertScore\": 3,\n\t\t\"alertObject\": wl,\n\t\n\t}\n}\n\n"
}

View File

@@ -1,31 +0,0 @@
{
"guid": "3b0467c9-488d-c244-99d0-90fbf600aaff",
"name": "[Builtin] alert-rw-hostpath",
"creationTime": "2019-09-04T12:04:58.461455",
"description": "determines if any workload contains a hostPath volume with rw permissions",
"attributes": {
"m$K8sThreatMatrix": "Persistance::Writable hostPath mount"
},
"ruleDependencies": [
{
"packageName":"cautils"
}
],
"remediation": "consider if hostPath is really necessary- sensitive data like hostPath credentials might endanger cluster, if so consider encrypting the data",
"match": [
{
"resources": [
"Deployment","ReplicaSet","DaemonSet","StatefulSet","Job","Pod"
],
"apiVersions": [
"v1"
],
"apiGroups": [
"*"
]
}
],
"ruleLanguage": "Rego",
"rule": "\"\\npackage armo_builtins\\nimport data.kubernetes.api.client as client\\nimport data.cautils as cautils\\n\\n# input: pod\\n# apiversion: v1\\n# does: \\n#\\treturns hostPath volumes\\n#\\n#\\ndeny[msga] {\\n pod := input[_]\\n pod.kind == \\\"Pod\\\"\\n volumes := pod.spec.volumes\\n volume := volumes[_]\\n # crsrcs.body.spec.containers[_].volumeMounts[_].name = volume.name\\n volume.hostPath\\n podname := cautils.getPodName(pod.metadata)\\n obj := {\\\"volume\\\":volume,\\\"podname\\\": podname}\\n\\n\\tmsga := {\\n\\t\\t\\\"alertMessage\\\": sprintf(\\\"pod: %v has {%v,%v} ashostPath volume \\n\\n\\n\\\", [podname, volume]),\\n\\t\\t\\\"alert\\\": true,\\n\\t\\t\\\"prevent\\\": false,\\n\\t\\t\\\"alertScore\\\": 7,\\n\\t\\t\\\"alertObject\\\": [obj],\\n\\t\\n\\t}\\n}\\n\\nisRWMount(mount) {\\n not mount.readOnly\\n}\\nisRWMount(mount) {\\n mount.readOnly == false\\n}\\n\\n\\n#handles majority of workload resources\\ndeny[msga] {\\n\\n\\twl := input[_]\\n\\tspec_template_spec_patterns := {\\\"Deployment\\\",\\\"ReplicaSet\\\",\\\"DaemonSet\\\",\\\"StatefulSet\\\",\\\"Job\\\"}\\n\\tspec_template_spec_patterns[wl.kind]\\n volumes := wl.spec.template.spec.volumes\\n volume := volumes[_]\\n volume.hostPath\\n wlname := cautils.getPodName(wl.metadata)\\n obj := {\\\"volume\\\":volume,\\\"podname\\\": wlname}\\n\\n\\tmsga := {\\n\\t\\t\\\"alertMessage\\\": sprintf(\\\"%v: %v has {%v,%v} as hostPath volume\\n\\n\\n\\\", [wl.kind,wlname, volume]),\\n\\t\\t\\\"alert\\\": true,\\n\\t\\t\\\"prevent\\\": false,\\n\\t\\t\\\"alertScore\\\": 7,\\n\\t\\t\\\"alertObject\\\": [obj],\\n\\t\\n\\t}\\n}\\n\\n\\n\""
}

View File

@@ -1,119 +0,0 @@
package resources
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"github.com/armosec/kubescape/cautils/k8sinterface"
"github.com/golang/glog"
"github.com/open-policy-agent/opa/storage"
"github.com/open-policy-agent/opa/storage/inmem"
"github.com/open-policy-agent/opa/util"
"k8s.io/client-go/rest"
)
var (
RegoDependenciesPath = "/resources/rego/dependencies"
)
type RegoDependenciesData struct {
K8sConfig RegoK8sConfig `json:"k8sconfig"`
}
type RegoK8sConfig struct {
Token string `json:"token"`
IP string `json:"ip"`
Host string `json:"host"`
Port string `json:"port"`
CrtFile string `json:"crtfile"`
ClientCrtFile string `json:"clientcrtfile"`
ClientKeyFile string `json:"clientkeyfile"`
// ClientKeyFile string `json:"crtfile"`
}
func NewRegoDependenciesDataMock() *RegoDependenciesData {
return NewRegoDependenciesData(k8sinterface.GetK8sConfig())
}
func NewRegoDependenciesData(k8sConfig *rest.Config) *RegoDependenciesData {
regoDependenciesData := RegoDependenciesData{}
if k8sConfig != nil {
regoDependenciesData.K8sConfig = *NewRegoK8sConfig(k8sConfig)
}
return &regoDependenciesData
}
func NewRegoK8sConfig(k8sConfig *rest.Config) *RegoK8sConfig {
host := k8sConfig.Host
if host == "" {
ip := os.Getenv("KUBERNETES_SERVICE_HOST")
port := os.Getenv("KUBERNETES_SERVICE_PORT")
host = fmt.Sprintf("https://%s:%s", ip, port)
}
token := ""
if k8sConfig.BearerToken != "" {
token = fmt.Sprintf("Bearer %s", k8sConfig.BearerToken)
}
regoK8sConfig := RegoK8sConfig{
Token: token,
Host: host,
CrtFile: k8sConfig.CAFile,
ClientCrtFile: k8sConfig.CertFile,
ClientKeyFile: k8sConfig.KeyFile,
}
return &regoK8sConfig
}
func (data *RegoDependenciesData) TOStorage() (storage.Store, error) {
var jsonObj map[string]interface{}
bytesData, err := json.Marshal(*data)
if err != nil {
return nil, err
}
// glog.Infof("RegoDependenciesData: %s", bytesData)
if err := util.UnmarshalJSON(bytesData, &jsonObj); err != nil {
return nil, err
}
return inmem.NewFromObject(jsonObj), nil
}
// LoadRegoDependenciesFromDir loads the policies list from *.rego file in given directory
func LoadRegoFiles(dir string) map[string]string {
modules := make(map[string]string)
// Compile the module. The keys are used as identifiers in error messages.
filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
if err == nil && strings.HasSuffix(path, ".rego") && !info.IsDir() {
content, err := ioutil.ReadFile(path)
if err != nil {
glog.Errorf("LoadRegoFiles, Failed to load: %s: %v", path, err)
} else {
modules[strings.Trim(filepath.Base(path), ".rego")] = string(content)
}
}
return nil
})
return modules
}
// LoadRegoModules loads the policies from variables
func LoadRegoModules() map[string]string {
modules := make(map[string]string)
modules["cautils"] = RegoCAUtils
modules["designators"] = RegoDesignators
modules["kubernetes.api.client"] = RegoKubernetesApiClient
return modules
}
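
A minimal sketch of how the data built by this (pre-refactor) resources package is consumed: TOStorage() turns RegoDependenciesData into an in-memory OPA store, which is attached to the evaluator via rego.Store so rules can read data.k8sconfig. The opaprocessor changes later in this diff show the same flow, with the package now coming from armosec/opa-utils and NewRegoDependenciesData gaining a cluster-name argument; the tiny module below is illustrative only.

```
package main

import (
	"context"
	"fmt"

	"github.com/armosec/kubescape/cautils/k8sinterface"
	resources "github.com/armosec/kubescape/cautils/opapolicy/resources"
	"github.com/open-policy-agent/opa/rego"
)

func main() {
	// Build the dependency data from the current kubeconfig (a nil config is tolerated).
	depData := resources.NewRegoDependenciesData(k8sinterface.GetK8sConfig())
	store, err := depData.TOStorage() // in-memory store exposing data.k8sconfig
	if err != nil {
		panic(err)
	}

	// Illustrative rule that only reads the injected config; the real rules come from
	// LoadRegoModules() / LoadRegoFiles(RegoDependenciesPath).
	const mod = `package armo_builtins

host := data.k8sconfig.host`

	r := rego.New(
		rego.Query("data.armo_builtins.host"),
		rego.Module("example.rego", mod),
		rego.Store(store),
	)
	rs, err := r.Eval(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println(rs[0].Expressions[0].Value) // the API server host taken from the store
}
```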

View File

@@ -1,17 +0,0 @@
package resources
import (
"os"
"path/filepath"
"testing"
)
func TestLoadRegoDependenciesFromDir(t *testing.T) {
dir, _ := os.Getwd()
t.Errorf("%s", filepath.Join(dir, "rego/dependencies"))
return
// modules := LoadRegoDependenciesFromDir("")
// if len(modules) == 0 {
// t.Errorf("modules len == 0")
// }
}

View File

@@ -4,22 +4,24 @@ import (
"path/filepath"
"github.com/armosec/kubescape/cautils/getter"
"github.com/armosec/kubescape/cautils/opapolicy"
"github.com/armosec/opa-utils/reporthandling"
)
type ScanInfo struct {
Getters
PolicyIdentifier opapolicy.PolicyIdentifier
UseExceptions string
UseFrom string
UseDefault bool
Format string
Output string
ExcludedNamespaces string
InputPatterns []string
Silent bool
FailThreshold uint16
DoNotSendResults bool
PolicyIdentifier reporthandling.PolicyIdentifier
UseExceptions string // Load exceptions configuration
UseFrom string // Load framework from local file (instead of download). Use when running offline
UseDefault bool // Load framework from cached file (instead of download). Use when running offline
Format string // Format results (table, json, junit ...)
Output string // Store results in an output file, Output file name
ExcludedNamespaces string // DEPRECATED?
InputPatterns []string // Yaml files input patterns
Silent bool // Silent mode - Do not print progress logs
FailThreshold uint16 // Failure score threshold
Submit bool // Submit results to Armo BE
Local bool // Do not submit results
Account string // account ID
}
type Getters struct {
@@ -40,7 +42,7 @@ func (scanInfo *ScanInfo) setUseExceptions() {
// load exceptions from file
scanInfo.ExceptionsGetter = getter.NewLoadPolicy(scanInfo.UseExceptions)
} else {
scanInfo.ExceptionsGetter = getter.NewArmoAPI()
scanInfo.ExceptionsGetter = getter.GetArmoAPIConnector()
}
}
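
A hypothetical example of how the ScanInfo fields shown above fit together, wiring an offline scan of local manifests; the values are illustrative, not defaults.

```
package main

import (
	"fmt"

	"github.com/armosec/kubescape/cautils"
	"github.com/armosec/opa-utils/reporthandling"
)

func main() {
	// Illustrative only: an offline scan of local YAML files with results kept local.
	scanInfo := cautils.ScanInfo{
		PolicyIdentifier: reporthandling.PolicyIdentifier{
			Kind: reporthandling.KindFramework,
			Name: "nsa",
		},
		UseFrom:            "nsa.json",         // framework saved earlier via `kubescape download framework nsa`
		InputPatterns:      []string{"*.yaml"}, // scan local files instead of a running cluster
		Format:             "json",
		Output:             "results.json",
		ExcludedNamespaces: "kube-system,kube-public",
		FailThreshold:      50,
		Submit:             false, // keep the results local
	}
	fmt.Printf("%+v\n", scanInfo)
}
```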

View File

@@ -13,7 +13,13 @@ func TestConvertLabelsToString(t *testing.T) {
spilltedA := strings.Split(rsrt, ";")
spilltedB := strings.Split(str, ";")
for i := range spilltedA {
if spilltedA[i] != spilltedB[i] {
exists := false
for j := range spilltedB {
if spilltedB[j] == spilltedA[i] {
exists = true
}
}
if !exists {
t.Errorf("%s != %s", spilltedA[i], spilltedB[i])
}
}

View File

@@ -4,9 +4,9 @@ import (
"fmt"
"strings"
"github.com/armosec/k8s-interface/k8sinterface"
"github.com/armosec/kubescape/cautils"
"github.com/armosec/kubescape/cautils/getter"
"github.com/armosec/kubescape/cautils/k8sinterface"
"github.com/spf13/cobra"
)
@@ -31,7 +31,7 @@ var getCmd = &cobra.Command{
key := keyValue[0]
k8s := k8sinterface.NewKubernetesApi()
clusterConfig := cautils.NewClusterConfig(k8s, getter.NewArmoAPI())
clusterConfig := cautils.NewClusterConfig(k8s, getter.GetArmoAPIConnector())
val, err := clusterConfig.GetValueByKeyFromConfigMap(key)
if err != nil {
if err.Error() == "value does not exist." {

View File

@@ -4,9 +4,9 @@ import (
"fmt"
"strings"
"github.com/armosec/k8s-interface/k8sinterface"
"github.com/armosec/kubescape/cautils"
"github.com/armosec/kubescape/cautils/getter"
"github.com/armosec/kubescape/cautils/k8sinterface"
"github.com/spf13/cobra"
)
@@ -30,7 +30,7 @@ var setCmd = &cobra.Command{
data := keyValue[1]
k8s := k8sinterface.NewKubernetesApi()
clusterConfig := cautils.NewClusterConfig(k8s, getter.NewArmoAPI())
clusterConfig := cautils.NewClusterConfig(k8s, getter.GetArmoAPIConnector())
if err := clusterConfig.SetKeyValueInConfigmap(key, data); err != nil {
return err
}

View File

@@ -10,7 +10,6 @@ var configCmd = &cobra.Command{
Short: "Set configuration",
Long: ``,
Run: func(cmd *cobra.Command, args []string) {
},
}

View File

@@ -11,7 +11,7 @@ import (
var downloadInfo cautils.DownloadInfo
var downloadCmd = &cobra.Command{
Use: "download framework <framework-name>",
Use: fmt.Sprintf("download framework <framework-name> [flags]\nSupported frameworks: %s", validFrameworks),
Short: "Download framework controls",
Long: ``,
Args: func(cmd *cobra.Command, args []string) error {

View File

@@ -1,29 +1,28 @@
package cmd
import (
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"strings"
"github.com/armosec/armoapi-go/armotypes"
"github.com/armosec/k8s-interface/k8sinterface"
"github.com/armosec/kubescape/cautils"
"github.com/armosec/kubescape/cautils/armotypes"
"github.com/armosec/kubescape/cautils/getter"
"github.com/armosec/kubescape/cautils/k8sinterface"
"github.com/armosec/kubescape/cautils/opapolicy"
"github.com/armosec/kubescape/opaprocessor"
"github.com/armosec/kubescape/policyhandler"
"github.com/armosec/kubescape/resultshandling"
"github.com/armosec/kubescape/resultshandling/printer"
"github.com/armosec/kubescape/resultshandling/reporter"
"github.com/armosec/opa-utils/reporthandling"
"github.com/spf13/cobra"
)
var scanInfo cautils.ScanInfo
var supportedFrameworks = []string{"nsa"}
var supportedFrameworks = []string{"nsa", "mitre"}
var validFrameworks = strings.Join(supportedFrameworks, ", ")
type CLIHandler struct {
policyHandler *policyhandler.PolicyHandler
@@ -31,7 +30,8 @@ type CLIHandler struct {
}
var frameworkCmd = &cobra.Command{
Use: "framework <framework name> [`<glob patter>`/`-`] [flags]",
Use: fmt.Sprintf("framework <framework name> [`<glob pattern>`/`-`] [flags]\nSupported frameworks: %s", validFrameworks),
Short: fmt.Sprintf("The framework you wish to use. Supported frameworks: %s", strings.Join(supportedFrameworks, ", ")),
Long: "Execute a scan on a running Kubernetes cluster or `yaml`/`json` files (use glob) or `-` for stdin",
ValidArgs: supportedFrameworks,
@@ -39,24 +39,24 @@ var frameworkCmd = &cobra.Command{
if len(args) < 1 && !(cmd.Flags().Lookup("use-from").Changed) {
return fmt.Errorf("requires at least one argument")
} else if len(args) > 0 {
if !isValidFramework(args[0]) {
if !isValidFramework(strings.ToLower(args[0])) {
return fmt.Errorf(fmt.Sprintf("supported frameworks: %s", strings.Join(supportedFrameworks, ", ")))
}
}
return nil
},
RunE: func(cmd *cobra.Command, args []string) error {
scanInfo.PolicyIdentifier = opapolicy.PolicyIdentifier{}
scanInfo.PolicyIdentifier.Kind = opapolicy.KindFramework
scanInfo.PolicyIdentifier = reporthandling.PolicyIdentifier{}
scanInfo.PolicyIdentifier.Kind = reporthandling.KindFramework
if !(cmd.Flags().Lookup("use-from").Changed) {
scanInfo.PolicyIdentifier.Name = args[0]
scanInfo.PolicyIdentifier.Name = strings.ToLower(args[0])
}
if len(args) > 0 {
if len(args[1:]) == 0 || args[1] != "-" {
scanInfo.InputPatterns = args[1:]
} else { // store stout to file
tempFile, err := ioutil.TempFile(".", "tmp-kubescape*.yaml")
tempFile, err := os.CreateTemp(".", "tmp-kubescape*.yaml")
if err != nil {
return err
}
@@ -86,30 +86,32 @@ func isValidFramework(framework string) bool {
func init() {
scanCmd.AddCommand(frameworkCmd)
scanInfo = cautils.ScanInfo{}
frameworkCmd.Flags().StringVar(&scanInfo.UseFrom, "use-from", "", "Path to load framework from")
frameworkCmd.Flags().BoolVar(&scanInfo.UseDefault, "use-default", false, "Load framework from default path")
frameworkCmd.Flags().StringVar(&scanInfo.UseExceptions, "exceptions", "", "Path to file containing list of exceptions")
frameworkCmd.Flags().StringVarP(&scanInfo.ExcludedNamespaces, "exclude-namespaces", "e", "", "Namespaces to exclude from check")
frameworkCmd.Flags().StringVarP(&scanInfo.Format, "format", "f", "pretty-printer", `Output format. supported formats: "pretty-printer"/"json"/"junit"`)
frameworkCmd.Flags().StringVarP(&scanInfo.Output, "output", "o", "", "Output file. print output to file and not stdout")
frameworkCmd.Flags().StringVar(&scanInfo.UseFrom, "use-from", "", "Load local framework object from specified path. If not used will download latest")
frameworkCmd.Flags().BoolVar(&scanInfo.UseDefault, "use-default", false, "Load local framework object from default path. If not used will download latest")
frameworkCmd.Flags().StringVar(&scanInfo.UseExceptions, "exceptions", "", "Path to an exceptions obj. If not set will download exceptions from Armo management portal")
frameworkCmd.Flags().StringVarP(&scanInfo.ExcludedNamespaces, "exclude-namespaces", "e", "", "Namespaces to exclude from scanning. Recommended: kube-system, kube-public")
frameworkCmd.Flags().StringVarP(&scanInfo.Format, "format", "f", "pretty-printer", `Output format. Supported formats: "pretty-printer"/"json"/"junit"`)
frameworkCmd.Flags().StringVarP(&scanInfo.Output, "output", "o", "", "Output file. Print output to file and not stdout")
frameworkCmd.Flags().BoolVarP(&scanInfo.Silent, "silent", "s", false, "Silent progress messages")
frameworkCmd.Flags().Uint16VarP(&scanInfo.FailThreshold, "fail-threshold", "t", 0, "Failure threshold is the percent bellow which the command fails and returns exit code -1")
frameworkCmd.Flags().BoolVarP(&scanInfo.DoNotSendResults, "results-locally", "", false, "Kubescape sends scan results to Armosec backend to allow users to control exceptions and maintain chronological scan results. Use this flag if you do not wish to use these features")
frameworkCmd.Flags().Uint16VarP(&scanInfo.FailThreshold, "fail-threshold", "t", 0, "Failure threshold is the percent below which the command fails and returns exit code 1")
frameworkCmd.Flags().BoolVarP(&scanInfo.Submit, "submit", "", false, "Send the scan results to Armo management portal where you can see the results in a user-friendly UI, choose your preferred compliance framework, check risk results history and trends, manage exceptions, get remediation recommendations and much more. By default the results are not submitted")
frameworkCmd.Flags().BoolVarP(&scanInfo.Local, "keep-local", "", false, "Do not send the scan results to the Armo backend. Use this flag if you ran with the '--submit' flag in the past and do not want to submit your current scan results")
frameworkCmd.Flags().StringVarP(&scanInfo.Account, "account", "", "", "Armo portal account ID. Default will load account ID from configMap or config file")
}
func CliSetup() error {
flag.Parse()
if 100 < scanInfo.FailThreshold {
fmt.Println("bad argument: out of range threshold")
os.Exit(1)
}
flagValidation()
var k8s *k8sinterface.KubernetesApi
var clusterConfig cautils.IClusterConfig
if !scanInfo.ScanRunningCluster() {
k8sinterface.ConnectedToCluster = false
clusterConfig = cautils.NewEmptyConfig()
} else {
k8s = k8sinterface.NewKubernetesApi()
// setup cluster config
clusterConfig = cautils.ClusterConfigSetup(&scanInfo, k8s, getter.GetArmoAPIConnector())
}
processNotification := make(chan *cautils.OPASessionObj)
@@ -118,17 +120,10 @@ func CliSetup() error {
// policy handler setup
policyHandler := policyhandler.NewPolicyHandler(&processNotification, k8s)
// load cluster config
var clusterConfig cautils.IClusterConfig
if !scanInfo.DoNotSendResults && k8sinterface.ConnectedToCluster {
clusterConfig = cautils.NewClusterConfig(k8s, getter.NewArmoAPI())
} else {
clusterConfig = cautils.NewEmptyConfig()
}
if err := clusterConfig.SetCustomerGUID(); err != nil {
if err := clusterConfig.SetCustomerGUID(scanInfo.Account); err != nil {
fmt.Println(err)
}
cautils.CustomerGUID = clusterConfig.GetCustomerGUID()
cautils.ClusterName = k8sinterface.GetClusterName()
@@ -170,15 +165,15 @@ func NewCLIHandler(policyHandler *policyhandler.PolicyHandler) *CLIHandler {
func (clihandler *CLIHandler) Scan() error {
cautils.ScanStartDisplay()
policyNotification := &opapolicy.PolicyNotification{
NotificationType: opapolicy.TypeExecPostureScan,
Rules: []opapolicy.PolicyIdentifier{
policyNotification := &reporthandling.PolicyNotification{
NotificationType: reporthandling.TypeExecPostureScan,
Rules: []reporthandling.PolicyIdentifier{
clihandler.scanInfo.PolicyIdentifier,
},
Designators: armotypes.PortalDesignator{},
}
switch policyNotification.NotificationType {
case opapolicy.TypeExecPostureScan:
case reporthandling.TypeExecPostureScan:
//
if err := clihandler.policyHandler.HandleNotificationRequest(policyNotification, clihandler.scanInfo); err != nil {
return err
@@ -188,3 +183,15 @@ func (clihandler *CLIHandler) Scan() error {
}
return nil
}
func flagValidation() {
if scanInfo.Submit && scanInfo.Local {
fmt.Println("You can use `keep-local` or `submit`, but not both")
os.Exit(1)
}
if 100 < scanInfo.FailThreshold {
fmt.Println("bad argument: out of range threshold")
os.Exit(1)
}
}

View File

@@ -5,7 +5,6 @@ import (
"strings"
"github.com/armosec/kubescape/cautils"
"github.com/armosec/kubescape/cautils/getter"
"github.com/spf13/cobra"
)
@@ -31,7 +30,7 @@ var localGetCmd = &cobra.Command{
val, err := cautils.GetValueFromConfigJson(key)
if err != nil {
if err.Error() == "value does not exist." {
fmt.Printf("Could net get value from: %s, reason: %s\n", getter.GetDefaultPath(cautils.ConfigFileName+".json"), err)
fmt.Printf("Could net get value from: %s, reason: %s\n", cautils.ConfigFileFullPath(), err)
return nil
}
return err

View File

@@ -1,15 +1,29 @@
package cmd
import (
"flag"
"os"
"strings"
"github.com/armosec/kubescape/cautils/getter"
"github.com/golang/glog"
"github.com/spf13/cobra"
)
var cfgFile string
var armoBEURLs = ""
const envFlagUsage = "Send report results to specific URL. Format:<ReportReceiver>,<Backend>,<Frontend>.\n\t\tExample:report.armo.cloud,api.armo.cloud,portal.armo.cloud"
var rootCmd = &cobra.Command{
Use: "kubescape",
Short: "Kubescape is a tool for testing Kubernetes security posture",
Long: `Kubescape is a tool for testing Kubernetes security posture based on NSA specifications.`,
Long: `Kubescape is a tool for testing Kubernetes security posture based on NSA \ MITRE ATT&CK® specifications.`,
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
flag.Parse()
InitArmoBEConnector()
return nil
},
}
func Execute() {
@@ -17,9 +31,38 @@ func Execute() {
}
func init() {
flag.CommandLine.StringVar(&armoBEURLs, "environment", "", envFlagUsage)
rootCmd.PersistentFlags().StringVar(&armoBEURLs, "environment", "", envFlagUsage)
rootCmd.PersistentFlags().MarkHidden("environment")
cobra.OnInitialize(initConfig)
}
// initConfig reads in config file and ENV variables if set.
func initConfig() {
}
func InitArmoBEConnector() {
urlSlices := strings.Split(armoBEURLs, ",")
if len(urlSlices) > 3 {
glog.Errorf("Too many URLs")
os.Exit(1)
}
switch len(urlSlices) {
case 1:
switch urlSlices[0] {
case "dev":
getter.SetARMOAPIConnector(getter.NewARMOAPIDev())
case "":
getter.SetARMOAPIConnector(getter.NewARMOAPIProd())
default:
glog.Errorf("--environment flag usage: %s", envFlagUsage)
os.Exit(1)
}
case 2:
glog.Errorf("--environment flag usage: %s", envFlagUsage)
os.Exit(1)
case 3:
getter.SetARMOAPIConnector(getter.NewARMOAPICustomized(urlSlices[0], urlSlices[1], urlSlices[2]))
}
}

View File

@@ -3,7 +3,7 @@ package cmd
import (
"encoding/json"
"fmt"
"io/ioutil"
"io"
"net/http"
"github.com/spf13/cobra"
@@ -32,7 +32,7 @@ func GetLatestVersion() (string, error) {
return "", fmt.Errorf("failed to download file, status code: %s", resp.Status)
}
body, err := ioutil.ReadAll(resp.Body)
body, err := io.ReadAll(resp.Body)
if err != nil {
return "", fmt.Errorf("failed to read response body from '%s', reason: %s", latestVersion, err.Error())
}

BIN docs/discord-banner.png (new binary file, 16 KiB, not shown)

BIN docs/favicon.ico (new binary file, 15 KiB, not shown)

24
docs/index.html Normal file
View File

@@ -0,0 +1,24 @@
<html>
<head>
<title>
Kubescape Website
</title>
<link rel="icon" type="image/x-icon" href="favicon.ico">
</head>
<style>
img {
display: block;
margin-left: auto;
margin-right: auto;
}
</style>
<body style="text-align: center;">
<img src="kubescape.png" alt="Kubescap logo" style="width:20%">
<iframe src="https://discordapp.com/widget?id=893048809884643379&theme=dark" width="350" height="500"
allowtransparency="true" frameborder="0"
sandbox="allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts"></iframe>
</body>
</html>

89
docs/run-options.md Normal file
View File

@@ -0,0 +1,89 @@
<img src="kubescape.png" width="300" alt="logo" align="center">
# A more detailed look at command line arguments and options
## Simple run:
```
kubescape scan framework nsa --exclude-namespaces kube-system,kube-public
```
## Flags
| flag | default | description | options |
| --- | --- | --- | --- |
| `-e`/`--exclude-namespaces` | Scan all namespaces | Namespaces to exclude from scanning. Recommended to exclude `kube-system` and `kube-public` namespaces | |
| `-s`/`--silent` | Display progress messages | Silent progress messages | |
| `-t`/`--fail-threshold` | `0` (do not fail) | Fail the command (return exit code 1) if the result is below the threshold | `0` -> `100` |
| `-f`/`--format` | `pretty-printer` | Output format | `pretty-printer`/`json`/`junit` |
| `-o`/`--output` | print to stdout | Save the scan result to a file | |
| `--use-from` | | Load local framework object from the specified path. If not used, the latest will be downloaded | |
| `--use-default` | `false` | Load local framework object from the default path. If not used, the latest will be downloaded | `true`/`false` |
| `--exceptions` | | Path to an [exceptions obj](examples/exceptions.json). If not set, exceptions will be downloaded from the Armo management portal | |
| `--results-locally` | `false` | Kubescape sends scan results to the Armo management portal to allow users to control exceptions and maintain chronological scan results. Use this flag if you do not wish to use these features | `true`/`false` |
## Usage & Examples
### Examples
* Scan a running Kubernetes cluster with [`nsa`](https://www.nsa.gov/News-Features/Feature-Stories/Article-View/Article/2716980/nsa-cisa-release-kubernetes-hardening-guidance/) framework
```
kubescape scan framework nsa --exclude-namespaces kube-system,kube-public
```
* Scan local `yaml`/`json` files before deploying
```
kubescape scan framework nsa *.yaml
```
* Scan `yaml`/`json` files from url
```
kubescape scan framework nsa https://raw.githubusercontent.com/GoogleCloudPlatform/microservices-demo/master/release/kubernetes-manifests.yaml
```
* Output in `json` format
```
kubescape scan framework nsa --exclude-namespaces kube-system,kube-public --format json --output results.json
```
* Output in `junit xml` format
```
kubescape scan framework nsa --exclude-namespaces kube-system,kube-public --format junit --output results.xml
```
* Scan with exceptions; objects with exceptions will be presented as `warning` and not `fail` <img src="docs/new-feature.svg">
```
kubescape scan framework nsa --exceptions examples/exceptions.json
```
### Helm Support
* Render the helm chart using [`helm template`](https://helm.sh/docs/helm/helm_template/) and pipe the rendered output to Kubescape via stdin
```
helm template [NAME] [CHART] [flags] --dry-run | kubescape scan framework nsa -
```
for example:
```
helm template bitnami/mysql --generate-name --dry-run | kubescape scan framework nsa -
```
### Offline Support <img src="docs/new-feature.svg">
It is possible to run Kubescape offline!
First download the framework, then scan with the `--use-from` flag
* Download and save to a file; if no file name is specified, the framework is saved to `~/.kubescape/<framework name>.json`
```
kubescape download framework nsa --output nsa.json
```
* Scan using the downloaded framework
```
kubescape scan framework nsa --use-from nsa.json
```
Kubescape is an open source project; we welcome your feedback and ideas for improvement. We're also aiming to collaborate with the Kubernetes community to help make the tests themselves more robust and complete as Kubernetes develops.

BIN (binary image updated: 65 KiB before, 62 KiB after, not shown)

79
go.mod
View File

@@ -1,30 +1,87 @@
module github.com/armosec/kubescape
go 1.16
go 1.17
require (
github.com/aws/aws-sdk-go v1.40.30
github.com/briandowns/spinner v1.16.0
github.com/coreos/go-oidc v2.2.1+incompatible
github.com/docker/docker v20.10.8+incompatible
github.com/coreos/go-oidc v2.2.1+incompatible // indirect
github.com/docker/docker v20.10.9+incompatible // indirect
github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-units v0.4.0 // indirect
github.com/enescakir/emoji v1.0.0
github.com/fatih/color v1.12.0
github.com/francoispqt/gojay v1.2.13
github.com/francoispqt/gojay v1.2.13 // indirect
github.com/gofrs/uuid v4.0.0+incompatible
github.com/golang/glog v1.0.0
github.com/mattn/go-isatty v0.0.13
github.com/olekukonko/tablewriter v0.0.5
github.com/open-policy-agent/opa v0.31.0
github.com/open-policy-agent/opa v0.33.1
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.0.1 // indirect
github.com/satori/go.uuid v1.2.0
github.com/spf13/cobra v1.2.1
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f
golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1 // indirect
gopkg.in/yaml.v2 v2.4.0
k8s.io/api v0.22.1
k8s.io/apimachinery v0.22.1
k8s.io/client-go v0.22.1
sigs.k8s.io/controller-runtime v0.9.6
k8s.io/api v0.22.2
k8s.io/apimachinery v0.22.2
k8s.io/client-go v0.22.2
sigs.k8s.io/controller-runtime v0.10.2 // indirect
)
require (
github.com/armosec/armoapi-go v0.0.7
github.com/armosec/k8s-interface v0.0.5
github.com/armosec/opa-utils v0.0.7
github.com/armosec/utils-go v0.0.3
)
require (
cloud.google.com/go v0.81.0 // indirect
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
github.com/Azure/go-autorest/autorest v0.11.18 // indirect
github.com/Azure/go-autorest/autorest/adal v0.9.13 // indirect
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
github.com/Azure/go-autorest/logger v0.2.1 // indirect
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
github.com/OneOfOne/xxhash v1.2.8 // indirect
github.com/armosec/utils-k8s-go v0.0.1 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/form3tech-oss/jwt-go v3.2.3+incompatible // indirect
github.com/ghodss/yaml v1.0.0 // indirect
github.com/go-logr/logr v0.4.0 // indirect
github.com/gobwas/glob v0.2.3 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/google/go-cmp v0.5.5 // indirect
github.com/google/gofuzz v1.1.0 // indirect
github.com/googleapis/gnostic v0.5.5 // indirect
github.com/imdario/mergo v0.3.12 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/json-iterator/go v1.1.11 // indirect
github.com/mattn/go-colorable v0.1.8 // indirect
github.com/mattn/go-runewidth v0.0.9 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.1 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pquerna/cachecontrol v0.1.0 // indirect
github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/yashtewari/glob-intersection v0.0.0-20180916065949-5c77d914dd0b // indirect
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83 // indirect
golang.org/x/net v0.0.0-20210825183410-e898025ed96a // indirect
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf // indirect
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d // indirect
golang.org/x/text v0.3.6 // indirect
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/protobuf v1.27.1 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/square/go-jose.v2 v2.6.0 // indirect
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
k8s.io/klog/v2 v2.9.0 // indirect
k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.1.2 // indirect
sigs.k8s.io/yaml v1.2.0 // indirect
)

1135
go.sum Normal file

File diff suppressed because it is too large.

26
install.ps1 Normal file
View File

@@ -0,0 +1,26 @@
Write-Host "Installing Kubescape..." -ForegroundColor Cyan
$BASE_DIR=$env:USERPROFILE + "\.kubescape"
$packageName = "/kubescape-windows-latest"
# Get latest release url
$config = Invoke-WebRequest "https://api.github.com/repos/armosec/kubescape/releases/latest" | ConvertFrom-Json
$url = $config.html_url.Replace("/tag/","/download/")
$fullUrl = $url + $packageName
# Create a new directory if needed
New-Item -Path $BASE_DIR -ItemType "directory" -ErrorAction SilentlyContinue
# Download the binary
Invoke-WebRequest -Uri $fullUrl -OutFile $BASE_DIR\kubescape.exe
# Update user PATH if needed
$currentPath = [Environment]::GetEnvironmentVariable("Path", "User")
if (-not $currentPath.Contains($BASE_DIR)) {
$confirmation = Read-Host "Add kubescape to user path? (y/n)"
if ($confirmation -eq 'y') {
[Environment]::SetEnvironmentVariable("Path", [Environment]::GetEnvironmentVariable("Path", "User") + ";$BASE_DIR;", "User")
}
}
Write-Host "Finished Installation" -ForegroundColor Green

View File

@@ -9,31 +9,41 @@ KUBESCAPE_EXEC=kubescape
osName=$(uname -s)
if [[ $osName == *"MINGW"* ]]; then
osName=windows-latest
osName=windows
elif [[ $osName == *"Darwin"* ]]; then
osName=macos-latest
osName=macos
else
osName=ubuntu-latest
osName=ubuntu
fi
GITHUB_OWNER=armosec
DOWNLOAD_URL=$(curl --silent "https://api.github.com/repos/$GITHUB_OWNER/kubescape/releases/latest" | grep -o "browser_download_url.*${osName}.*")
DOWNLOAD_URL=${DOWNLOAD_URL//\"}
DOWNLOAD_URL=${DOWNLOAD_URL/browser_download_url: /}
mkdir -p $BASE_DIR
OUTPUT=$BASE_DIR/$KUBESCAPE_EXEC
DOWNLOAD_URL="https://github.com/armosec/kubescape/releases/latest/download/kubescape-${osName}-latest"
curl --progress-bar -L $DOWNLOAD_URL -o $OUTPUT
# Ping download counter
curl --silent https://us-central1-elated-pottery-310110.cloudfunctions.net/kubescape-download-counter -o /dev/null
chmod +x $OUTPUT 2>/dev/null || sudo chmod +x $OUTPUT
sudo rm -f /usr/local/bin/$KUBESCAPE_EXEC 2>/dev/null || sudo rm -f /usr/local/bin/$KUBESCAPE_EXEC
sudo cp $OUTPUT /usr/local/bin 2>/dev/null || sudo cp $OUTPUT /usr/local/bin
# Checking if SUDO needed/exists
SUDO=
if [ "$(id -u)" -ne 0 ] && [ -n "$(which sudo)" ]; then
SUDO=sudo
fi
# Find install dir
install_dir=/usr/local/bin #default
for pdir in ${PATH//:/ }; do
edir="${pdir/#\~/$HOME}"
if [[ $edir == $HOME/* ]]; then
install_dir=$edir
mkdir -p $install_dir 2>/dev/null || true
SUDO=
break
fi
done
chmod +x $OUTPUT 2>/dev/null
$SUDO rm -f /usr/local/bin/$KUBESCAPE_EXEC 2>/dev/null || true # cleaning up old install
$SUDO cp $OUTPUT $install_dir/$KUBESCAPE_EXEC
rm -rf $OUTPUT
echo

View File

@@ -6,13 +6,13 @@ import (
"time"
"github.com/armosec/kubescape/cautils"
"github.com/armosec/kubescape/scapepkg/exceptions"
"github.com/armosec/kubescape/scapepkg/score"
"github.com/armosec/opa-utils/exceptions"
"github.com/armosec/opa-utils/reporthandling"
"github.com/armosec/opa-utils/score"
"github.com/armosec/kubescape/cautils/k8sinterface"
"github.com/armosec/k8s-interface/k8sinterface"
"github.com/armosec/kubescape/cautils/opapolicy"
"github.com/armosec/kubescape/cautils/opapolicy/resources"
"github.com/armosec/opa-utils/resources"
"github.com/golang/glog"
"github.com/open-policy-agent/opa/ast"
"github.com/open-policy-agent/opa/rego"
@@ -42,7 +42,7 @@ func NewOPAProcessor(sessionObj *cautils.OPASessionObj) *OPAProcessor {
func NewOPAProcessorHandler(processedPolicy, reportResults *chan *cautils.OPASessionObj) *OPAProcessorHandler {
regoDependenciesData := resources.NewRegoDependenciesData(k8sinterface.GetK8sConfig())
regoDependenciesData := resources.NewRegoDependenciesData(k8sinterface.GetK8sConfig(), cautils.ClusterName)
store, err := regoDependenciesData.TOStorage()
if err != nil {
panic(err)
@@ -81,7 +81,7 @@ func (opap *OPAProcessor) Process() error {
// glog.Infof(fmt.Sprintf("Starting 'Process'. reportID: %s", opap.PostureReport.ReportID))
cautils.ProgressTextDisplay(fmt.Sprintf("Scanning cluster %s", cautils.ClusterName))
cautils.StartSpinner()
frameworkReports := []opapolicy.FrameworkReport{}
frameworkReports := []reporthandling.FrameworkReport{}
var errs error
for i := range opap.Frameworks {
frameworkReport, err := opap.processFramework(&opap.Frameworks[i])
@@ -100,36 +100,39 @@ func (opap *OPAProcessor) Process() error {
return errs
}
func (opap *OPAProcessor) processFramework(framework *opapolicy.Framework) (*opapolicy.FrameworkReport, error) {
func (opap *OPAProcessor) processFramework(framework *reporthandling.Framework) (*reporthandling.FrameworkReport, error) {
var errs error
frameworkReport := opapolicy.FrameworkReport{}
frameworkReport := reporthandling.FrameworkReport{}
frameworkReport.Name = framework.Name
controlReports := []opapolicy.ControlReport{}
controlReports := []reporthandling.ControlReport{}
for i := range framework.Controls {
controlReport, err := opap.processControl(&framework.Controls[i])
if err != nil {
errs = fmt.Errorf("%v\n%s", errs, err.Error())
}
controlReports = append(controlReports, *controlReport)
if controlReport != nil {
controlReports = append(controlReports, *controlReport)
}
}
frameworkReport.ControlReports = controlReports
return &frameworkReport, errs
}
func (opap *OPAProcessor) processControl(control *opapolicy.Control) (*opapolicy.ControlReport, error) {
func (opap *OPAProcessor) processControl(control *reporthandling.Control) (*reporthandling.ControlReport, error) {
var errs error
controlReport := opapolicy.ControlReport{}
controlReport := reporthandling.ControlReport{}
controlReport.PortalBase = control.PortalBase
controlReport.ControlID = control.ControlID
controlReport.Control_ID = control.Control_ID // TODO: delete when 'id' is deprecated
controlReport.Name = control.Name
controlReport.Description = control.Description
controlReport.Remediation = control.Remediation
ruleReports := []opapolicy.RuleReport{}
ruleReports := []reporthandling.RuleReport{}
for i := range control.Rules {
ruleReport, err := opap.processRule(&control.Rules[i])
if err != nil {
@@ -139,11 +142,14 @@ func (opap *OPAProcessor) processControl(control *opapolicy.Control) (*opapolicy
ruleReports = append(ruleReports, *ruleReport)
}
}
if len(ruleReports) == 0 {
return nil, nil
}
controlReport.RuleReports = ruleReports
return &controlReport, errs
}
func (opap *OPAProcessor) processRule(rule *opapolicy.PolicyRule) (*opapolicy.RuleReport, error) {
func (opap *OPAProcessor) processRule(rule *reporthandling.PolicyRule) (*reporthandling.RuleReport, error) {
if ruleWithArmoOpaDependency(rule.Attributes) {
return nil, nil
}
@@ -160,17 +166,17 @@ func (opap *OPAProcessor) processRule(rule *opapolicy.PolicyRule) (*opapolicy.Ru
return &ruleReport, err
}
func (opap *OPAProcessor) runOPAOnSingleRule(rule *opapolicy.PolicyRule, k8sObjects []map[string]interface{}) (opapolicy.RuleReport, error) {
func (opap *OPAProcessor) runOPAOnSingleRule(rule *reporthandling.PolicyRule, k8sObjects []map[string]interface{}) (reporthandling.RuleReport, error) {
switch rule.RuleLanguage {
case opapolicy.RegoLanguage, opapolicy.RegoLanguage2:
case reporthandling.RegoLanguage, reporthandling.RegoLanguage2:
return opap.runRegoOnK8s(rule, k8sObjects)
default:
return opapolicy.RuleReport{}, fmt.Errorf("rule: '%s', language '%v' not supported", rule.Name, rule.RuleLanguage)
return reporthandling.RuleReport{}, fmt.Errorf("rule: '%s', language '%v' not supported", rule.Name, rule.RuleLanguage)
}
}
func (opap *OPAProcessor) runRegoOnK8s(rule *opapolicy.PolicyRule, k8sObjects []map[string]interface{}) (opapolicy.RuleReport, error) {
func (opap *OPAProcessor) runRegoOnK8s(rule *reporthandling.PolicyRule, k8sObjects []map[string]interface{}) (reporthandling.RuleReport, error) {
var errs error
ruleReport := opapolicy.RuleReport{
ruleReport := reporthandling.RuleReport{
Name: rule.Name,
}
@@ -197,7 +203,7 @@ func (opap *OPAProcessor) runRegoOnK8s(rule *opapolicy.PolicyRule, k8sObjects []
return ruleReport, errs
}
func (opap *OPAProcessor) regoEval(inputObj []map[string]interface{}, compiledRego *ast.Compiler) ([]opapolicy.RuleResponse, error) {
func (opap *OPAProcessor) regoEval(inputObj []map[string]interface{}, compiledRego *ast.Compiler) ([]reporthandling.RuleResponse, error) {
rego := rego.New(
rego.Query("data.armo_builtins"), // get package name from rule
rego.Compiler(compiledRego),
@@ -210,7 +216,7 @@ func (opap *OPAProcessor) regoEval(inputObj []map[string]interface{}, compiledRe
if err != nil {
return nil, fmt.Errorf("in 'regoEval', failed to evaluate rule, reason: %s", err.Error())
}
- results, err := parseRegoResult(&resultSet)
+ results, err := reporthandling.ParseRegoResult(&resultSet)
// results, err := ParseRegoResult(&resultSet)
if err != nil {
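regoEval drives the standard OPA Go API: bind the already-compiled modules with rego.Compiler, query the package document, feed the Kubernetes objects as input, and evaluate. A minimal sketch of that flow with a toy policy; the package name, rule, and input below are placeholders, not kubescape's armo_builtins rules:

package main

import (
	"context"
	"fmt"

	"github.com/open-policy-agent/opa/ast"
	"github.com/open-policy-agent/opa/rego"
)

func main() {
	module := `package example

deny[msg] {
	input.kind == "Pod"
	msg := "pods are denied in this toy policy"
}`

	// Compile once, reuse for many evaluations, as the processor does.
	compiled, err := ast.CompileModules(map[string]string{"example.rego": module})
	if err != nil {
		panic(err)
	}

	r := rego.New(
		rego.Query("data.example"), // query the whole package document, like "data.armo_builtins" above
		rego.Compiler(compiled),
		rego.Input(map[string]interface{}{"kind": "Pod"}),
	)

	rs, err := r.Eval(context.Background())
	if err != nil {
		panic(err)
	}
	// Each result expression holds the package document; deny messages live under "deny".
	fmt.Println(rs[0].Expressions[0].Value)
}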
@@ -232,22 +238,26 @@ func (opap *OPAProcessor) updateScore() {
}
func (opap *OPAProcessor) updateResults() {
- for f, frameworkReport := range opap.PostureReport.FrameworkReports {
- sumFailed := 0
- sumTotal := 0
- for c, controlReport := range opap.PostureReport.FrameworkReports[f].ControlReports {
+ for f := range opap.PostureReport.FrameworkReports {
+ // set exceptions
+ exceptions.SetFrameworkExceptions(&opap.PostureReport.FrameworkReports[f], opap.Exceptions, cautils.ClusterName)
+ // set counters
+ reporthandling.SetUniqueResourcesCounter(&opap.PostureReport.FrameworkReports[f])
+ // set default score
+ reporthandling.SetDefaultScore(&opap.PostureReport.FrameworkReports[f])
+ // edit results - remove data
+ // TODO - move function to pkg - use RemoveData
+ for c := range opap.PostureReport.FrameworkReports[f].ControlReports {
for r, ruleReport := range opap.PostureReport.FrameworkReports[f].ControlReports[c].RuleReports {
// editing the responses -> removing duplications, clearing secret data, etc.
opap.PostureReport.FrameworkReports[f].ControlReports[c].RuleReports[r].RuleResponses = editRuleResponses(ruleReport.RuleResponses)
- // adding exceptions to the rules
- ruleExceptions := exceptions.ListRuleExceptions(opap.Exceptions, frameworkReport.Name, controlReport.Name, ruleReport.Name)
- exceptions.AddExceptionsToRuleResponses(opap.PostureReport.FrameworkReports[f].ControlReports[c].RuleReports[r].RuleResponses, ruleExceptions)
}
- sumFailed += controlReport.GetNumberOfFailedResources()
- sumTotal += controlReport.GetNumberOfResources()
- opap.PostureReport.FrameworkReports[f].ControlReports[c].Score = float32(percentage(controlReport.GetNumberOfResources(), controlReport.GetNumberOfFailedResources()))
}
- opap.PostureReport.FrameworkReports[f].Score = float32(percentage(sumTotal, sumTotal-sumFailed))
}
}
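One detail worth noting in both versions of updateResults: assignments go through opap.PostureReport.FrameworkReports[f] (and [c], [r]) rather than through the range variables, because ranging over a slice of structs yields copies. A small standalone illustration of why that matters (the report type below is a placeholder):

package main

import "fmt"

type report struct{ Score float32 }

func main() {
	reports := []report{{0}, {0}}

	for _, r := range reports {
		r.Score = 100 // mutates a copy; the slice is unchanged
	}
	fmt.Println(reports) // [{0} {0}]

	for i := range reports {
		reports[i].Score = 100 // mutates the slice element itself
	}
	fmt.Println(reports) // [{100} {100}]
}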


@@ -4,11 +4,10 @@ import (
"testing"
"github.com/armosec/kubescape/cautils"
"github.com/armosec/opa-utils/reporthandling"
"github.com/armosec/kubescape/cautils/k8sinterface"
"github.com/armosec/k8s-interface/k8sinterface"
// _ "k8s.io/client-go/plugin/pkg/client/auth"
"github.com/armosec/kubescape/cautils/opapolicy"
)
func NewOPAProcessorMock() *OPAProcessor {
@@ -22,7 +21,7 @@ func TestProcess(t *testing.T) {
// set opaSessionObj
opaSessionObj := cautils.NewOPASessionObjMock()
- opaSessionObj.Frameworks = []opapolicy.Framework{*opapolicy.MockFrameworkA()}
+ opaSessionObj.Frameworks = []reporthandling.Framework{*reporthandling.MockFrameworkA()}
opaSessionObj.K8SResources = &k8sResources
opap := NewOPAProcessor(opaSessionObj)


@@ -1,23 +1,20 @@
package opaprocessor
import (
"encoding/json"
"fmt"
pkgcautils "github.com/armosec/kubescape/cautils/cautils"
pkgcautils "github.com/armosec/utils-go/utils"
"github.com/armosec/kubescape/cautils"
"github.com/armosec/kubescape/cautils/k8sinterface"
"github.com/armosec/kubescape/cautils/opapolicy"
resources "github.com/armosec/kubescape/cautils/opapolicy/resources"
"github.com/open-policy-agent/opa/rego"
"github.com/armosec/k8s-interface/k8sinterface"
"github.com/armosec/k8s-interface/workloadinterface"
"github.com/armosec/opa-utils/reporthandling"
resources "github.com/armosec/opa-utils/resources"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
- func getKubernetesObjects(k8sResources *cautils.K8SResources, match []opapolicy.RuleMatchObjects) []map[string]interface{} {
+ func getKubernetesObjects(k8sResources *cautils.K8SResources, match []reporthandling.RuleMatchObjects) []map[string]interface{} {
k8sObjects := []map[string]interface{}{}
for m := range match {
for _, groups := range match[m].APIGroups {
@@ -54,56 +51,19 @@ func getRuleDependencies() (map[string]string, error) {
}
return modules, nil
}
- func parseRegoResult(regoResult *rego.ResultSet) ([]opapolicy.RuleResponse, error) {
- var errs error
- ruleResponses := []opapolicy.RuleResponse{}
- for _, result := range *regoResult {
- for desicionIdx := range result.Expressions {
- if resMap, ok := result.Expressions[desicionIdx].Value.(map[string]interface{}); ok {
- for objName := range resMap {
- jsonBytes, err := json.Marshal(resMap[objName])
- if err != nil {
- err = fmt.Errorf("in parseRegoResult, json.Marshal failed. name: %s, obj: %v, reason: %s", objName, resMap[objName], err)
- glog.Error(err)
- errs = fmt.Errorf("%s\n%s", errs, err)
- continue
- }
- desObj := make([]opapolicy.RuleResponse, 0)
- if err := json.Unmarshal(jsonBytes, &desObj); err != nil {
- err = fmt.Errorf("in parseRegoResult, json.Unmarshal failed. name: %s, obj: %v, reason: %s", objName, resMap[objName], err)
- glog.Error(err)
- errs = fmt.Errorf("%s\n%s", errs, err)
- continue
- }
- ruleResponses = append(ruleResponses, desObj...)
- }
- }
- }
- }
- return ruleResponses, errs
- }
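The deleted parseRegoResult above (its caller now uses reporthandling.ParseRegoResult) turns the untyped values in a rego.ResultSet into typed responses by round-tripping them through JSON. A stripped-down sketch of just that decode step, with a placeholder response type and field instead of the real RuleResponse:

package main

import (
	"encoding/json"
	"fmt"
)

type ruleResponse struct {
	AlertMessage string `json:"alertMessage"`
}

func main() {
	// What a rego result value looks like after Eval: untyped maps and slices.
	raw := []interface{}{
		map[string]interface{}{"alertMessage": "container runs as root"},
	}

	jsonBytes, err := json.Marshal(raw)
	if err != nil {
		panic(err)
	}

	responses := make([]ruleResponse, 0)
	if err := json.Unmarshal(jsonBytes, &responses); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", responses) // [{AlertMessage:container runs as root}]
}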
//editRuleResponses editing the responses -> removing duplications, clearing secret data, etc.
- func editRuleResponses(ruleResponses []opapolicy.RuleResponse) []opapolicy.RuleResponse {
- uniqueRuleResponses := map[string]bool{}
+ func editRuleResponses(ruleResponses []reporthandling.RuleResponse) []reporthandling.RuleResponse {
lenRuleResponses := len(ruleResponses)
for i := 0; i < lenRuleResponses; i++ {
for j := range ruleResponses[i].AlertObject.K8SApiObjects {
- w := k8sinterface.NewWorkloadObj(ruleResponses[i].AlertObject.K8SApiObjects[j])
+ w := workloadinterface.NewWorkloadObj(ruleResponses[i].AlertObject.K8SApiObjects[j])
if w == nil {
continue
}
- resourceID := fmt.Sprintf("%s/%s/%s/%s", w.GetApiVersion(), w.GetNamespace(), w.GetKind(), w.GetName())
- if found := uniqueRuleResponses[resourceID]; found {
- // resource found -> remove from slice
- ruleResponses = removeFromSlice(ruleResponses, i)
- lenRuleResponses -= 1
- break
- } else {
- cleanRuleResponses(w)
- ruleResponses[i].AlertObject.K8SApiObjects[j] = w.GetWorkload()
- uniqueRuleResponses[resourceID] = true
- }
+ cleanRuleResponses(w)
+ ruleResponses[i].AlertObject.K8SApiObjects[j] = w.GetWorkload()
}
}
return ruleResponses
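For reference, the duplicate filtering that was dropped from editRuleResponses keyed each object by apiVersion/namespace/kind/name and skipped repeats. A standalone sketch of that idea with plain placeholder structs (field names are illustrative, not kubescape types):

package main

import "fmt"

type k8sObject struct {
	APIVersion, Namespace, Kind, Name string
}

// dedup keeps the first occurrence of each apiVersion/namespace/kind/name key.
func dedup(objs []k8sObject) []k8sObject {
	seen := map[string]bool{}
	out := []k8sObject{}
	for _, o := range objs {
		id := fmt.Sprintf("%s/%s/%s/%s", o.APIVersion, o.Namespace, o.Kind, o.Name)
		if seen[id] {
			continue
		}
		seen[id] = true
		out = append(out, o)
	}
	return out
}

func main() {
	objs := []k8sObject{
		{"v1", "default", "Pod", "nginx"},
		{"v1", "default", "Pod", "nginx"}, // duplicate
		{"apps/v1", "default", "Deployment", "nginx"},
	}
	fmt.Println(len(dedup(objs))) // 2
}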
@@ -114,14 +74,6 @@ func cleanRuleResponses(workload k8sinterface.IWorkload) {
}
}
- func removeFromSlice(ruleResponses []opapolicy.RuleResponse, i int) []opapolicy.RuleResponse {
- if i != len(ruleResponses)-1 {
- ruleResponses[i] = ruleResponses[len(ruleResponses)-1]
- }
- return ruleResponses[:len(ruleResponses)-1]
- }
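The removed removeFromSlice helper used the swap-with-last idiom: overwrite index i with the final element and shrink the slice by one. That is O(1) but does not preserve order, which was fine for the old deduplication. A tiny demonstration with a string slice:

package main

import "fmt"

// removeAt mirrors removeFromSlice: swap in the last element, then truncate.
func removeAt(s []string, i int) []string {
	if i != len(s)-1 {
		s[i] = s[len(s)-1]
	}
	return s[:len(s)-1]
}

func main() {
	fmt.Println(removeAt([]string{"a", "b", "c", "d"}, 1)) // [a d c] -- order is not preserved
}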
func ruleWithArmoOpaDependency(annotations map[string]interface{}) bool {
if annotations == nil {
return false
@@ -131,21 +83,3 @@ func ruleWithArmoOpaDependency(annotations map[string]interface{}) bool {
}
return false
}
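ruleWithArmoOpaDependency gates rules on an attribute flag, but the body that reads the flag is elided by the hunk above. The sketch below only illustrates the nil-safe map lookup pattern; the "armoOpa" key is a hypothetical placeholder, not the real attribute name:

package main

import "fmt"

// hasDependencyFlag mirrors the shape of ruleWithArmoOpaDependency: a nil map
// is safe, and a missing or non-string value means "no dependency".
// NOTE: "armoOpa" is a hypothetical key used only for this illustration.
func hasDependencyFlag(attributes map[string]interface{}) bool {
	if attributes == nil {
		return false
	}
	v, ok := attributes["armoOpa"].(string)
	return ok && v != ""
}

func main() {
	fmt.Println(hasDependencyFlag(nil))                                     // false
	fmt.Println(hasDependencyFlag(map[string]interface{}{"armoOpa": "v1"})) // true
}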
- func listMatchKinds(match []opapolicy.RuleMatchObjects) []string {
- matchKinds := []string{}
- for i := range match {
- matchKinds = append(matchKinds, match[i].Resources...)
- }
- return matchKinds
- }
- func percentage(big, small int) int {
- if big == 0 {
- if small == 0 {
- return 100
- }
- return 0
- }
- return int(float64(float64(big-small)/float64(big)) * 100)
- }
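A quick worked check of the removed percentage helper: with big = 10 total resources and small = 3 failed ones, (10-3)/10*100 = 70, i.e. a 70% passing score, and the big == 0 branches avoid dividing by zero for empty result sets. Re-stated here with a small main as a runnable example (lightly simplified, same arithmetic):

package main

import "fmt"

func percentage(big, small int) int {
	if big == 0 {
		if small == 0 {
			return 100 // nothing evaluated, nothing failed
		}
		return 0
	}
	return int(float64(big-small) / float64(big) * 100)
}

func main() {
	fmt.Println(percentage(10, 3)) // 70
	fmt.Println(percentage(0, 0))  // 100
}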
