Compare commits


280 Commits

Author SHA1 Message Date
dwertent
bd089d76af adding cluster flag - support submitting yaml file 2022-01-09 16:13:15 +02:00
dwertent
740497047d cli print support v2 2022-01-09 10:33:47 +02:00
dwertent
4c2a5e9a11 support scan all 2022-01-06 16:21:41 +02:00
dwertent
a41d2a46ff cli support list 2022-01-06 15:28:01 +02:00
dwertent
4794cbfb36 update opa version 2022-01-06 14:31:46 +02:00
dwertent
4573d83831 fixed counters and skipped ctr 2022-01-06 13:05:51 +02:00
dwertent
670ff4a15d support download 2022-01-05 20:46:56 +02:00
dwertent
b616a37800 fixed test 2022-01-05 16:45:50 +02:00
dwertent
ce488a3645 update latest fixes 2022-01-05 16:45:02 +02:00
dwertent
fb47a9c742 support v2 printing 2022-01-05 16:38:37 +02:00
David Wertenteil
80ace81a12 Fixing typo in the ActionSendReport error message 2022-01-05 16:16:52 +02:00
yiscah
1efdae5197 begin download config + download exceptions 2022-01-05 15:56:38 +02:00
yiscah
a4c88edfca begin download config + download exceptions 2022-01-05 15:56:24 +02:00
YiscahLevySilas1
8f38c2f627 Merge branch 'armosec:dev' into dev 2022-01-05 15:10:49 +02:00
Jonas Kint
bbf68d4ce8 Fixing typo in the ActionSendReport error message 2022-01-05 13:49:26 +01:00
dwertent
e1eec47a22 fixed report 2022-01-04 18:28:48 +02:00
Rotem Refael
fc05075817 Merge pull request #294 from armosec/dev
Minor features and improvements
2022-01-04 15:29:55 +02:00
dwertent
5bb64b634a support loading ks config in env 2022-01-04 14:42:25 +02:00
dwertent
7bc2c2be13 filter out resources based on owners 2022-01-03 13:36:29 +02:00
yiscah
27e2c044da update rbac-utils version for SAID2WLIDmap obj 2022-01-03 09:59:34 +02:00
dwertent
1213e8d6ac convert reports 2022-01-02 21:46:09 +02:00
dwertent
3f58d68d2a mocks 2021-12-30 17:48:42 +02:00
Rotem Refael
803e62020e add devopsbest framework 2021-12-30 16:40:07 +02:00
dwertent
fde437312f report v1 to v2 2021-12-30 11:52:17 +02:00
Ben Hirschberg
18425c915b Merge pull request #291 from slashben/dev
adding container image vulnerability adaptor proposal
2021-12-30 10:44:57 +02:00
Benyamin Hirschberg
0de6892ddd adding container image vulnerability adaptor proposal 2021-12-30 10:44:08 +02:00
David Wertenteil
dfb92ffec3 Remove RBAC deprecated objects 2021-12-29 17:49:52 +02:00
yiscah
85317f1ee1 Merge branch 'dev' of https://github.com/YiscahLevySilas1/kubescape into dev 2021-12-29 16:23:29 +02:00
yiscah
f22f60508f rbacTable and rbac struct deprecated 2021-12-29 16:23:14 +02:00
dwertent
716bdaaf38 support kind List 2021-12-29 12:06:48 +02:00
dwertent
1b0e2b87de Handle all resources failure 2021-12-28 10:47:12 +02:00
David Wertenteil
2c57b809d2 show warnings for host sensor and send kubelet cmd 2021-12-28 10:42:26 +02:00
David Wertenteil
d9c96db212 Merge branch 'dev' into master 2021-12-28 10:41:39 +02:00
Daniel-GrunbergerCA
5f7391a76b stdout to stderror 2021-12-28 09:20:05 +02:00
Daniel-GrunbergerCA
accd80eda8 rm cmdline map 2021-12-28 09:07:50 +02:00
Daniel-GrunbergerCA
e49499f085 use regoes from master 2021-12-27 08:45:50 +02:00
David Delarosa
521f8930d7 Merge branch 'dev' into dev 2021-12-26 14:43:06 +02:00
David Wertenteil
11b9a8eb6e fix ControlsInputsGetter init 2021-12-23 17:39:49 +02:00
yiscah
0d4350ae24 fix ControlsInputsGetter init 2021-12-23 17:31:23 +02:00
David Wertenteil
62a6a25aa1 support pulling config inputs from git 2021-12-23 16:48:29 +02:00
yiscah
14a74e7312 support pulling config inputs from git 2021-12-23 10:33:23 +02:00
Rotem Refael
3fad2f3430 Merge pull request #279 from armosec/dev
Cli improvements
2021-12-22 21:16:54 +02:00
David Delarosa
c35d1e8791 Use stderr
By using stderr fd we can separate the information logs from the
application output
2021-12-22 20:27:38 +02:00
David Wertenteil
0367255a2a cli improvement
* Support risk-score calculation
* Update spinner support
* Update url display
* Adding control url to each control
2021-12-22 16:56:11 +02:00
dwertent
f5f5552ecd support threshold 2021-12-22 16:48:18 +02:00
dwertent
046a22bd2b print to error stderr 2021-12-22 12:59:46 +02:00
Daniel-GrunbergerCA
ad94ac7595 rm json print 2021-12-22 08:29:35 +02:00
Daniel-GrunbergerCA
cfa3993b79 print json 2021-12-21 20:31:12 +02:00
Daniel-GrunbergerCA
972793b98a print json 2021-12-21 20:27:23 +02:00
Daniel-GrunbergerCA
35682bf5b8 pull regoes from dev 2021-12-21 19:02:16 +02:00
Daniel-GrunbergerCA
b023f592aa Merge remote-tracking branch 'upstream/dev' 2021-12-21 13:37:32 +02:00
Daniel-GrunbergerCA
a1c34646f1 warning for host sensor 2021-12-21 13:34:31 +02:00
David Wertenteil
9ac3768f1d Merge pull request #277 from dwertent/master
Fixed host sensor ignore ns
2021-12-21 13:23:20 +02:00
dwertent
ff7881130f fixed host sensor issue 2021-12-21 13:09:31 +02:00
dwertent
37effda7c5 update pkg 2021-12-21 11:04:18 +02:00
Daniel-GrunbergerCA
0cac7cb1a5 fix kubeletcmd for marshalling 2021-12-21 09:23:38 +02:00
David Wertenteil
8d41d11ca3 Merge pull request #275 from dwertent/master
CLI improvements
2021-12-20 18:26:55 +02:00
dwertent
0ef516d147 support printing sensor and cloud resources 2021-12-20 18:22:08 +02:00
dwertent
f57a30898c print skipped 2021-12-20 13:32:14 +02:00
dwertent
a10c67555d adding spinner when sending reporting 2021-12-20 11:14:31 +02:00
dwertent
14d0df3926 update generated url 2021-12-20 11:11:31 +02:00
dwertent
c085aeaa68 adding control url to results 2021-12-20 10:53:40 +02:00
David Wertenteil
8543afccca Update objects grouping and kinds 2021-12-19 23:34:41 +02:00
dwertent
61b5603a3b support host sensor and cloud description 2021-12-19 23:21:05 +02:00
dwertent
e3efffb2ec Merge remote-tracking branch 'upstream/dev' 2021-12-19 15:07:48 +02:00
dwertent
fe9a342b42 update submit testing 2021-12-19 15:06:30 +02:00
Rotem Refael
c7668b4436 Merge pull request #272 from LiorAlafiArmo/dev
ARMO risk-score
2021-12-16 14:33:53 +02:00
Rotem Refael
ccdf6b227f Merge pull request #271 from LiorAlafiArmo/master
ARMO risk-score
2021-12-16 14:32:13 +02:00
Lior Alafi
0aea384f41 changed prettyprint to work with risk-score 2021-12-15 19:04:40 +02:00
Lior Alafi
467059cd26 adding score 2021-12-15 18:13:58 +02:00
David Wertenteil
f41af36ea9 Adding cloudSupport objects to interface 2021-12-14 20:10:45 +02:00
dwertent
e2f8902222 use objectsenvelopes pkg 2021-12-14 20:06:38 +02:00
David Wertenteil
52bfd4cadc Submit configmap and env vars keys 2021-12-14 18:13:42 +02:00
Daniel-GrunbergerCA
7cdc556292 go mod 2021-12-14 14:16:50 +02:00
Daniel-GrunbergerCA
039bda9eaf Merge remote-tracking branch 'upstream/dev' into send_keys 2021-12-14 14:05:13 +02:00
Daniel-GrunbergerCA
a6d73d6f8b send keys for configmaps and env vars 2021-12-14 14:05:07 +02:00
Ben Hirschberg
8e5af59153 Merge pull request #267 from armosec/dev
Hot fix - Download command
2021-12-14 12:16:24 +02:00
dwertent
278467518e call SetRegoObjects when downloading 2021-12-14 12:07:07 +02:00
dwertent
a7080a5778 remove data from resource after saving in list 2021-12-14 11:33:33 +02:00
Daniel-GrunbergerCA
6a71ef6745 Merge remote-tracking branch 'upstream/dev' 2021-12-13 15:44:44 +02:00
Rotem Refael
10eb576260 Merge pull request #264 from armosec/dev
Hot fix - all resource list missing some of the failed resources
2021-12-13 14:35:26 +02:00
David Wertenteil
f14acb79bf Merge pull request #263 from dwertent/master
Hot fix - adding failed resources to the all-list of resources
2021-12-13 14:18:55 +02:00
dwertent
b8e011bd27 Merge remote-tracking branch 'upstream/dev' 2021-12-13 14:06:23 +02:00
dwertent
f6295308cd hot-fix eks failing resources 2021-12-13 14:06:07 +02:00
Rotem Refael
f981675850 Merge pull request #262 from armosec/dev
New features and bug fixing - new release
2021-12-12 18:06:25 +02:00
dwertent
93bb7610e6 update summary table 2021-12-12 17:43:34 +02:00
Rotem Refael
23975ee359 change dev-ui link 2021-12-12 13:50:06 +02:00
David Wertenteil
14eaedf375 revert prometheus output
* Revert prometheus output
* Revert sensor behavior
2021-12-12 11:37:50 +02:00
dwertent
ced0b741b9 Do not ask user about host sensor 2021-12-12 11:36:35 +02:00
dwertent
13e805b213 remove passed resources 2021-12-12 10:50:22 +02:00
dwertent
c424c1e394 revert prometheus output 2021-12-12 10:41:09 +02:00
David Wertenteil
77d68bdc73 version and submit improvements 2021-12-09 15:10:50 +02:00
dwertent
a1555bb9cd after merge with dev branch 2021-12-09 14:10:44 +02:00
dwertent
3ca61b218e execute the ResourceEnumerator 2021-12-09 13:41:19 +02:00
dwertent
e7917277e7 move rbac objects to cautils 2021-12-09 13:27:23 +02:00
dwertent
aa18be17fa remove rego from armo, support release fallback 2021-12-09 11:54:25 +02:00
Daniel-GrunbergerCA
39c7af5f8d Merge remote-tracking branch 'upstream/dev' 2021-12-08 13:36:51 +02:00
David Wertenteil
a5f7f8bbe4 Merge pull request #258 from Bezbran/dev
take nodes list from corev1 API.
2021-12-08 11:04:20 +02:00
Bezalel Brandwine
420e491963 add some more host sensor data 2021-12-08 08:58:07 +02:00
Bezbran
36f2ff997a Merge pull request #13 from armosec/dev
Dev
2021-12-08 08:44:04 +02:00
YiscahLevySilas1
c33807d052 Merge pull request #257 from YiscahLevySilas1/dev
add apiVersion to rbac obj
2021-12-07 20:13:00 +02:00
yiscah
fb3946b64f Merge branch 'dev' of https://github.com/YiscahLevySilas1/kubescape into dev 2021-12-07 19:50:31 +02:00
yiscah
51322e7270 add apiVersion to rbac objs 2021-12-07 19:50:14 +02:00
David Wertenteil
3f084d8525 Merge pull request #256 from dwertent/master
* Fixed url scanning 
* Support preRun rego
2021-12-07 17:15:06 +02:00
David Wertenteil
b1f4002036 Merge pull request #255 from AlexsJones/dev
spelling mistake on clihandler/cmd/control.go:19
2021-12-07 16:51:15 +02:00
dwertent
bb1cbe0902 fixed url scanning, support preRun rego 2021-12-07 16:50:43 +02:00
Alex Jones
a095634755 spelling mistake on clihandler/cmd/control.go:19 2021-12-07 13:03:44 +00:00
Daniel-GrunbergerCA
1b9ff074af run workflow 2021-12-07 14:51:34 +02:00
Daniel-GrunbergerCA
f8361446a4 integrate cloud provider description 2021-12-07 14:49:56 +02:00
Bezalel Brandwine
5713490f14 Merge branch 'dev' of github.com:Bezbran/kubescape into dev 2021-12-07 12:48:51 +02:00
Bezalel Brandwine
1ceac2a0a0 take node list from core v1 2021-12-07 12:48:45 +02:00
Daniel-GrunbergerCA
8a2967a0db Merge remote-tracking branch 'upstream/dev' 2021-12-07 10:49:23 +02:00
David Wertenteil
86297720d5 Merge pull request #254 from dwertent/master
store data only once
2021-12-07 10:33:15 +02:00
dwertent
1aeb2b96e2 store data only once 2021-12-07 10:30:03 +02:00
Bezbran
4ee8b9d7f6 Merge pull request #12 from armosec/dev
Dev
2021-12-06 10:18:33 +02:00
David Wertenteil
1d208ed5ec Merge pull request #252 from Bezbran/dev
Merge host-sensor capability
2021-12-05 17:32:57 +02:00
Bezalel Brandwine
3883aaabab fault tolerance for host sensor installation failures 2021-12-05 16:01:38 +02:00
Bezalel Brandwine
6fb3c070d0 don't print skipping node scanning 2021-12-05 15:42:07 +02:00
Bezalel Brandwine
d8d8b4ed73 add k8s resources map all in all to all resources report 2021-12-05 15:40:21 +02:00
David Wertenteil
907f46769f Merge pull request #251 from dwertent/master
return list of strings
2021-12-05 15:14:31 +02:00
dwertent
1ffdb717f7 return list of string 2021-12-05 15:11:44 +02:00
Bezalel Brandwine
9080603bce integrate host sensor into k8s IMetadata resource map 2021-12-05 15:08:05 +02:00
David Wertenteil
5796ae9084 supporting list of include-namespaces
Fixed issue #247
2021-12-05 13:54:23 +02:00
dwertent
50636e3a7e supporting list of include-namespaces 2021-12-05 13:52:06 +02:00
David Wertenteil
501d4c9dfc Update k8s-interface version 2021-12-05 13:12:46 +02:00
dwertent
84cbc4ae04 Update version 2021-12-05 12:42:07 +02:00
Bezalel Brandwine
cbb2a3e46f go get + build after merge from armosec 2021-12-05 12:28:28 +02:00
Bezbran
493197c073 Merge pull request #11 from armosec/dev
Dev
2021-12-05 12:24:29 +02:00
Bezbran
31a2952101 Merge branch 'dev' into dev 2021-12-05 12:24:10 +02:00
Bezalel Brandwine
acaccc23e8 merge conflicts 1 2021-12-05 12:15:55 +02:00
David Wertenteil
70e339164d Separate offline behavior from yaml input 2021-12-05 09:57:54 +02:00
dwertent
0de5d72d75 Merge remote-tracking branch 'upstream/dev' 2021-12-05 09:54:57 +02:00
dwertent
d604cc7faf update k8sinterface version 2021-12-05 09:50:56 +02:00
dwertent
d843a3e359 set tenant if config not found 2021-12-02 19:18:35 +02:00
dwertent
37586662b3 handle yaml files and armo api behavior 2021-12-02 19:13:05 +02:00
David Wertenteil
193687418f RBAC object sent using pagination mechanism 2021-12-02 10:13:04 +02:00
yiscah
72e6bb9537 send rbac objs in all resources 2021-12-01 21:30:04 +02:00
David Wertenteil
d69e790c61 Supporting verbose flag 2021-12-01 14:49:53 +02:00
dwertent
01d41520d4 initialize mock resourceMap when scanning yamls 2021-12-01 14:12:00 +02:00
dwertent
aea9eb9e01 use mapResource mock when testing 2021-12-01 12:35:09 +02:00
dwertent
26717b13e9 supporting verbose flag 2021-12-01 12:30:16 +02:00
dwertent
5f36417bd9 update ver 2021-11-30 17:06:20 +02:00
dwertent
021ea34814 update k8s package 2021-11-30 15:47:56 +02:00
David Wertenteil
4a08fbdf28 Adding pagination to report 2021-11-30 10:59:30 +02:00
dwertent
268753091d fixed test 2021-11-30 10:49:05 +02:00
dwertent
ec688829b5 support report pagination 2021-11-29 17:24:50 +02:00
Rotem Refael
ec5bf58b0f Merge pull request #242 from YiscahLevySilas1/dev
add comment for isRuleKubescapeVersionCompatible()
2021-11-29 13:06:05 +02:00
Bezalel Brandwine
f877d821f0 in the middle of refactoring 2021-11-28 16:47:44 +02:00
Bezalel Brandwine
6c22cfef1e in the middle of refactoring 2021-11-28 16:47:24 +02:00
Bezalel Brandwine
05305d858b host sensor flag + user input asking 2021-11-28 12:35:21 +02:00
yiscah
e094237bbf add comment for isRuleKubescapeVersionCompatible() 2021-11-28 10:23:47 +02:00
David Wertenteil
77eb52bc51 Working with IMetadata interface 2021-11-28 08:14:00 +02:00
dwertent
c79834cec7 working with IMetadata interface 2021-11-26 00:52:03 +02:00
Bezalel Brandwine
aefc5fded7 kubelet configuration is in kubescape 2021-11-25 17:54:26 +02:00
Bezalel Brandwine
5fd5a5d4fa [host-sensor] first integration in kubescape 2021-11-25 17:43:21 +02:00
David Wertenteil
0368ecf7f3 External object support
* Aggregate rego input
* Display `User` and `Group` in output
* Update dependencies
2021-11-25 14:33:28 +02:00
yiscah
d9ec5dcb56 rule version - kubescape version check 2021-11-25 12:30:02 +02:00
yiscah
030bc6c6b6 handle pretty print external objects 2021-11-25 12:29:05 +02:00
yiscah
c1dd2fe0f4 print warning in local build to work with latest version 2021-11-25 12:27:28 +02:00
Bezalel Brandwine
4e0851868e initial host sensor deployment stage 2021-11-24 15:23:50 +02:00
Bezbran
276178c27c Merge pull request #10 from armosec/dev
Dev
2021-11-23 10:53:41 +02:00
yiscah
3006e6bcbf add isKindToBeGrouped 2021-11-23 09:07:37 +02:00
yiscah
3a50c5686e print externalObjects with their relatedObjects 2021-11-22 20:34:29 +02:00
yiscah
f8eea4d082 use inputaggregator on k8sresources, don't use v0 rules 2021-11-22 11:06:29 +02:00
YiscahLevySilas1
8a42d77990 Merge branch 'armosec:dev' into dev 2021-11-22 11:05:07 +02:00
Rotem Refael
3980d1a9b0 Merge pull request #235 from armosec/dev
* Hot fixes
* Smoke tests
* Update documentation
2021-11-21 10:22:32 +02:00
Rotem Refael
53741ec26e Update README.md 2021-11-21 10:13:31 +02:00
David Wertenteil
c398cf46c9 Comment out policy version check 2021-11-21 09:23:21 +02:00
dwertent
e869ce4a64 comment out policy version check 2021-11-21 08:49:24 +02:00
YiscahLevySilas1
4064be6577 Merge branch 'armosec:dev' into dev 2021-11-18 16:47:58 +02:00
David Wertenteil
1f00cf4151 Fixed stdin support
* Fixed stdin
* Adding smoke tests
2021-11-16 17:30:06 +02:00
dwertent
bae0ca62b8 update smoke testing 2021-11-16 16:03:31 +02:00
dwertent
b7a51a2495 fixed stdin support 2021-11-16 15:57:23 +02:00
Rotem Refael
4f6a3e39d0 Update SAAS link 2021-11-15 21:59:30 +02:00
David Wertenteil
528f6b7402 Fix broken links
#231
2021-11-14 16:42:40 +02:00
David Wertenteil
c252f29e6d Adding basic exceptions documentation
#232 
#80
2021-11-14 14:46:31 +02:00
dwertent
fea84c9652 update opa-utils pkg version 2021-11-14 14:42:10 +02:00
dwertent
9b9940f708 adding exceptions docs 2021-11-14 14:31:53 +02:00
Thibault Le Reste
a34ab17307 fix Kubernetes Hardening Guidance broken links 2021-11-14 13:16:28 +01:00
yiscah
477a3e7263 update nsa url in readme 2021-11-14 08:59:34 +02:00
Rotem Refael
f94c9496df Merge pull request #223 from armosec/dev
Adding features and fixing bugs
2021-11-11 15:08:38 +02:00
lalafi@cyberarmor.io
1c31281b7b add baseScore to controlReport 2021-11-11 13:57:52 +02:00
dwertent
0e5204ecb4 support custom frameworks 2021-11-11 11:15:33 +02:00
David Wertenteil
f3dc6235d7 Merge pull request #225 from Daniel-GrunbergerCA/master
Supporting custom frameworks
2021-11-11 09:43:40 +02:00
Daniel-GrunbergerCA
37cdf1a19e erase repetitive frameworks 2021-11-10 18:57:05 +02:00
Daniel-GrunbergerCA
1fb642c777 scan with custom framework 2021-11-10 18:30:03 +02:00
dwertent
8f791ceb12 Improve readme 2021-11-10 09:38:34 +02:00
dwertent
f40eaa0f56 Merge branch 'dev' 2021-11-10 08:17:51 +02:00
dwertent
cb34d17ba1 fixed merge 2021-11-10 08:01:33 +02:00
dwertent
328ba82007 Merge branch 'master' of github.com:armosec/kubescape 2021-11-10 07:59:05 +02:00
David Wertenteil
010ed1b047 Merge pull request #222 from dwertent/master
Adding json to http headers
2021-11-10 07:55:38 +02:00
dwertent
5a81a77d92 adding json to http headers 2021-11-10 07:53:35 +02:00
David Wertenteil
c7ea10d206 Merge pull request #221 from dwertent/master
Checking latest version
2021-11-09 17:09:01 +02:00
dwertent
a37d00b40a checking latest version 2021-11-09 17:07:06 +02:00
David Wertenteil
0168b768d2 Merge pull request #214 from mboersma/fix-spelling-fail-threshold
Fix spelling in --fail-threshold description
2021-11-09 15:10:17 +02:00
David Wertenteil
9a85b57ba4 Merge pull request #201 from Joibel/fix/spelling
Minor spelling fixes
2021-11-09 15:10:02 +02:00
dwertent
eafece6497 update helm command in readme 2021-11-09 11:18:30 +02:00
dwertent
8f08271664 update cronjob configmap 2021-11-09 11:16:13 +02:00
David Wertenteil
da0271e624 Merge pull request #218 from yonahd/helm_chart
Helm chart for kubescape
2021-11-08 13:17:37 +02:00
Yonah Dissen
94f52fb4ac Documentation running using docker 2021-11-08 11:52:04 +02:00
David Wertenteil
524c2922a4 Merge pull request #219 from Daniel-GrunbergerCA/master
Support scanning multiple controls
2021-11-08 11:29:50 +02:00
Daniel-GrunbergerCA
0891d64654 comment to run workflow 2021-11-08 10:48:39 +02:00
Daniel-GrunbergerCA
d1c23f7442 scan multiple controls 2021-11-08 10:39:31 +02:00
Daniel-GrunbergerCA
8cbbe35f24 Merge remote-tracking branch 'upstream/dev' 2021-11-08 08:53:27 +02:00
Yonah Dissen
a21e9d706e small changes in helm chart 2021-11-07 21:23:57 +02:00
Yonah Dissen
57160c4d04 add helm chart to deploy kubescape in cluster 2021-11-07 21:17:45 +02:00
Yonah Dissen
8b46a49e23 add helm chart to deploy kubescape in cluster 2021-11-07 21:09:30 +02:00
David Wertenteil
c11ebb49f7 Merge pull request #217 from dwertent/master
Fixed include namespaces
2021-11-07 13:56:56 +02:00
dwertent
e4c3935a1b fixed include ns 2021-11-07 13:50:46 +02:00
David Wertenteil
ade062fdd3 Merge pull request #216 from dwertent/master
support armoBest framework name
2021-11-07 09:23:27 +02:00
dwertent
b0f6357482 support armoBest 2021-11-07 09:13:51 +02:00
Matt Boersma
38a9c11286 Fix spelling in --fail-threshold description 2021-11-05 10:33:06 -06:00
David Wertenteil
0d95f02e60 Merge pull request #213 from dwertent/master
support include namespaces
2021-11-04 12:09:27 +02:00
dwertent
1c30528eea support include ns 2021-11-04 12:06:34 +02:00
David Wertenteil
d1b116d314 Merge pull request #210 from dwertent/master
Submit support
2021-11-03 17:31:23 +02:00
dwertent
9d20fd41a8 fixed rbac submit 2021-11-03 17:28:37 +02:00
dwertent
54648bb973 update opa pkg 2021-11-03 15:28:42 +02:00
dwertent
fc4edb12f9 adding stdout to smoke tests 2021-11-02 18:59:34 +02:00
dwertent
9a1b8d7ce2 support submit 2021-11-02 16:14:09 +02:00
dwertent
6909975503 controls support yaml inputs 2021-11-02 10:14:39 +02:00
David Wertenteil
5d94bd990a Merge pull request #208 from dwertent/master
Adding smoke testing and support inputs for controls
2021-11-01 17:22:31 +02:00
dwertent
67c8719f34 adding smoke tests to PR 2021-11-01 14:04:20 +02:00
dwertent
d5b60c6ac8 update config api 2021-11-01 13:52:40 +02:00
dwertent
a99d2e9e26 remove scan 2021-11-01 12:41:54 +02:00
dwertent
5c7d89cb9e use command 2021-11-01 11:55:54 +02:00
dwertent
ae7810f0d3 support input from file 2021-11-01 11:44:07 +02:00
Yonah Dissen
5a90dc46f0 fix version for cli in docker image 2021-11-01 09:17:46 +02:00
Yonah Dissen
294f886588 fix version for cli in docker image 2021-10-31 17:50:55 +02:00
dwertent
17aec665cf updated tests 2021-10-31 15:31:23 +02:00
dwertent
959b25e8b7 adding smoke tests 2021-10-31 15:05:22 +02:00
dwertent
9fd2bf3480 Merge remote-tracking branch 'upstream/dev' 2021-10-31 14:38:36 +02:00
dwertent
7b061a4e51 update opa pkg 2021-10-31 14:38:13 +02:00
David Wertenteil
4fcd89390b Merge pull request #206 from Joibel/feature/prometheus
Add a prometheus output format
2021-10-31 10:57:49 +02:00
dwertent
667ffe9cd3 Merge remote-tracking branch 'prometheus/feature/prometheus' 2021-10-31 08:58:57 +02:00
dwertent
6f4086cd8c Merge branch 'master' of github.com:armosec/kubescape 2021-10-31 08:58:44 +02:00
dwertent
2a45a1a400 support controls input 2021-10-28 16:29:28 +03:00
David Wertenteil
eee201de1e Merge pull request #205 from dwertent/master
Adding cronJob support doc
2021-10-28 11:30:17 +03:00
dwertent
6be24bd22a change repeatedly to periodically 2021-10-28 10:21:25 +03:00
dwertent
ca927dec30 update naming convention 2021-10-28 10:05:30 +03:00
dwertent
3a78ef46a3 Merge remote-tracking branch 'upstream/dev' 2021-10-28 09:40:26 +03:00
David Wertenteil
bdb1cd0905 Merge pull request #199 from Daniel-GrunbergerCA/master
Scan with multiple frameworks/control support
2021-10-28 09:35:07 +03:00
dwertent
ffb556a637 update readme 2021-10-28 09:15:22 +03:00
dwertent
40acfb5e9d Adding cronJob doc 2021-10-28 09:10:37 +03:00
Daniel-GrunbergerCA
de8bcfa0d2 enhance help msgs 2021-10-27 14:44:25 +03:00
Daniel-GrunbergerCA
9439f407da add env var to not check latest release 2021-10-27 13:39:03 +03:00
Daniel-GrunbergerCA
5095e62961 support scanning multiple frameworks from multiple files 2021-10-27 13:02:45 +03:00
Daniel-GrunbergerCA
3301907864 print only one table for controls & enhance help msg 2021-10-27 10:25:26 +03:00
Daniel-GrunbergerCA
151175c40f read single control from framework file 2021-10-27 08:49:40 +03:00
Daniel-GrunbergerCA
234d4fa537 Merge remote-tracking branch 'upstream/dev' 2021-10-27 08:27:21 +03:00
Rotem Refael
f384e8a6e3 Merge pull request #203 from armosec/cluster-name-issue
adopt cluster name (HotFix)
2021-10-26 20:51:42 +03:00
dwertent
66068757e1 update cluster name in mock struct 2021-10-26 20:39:18 +03:00
dwertent
8a7cda5dd1 adopt cluster name 2021-10-26 20:27:33 +03:00
Alan Clucas
8e67104ba4 Add prometheus to readme 2021-10-26 16:23:05 +01:00
Alan Clucas
0c9da9ddc8 Add a prometheus metrics style output
Output per control results and also per object counts

This can lead to running this as a service that prometheus can collect from
2021-10-26 16:19:35 +01:00
Alan Clucas
a5ef6aa126 Minor spelling fixes 2021-10-26 15:19:05 +01:00
Rotem Refael
c133b7a2c2 Merge pull request #191 from armosec/dev
Hot fixes related to submit & account options
2021-10-26 14:10:23 +03:00
Daniel-GrunbergerCA
a0ca68cc41 update json and junit for multiple frameworks 2021-10-26 13:55:12 +03:00
Daniel-GrunbergerCA
41cae0bc93 Merge remote-tracking branch 'upstream/dev' 2021-10-26 13:23:00 +03:00
David Wertenteil
b4198fde8c Merge pull request #198 from dwertent/master
update pkg tag
2021-10-26 12:28:02 +03:00
dwertent
bd24f35738 update tag 2021-10-26 12:26:44 +03:00
Daniel-GrunbergerCA
6fcbb757b5 Merge remote-tracking branch 'upstream/dev' 2021-10-25 17:41:15 +03:00
Daniel-GrunbergerCA
3b8825e5d2 scan multiple frameworks and controls 2021-10-25 17:41:04 +03:00
Rotem Refael
5cf3244918 Merge pull request #192 from dwertent/master
Update multiple score
2021-10-25 17:40:19 +03:00
dwertent
934c9ccc8b fixed lowest 2021-10-25 15:51:23 +03:00
dwertent
41dfdfd1e8 support more than one score 2021-10-25 15:14:31 +03:00
David Wertenteil
427fb59c99 Merge pull request #190 from dwertent/master
Fixed submit and url
2021-10-25 12:08:23 +03:00
David Wertenteil
ae825800f6 Merge pull request #189 from Daniel-GrunbergerCA/master
Update tag for newest release of k8s-interface
2021-10-25 12:08:06 +03:00
dwertent
d72700acf6 update submit 2021-10-25 12:05:51 +03:00
dwertent
3310a6a26f Merge remote-tracking branch 'upstream/dev' 2021-10-25 11:55:30 +03:00
dwertent
740b5aa772 add full url 2021-10-25 11:55:08 +03:00
Daniel-GrunbergerCA
04b55e764a fix k8s-interface pkg tag 2021-10-25 10:44:43 +03:00
Daniel-GrunbergerCA
beb4062bb1 update tag 2021-10-25 09:29:43 +03:00
David Wertenteil
5d4cd4acdc Merge pull request #188 from dwertent/master
Use interfaces
2021-10-25 09:21:43 +03:00
dwertent
aec8198131 adding score to interface 2021-10-25 08:41:15 +03:00
dwertent
0a850e47df use interfaces 2021-10-24 17:51:03 +03:00
Rotem Refael
5544820c5e Merge pull request #187 from armosec/dev
Fixed junit counter
2021-10-21 16:23:09 +03:00
dwertent
4f466d517a fixed junit counter 2021-10-21 16:05:51 +03:00
Bezbran
cd0f20ca2f Merge pull request #9 from armosec/master
Dev
2021-10-21 14:38:17 +03:00
dwertent
5a71c3270a Merge branch 'master' of github.com:armosec/kubescape 2021-10-21 11:32:40 +03:00
dwertent
70a9a7bbbd Update resource count 2021-10-18 10:23:06 +03:00
141 changed files with 12565 additions and 1575 deletions


@@ -46,6 +46,12 @@ jobs:
CGO_ENABLED: 0
run: python3 --version && python3 build.py
- name: Smoke Testing
env:
RELEASE: v1.0.${{ github.run_number }}
KUBESCAPE_SKIP_UPDATE_CHECK: "true"
run: python3 smoke_testing/init.py ${PWD}/build/${{ matrix.os }}/kubescape
- name: Upload Release binaries
id: upload-release-asset
uses: actions/upload-release-asset@v1
@@ -71,8 +77,8 @@ jobs:
run: echo quay.io/armosec/kubescape:v1.0.${{ github.run_number }} > build_tag.txt
- name: Build the Docker image
run: docker build . --file build/Dockerfile --tag $(cat build_tag.txt)
run: docker build . --file build/Dockerfile --tag $(cat build_tag.txt) --build-arg run_number=${{ github.run_number }}
- name: Re-Tag Image to latest
run: docker tag $(cat build_tag.txt) quay.io/armosec/kubescape:latest


@@ -30,6 +30,12 @@ jobs:
CGO_ENABLED: 0
run: python3 --version && python3 build.py
- name: Smoke Testing
env:
RELEASE: v1.0.${{ github.run_number }}
KUBESCAPE_SKIP_UPDATE_CHECK: "true"
run: python3 smoke_testing/init.py ${PWD}/build/${{ matrix.os }}/kubescape
- name: Upload build artifacts
uses: actions/upload-artifact@v2
with:
@@ -50,7 +56,7 @@ jobs:
run: echo quay.io/armosec/kubescape:dev-v1.0.${{ github.run_number }} > build_tag.txt
- name: Build the Docker image
run: docker build . --file build/Dockerfile --tag $(cat build_tag.txt)
run: docker build . --file build/Dockerfile --tag $(cat build_tag.txt) --build-arg run_number=${{ github.run_number }}
- name: Login to Quay.io
env: # Or as an environment variable


@@ -31,8 +31,9 @@ jobs:
CGO_ENABLED: 0
run: python3 --version && python3 build.py
- name: Upload build artifacts
uses: actions/upload-artifact@v2
with:
name: kubescape-${{ matrix.os }}
path: build/${{ matrix.os }}/kubescape
- name: Smoke Testing
env:
RELEASE: v1.0.${{ github.run_number }}
KUBESCAPE_SKIP_UPDATE_CHECK: "true"
run: python3 smoke_testing/init.py ${PWD}/build/${{ matrix.os }}/kubescape

.gitignore

@@ -2,4 +2,5 @@
*kubescape*
*debug*
*vender*
*.pyc*
.idea


@@ -19,7 +19,8 @@ Please note we have a code of conduct, please follow it in all your interactions
build.
2. Update the README.md with details of changes to the interface, this includes new environment
variables, exposed ports, useful file locations and container parameters.
3. We will merge the Pull Request in once you have the sign-off.
3. Open the Pull Request against the `dev` branch; we test the component there before merging into the `master` branch
4. We will merge the Pull Request once you have the sign-off.
## Code of Conduct

README.md

@@ -24,13 +24,22 @@ curl -s https://raw.githubusercontent.com/armosec/kubescape/master/install.sh |
## Run:
```
kubescape scan framework nsa
kubescape scan --submit
```
<img src="docs/summary.png">
</br>
> Kubescape is an open source project, we welcome your feedback and ideas for improvement. We're also aiming to collaborate with the Kubernetes community to help make the tests themselves more robust and complete as Kubernetes develops.
</br>
### Click [👍](https://github.com/armosec/kubescape/stargazers) if you want us to continue to develop and improve Kubescape 😀
</br>
# Being part of the team
We invite you to our team! We are excited about this project and want to return the love we get.
@@ -42,8 +51,16 @@ Want to contribute? Want to discuss something? Have an issue?
[<img src="docs/discord-banner.png" width="100" alt="logo" align="center">](https://armosec.github.io/kubescape/)
# Options and examples
## Tutorials
* [Overview](https://youtu.be/wdBkt_0Qhbg)
* [Scanning Kubernetes YAML files](https://youtu.be/Ox6DaR7_4ZI)
* [Scan Kubescape on an air-gapped environment (offline support)](https://youtu.be/IGXL9s37smM)
* [Managing exceptions in the Kubescape SaaS version](https://youtu.be/OzpvxGmCR80)
## Install on Windows
**Requires powershell v5.0+**
@@ -69,80 +86,106 @@ Set-ExecutionPolicy RemoteSigned -scope CurrentUser
## Flags
| flag | default | description | options |
| --- | --- | --- | --- |
| `-e`/`--exclude-namespaces` | Scan all namespaces | Namespaces to exclude from scanning. Recommended to exclude `kube-system` and `kube-public` namespaces |
| `-s`/`--silent` | Display progress messages | Silent progress messages |
| `-t`/`--fail-threshold` | `0` (do not fail) | fail command (return exit code 1) if result bellow threshold| `0` -> `100` |
| `-f`/`--format` | `pretty-printer` | Output format | `pretty-printer`/`json`/`junit` |
| `-o`/`--output` | print to stdout | Save scan result in file |
| `--use-from` | | Load local framework object from specified path. If not used will download latest |
| `--use-default` | `false` | Load local framework object from default path. If not used will download latest | `true`/`false` |
| `--exceptions` | | Path to an [exceptions obj](examples/exceptions.json). If not set will download exceptions from Armo management portal |
| `--submit` | `false` | If set, Kubescape will send the scan results to Armo management portal where you can see the results in a user-friendly UI, choose your preferred compliance framework, check risk results history and trends, manage exceptions, get remediation recommendations and much more. By default the results are not sent | `true`/`false`|
| `--keep-local` | `false` | Kubescape will not send scan results to Armo management portal. Use this flag if you ran with the `--submit` flag in the past and you do not want to submit your current scan results | `true`/`false`|
| `--account` | | Armo portal account ID. Default will load account ID from configMap or config file | |
| flag | default | description | options |
|-----------------------------|---------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------|
| `-e`/`--exclude-namespaces` | Scan all namespaces | Namespaces to exclude from scanning. Recommended to exclude `kube-system` and `kube-public` namespaces | |
| `--include-namespaces` | Scan all namespaces | Scan specific namespaces | |
| `-s`/`--silent` | Display progress messages | Silent progress messages | |
| `-t`/`--fail-threshold` | `100` (do not fail) | fail command (return exit code 1) if result is above threshold | `0` -> `100` |
| `-f`/`--format` | `pretty-printer` | Output format | `pretty-printer`/`json`/`junit`/`prometheus` |
| `-o`/`--output` | print to stdout | Save scan result in file | |
| `--use-from` | | Load local framework object from specified path. If not used will download latest | |
| `--use-default` | `false` | Load local framework object from default path. If not used will download latest | `true`/`false` |
| `--exceptions` | | Path to an [exceptions obj](examples/exceptions.json). If not set will download exceptions from Armo management portal | |
| `--submit` | `false` | If set, Kubescape will send the scan results to Armo management portal where you can see the results in a user-friendly UI, choose your preferred compliance framework, check risk results history and trends, manage exceptions, get remediation recommendations and much more. By default the results are not sent | `true`/`false` |
| `--keep-local` | `false` | Kubescape will not send scan results to Armo management portal. Use this flag if you ran with the `--submit` flag in the past and you do not want to submit your current scan results | `true`/`false` |
| `--account` | | Armo portal account ID. Default will load account ID from configMap or config file | |
| `--cluster` | current-context | Cluster context to scan | |
| `--verbose` | `false` | Display all of the input resources and not only failed resources | `true`/`false` |
## Usage & Examples
### Examples
* Scan a running Kubernetes cluster with [`nsa`](https://www.nsa.gov/News-Features/Feature-Stories/Article-View/Article/2716980/nsa-cisa-release-kubernetes-hardening-guidance/) framework and submit results to [ARMO portal](https://portal.armo.cloud/)
#### Scan a running Kubernetes cluster with [`nsa`](https://www.nsa.gov/Press-Room/News-Highlights/Article/Article/2716980/nsa-cisa-release-kubernetes-hardening-guidance/) framework and submit results to the [Kubescape SaaS version](https://portal.armo.cloud/)
```
kubescape scan framework nsa --submit
```
* Scan a running Kubernetes cluster with [`MITRE ATT&CK®`](https://www.microsoft.com/security/blog/2021/03/23/secure-containerized-environments-with-updated-threat-matrix-for-kubernetes/) framework and submit results to [ARMO portal](https://portal.armo.cloud/)
#### Scan a running Kubernetes cluster with [`MITRE ATT&CK®`](https://www.microsoft.com/security/blog/2021/03/23/secure-containerized-environments-with-updated-threat-matrix-for-kubernetes/) framework and submit results to the [Kubescape SaaS version](https://portal.armo.cloud/)
```
kubescape scan framework mitre --submit
```
* Scan a running Kubernetes cluster with a specific control using the control name or control ID. [List of controls](https://hub.armo.cloud/docs/controls)
#### Scan a running Kubernetes cluster with a specific control using the control name or control ID. [List of controls](https://hub.armo.cloud/docs/controls)
```
kubescape scan control "Privileged container"
```
* Scan local `yaml`/`json` files before deploying. [Take a look at the demonstration](https://youtu.be/Ox6DaR7_4ZI)
#### Scan specific namespaces
```
kubescape scan framework nsa --include-namespaces development,staging,production
```
#### Scan cluster and exclude some namespaces
```
kubescape scan framework nsa --exclude-namespaces kube-system,kube-public
```
#### Scan local `yaml`/`json` files before deploying. [Take a look at the demonstration](https://youtu.be/Ox6DaR7_4ZI)
```
kubescape scan framework nsa *.yaml
```
* Scan kubernetes manifest files from a public github repository
#### Scan kubernetes manifest files from a public github repository
```
kubescape scan framework nsa https://github.com/armosec/kubescape
```
* Output in `json` format
#### Display all scanned resources (including the resources that passed)
```
kubescape scan framework nsa --exclude-namespaces kube-system,kube-public --format json --output results.json
kubescape scan framework nsa --verbose
```
* Output in `junit xml` format
#### Output in `json` format
```
kubescape scan framework nsa --exclude-namespaces kube-system,kube-public --format junit --output results.xml
kubescape scan framework nsa --format json --output results.json
```
* Scan with exceptions, objects with exceptions will be presented as `exclude` and not `fail`
#### Output in `junit xml` format
```
kubescape scan framework nsa --exceptions examples/exceptions.json
kubescape scan framework nsa --format junit --output results.xml
```
### Helm Support
#### Output in `prometheus` metrics format - Contributed by [@Joibel](https://github.com/Joibel)
```
kubescape scan framework nsa --format prometheus
```
* Render the helm chart using [`helm template`](https://helm.sh/docs/helm/helm_template/) and pass to stdout
#### Scan with exceptions; objects with exceptions will be presented as `exclude` and not `fail`
[Full documentation](examples/exceptions/README.md)
```
kubescape scan framework nsa --exceptions examples/exceptions/exclude-kube-namespaces.json
```
#### Scan Helm charts - Render the helm chart using [`helm template`](https://helm.sh/docs/helm/helm_template/) and pass to stdout
```
helm template [NAME] [CHART] [flags] --dry-run | kubescape scan framework nsa -
```
for example:
e.g.
```
helm template bitnami/mysql --generate-name --dry-run | kubescape scan framework nsa -
```
### Offline Support
[Video tutorial](https://youtu.be/IGXL9s37smM)
It is possible to run Kubescape offline!
First download the framework and then scan with `--use-from` flag
@@ -157,13 +200,40 @@ kubescape download framework nsa --output nsa.json
kubescape scan framework nsa --use-from nsa.json
```
Kubescape is an open source project, we welcome your feedback and ideas for improvement. Were also aiming to collaborate with the Kubernetes community to help make the tests themselves more robust and complete as Kubernetes develops.
## Scan Periodically using Helm - Contributed by [@yonahd](https://github.com/yonahd)
You can scan your cluster periodically by adding a `CronJob` that triggers kubescape on a schedule
```
helm install kubescape examples/helm_chart/
```
## Scan using docker image
Official Docker image `quay.io/armosec/kubescape`
```
docker run -v "$(pwd)/example.yaml:/app/example.yaml" quay.io/armosec/kubescape scan framework nsa /app/example.yaml
```
# Submit data manually
Use the `submit` command if you wish to submit data manually
## Submit scan results manually
First, scan your cluster using the `json` format flag: `kubescape scan framework <name> --format json --output path/to/results.json`.
Now you can submit the results to the Kubescape SaaS version:
```
kubescape submit results path/to/results.json
```
# How to build
## Build using python (3.7^) script
Kubescpae can be built using:
Kubescape can be built using:
``` sh
python build.py
@@ -199,12 +269,8 @@ go build -o kubescape .
4. Enjoy :zany_face:
## Docker Support
## Docker Build
### Official Docker image
```
quay.io/armosec/kubescape
```
### Build your own Docker image
1. Clone Project
@@ -217,10 +283,11 @@ git clone https://github.com/armosec/kubescape.git kubescape && cd "$_"
docker build -t kubescape -f build/Dockerfile .
```
# Under the hood
## Tests
Kubescape is running the following tests according to what is defined by [Kubernetes Hardening Guidance by NSA and CISA](https://www.nsa.gov/News-Features/Feature-Stories/Article-View/Article/2716980/nsa-cisa-release-kubernetes-hardening-guidance/)
Kubescape is running the following tests according to what is defined by [Kubernetes Hardening Guidance by NSA and CISA](https://www.nsa.gov/Press-Room/News-Highlights/Article/Article/2716980/nsa-cisa-release-kubernetes-hardening-guidance/)
* Non-root containers
* Immutable container filesystem
* Privileged containers


@@ -41,7 +41,7 @@ def main():
# Set some variables
packageName = getPackageName()
buildUrl = "github.com/armosec/kubescape/clihandler/cmd.BuildNumber"
buildUrl = "github.com/armosec/kubescape/cautils.BuildNumber"
releaseVersion = os.getenv("RELEASE")
ArmoBEServer = os.getenv("ArmoBEServer")
ArmoERServer = os.getenv("ArmoERServer")
@@ -60,9 +60,6 @@ def main():
status = subprocess.call(["go", "build", "-o", "%s/%s" % (buildDir, packageName), "-ldflags" ,ldflags])
checkStatus(status, "Failed to build kubescape")
test_cli_prints(buildDir,packageName)
sha1 = hashlib.sha1()
with open(buildDir + "/" + packageName, "rb") as kube:
sha1.update(kube.read())
@@ -70,13 +67,7 @@ def main():
kube_sha.write(sha1.hexdigest())
print("Build Done")
def test_cli_prints(buildDir,packageName):
bin_cli = os.path.abspath(os.path.join(buildDir,packageName))
print(f"testing CLI prints on {bin_cli}")
status = str(subprocess.check_output([bin_cli, "-h"]))
assert "download" in status, "download is missing: " + status
if __name__ == "__main__":
main()


@@ -1,5 +1,10 @@
FROM golang:1.17-alpine as builder
#ENV GOPROXY=https://goproxy.io,direct
ARG run_number
ENV RELEASE=v1.0.${run_number}
ENV GO111MODULE=
ENV CGO_ENABLED=0


@@ -4,21 +4,17 @@ import (
"context"
"encoding/json"
"fmt"
"net/url"
"os"
"strings"
"github.com/armosec/kubescape/cautils/getter"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/armosec/k8s-interface/k8sinterface"
"github.com/armosec/kubescape/cautils/getter"
corev1 "k8s.io/api/core/v1"
)
const (
configMapName = "kubescape"
configFileName = "config"
)
const configFileName = "config"
func ConfigFileFullPath() string { return getter.GetDefaultPath(configFileName + ".json") }
@@ -57,161 +53,86 @@ func (co *ConfigObj) Config() []byte {
// ======================================================================================
// =============================== interface ============================================
// ======================================================================================
type IClusterConfig interface {
type ITenantConfig interface {
// set
SetConfig(customerGUID string) error
SetTenant() error
// getters
GetClusterName() string
GetCustomerGUID() string
GetConfigObj() *ConfigObj
GetK8sAPI() *k8sinterface.KubernetesApi
GetBackendAPI() getter.IBackend
GetDefaultNS() string
GenerateURL()
}
// GetBackendAPI() getter.IBackend
// GenerateURL()
// ClusterConfigSetup - Setup the desired cluster behavior regarding submittion to the Armo BE
func ClusterConfigSetup(scanInfo *ScanInfo, k8s *k8sinterface.KubernetesApi, beAPI getter.IBackend) IClusterConfig {
/*
If "First run (local config not found)" -
Default - Do not send report (local)
Local - Do not send report
Submit - Create tenant & Submit report
If "Submitted but not signed up" -
Default - Delete local config & Do not send report (local)
Local - Delete local config & Do not send report
Submit - Submit report
If "Signed up user" -
Default - Submit report (submit)
Local - Do not send report
Submit - Submit report
*/
clusterConfig := NewClusterConfig(k8s, beAPI)
clusterConfig.LoadConfig()
if !IsSubmitted(clusterConfig) {
if scanInfo.Submit {
return clusterConfig // submit - Create tenant & Submit report
}
return NewEmptyConfig() // local/default - Do not send report
}
if !IsRegistered(clusterConfig) {
if scanInfo.Submit {
return clusterConfig // submit/default - Submit report
}
DeleteConfig(k8s)
return NewEmptyConfig() // local - Delete local config & Do not send report
}
if scanInfo.Local {
return NewEmptyConfig() // local - Do not send report
}
return clusterConfig // submit/default - Submit report
IsConfigFound() bool
}
// ======================================================================================
// ============================= Mock Config ============================================
// ============================ Local Config ============================================
// ======================================================================================
type EmptyConfig struct {
}
func NewEmptyConfig() *EmptyConfig { return &EmptyConfig{} }
func (c *EmptyConfig) SetConfig(customerGUID string) error { return nil }
func (c *EmptyConfig) GetConfigObj() *ConfigObj { return &ConfigObj{} }
func (c *EmptyConfig) GetCustomerGUID() string { return "" }
func (c *EmptyConfig) GetK8sAPI() *k8sinterface.KubernetesApi { return nil } // TODO: return mock obj
func (c *EmptyConfig) GetDefaultNS() string { return k8sinterface.GetDefaultNamespace() }
func (c *EmptyConfig) GetBackendAPI() getter.IBackend { return nil } // TODO: return mock obj
func (c *EmptyConfig) GetClusterName() string { return k8sinterface.GetClusterName() }
func (c *EmptyConfig) GenerateURL() {
message := fmt.Sprintf("\nCheckout for more cool features: https://%s\n", getter.GetArmoAPIConnector().GetFrontendURL())
InfoTextDisplay(os.Stdout, fmt.Sprintf("\n%s\n", message))
}
// ======================================================================================
// ========================== Cluster Config ============================================
// ======================================================================================
type ClusterConfig struct {
k8s *k8sinterface.KubernetesApi
defaultNS string
// Config when scanning YAML files or URL but not a Kubernetes cluster
type LocalConfig struct {
backendAPI getter.IBackend
configObj *ConfigObj
}
func NewClusterConfig(k8s *k8sinterface.KubernetesApi, backendAPI getter.IBackend) *ClusterConfig {
return &ClusterConfig{
k8s: k8s,
func NewLocalConfig(backendAPI getter.IBackend, customerGUID, clusterName string) *LocalConfig {
var configObj *ConfigObj
lc := &LocalConfig{
backendAPI: backendAPI,
configObj: &ConfigObj{},
defaultNS: k8sinterface.GetDefaultNamespace(),
}
// get from configMap
if existsConfigFile() { // get from file
configObj, _ = loadConfigFromFile()
} else {
configObj = &ConfigObj{}
}
if configObj != nil {
lc.configObj = configObj
}
if customerGUID != "" {
lc.configObj.CustomerGUID = customerGUID // override config customerGUID
}
if clusterName != "" {
lc.configObj.ClusterName = AdoptClusterName(clusterName) // override config clusterName
}
if lc.configObj.CustomerGUID != "" {
if err := lc.SetTenant(); err != nil {
fmt.Println(err)
}
}
return lc
}
func (c *ClusterConfig) GetConfigObj() *ConfigObj { return c.configObj }
func (c *ClusterConfig) GetK8sAPI() *k8sinterface.KubernetesApi { return c.k8s }
func (c *ClusterConfig) GetDefaultNS() string { return c.defaultNS }
func (c *ClusterConfig) GetBackendAPI() getter.IBackend { return c.backendAPI }
func (c *ClusterConfig) GenerateURL() {
u := url.URL{}
u.Scheme = "https"
u.Host = getter.GetArmoAPIConnector().GetFrontendURL()
if c.configObj == nil {
return
func (lc *LocalConfig) GetConfigObj() *ConfigObj { return lc.configObj }
func (lc *LocalConfig) GetCustomerGUID() string { return lc.configObj.CustomerGUID }
func (lc *LocalConfig) SetCustomerGUID(customerGUID string) { lc.configObj.CustomerGUID = customerGUID }
func (lc *LocalConfig) GetClusterName() string { return lc.configObj.ClusterName }
func (lc *LocalConfig) IsConfigFound() bool { return existsConfigFile() }
func (lc *LocalConfig) SetTenant() error {
// ARMO tenant GUID
if err := getTenantConfigFromBE(lc.backendAPI, lc.configObj); err != nil {
return err
}
message := fmt.Sprintf("\nCheckout for more cool features: https://%s\n", getter.GetArmoAPIConnector().GetFrontendURL())
if c.configObj.CustomerAdminEMail != "" {
InfoTextDisplay(os.Stdout, message+"\n")
return
}
u.Path = "account/sign-up"
q := u.Query()
q.Add("invitationToken", c.configObj.Token)
q.Add("customerGUID", c.configObj.CustomerGUID)
u.RawQuery = q.Encode()
InfoTextDisplay(os.Stdout, message+"\n")
updateConfigFile(lc.configObj)
return nil
}
func (c *ClusterConfig) GetCustomerGUID() string {
if c.configObj != nil {
return c.configObj.CustomerGUID
}
return ""
}
func (c *ClusterConfig) SetConfig(customerGUID string) error {
if c.configObj == nil {
c.configObj = &ConfigObj{}
}
// cluster name
if c.GetClusterName() == "" {
c.setClusterName(k8sinterface.GetClusterName())
}
// ARMO customer GUID
if customerGUID != "" && c.GetCustomerGUID() != customerGUID {
c.setCustomerGUID(customerGUID) // override config customerGUID
}
customerGUID = c.GetCustomerGUID()
func getTenantConfigFromBE(backendAPI getter.IBackend, configObj *ConfigObj) error {
// get from armoBE
tenantResponse, err := c.backendAPI.GetCustomerGUID(customerGUID)
backendAPI.SetCustomerGUID(configObj.CustomerGUID)
tenantResponse, err := backendAPI.GetCustomerGUID()
if err == nil && tenantResponse != nil {
if tenantResponse.AdminMail != "" { // this customer already belongs to some user
c.setCustomerAdminEMail(tenantResponse.AdminMail)
} else {
c.setToken(tenantResponse.Token)
c.setCustomerGUID(tenantResponse.TenantID)
if tenantResponse.AdminMail != "" { // registered tenant
configObj.CustomerAdminEMail = tenantResponse.AdminMail
} else { // new tenant
configObj.Token = tenantResponse.Token
configObj.CustomerGUID = tenantResponse.TenantID
}
} else {
if err != nil && !strings.Contains(err.Error(), "already exists") {
@@ -219,44 +140,100 @@ func (c *ClusterConfig) SetConfig(customerGUID string) error {
}
}
return nil
}
// ======================================================================================
// ========================== Cluster Config ============================================
// ======================================================================================
// ClusterConfig configuration of specific cluster
/*
Supported environments variables:
KS_DEFAULT_CONFIGMAP_NAME // name of configmap, if not set default is 'kubescape'
KS_DEFAULT_CONFIGMAP_NAMESPACE // configmap namespace, if not set default is 'default'
TODO - support:
KS_ACCOUNT // Account ID
KS_CACHE // path to cached files
*/
type ClusterConfig struct {
k8s *k8sinterface.KubernetesApi
configMapName string
configMapNamespace string
backendAPI getter.IBackend
configObj *ConfigObj
}
func NewClusterConfig(k8s *k8sinterface.KubernetesApi, backendAPI getter.IBackend, customerGUID, clusterName string) *ClusterConfig {
var configObj *ConfigObj
c := &ClusterConfig{
k8s: k8s,
backendAPI: backendAPI,
configObj: &ConfigObj{},
configMapName: getConfigMapName(),
configMapNamespace: getConfigMapNamespace(),
}
// get from configMap
if c.existsConfigMap() {
configObj, _ = c.loadConfigFromConfigMap()
}
if configObj == nil && existsConfigFile() { // get from file
configObj, _ = loadConfigFromFile()
}
if configObj != nil {
c.configObj = configObj
}
if customerGUID != "" {
c.configObj.CustomerGUID = customerGUID // override config customerGUID
}
if clusterName != "" {
c.configObj.ClusterName = AdoptClusterName(clusterName) // override config clusterName
}
if c.configObj.CustomerGUID != "" {
if err := c.SetTenant(); err != nil {
fmt.Println(err)
}
}
if c.configObj.ClusterName == "" {
c.configObj.ClusterName = AdoptClusterName(k8sinterface.GetClusterName())
} else { // override the cluster name if it has unwanted characters
c.configObj.ClusterName = AdoptClusterName(c.configObj.ClusterName)
}
return c
}
func (c *ClusterConfig) GetConfigObj() *ConfigObj { return c.configObj }
func (c *ClusterConfig) GetDefaultNS() string { return c.configMapNamespace }
func (c *ClusterConfig) GetCustomerGUID() string { return c.configObj.CustomerGUID }
func (c *ClusterConfig) SetCustomerGUID(customerGUID string) { c.configObj.CustomerGUID = customerGUID }
func (c *ClusterConfig) IsConfigFound() bool {
return existsConfigFile() || c.existsConfigMap()
}
func (c *ClusterConfig) SetTenant() error {
// ARMO tenant GUID
if err := getTenantConfigFromBE(c.backendAPI, c.configObj); err != nil {
return err
}
// update/create config
if c.existsConfigMap() {
c.updateConfigMap()
} else {
c.createConfigMap()
}
c.updateConfigFile()
updateConfigFile(c.configObj)
return nil
}
func (c *ClusterConfig) setToken(token string) {
c.configObj.Token = token
}
func (c *ClusterConfig) setCustomerAdminEMail(customerAdminEMail string) {
c.configObj.CustomerAdminEMail = customerAdminEMail
}
func (c *ClusterConfig) setCustomerGUID(customerGUID string) {
c.configObj.CustomerGUID = customerGUID
}
func (c *ClusterConfig) setClusterName(clusterName string) {
c.configObj.ClusterName = clusterName
}
func (c *ClusterConfig) GetClusterName() string {
return c.configObj.ClusterName
}
func (c *ClusterConfig) LoadConfig() {
// get from configMap
if c.existsConfigMap() {
c.configObj, _ = c.loadConfigFromConfigMap()
} else if existsConfigFile() { // get from file
c.configObj, _ = loadConfigFromFile()
} else {
c.configObj = &ConfigObj{}
}
}
func (c *ClusterConfig) ToMapString() map[string]interface{} {
m := map[string]interface{}{}
@@ -266,10 +243,7 @@ func (c *ClusterConfig) ToMapString() map[string]interface{} {
return m
}
func (c *ClusterConfig) loadConfigFromConfigMap() (*ConfigObj, error) {
if c.k8s == nil {
return nil, nil
}
configMap, err := c.k8s.KubernetesClient.CoreV1().ConfigMaps(c.defaultNS).Get(context.Background(), configMapName, metav1.GetOptions{})
configMap, err := c.k8s.KubernetesClient.CoreV1().ConfigMaps(c.configMapNamespace).Get(context.Background(), c.configMapName, metav1.GetOptions{})
if err != nil {
return nil, err
}
@@ -281,14 +255,14 @@ func (c *ClusterConfig) loadConfigFromConfigMap() (*ConfigObj, error) {
}
func (c *ClusterConfig) existsConfigMap() bool {
_, err := c.k8s.KubernetesClient.CoreV1().ConfigMaps(c.defaultNS).Get(context.Background(), configMapName, metav1.GetOptions{})
_, err := c.k8s.KubernetesClient.CoreV1().ConfigMaps(c.configMapNamespace).Get(context.Background(), c.configMapName, metav1.GetOptions{})
// TODO - check if has customerGUID
return err == nil
}
func (c *ClusterConfig) GetValueByKeyFromConfigMap(key string) (string, error) {
configMap, err := c.k8s.KubernetesClient.CoreV1().ConfigMaps(c.defaultNS).Get(context.Background(), configMapName, metav1.GetOptions{})
configMap, err := c.k8s.KubernetesClient.CoreV1().ConfigMaps(c.configMapNamespace).Get(context.Background(), c.configMapName, metav1.GetOptions{})
if err != nil {
return "", err
@@ -340,11 +314,11 @@ func SetKeyValueInConfigJson(key string, value string) error {
func (c *ClusterConfig) SetKeyValueInConfigmap(key string, value string) error {
configMap, err := c.k8s.KubernetesClient.CoreV1().ConfigMaps(c.defaultNS).Get(context.Background(), configMapName, metav1.GetOptions{})
configMap, err := c.k8s.KubernetesClient.CoreV1().ConfigMaps(c.configMapNamespace).Get(context.Background(), c.configMapName, metav1.GetOptions{})
if err != nil {
configMap = &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: configMapName,
Name: c.configMapName,
},
}
}
@@ -356,9 +330,9 @@ func (c *ClusterConfig) SetKeyValueInConfigmap(key string, value string) error {
configMap.Data[key] = value
if err != nil {
_, err = c.k8s.KubernetesClient.CoreV1().ConfigMaps(c.defaultNS).Create(context.Background(), configMap, metav1.CreateOptions{})
_, err = c.k8s.KubernetesClient.CoreV1().ConfigMaps(c.configMapNamespace).Create(context.Background(), configMap, metav1.CreateOptions{})
} else {
_, err = c.k8s.KubernetesClient.CoreV1().ConfigMaps(configMap.Namespace).Update(context.Background(), configMap, metav1.UpdateOptions{})
_, err = c.k8s.KubernetesClient.CoreV1().ConfigMaps(c.configMapNamespace).Update(context.Background(), configMap, metav1.UpdateOptions{})
}
return err
@@ -375,12 +349,12 @@ func (c *ClusterConfig) createConfigMap() error {
}
configMap := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: configMapName,
Name: c.configMapName,
},
}
c.updateConfigData(configMap)
_, err := c.k8s.KubernetesClient.CoreV1().ConfigMaps(c.defaultNS).Create(context.Background(), configMap, metav1.CreateOptions{})
_, err := c.k8s.KubernetesClient.CoreV1().ConfigMaps(c.configMapNamespace).Create(context.Background(), configMap, metav1.CreateOptions{})
return err
}
@@ -388,7 +362,7 @@ func (c *ClusterConfig) updateConfigMap() error {
if c.k8s == nil {
return nil
}
configMap, err := c.k8s.KubernetesClient.CoreV1().ConfigMaps(c.defaultNS).Get(context.Background(), configMapName, metav1.GetOptions{})
configMap, err := c.k8s.KubernetesClient.CoreV1().ConfigMaps(c.configMapNamespace).Get(context.Background(), c.configMapName, metav1.GetOptions{})
if err != nil {
return err
@@ -396,12 +370,12 @@ func (c *ClusterConfig) updateConfigMap() error {
c.updateConfigData(configMap)
_, err = c.k8s.KubernetesClient.CoreV1().ConfigMaps(configMap.Namespace).Update(context.Background(), configMap, metav1.UpdateOptions{})
_, err = c.k8s.KubernetesClient.CoreV1().ConfigMaps(c.configMapNamespace).Update(context.Background(), configMap, metav1.UpdateOptions{})
return err
}
func (c *ClusterConfig) updateConfigFile() error {
if err := os.WriteFile(ConfigFileFullPath(), c.configObj.Config(), 0664); err != nil {
func updateConfigFile(configObj *ConfigObj) error {
if err := os.WriteFile(ConfigFileFullPath(), configObj.Config(), 0664); err != nil {
return err
}
return nil
@@ -432,21 +406,23 @@ func readConfig(dat []byte) (*ConfigObj, error) {
return nil, nil
}
configObj := &ConfigObj{}
err := json.Unmarshal(dat, configObj)
return configObj, err
if err := json.Unmarshal(dat, configObj); err != nil {
return nil, err
}
return configObj, nil
}
// Check if the customer is submitted
func IsSubmitted(clusterConfig *ClusterConfig) bool {
func (clusterConfig *ClusterConfig) IsSubmitted() bool {
return clusterConfig.existsConfigMap() || existsConfigFile()
}
// Check if the customer is registered
func IsRegistered(clusterConfig *ClusterConfig) bool {
func (clusterConfig *ClusterConfig) IsRegistered() bool {
// get from armoBE
tenantResponse, err := clusterConfig.backendAPI.GetCustomerGUID(clusterConfig.GetCustomerGUID())
clusterConfig.backendAPI.SetCustomerGUID(clusterConfig.GetCustomerGUID())
tenantResponse, err := clusterConfig.backendAPI.GetCustomerGUID()
if err == nil && tenantResponse != nil {
if tenantResponse.AdminMail != "" { // this customer already belongs to some user
return true
@@ -455,8 +431,8 @@ func IsRegistered(clusterConfig *ClusterConfig) bool {
return false
}
func DeleteConfig(k8s *k8sinterface.KubernetesApi) error {
if err := DeleteConfigMap(k8s); err != nil {
func (clusterConfig *ClusterConfig) DeleteConfig() error {
if err := clusterConfig.DeleteConfigMap(); err != nil {
return err
}
if err := DeleteConfigFile(); err != nil {
@@ -464,10 +440,28 @@ func DeleteConfig(k8s *k8sinterface.KubernetesApi) error {
}
return nil
}
func DeleteConfigMap(k8s *k8sinterface.KubernetesApi) error {
return k8s.KubernetesClient.CoreV1().ConfigMaps(k8sinterface.GetDefaultNamespace()).Delete(context.Background(), configMapName, metav1.DeleteOptions{})
func (clusterConfig *ClusterConfig) DeleteConfigMap() error {
return clusterConfig.k8s.KubernetesClient.CoreV1().ConfigMaps(clusterConfig.configMapNamespace).Delete(context.Background(), clusterConfig.configMapName, metav1.DeleteOptions{})
}
func DeleteConfigFile() error {
return os.Remove(ConfigFileFullPath())
}
func AdoptClusterName(clusterName string) string {
return strings.ReplaceAll(clusterName, "/", "-")
}
func getConfigMapName() string {
if n := os.Getenv("KS_DEFAULT_CONFIGMAP_NAME"); n != "" {
return n
}
return "kubescape"
}
func getConfigMapNamespace() string {
if n := os.Getenv("KS_DEFAULT_CONFIGMAP_NAMESPACE"); n != "" {
return n
}
return "default"
}
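As an aside (not part of the diff), a minimal runnable sketch of how these environment overrides resolve; envOrDefault mirrors the lookup pattern of getConfigMapName and getConfigMapNamespace above:

package main

import (
	"fmt"
	"os"
)

// envOrDefault mirrors getConfigMapName/getConfigMapNamespace: prefer the
// environment variable when set, otherwise fall back to the default.
func envOrDefault(key, fallback string) string {
	if v := os.Getenv(key); v != "" {
		return v
	}
	return fallback
}

func main() {
	os.Setenv("KS_DEFAULT_CONFIGMAP_NAME", "my-kubescape")
	fmt.Println(envOrDefault("KS_DEFAULT_CONFIGMAP_NAME", "kubescape"))    // my-kubescape
	fmt.Println(envOrDefault("KS_DEFAULT_CONFIGMAP_NAMESPACE", "default")) // default (variable unset)
}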

View File

@@ -2,23 +2,33 @@ package cautils
import (
"github.com/armosec/armoapi-go/armotypes"
"github.com/armosec/k8s-interface/workloadinterface"
"github.com/armosec/opa-utils/reporthandling"
"github.com/armosec/opa-utils/reporthandling/results/v1/resourcesresults"
reporthandlingv2 "github.com/armosec/opa-utils/reporthandling/v2"
)
// K8SResources map[<api group>/<api version>/<resource>]<resource object>
type K8SResources map[string]interface{}
// K8SResources map[<api group>/<api version>/<resource>][]<resourceID>
type K8SResources map[string][]string
type OPASessionObj struct {
Frameworks []reporthandling.Framework
K8SResources *K8SResources
Exceptions []armotypes.PostureExceptionPolicy
PostureReport *reporthandling.PostureReport
K8SResources *K8SResources // input k8s objects
Frameworks []reporthandling.Framework // list of frameworks to scan
AllResources map[string]workloadinterface.IMetadata // all scanned resources, map[<resource ID>]<resource>
ResourcesResult map[string]resourcesresults.Result // resources scan results, map[<resource ID>]<resource result>
PostureReport *reporthandling.PostureReport // scan results v1
Report *reporthandlingv2.PostureReport // scan results v2
Exceptions []armotypes.PostureExceptionPolicy // list of exceptions to apply on scan results
RegoInputData RegoInputData // input passed to rego for scanning. map[<control name>][<input arguments>]
}
func NewOPASessionObj(frameworks []reporthandling.Framework, k8sResources *K8SResources) *OPASessionObj {
return &OPASessionObj{
Frameworks: frameworks,
K8SResources: k8sResources,
Report: &reporthandlingv2.PostureReport{},
Frameworks: frameworks,
K8SResources: k8sResources,
AllResources: make(map[string]workloadinterface.IMetadata),
ResourcesResult: make(map[string]resourcesresults.Result),
PostureReport: &reporthandling.PostureReport{
ClusterName: ClusterName,
CustomerGUID: CustomerGUID,
@@ -28,8 +38,11 @@ func NewOPASessionObj(frameworks []reporthandling.Framework, k8sResources *K8SRe
func NewOPASessionObjMock() *OPASessionObj {
return &OPASessionObj{
Frameworks: nil,
K8SResources: nil,
Frameworks: nil,
K8SResources: nil,
AllResources: make(map[string]workloadinterface.IMetadata),
ResourcesResult: make(map[string]resourcesresults.Result),
Report: &reporthandlingv2.PostureReport{},
PostureReport: &reporthandling.PostureReport{
ClusterName: "",
CustomerGUID: "",
@@ -49,3 +62,14 @@ type Exception struct {
Namespaces []string `json:"namespaces"`
Regex string `json:"regex"` // not supported
}
type RegoInputData struct {
PostureControlInputs map[string][]string `json:"postureControlInputs"`
// ClusterName string `json:"clusterName"`
// K8sConfig RegoK8sConfig `json:"k8sconfig"`
}
type Policies struct {
Frameworks []string
Controls map[string]reporthandling.Control // map[<control ID>]<control>
}

View File

@@ -0,0 +1,67 @@
package cautils
import (
pkgcautils "github.com/armosec/utils-go/utils"
"github.com/armosec/opa-utils/reporthandling"
)
func NewPolicies() *Policies {
return &Policies{
Frameworks: make([]string, 0),
Controls: make(map[string]reporthandling.Control),
}
}
func (policies *Policies) Set(frameworks []reporthandling.Framework, version string) {
for i := range frameworks {
if frameworks[i].Name != "" {
policies.Frameworks = append(policies.Frameworks, frameworks[i].Name)
}
for j := range frameworks[i].Controls {
compatibleRules := []reporthandling.PolicyRule{}
for r := range frameworks[i].Controls[j].Rules {
if !ruleWithArmoOpaDependency(frameworks[i].Controls[j].Rules[r].Attributes) && isRuleKubescapeVersionCompatible(frameworks[i].Controls[j].Rules[r].Attributes, version) {
compatibleRules = append(compatibleRules, frameworks[i].Controls[j].Rules[r])
}
}
if len(compatibleRules) > 0 {
frameworks[i].Controls[j].Rules = compatibleRules
policies.Controls[frameworks[i].Controls[j].ControlID] = frameworks[i].Controls[j]
}
}
}
}
func ruleWithArmoOpaDependency(attributes map[string]interface{}) bool {
if attributes == nil {
return false
}
if s, ok := attributes["armoOpa"]; ok { // TODO - make global
return pkgcautils.StringToBool(s.(string))
}
return false
}
// Checks that the kubescape version is within the range of versions this rule supports
// In a local build (version = ""):
// returns true only if the rule doesn't have the "useUntilKubescapeVersion" attribute
func isRuleKubescapeVersionCompatible(attributes map[string]interface{}, version string) bool {
if from, ok := attributes["useFromKubescapeVersion"]; ok && from != nil {
if version != "" {
if from.(string) > version {
return false
}
}
}
if until, ok := attributes["useUntilKubescapeVersion"]; ok && until != nil {
if version != "" {
if until.(string) <= version {
return false
}
} else {
return false
}
}
return true
}
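A test-style sketch of the gating above (assuming the function compares against its version argument, as written here); note the comparison is plain lexicographic string ordering, so it is only reliable for same-width version strings:

package cautils

import "testing"

// Sketch: how useFromKubescapeVersion/useUntilKubescapeVersion gate a rule.
func TestIsRuleKubescapeVersionCompatible(t *testing.T) {
	attrs := map[string]interface{}{
		"useFromKubescapeVersion":  "v2.0.150",
		"useUntilKubescapeVersion": "v2.0.200",
	}
	if !isRuleKubescapeVersionCompatible(attrs, "v2.0.160") {
		t.Error("v2.0.160 lies in [from, until) and should be compatible")
	}
	if isRuleKubescapeVersionCompatible(attrs, "v2.0.100") {
		t.Error("v2.0.100 predates useFromKubescapeVersion")
	}
	if isRuleKubescapeVersionCompatible(attrs, "") {
		t.Error("local builds (empty version) are excluded when useUntilKubescapeVersion is set")
	}
}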

View File

@@ -35,15 +35,15 @@ func ScanStartDisplay() {
if IsSilent() {
return
}
InfoDisplay(os.Stdout, "ARMO security scanner starting\n")
InfoDisplay(os.Stderr, "ARMO security scanner starting\n")
}
func SuccessTextDisplay(str string) {
if IsSilent() {
return
}
SuccessDisplay(os.Stdout, "[success] ")
SimpleDisplay(os.Stdout, fmt.Sprintf("%s\n", str))
SuccessDisplay(os.Stderr, "[success] ")
SimpleDisplay(os.Stderr, fmt.Sprintf("%s\n", str))
}
@@ -51,8 +51,8 @@ func ErrorDisplay(str string) {
if IsSilent() {
return
}
SuccessDisplay(os.Stdout, "[Error] ")
SimpleDisplay(os.Stdout, fmt.Sprintf("%s\n", str))
FailureDisplay(os.Stderr, "[Error] ")
SimpleDisplay(os.Stderr, fmt.Sprintf("%s\n", str))
}
@@ -60,8 +60,8 @@ func ProgressTextDisplay(str string) {
if IsSilent() {
return
}
InfoDisplay(os.Stdout, "[progress] ")
SimpleDisplay(os.Stdout, fmt.Sprintf("%s\n", str))
InfoDisplay(os.Stderr, "[progress] ")
SimpleDisplay(os.Stderr, fmt.Sprintf("%s\n", str))
}
func StartSpinner() {

View File

@@ -1,7 +1,8 @@
package cautils
type DownloadInfo struct {
Path string
FrameworkName string
ControlName string
Path string
Target string
Name string
Account string
}

View File

@@ -1,8 +1,10 @@
package getter
import (
"encoding/json"
"fmt"
"net/http"
"strings"
"time"
"github.com/armosec/armoapi-go/armotypes"
@@ -23,15 +25,16 @@ var (
armoDevERURL = "report.eudev3.cyberarmorsoft.com"
armoDevBEURL = "eggdashbe.eudev3.cyberarmorsoft.com"
armoDevFEURL = "armoui.eudev3.cyberarmorsoft.com"
armoDevFEURL = "armoui-dev.eudev3.cyberarmorsoft.com"
)
// Armo API for downloading policies
type ArmoAPI struct {
httpClient *http.Client
apiURL string
erURL string
feURL string
httpClient *http.Client
apiURL string
erURL string
feURL string
customerGUID string
}
var globalArmoAPIConnecctor *ArmoAPI
@@ -82,7 +85,10 @@ func newArmoAPI() *ArmoAPI {
httpClient: &http.Client{Timeout: time.Duration(61) * time.Second},
}
}
func (armoAPI *ArmoAPI) SetCustomerGUID(customerGUID string) {
armoAPI.customerGUID = customerGUID
}
func (armoAPI *ArmoAPI) GetFrontendURL() string {
return armoAPI.feURL
}
@@ -92,26 +98,28 @@ func (armoAPI *ArmoAPI) GetReportReceiverURL() string {
}
func (armoAPI *ArmoAPI) GetFramework(name string) (*reporthandling.Framework, error) {
respStr, err := HttpGetter(armoAPI.httpClient, armoAPI.getFrameworkURL(name))
respStr, err := HttpGetter(armoAPI.httpClient, armoAPI.getFrameworkURL(name), nil)
if err != nil {
return nil, err
return nil, nil
}
framework := &reporthandling.Framework{}
if err = JSONDecoder(respStr).Decode(framework); err != nil {
return nil, err
}
SaveFrameworkInFile(framework, GetDefaultPath(name+".json"))
SaveInFile(framework, GetDefaultPath(name+".json"))
return framework, err
}
func (armoAPI *ArmoAPI) GetExceptions(customerGUID, clusterName string) ([]armotypes.PostureExceptionPolicy, error) {
func (armoAPI *ArmoAPI) GetControl(policyName string) (*reporthandling.Control, error) {
return nil, fmt.Errorf("control api is not public")
}
func (armoAPI *ArmoAPI) GetExceptions(clusterName string) ([]armotypes.PostureExceptionPolicy, error) {
exceptions := []armotypes.PostureExceptionPolicy{}
if customerGUID == "" {
return exceptions, nil
}
respStr, err := HttpGetter(armoAPI.httpClient, armoAPI.getExceptionsURL(customerGUID, clusterName))
respStr, err := HttpGetter(armoAPI.httpClient, armoAPI.getExceptionsURL(clusterName), nil)
if err != nil {
return nil, err
}
@@ -123,12 +131,12 @@ func (armoAPI *ArmoAPI) GetExceptions(customerGUID, clusterName string) ([]armot
return exceptions, nil
}
func (armoAPI *ArmoAPI) GetCustomerGUID(customerGUID string) (*TenantResponse, error) {
func (armoAPI *ArmoAPI) GetCustomerGUID() (*TenantResponse, error) {
url := armoAPI.getCustomerURL()
if customerGUID != "" {
url = fmt.Sprintf("%s?customerGUID=%s", url, customerGUID)
if armoAPI.customerGUID != "" {
url = fmt.Sprintf("%s?customerGUID=%s", url, armoAPI.customerGUID)
}
respStr, err := HttpGetter(armoAPI.httpClient, url)
respStr, err := HttpGetter(armoAPI.httpClient, url, nil)
if err != nil {
return nil, err
}
@@ -140,6 +148,79 @@ func (armoAPI *ArmoAPI) GetCustomerGUID(customerGUID string) (*TenantResponse, e
return tenant, nil
}
// ControlsInputs // map[<control name>][<input arguments>]
func (armoAPI *ArmoAPI) GetAccountConfig(clusterName string) (*armotypes.CustomerConfig, error) {
accountConfig := &armotypes.CustomerConfig{}
if armoAPI.customerGUID == "" {
return accountConfig, nil
}
respStr, err := HttpGetter(armoAPI.httpClient, armoAPI.getAccountConfig(clusterName), nil)
if err != nil {
return nil, err
}
if err = JSONDecoder(respStr).Decode(&accountConfig); err != nil {
return nil, err
}
return accountConfig, nil
}
// ControlsInputs // map[<control name>][<input arguments>]
func (armoAPI *ArmoAPI) GetControlsInputs(clusterName string) (map[string][]string, error) {
accountConfig, err := armoAPI.GetAccountConfig(clusterName)
if err == nil {
return accountConfig.Settings.PostureControlInputs, nil
}
return nil, err
}
func (armoAPI *ArmoAPI) ListCustomFrameworks() ([]string, error) {
respStr, err := HttpGetter(armoAPI.httpClient, armoAPI.getListFrameworkURL(), nil)
if err != nil {
return nil, err
}
frs := []reporthandling.Framework{}
if err = json.Unmarshal([]byte(respStr), &frs); err != nil {
return nil, err
}
frameworkList := []string{}
for _, fr := range frs {
if !isNativeFramework(fr.Name) {
frameworkList = append(frameworkList, fr.Name)
}
}
return frameworkList, nil
}
func (armoAPI *ArmoAPI) ListFrameworks() ([]string, error) {
respStr, err := HttpGetter(armoAPI.httpClient, armoAPI.getListFrameworkURL(), nil)
if err != nil {
return nil, err
}
frs := []reporthandling.Framework{}
if err = json.Unmarshal([]byte(respStr), &frs); err != nil {
return nil, err
}
frameworkList := []string{}
for _, fr := range frs {
if isNativeFramework(fr.Name) {
frameworkList = append(frameworkList, strings.ToLower(fr.Name))
} else {
frameworkList = append(frameworkList, fr.Name)
}
}
return frameworkList, nil
}
func (armoAPI *ArmoAPI) ListControls(l ListType) ([]string, error) {
return nil, fmt.Errorf("control api is not public")
}
type TenantResponse struct {
TenantID string `json:"tenantId"`
Token string `json:"token"`

View File

@@ -5,28 +5,45 @@ import (
"strings"
)
var NativeFrameworks = []string{"nsa", "mitre", "armobest", "devopsbest"}
func (armoAPI *ArmoAPI) getFrameworkURL(frameworkName string) string {
u := url.URL{}
u.Scheme = "https"
u.Host = armoAPI.apiURL
u.Path = "v1/armoFrameworks"
u.Path = "api/v1/armoFrameworks"
q := u.Query()
q.Add("customerGUID", "11111111-1111-1111-1111-111111111111")
q.Add("frameworkName", strings.ToUpper(frameworkName))
q.Add("getRules", "true")
q.Add("customerGUID", armoAPI.customerGUID)
if isNativeFramework(frameworkName) {
q.Add("frameworkName", strings.ToUpper(frameworkName))
} else {
// For customer frameworks, the name has to match the way it was added
q.Add("frameworkName", frameworkName)
}
u.RawQuery = q.Encode()
return u.String()
}
func (armoAPI *ArmoAPI) getExceptionsURL(customerGUID, clusterName string) string {
func (armoAPI *ArmoAPI) getListFrameworkURL() string {
u := url.URL{}
u.Scheme = "https"
u.Host = armoAPI.apiURL
u.Path = "api/v1/armoFrameworks"
q := u.Query()
q.Add("customerGUID", armoAPI.customerGUID)
u.RawQuery = q.Encode()
return u.String()
}
func (armoAPI *ArmoAPI) getExceptionsURL(clusterName string) string {
u := url.URL{}
u.Scheme = "https"
u.Host = armoAPI.apiURL
u.Path = "api/v1/armoPostureExceptions"
q := u.Query()
q.Add("customerGUID", customerGUID)
q.Add("customerGUID", armoAPI.customerGUID)
// if clusterName != "" { // TODO - fix customer name support in Armo BE
// q.Add("clusterName", clusterName)
// }
@@ -35,6 +52,22 @@ func (armoAPI *ArmoAPI) getExceptionsURL(customerGUID, clusterName string) strin
return u.String()
}
func (armoAPI *ArmoAPI) getAccountConfig(clusterName string) string {
u := url.URL{}
u.Scheme = "https"
u.Host = armoAPI.apiURL
u.Path = "api/v1/armoCustomerConfiguration"
q := u.Query()
q.Add("customerGUID", armoAPI.customerGUID)
if clusterName != "" { // TODO - fix customer name support in Armo BE
q.Add("clusterName", clusterName)
}
u.RawQuery = q.Encode()
return u.String()
}
func (armoAPI *ArmoAPI) getCustomerURL() string {
u := url.URL{}
u.Scheme = "https"

View File

@@ -11,26 +11,22 @@ import (
// ======================================== DownloadReleasedPolicy =======================================================
// =======================================================================================================================
// Download released version
// Use gitregostore to get policies from a GitHub release
type DownloadReleasedPolicy struct {
gs *gitregostore.GitRegoStore
}
func NewDownloadReleasedPolicy() *DownloadReleasedPolicy {
return &DownloadReleasedPolicy{
gs: gitregostore.InitDefaultGitRegoStore(),
gs: gitregostore.NewDefaultGitRegoStore(-1),
}
}
// Return control per name/id using ARMO api
func (drp *DownloadReleasedPolicy) GetControl(policyName string) (*reporthandling.Control, error) {
var control *reporthandling.Control
var err error
if strings.HasPrefix(policyName, "C-") || strings.HasPrefix(policyName, "c-") {
control, err = drp.gs.GetOPAControlByID(policyName)
} else {
control, err = drp.gs.GetOPAControlByName(policyName)
}
control, err = drp.gs.GetOPAControl(policyName)
if err != nil {
return nil, err
}
@@ -44,3 +40,45 @@ func (drp *DownloadReleasedPolicy) GetFramework(name string) (*reporthandling.Fr
}
return framework, err
}
func (drp *DownloadReleasedPolicy) ListFrameworks() ([]string, error) {
return drp.gs.GetOPAFrameworksNamesList()
}
func (drp *DownloadReleasedPolicy) ListControls(listType ListType) ([]string, error) {
switch listType {
case ListID:
return drp.gs.GetOPAControlsIDsList()
default:
return drp.gs.GetOPAControlsNamesList()
}
}
func (drp *DownloadReleasedPolicy) GetControlsInputs(clusterName string) (map[string][]string, error) {
defaultConfigInputs, err := drp.gs.GetDefaultConfigInputs()
if err != nil {
return nil, err
}
return defaultConfigInputs.Settings.PostureControlInputs, err
}
func (drp *DownloadReleasedPolicy) SetRegoObjects() error {
fwNames, err := drp.gs.GetOPAFrameworksNamesList()
if len(fwNames) != 0 && err == nil {
return nil
}
return drp.gs.SetRegoObjects()
}
func isNativeFramework(framework string) bool {
return contains(NativeFrameworks, framework)
}
func contains(s []string, str string) bool {
for _, v := range s {
if strings.EqualFold(v, str) {
return true
}
}
return false
}
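A usage sketch tying this file together (an illustration, assuming network access to the regolibrary release assets and an in-package "fmt" import):

// Sketch: initialize the release-backed getter, list controls, fetch a framework.
func listReleasedPolicies() error {
	drp := NewDownloadReleasedPolicy()
	if err := drp.SetRegoObjects(); err != nil { // downloads the release artifacts once
		return err
	}
	ids, err := drp.ListControls(ListID) // or ListName for human-readable names
	if err != nil {
		return err
	}
	fmt.Println(len(ids), "controls available")
	fw, err := drp.GetFramework("nsa")
	if err != nil {
		return err
	}
	fmt.Println("downloaded framework:", fw.Name)
	return nil
}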

View File

@@ -5,14 +5,28 @@ import (
"github.com/armosec/opa-utils/reporthandling"
)
// supported listing
type ListType string
const ListID ListType = "id"
const ListName ListType = "name"
type IPolicyGetter interface {
GetFramework(name string) (*reporthandling.Framework, error)
GetControl(policyName string) (*reporthandling.Control, error)
GetControl(name string) (*reporthandling.Control, error)
ListFrameworks() ([]string, error)
ListControls(ListType) ([]string, error)
}
type IExceptionsGetter interface {
GetExceptions(customerGUID, clusterName string) ([]armotypes.PostureExceptionPolicy, error)
GetExceptions(clusterName string) ([]armotypes.PostureExceptionPolicy, error)
}
type IBackend interface {
GetCustomerGUID(customerGUID string) (*TenantResponse, error)
GetCustomerGUID() (*TenantResponse, error)
SetCustomerGUID(customerGUID string)
}
type IControlsInputsGetter interface {
GetControlsInputs(clusterName string) (map[string][]string, error)
}

View File

@@ -1,6 +1,7 @@
package getter
import (
"bytes"
"encoding/json"
"fmt"
"io"
@@ -9,8 +10,6 @@ import (
"path"
"path/filepath"
"strings"
"github.com/armosec/opa-utils/reporthandling"
)
func GetDefaultPath(name string) string {
@@ -21,33 +20,8 @@ func GetDefaultPath(name string) string {
return defaultfilePath
}
// Save control as json in file
func SaveControlInFile(control *reporthandling.Control, pathStr string) error {
encodedData, err := json.Marshal(control)
if err != nil {
return err
}
err = os.WriteFile(pathStr, []byte(fmt.Sprintf("%v", string(encodedData))), 0644)
if err != nil {
if os.IsNotExist(err) {
pathDir := path.Dir(pathStr)
if err := os.Mkdir(pathDir, 0744); err != nil {
return err
}
} else {
return err
}
err = os.WriteFile(pathStr, []byte(fmt.Sprintf("%v", string(encodedData))), 0644)
if err != nil {
return err
}
}
return nil
}
func SaveFrameworkInFile(framework *reporthandling.Framework, pathStr string) error {
encodedData, err := json.Marshal(framework)
func SaveInFile(policy interface{}, pathStr string) error {
encodedData, err := json.Marshal(policy)
if err != nil {
return err
}
@@ -77,12 +51,14 @@ func JSONDecoder(origin string) *json.Decoder {
return dec
}
func HttpGetter(httpClient *http.Client, fullURL string) (string, error) {
func HttpGetter(httpClient *http.Client, fullURL string, headers map[string]string) (string, error) {
req, err := http.NewRequest("GET", fullURL, nil)
if err != nil {
return "", err
}
addHeaders(req, headers)
resp, err := httpClient.Do(req)
if err != nil {
return "", err
@@ -94,6 +70,32 @@ func HttpGetter(httpClient *http.Client, fullURL string) (string, error) {
return respStr, nil
}
func HttpPost(httpClient *http.Client, fullURL string, headers map[string]string, body []byte) (string, error) {
req, err := http.NewRequest("POST", fullURL, bytes.NewReader(body))
if err != nil {
return "", err
}
addHeaders(req, headers)
resp, err := httpClient.Do(req)
if err != nil {
return "", err
}
respStr, err := httpRespToString(resp)
if err != nil {
return "", err
}
return respStr, nil
}
func addHeaders(req *http.Request, headers map[string]string) {
for k, v := range headers { // ranging over a nil map is a no-op, so no nil check is needed
req.Header.Add(k, v)
}
}
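A hedged sketch of these helpers with the new headers argument (example.com is a placeholder endpoint, not an ARMO URL; assumes in-package "net/http" and "time" imports):

// Sketch: GET without headers, then POST with an explicit Content-Type.
func requestSketch() error {
	client := &http.Client{Timeout: 30 * time.Second}
	if _, err := HttpGetter(client, "https://example.com/ping", nil); err != nil { // nil headers are allowed
		return err
	}
	headers := map[string]string{"Content-Type": "application/json"}
	_, err := HttpPost(client, "https://example.com/echo", headers, []byte(`{"ping":true}`))
	return err
}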
// HTTPRespToString parses the body as string and checks the HTTP status code, it closes the body reader at the end
func httpRespToString(resp *http.Response) (string, error) {
if resp == nil || resp.Body == nil {

View File

@@ -17,12 +17,12 @@ const DefaultLocalStore = ".kubescape"
// Load policies from a local repository
type LoadPolicy struct {
filePath string
filePaths []string
}
func NewLoadPolicy(filePath string) *LoadPolicy {
func NewLoadPolicy(filePaths []string) *LoadPolicy {
return &LoadPolicy{
filePath: filePath,
filePaths: filePaths,
}
}
@@ -30,37 +30,68 @@ func NewLoadPolicy(filePath string) *LoadPolicy {
func (lp *LoadPolicy) GetControl(controlName string) (*reporthandling.Control, error) {
control := &reporthandling.Control{}
f, err := os.ReadFile(lp.filePath)
filePath := lp.filePath()
f, err := os.ReadFile(filePath)
if err != nil {
return nil, err
}
err = json.Unmarshal(f, control)
if err = json.Unmarshal(f, control); err != nil {
return control, err
}
if controlName != "" && !strings.EqualFold(controlName, control.Name) && !strings.EqualFold(controlName, control.ControlID) {
return nil, fmt.Errorf("control from file not matching")
framework, err := lp.GetFramework(control.Name)
if err != nil {
return nil, fmt.Errorf("control from file not matching")
} else {
for _, ctrl := range framework.Controls {
if strings.EqualFold(ctrl.Name, controlName) || strings.EqualFold(ctrl.ControlID, controlName) {
control = &ctrl
break
}
}
}
}
return control, err
}
func (lp *LoadPolicy) GetFramework(frameworkName string) (*reporthandling.Framework, error) {
framework := &reporthandling.Framework{}
f, err := os.ReadFile(lp.filePath)
if err != nil {
return nil, err
}
var err error
for _, filePath := range lp.filePaths {
f, err := os.ReadFile(filePath)
if err != nil {
return nil, err
}
err = json.Unmarshal(f, framework)
if err = json.Unmarshal(f, framework); err != nil {
return framework, err
}
if strings.EqualFold(frameworkName, framework.Name) {
break
}
}
if frameworkName != "" && !strings.EqualFold(frameworkName, framework.Name) {
return nil, fmt.Errorf("framework from file not matching")
}
return framework, err
}
func (lp *LoadPolicy) GetExceptions(customerGUID, clusterName string) ([]armotypes.PostureExceptionPolicy, error) {
func (lp *LoadPolicy) ListFrameworks() ([]string, error) {
// TODO - Support
return []string{}, fmt.Errorf("loading frameworks list from file is not supported")
}
func (lp *LoadPolicy) ListControls(listType ListType) ([]string, error) {
// TODO - Support
return []string{}, fmt.Errorf("loading controls list from file is not supported")
}
func (lp *LoadPolicy) GetExceptions(clusterName string) ([]armotypes.PostureExceptionPolicy, error) {
filePath := lp.filePath()
exception := []armotypes.PostureExceptionPolicy{}
f, err := os.ReadFile(lp.filePath)
f, err := os.ReadFile(filePath)
if err != nil {
return nil, err
}
@@ -68,3 +99,25 @@ func (lp *LoadPolicy) GetExceptions(customerGUID, clusterName string) ([]armotyp
err = json.Unmarshal(f, &exception)
return exception, err
}
func (lp *LoadPolicy) GetControlsInputs(clusterName string) (map[string][]string, error) {
filePath := lp.filePath()
accountConfig := &armotypes.CustomerConfig{}
f, err := os.ReadFile(filePath)
if err != nil {
return nil, err
}
if err = json.Unmarshal(f, &accountConfig); err == nil {
return accountConfig.Settings.PostureControlInputs, nil
}
return nil, err
}
// temporary support for a list of files
func (lp *LoadPolicy) filePath() string {
if len(lp.filePaths) > 0 {
return lp.filePaths[0]
}
return ""
}
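A short usage sketch of the multi-file constructor (paths are illustrative, pointing at previously downloaded frameworks):

// Sketch: load two frameworks from disk and fetch one by name.
func loadFromDisk() (*reporthandling.Framework, error) {
	lp := NewLoadPolicy([]string{
		GetDefaultPath("nsa.json"),
		GetDefaultPath("mitre.json"),
	})
	return lp.GetFramework("mitre") // scans the listed files until the name matches
}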

View File

@@ -0,0 +1,13 @@
package getter
import (
"path/filepath"
)
var mockFrameworkBasePath = filepath.Join("examples", "mocks", "frameworks")
func MockNewLoadPolicy() *LoadPolicy {
return &LoadPolicy{
filePaths: []string{""},
}
}

cautils/listpolicies.go Normal file (+7 lines)
View File

@@ -0,0 +1,7 @@
package cautils
type ListPolicies struct {
Target string
ListIDs bool
Account string
}

cautils/rbac.go Normal file (+114 lines)
View File

@@ -0,0 +1,114 @@
package cautils
import (
"encoding/json"
"time"
"github.com/armosec/k8s-interface/workloadinterface"
"github.com/armosec/opa-utils/reporthandling"
"github.com/armosec/rbac-utils/rbacscanner"
"github.com/armosec/rbac-utils/rbacutils"
uuid "github.com/satori/go.uuid"
)
type RBACObjects struct {
scanner *rbacscanner.RbacScannerFromK8sAPI
}
func NewRBACObjects(scanner *rbacscanner.RbacScannerFromK8sAPI) *RBACObjects {
return &RBACObjects{scanner: scanner}
}
func (rbacObjects *RBACObjects) SetResourcesReport() (*reporthandling.PostureReport, error) {
return &reporthandling.PostureReport{
ReportID: uuid.NewV4().String(),
ReportGenerationTime: time.Now().UTC(),
CustomerGUID: rbacObjects.scanner.CustomerGUID,
ClusterName: rbacObjects.scanner.ClusterName,
}, nil
}
func (rbacObjects *RBACObjects) ListAllResources() (map[string]workloadinterface.IMetadata, error) {
resources, err := rbacObjects.scanner.ListResources()
if err != nil {
return nil, err
}
allresources, err := rbacObjects.rbacObjectsToResources(resources)
if err != nil {
return nil, err
}
return allresources, nil
}
func (rbacObjects *RBACObjects) rbacObjectsToResources(resources *rbacutils.RbacObjects) (map[string]workloadinterface.IMetadata, error) {
allresources := map[string]workloadinterface.IMetadata{}
// wrap rbac aggregated objects in IMetadata and add to allresources
// TODO - DEPRECATE SA2WLIDmap
SA2WLIDmapIMeta, err := rbacutils.SA2WLIDmapIMetadataWrapper(resources.SA2WLIDmap)
if err != nil {
return nil, err
}
allresources[SA2WLIDmapIMeta.GetID()] = SA2WLIDmapIMeta
SAID2WLIDmapIMeta, err := rbacutils.SAID2WLIDmapIMetadataWrapper(resources.SAID2WLIDmap)
if err != nil {
return nil, err
}
allresources[SAID2WLIDmapIMeta.GetID()] = SAID2WLIDmapIMeta
// convert rbac k8s resources to IMetadata and add to allresources
for _, cr := range resources.ClusterRoles.Items {
crmap, err := convertToMap(cr)
if err != nil {
return nil, err
}
crmap["apiVersion"] = "rbac.authorization.k8s.io/v1"
crIMeta := workloadinterface.NewWorkloadObj(crmap)
crIMeta.SetKind("ClusterRole")
allresources[crIMeta.GetID()] = crIMeta
}
for _, cr := range resources.Roles.Items {
crmap, err := convertToMap(cr)
if err != nil {
return nil, err
}
crmap["apiVersion"] = "rbac.authorization.k8s.io/v1"
crIMeta := workloadinterface.NewWorkloadObj(crmap)
crIMeta.SetKind("Role")
allresources[crIMeta.GetID()] = crIMeta
}
for _, cr := range resources.ClusterRoleBindings.Items {
crmap, err := convertToMap(cr)
if err != nil {
return nil, err
}
crmap["apiVersion"] = "rbac.authorization.k8s.io/v1"
crIMeta := workloadinterface.NewWorkloadObj(crmap)
crIMeta.SetKind("ClusterRoleBinding")
allresources[crIMeta.GetID()] = crIMeta
}
for _, cr := range resources.RoleBindings.Items {
crmap, err := convertToMap(cr)
if err != nil {
return nil, err
}
crmap["apiVersion"] = "rbac.authorization.k8s.io/v1"
crIMeta := workloadinterface.NewWorkloadObj(crmap)
crIMeta.SetKind("RoleBinding")
allresources[crIMeta.GetID()] = crIMeta
}
return allresources, nil
}
func convertToMap(obj interface{}) (map[string]interface{}, error) {
var inInterface map[string]interface{}
inrec, err := json.Marshal(obj)
if err != nil {
return nil, err
}
err = json.Unmarshal(inrec, &inInterface)
if err != nil {
return nil, err
}
return inInterface, nil
}
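For illustration, convertToMap simply round-trips a value through JSON into the generic map shape that workloadinterface.NewWorkloadObj expects; a minimal sketch (assumes an in-package "fmt" import):

// Sketch: any JSON-serializable value becomes a map[string]interface{}.
func convertToMapExample() {
	type demo struct {
		Name string `json:"name"`
	}
	if m, err := convertToMap(demo{Name: "cluster-admin"}); err == nil {
		fmt.Println(m["name"]) // cluster-admin
	}
}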

View File

@@ -1,5 +0,0 @@
package cautils
const (
ComponentIdentifier = "Posture"
)

cautils/reportv2tov1.go Normal file (+142 lines)
View File

@@ -0,0 +1,142 @@
package cautils
import (
"github.com/armosec/opa-utils/reporthandling"
helpersv1 "github.com/armosec/opa-utils/reporthandling/helpers/v1"
"github.com/armosec/opa-utils/reporthandling/results/v1/reportsummary"
"github.com/armosec/opa-utils/score"
)
func ReportV2ToV1(opaSessionObj *OPASessionObj) {
if len(opaSessionObj.PostureReport.FrameworkReports) > 0 {
return // report already converted
}
opaSessionObj.PostureReport.ClusterCloudProvider = opaSessionObj.Report.ClusterCloudProvider
frameworks := []reporthandling.FrameworkReport{}
if len(opaSessionObj.Report.SummaryDetails.Frameworks) > 0 {
for _, fwv2 := range opaSessionObj.Report.SummaryDetails.Frameworks {
fwv1 := reporthandling.FrameworkReport{}
fwv1.Name = fwv2.GetName()
fwv1.Score = fwv2.GetScore()
fwv1.ControlReports = append(fwv1.ControlReports, controlReportV2ToV1(opaSessionObj, fwv2.GetName(), fwv2.Controls)...)
frameworks = append(frameworks, fwv1)
}
} else {
fwv1 := reporthandling.FrameworkReport{}
fwv1.Name = ""
fwv1.Score = 0
fwv1.ControlReports = append(fwv1.ControlReports, controlReportV2ToV1(opaSessionObj, "", opaSessionObj.Report.SummaryDetails.Controls)...)
frameworks = append(frameworks, fwv1)
}
// // remove unused data
// opaSessionObj.Report = nil
// opaSessionObj.ResourcesResult = nil
// setup counters and score
for f := range frameworks {
// // set exceptions
// exceptions.SetFrameworkExceptions(frameworks, opap.Exceptions, cautils.ClusterName)
// set counters
reporthandling.SetUniqueResourcesCounter(&frameworks[f])
// set default score
reporthandling.SetDefaultScore(&frameworks[f])
}
// update score
scoreutil := score.NewScore(opaSessionObj.AllResources)
scoreutil.Calculate(frameworks)
opaSessionObj.PostureReport.FrameworkReports = frameworks
// for i := range frameworks {
// for j := range frameworks[i].ControlReports {
// // frameworks[i].ControlReports[j].Score
// for w := range opaSessionObj.Report.SummaryDetails.Frameworks {
// if opaSessionObj.Report.SummaryDetails.Frameworks[w].Name == frameworks[i].Name {
// opaSessionObj.Report.SummaryDetails.Frameworks[w].Score = frameworks[i].Score
// }
// if c, ok := opaSessionObj.Report.SummaryDetails.Frameworks[w].Controls[frameworks[i].ControlReports[j].ControlID]; ok {
// c.Score = frameworks[i].ControlReports[j].Score
// opaSessionObj.Report.SummaryDetails.Frameworks[w].Controls[frameworks[i].ControlReports[j].ControlID] = c
// }
// }
// if c, ok := opaSessionObj.Report.SummaryDetails.Controls[frameworks[i].ControlReports[j].ControlID]; ok {
// c.Score = frameworks[i].ControlReports[j].Score
// opaSessionObj.Report.SummaryDetails.Controls[frameworks[i].ControlReports[j].ControlID] = c
// }
// }
// }
}
func controlReportV2ToV1(opaSessionObj *OPASessionObj, frameworkName string, controls map[string]reportsummary.ControlSummary) []reporthandling.ControlReport {
controlRepors := []reporthandling.ControlReport{}
for controlID, crv2 := range controls {
crv1 := reporthandling.ControlReport{}
crv1.ControlID = controlID
crv1.BaseScore = crv2.ScoreFactor
crv1.Name = crv2.GetName()
crv1.Score = crv2.GetScore()
// TODO - add fields
crv1.Description = crv2.Description
crv1.Remediation = crv2.Remediation
rulesv1 := map[string]reporthandling.RuleReport{}
for _, resourceID := range crv2.ListResourcesIDs().All() {
if result, ok := opaSessionObj.ResourcesResult[resourceID]; ok {
for _, rulev2 := range result.ListRulesOfControl(crv2.GetID(), "") {
if _, ok := rulesv1[rulev2.GetName()]; !ok {
rulesv1[rulev2.GetName()] = reporthandling.RuleReport{
Name: rulev2.GetName(),
}
}
rulev1 := rulesv1[rulev2.GetName()]
status := rulev2.GetStatus(&helpersv1.Filters{FrameworkNames: []string{frameworkName}})
if status.IsFailed() || status.IsExcluded() {
// rule response
ruleResponse := reporthandling.RuleResponse{}
ruleResponse.Rulename = rulev2.GetName()
for i := range rulev2.Paths {
ruleResponse.FailedPaths = append(ruleResponse.FailedPaths, rulev2.Paths[i].FailedPath)
}
ruleResponse.RuleStatus = string(status.Status())
if len(rulev2.Exception) > 0 {
ruleResponse.Exception = &rulev2.Exception[0]
}
if fullRessource, ok := opaSessionObj.AllResources[resourceID]; ok {
ruleResponse.AlertObject.K8SApiObjects = append(ruleResponse.AlertObject.K8SApiObjects, fullRessource.GetObject())
}
rulev1.RuleResponses = append(rulev1.RuleResponses, ruleResponse)
}
rulev1.ListInputKinds = append(rulev1.ListInputKinds, resourceID)
rulesv1[rulev2.GetName()] = rulev1
}
}
}
if len(rulesv1) > 0 {
for i := range rulesv1 {
crv1.RuleReports = append(crv1.RuleReports, rulesv1[i])
}
}
controlRepors = append(controlRepors, crv1)
}
return controlRepors
}

View File

@@ -1,67 +1,91 @@
package cautils
import (
"fmt"
"path/filepath"
"github.com/armosec/k8s-interface/k8sinterface"
"github.com/armosec/kubescape/cautils/getter"
"github.com/armosec/opa-utils/reporthandling"
)
const (
ScanCluster string = "cluster"
ScanLocalFiles string = "yaml"
)
type BoolPtrFlag struct {
valPtr *bool
}
func (bpf *BoolPtrFlag) Type() string {
return "bool"
}
func (bpf *BoolPtrFlag) String() string {
if bpf.valPtr != nil {
return fmt.Sprintf("%v", *bpf.valPtr)
}
return ""
}
func (bpf *BoolPtrFlag) Get() *bool {
return bpf.valPtr
}
func (bpf *BoolPtrFlag) SetBool(val bool) {
bpf.valPtr = &val
}
func (bpf *BoolPtrFlag) Set(val string) error {
switch val {
case "true":
bpf.SetBool(true)
case "false":
bpf.SetBool(false)
}
return nil
}
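BoolPtrFlag satisfies pflag's Value interface (String/Set/Type), giving a tri-state flag that distinguishes "unset" from an explicit true/false. A wiring sketch; the flag name here is illustrative and cmd is assumed to be a *cobra.Command:

// Sketch: register the tri-state flag on a cobra command, then inspect it.
cmd.PersistentFlags().Var(&scanInfo.HostSensor, "enable-host-scan", "deploy the host sensor (unset = decide automatically)")

// later, at scan time:
if v := scanInfo.HostSensor.Get(); v == nil {
	// flag was not provided on the command line
} else if *v {
	// explicitly enabled
}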
type ScanInfo struct {
Getters
PolicyIdentifier reporthandling.PolicyIdentifier
UseExceptions string // Load exceptions configuration
UseFrom string // Load framework from local file (instead of download). Use when running offline
UseDefault bool // Load framework from cached file (instead of download). Use when running offline
Format string // Format results (table, json, junit ...)
Output string // Store results in an output file, Output file name
ExcludedNamespaces string // DEPRECATED?
InputPatterns []string // Yaml files input patterns
Silent bool // Silent mode - Do not print progress logs
FailThreshold uint16 // Failure score threshold
Submit bool // Submit results to Armo BE
Local bool // Do not submit results
Account string // account ID
FrameworkScan bool // false if scanning control
PolicyIdentifier []reporthandling.PolicyIdentifier
UseExceptions string // Load file with exceptions configuration
ControlsInputs string // Load file with inputs for controls
UseFrom []string // Load framework from local file (instead of download). Use when running offline
UseDefault bool // Load framework from cached file (instead of download). Use when running offline
VerboseMode bool // Display all of the input resources and not only failed resources
Format string // Format results (table, json, junit ...)
Output string // Store results in an output file, Output file name
ExcludedNamespaces string // used for host sensor namespace
IncludeNamespaces string // DEPRECATED?
InputPatterns []string // Yaml files input patterns
Silent bool // Silent mode - Do not print progress logs
FailThreshold uint16 // Failure score threshold
Submit bool // Submit results to Armo BE
HostSensor BoolPtrFlag // Deploy ARMO K8s host sensor to collect data from certain controls
Local bool // Do not submit results
Account string // account ID
ClusterName string // cluster name
FrameworkScan bool // false if scanning control
ScanAll bool // true if scan all frameworks
}
type Getters struct {
ExceptionsGetter getter.IExceptionsGetter
PolicyGetter getter.IPolicyGetter
ExceptionsGetter getter.IExceptionsGetter
ControlsInputsGetter getter.IControlsInputsGetter
PolicyGetter getter.IPolicyGetter
}
func (scanInfo *ScanInfo) Init() {
scanInfo.setUseFrom()
scanInfo.setUseExceptions()
scanInfo.setOutputFile()
scanInfo.setGetter()
}
func (scanInfo *ScanInfo) setUseExceptions() {
if scanInfo.UseExceptions != "" {
// load exceptions from file
scanInfo.ExceptionsGetter = getter.NewLoadPolicy(scanInfo.UseExceptions)
} else {
scanInfo.ExceptionsGetter = getter.GetArmoAPIConnector()
}
}
func (scanInfo *ScanInfo) setUseFrom() {
if scanInfo.UseFrom != "" {
return
}
if scanInfo.UseDefault {
scanInfo.UseFrom = getter.GetDefaultPath(scanInfo.PolicyIdentifier.Name + ".json")
}
}
func (scanInfo *ScanInfo) setGetter() {
if scanInfo.UseFrom != "" {
// load from file
scanInfo.PolicyGetter = getter.NewLoadPolicy(scanInfo.UseFrom)
} else {
scanInfo.PolicyGetter = getter.NewDownloadReleasedPolicy()
for _, policy := range scanInfo.PolicyIdentifier {
scanInfo.UseFrom = append(scanInfo.UseFrom, getter.GetDefaultPath(policy.Name+".json"))
}
}
}
@@ -81,25 +105,29 @@ func (scanInfo *ScanInfo) setOutputFile() {
}
}
func (scanInfo *ScanInfo) ScanRunningCluster() bool {
return len(scanInfo.InputPatterns) == 0
}
func (scanInfo *ScanInfo) SetClusterConfig() (IClusterConfig, *k8sinterface.KubernetesApi) {
var clusterConfig IClusterConfig
var k8s *k8sinterface.KubernetesApi
if !scanInfo.ScanRunningCluster() {
k8sinterface.ConnectedToCluster = false
clusterConfig = NewEmptyConfig()
} else {
k8s = k8sinterface.NewKubernetesApi()
// setup cluster config
clusterConfig = ClusterConfigSetup(scanInfo, k8s, getter.GetArmoAPIConnector())
func (scanInfo *ScanInfo) GetScanningEnvironment() string {
if len(scanInfo.InputPatterns) != 0 {
return ScanLocalFiles
}
return clusterConfig, k8s
return ScanCluster
}
// func (scanInfo *ScanInfo) ConnectedToCluster(k8s k8sinterface.) bool {
// _, err := k8s.KubernetesClient.CoreV1().Pods("").List(context.TODO(), metav1.ListOptions{})
// return err == nil
// }
func (scanInfo *ScanInfo) SetPolicyIdentifiers(policies []string, kind reporthandling.NotificationPolicyKind) {
for _, policy := range policies {
if !scanInfo.contains(policy) {
newPolicy := reporthandling.PolicyIdentifier{}
newPolicy.Kind = kind // reporthandling.KindFramework
newPolicy.Name = policy
scanInfo.PolicyIdentifier = append(scanInfo.PolicyIdentifier, newPolicy)
}
}
}
func (scanInfo *ScanInfo) contains(policyName string) bool {
for _, policy := range scanInfo.PolicyIdentifier {
if policy.Name == policyName {
return true
}
}
return false
}

cautils/versioncheck.go Normal file (+136 lines)
View File

@@ -0,0 +1,136 @@
package cautils
import (
"encoding/json"
"fmt"
"net/http"
"os"
"github.com/armosec/kubescape/cautils/getter"
pkgutils "github.com/armosec/utils-go/utils"
)
const SKIP_VERSION_CHECK = "KUBESCAPE_SKIP_UPDATE_CHECK"
var BuildNumber string
const UnknownBuildNumber = "unknown"
type IVersionCheckHandler interface {
CheckLatestVersion(*VersionCheckRequest) error
}
func NewIVersionCheckHandler() IVersionCheckHandler {
if BuildNumber == "" {
WarningDisplay(os.Stderr, "Warning: unknown build number, this might affect your scan results. Please make sure you are updated to the latest version.\n")
}
if v, ok := os.LookupEnv(SKIP_VERSION_CHECK); ok && pkgutils.StringToBool(v) {
return NewVersionCheckHandlerMock()
}
return NewVersionCheckHandler()
}
type VersionCheckHandlerMock struct {
}
func NewVersionCheckHandlerMock() *VersionCheckHandlerMock {
return &VersionCheckHandlerMock{}
}
type VersionCheckHandler struct {
versionURL string
}
type VersionCheckRequest struct {
Client string `json:"client"` // kubescape
ClientVersion string `json:"clientVersion"` // kubescape version
Framework string `json:"framework"` // framework name
FrameworkVersion string `json:"frameworkVersion"` // framework version
ScanningTarget string `json:"target"` // scanning target- cluster/yaml
}
type VersionCheckResponse struct {
Client string `json:"client"` // kubescape
ClientUpdate string `json:"clientUpdate"` // kubescape latest version
Framework string `json:"framework"` // framework name
FrameworkUpdate string `json:"frameworkUpdate"` // framework latest version
Message string `json:"message"` // alert message
}
func NewVersionCheckHandler() *VersionCheckHandler {
return &VersionCheckHandler{
versionURL: "https://us-central1-elated-pottery-310110.cloudfunctions.net/ksgf1v1",
}
}
func NewVersionCheckRequest(buildNumber, frameworkName, frameworkVersion, scanningTarget string) *VersionCheckRequest {
if buildNumber == "" {
buildNumber = UnknownBuildNumber
}
if scanningTarget == "" {
scanningTarget = "unknown"
}
return &VersionCheckRequest{
Client: "kubescape",
ClientVersion: buildNumber,
Framework: frameworkName,
FrameworkVersion: frameworkVersion,
ScanningTarget: scanningTarget,
}
}
func (v *VersionCheckHandlerMock) CheckLatestVersion(versionData *VersionCheckRequest) error {
fmt.Println("Skipping version check")
return nil
}
func (v *VersionCheckHandler) CheckLatestVersion(versionData *VersionCheckRequest) error {
defer func() {
if err := recover(); err != nil {
WarningDisplay(os.Stderr, "failed to get latest version\n")
}
}()
latestVersion, err := v.getLatestVersion(versionData)
if err != nil || latestVersion == nil {
return fmt.Errorf("failed to get latest version")
}
if latestVersion.ClientUpdate != "" {
if BuildNumber != "" && BuildNumber < latestVersion.ClientUpdate {
WarningDisplay(os.Stderr, warningMessage(latestVersion.Client, latestVersion.ClientUpdate), "\n")
}
}
// TODO - Enable after supporting framework version
// if latestVersion.FrameworkUpdate != "" {
// fmt.Println(warningMessage(latestVersion.Framework, latestVersion.FrameworkUpdate))
// }
if latestVersion.Message != "" {
InfoDisplay(os.Stderr, latestVersion.Message, "\n")
}
return nil
}
func (v *VersionCheckHandler) getLatestVersion(versionData *VersionCheckRequest) (*VersionCheckResponse, error) {
reqBody, err := json.Marshal(*versionData)
if err != nil {
return nil, fmt.Errorf("in 'CheckLatestVersion' failed to json.Marshal, reason: %s", err.Error())
}
resp, err := getter.HttpPost(http.DefaultClient, v.versionURL, map[string]string{"Content-Type": "application/json"}, reqBody)
if err != nil {
return nil, err
}
vResp := &VersionCheckResponse{}
if err = getter.JSONDecoder(resp).Decode(vResp); err != nil {
return nil, err
}
return vResp, nil
}
func warningMessage(kind, release string) string {
return fmt.Sprintf("Warning: '%s' is not updated to the latest release: '%s'", kind, release)
}
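For reference, a request built with NewVersionCheckRequest marshals to JSON along these lines (values are illustrative; field names follow the struct tags above):

req := NewVersionCheckRequest("v2.0.150", "nsa", "", "cluster")
b, _ := json.Marshal(req)
fmt.Println(string(b))
// {"client":"kubescape","clientVersion":"v2.0.150","framework":"nsa","frameworkVersion":"","target":"cluster"}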

clihandler/clidownload.go Normal file (+114 lines)
View File

@@ -0,0 +1,114 @@
package clihandler
import (
"fmt"
"github.com/armosec/kubescape/cautils"
"github.com/armosec/kubescape/cautils/getter"
)
var downloadFunc = map[string]func(*cautils.DownloadInfo) error{
"controls-inputs": downloadConfigInputs,
"exceptions": downloadExceptions,
"control": downloadControl,
"framework": downloadFramework,
}
func DownloadSupportCommands() []string {
commands := []string{}
for k := range downloadFunc {
commands = append(commands, k)
}
return commands
}
func CliDownload(downloadInfo *cautils.DownloadInfo) error {
if f, ok := downloadFunc[downloadInfo.Target]; ok {
if err := f(downloadInfo); err != nil {
return err
}
fmt.Printf("'%s' downloaded successfully and saved at: '%s'\n", downloadInfo.Target, downloadInfo.Path)
return nil
}
return fmt.Errorf("unknown command to download")
}
func downloadConfigInputs(downloadInfo *cautils.DownloadInfo) error {
tenant := getTenantConfig(downloadInfo.Account, "", getKubernetesApi()) // change k8sinterface
controlsInputsGetter := getConfigInputsGetter(downloadInfo.Name, tenant.GetCustomerGUID(), nil)
controlInputs, err := controlsInputsGetter.GetControlsInputs(tenant.GetClusterName())
if err != nil {
return err
}
if downloadInfo.Path == "" {
downloadInfo.Path = getter.GetDefaultPath(fmt.Sprintf("%s.json", downloadInfo.Target))
}
// save in file
err = getter.SaveInFile(controlInputs, downloadInfo.Path)
if err != nil {
return err
}
return nil
}
func downloadExceptions(downloadInfo *cautils.DownloadInfo) error {
tenant := getTenantConfig(downloadInfo.Account, "", getKubernetesApi()) // change k8sinterface
exceptionsGetter := getExceptionsGetter("")
exceptions, err := exceptionsGetter.GetExceptions(tenant.GetClusterName())
if err != nil {
return err
}
if downloadInfo.Path == "" {
downloadInfo.Path = getter.GetDefaultPath(fmt.Sprintf("%s.json", downloadInfo.Target))
}
// save in file
err = getter.SaveInFile(exceptions, downloadInfo.Path)
if err != nil {
return err
}
return nil
}
func downloadFramework(downloadInfo *cautils.DownloadInfo) error {
tenant := getTenantConfig(downloadInfo.Account, "", getKubernetesApi()) // change k8sinterface
g := getPolicyGetter(nil, tenant.GetCustomerGUID(), true, nil)
if downloadInfo.Name == "" {
// TODO - support
return fmt.Errorf("missing framework name")
}
if downloadInfo.Path == "" {
downloadInfo.Path = getter.GetDefaultPath(downloadInfo.Name + ".json")
}
frameworks, err := g.GetFramework(downloadInfo.Name)
if err != nil {
return err
}
err = getter.SaveInFile(frameworks, downloadInfo.Path)
if err != nil {
return err
}
return nil
}
func downloadControl(downloadInfo *cautils.DownloadInfo) error {
tenant := getTenantConfig(downloadInfo.Account, "", getKubernetesApi()) // change k8sinterface
g := getPolicyGetter(nil, tenant.GetCustomerGUID(), false, nil)
if downloadInfo.Name == "" {
// TODO - support
return fmt.Errorf("missing control name")
}
if downloadInfo.Path == "" {
downloadInfo.Path = getter.GetDefaultPath(downloadInfo.Name + ".json")
}
controls, err := g.GetControl(downloadInfo.Name)
if err != nil {
return err
}
err = getter.SaveInFile(controls, downloadInfo.Path)
if err != nil {
return err
}
return nil
}
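A sketch of driving the dispatcher above programmatically, equivalent to running 'kubescape download framework nsa' (the target names come from the downloadFunc map):

// Sketch: download the NSA framework to the default path.
func downloadNSA() error {
	return CliDownload(&cautils.DownloadInfo{Target: "framework", Name: "nsa"}) // Path defaults to ~/.kubescape/nsa.json
}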

View File

@@ -0,0 +1,19 @@
package cliinterfaces
import (
"github.com/armosec/k8s-interface/workloadinterface"
"github.com/armosec/kubescape/cautils"
"github.com/armosec/kubescape/resultshandling/reporter"
"github.com/armosec/opa-utils/reporthandling"
)
type ISubmitObjects interface {
SetResourcesReport() (*reporthandling.PostureReport, error)
ListAllResources() (map[string]workloadinterface.IMetadata, error)
}
type SubmitInterfaces struct {
SubmitObjects ISubmitObjects
Reporter reporter.IReport
ClusterConfig cautils.ITenantConfig
}

clihandler/clilist.go Normal file (+58 lines)
View File

@@ -0,0 +1,58 @@
package clihandler
import (
"fmt"
"sort"
"strings"
"github.com/armosec/kubescape/cautils"
"github.com/armosec/kubescape/cautils/getter"
)
var listFunc = map[string]func(*cautils.ListPolicies) ([]string, error){
"controls": listControls,
"frameworks": listFrameworks,
}
func ListSupportCommands() []string {
commands := []string{}
for k := range listFunc {
commands = append(commands, k)
}
return commands
}
func CliList(listPolicies *cautils.ListPolicies) error {
if f, ok := listFunc[listPolicies.Target]; ok {
policies, err := f(listPolicies)
if err != nil {
return err
}
sort.Strings(policies)
sep := "\n * "
usageCmd := strings.TrimSuffix(listPolicies.Target, "s")
fmt.Printf("Supported %s:%s%s\n", listPolicies.Target, sep, strings.Join(policies, sep))
fmt.Printf("\nUseage:\n")
fmt.Printf("$ kubescape scan %s \"name\"\n", usageCmd)
fmt.Printf("$ kubescape scan %s \"name-0\",\"name-1\"\n\n", usageCmd)
return nil
}
return fmt.Errorf("unknown command to download")
}
func listFrameworks(listPolicies *cautils.ListPolicies) ([]string, error) {
tenant := getTenantConfig(listPolicies.Account, "", getKubernetesApi()) // change k8sinterface
g := getPolicyGetter(nil, tenant.GetCustomerGUID(), true, nil)
return listFrameworksNames(g), nil
}
func listControls(listPolicies *cautils.ListPolicies) ([]string, error) {
tenant := getTenantConfig(listPolicies.Account, "", getKubernetesApi()) // change k8sinterface
g := getPolicyGetter(nil, tenant.GetCustomerGUID(), false, nil)
l := getter.ListName
if listPolicies.ListIDs {
l = getter.ListID
}
return g.ListControls(l)
}

View File

@@ -7,15 +7,13 @@ import (
"github.com/armosec/k8s-interface/k8sinterface"
"github.com/armosec/kubescape/cautils"
"github.com/armosec/kubescape/cautils/getter"
"github.com/armosec/kubescape/clihandler"
"github.com/spf13/cobra"
)
var getCmd = &cobra.Command{
Use: "get <key>",
Short: "Get configuration in cluster",
Long: ``,
ValidArgs: clihandler.SupportedFrameworks,
Use: "get <key>",
Short: "Get configuration in cluster",
Long: ``,
Args: func(cmd *cobra.Command, args []string) error {
if len(args) < 1 || len(args) > 1 {
return fmt.Errorf("requires one argument")
@@ -32,7 +30,7 @@ var getCmd = &cobra.Command{
key := keyValue[0]
k8s := k8sinterface.NewKubernetesApi()
clusterConfig := cautils.NewClusterConfig(k8s, getter.GetArmoAPIConnector())
clusterConfig := cautils.NewClusterConfig(k8s, getter.GetArmoAPIConnector(), scanInfo.Account, "")
val, err := clusterConfig.GetValueByKeyFromConfigMap(key)
if err != nil {
if err.Error() == "value does not exist." {

View File

@@ -30,7 +30,7 @@ var setCmd = &cobra.Command{
data := keyValue[1]
k8s := k8sinterface.NewKubernetesApi()
clusterConfig := cautils.NewClusterConfig(k8s, getter.GetArmoAPIConnector())
clusterConfig := cautils.NewClusterConfig(k8s, getter.GetArmoAPIConnector(), scanInfo.Account, "")
if err := clusterConfig.SetKeyValueInConfigmap(key, data); err != nil {
return err
}

View File

@@ -2,6 +2,7 @@ package cmd
import (
"fmt"
"io"
"os"
"strings"
@@ -11,27 +12,76 @@ import (
"github.com/spf13/cobra"
)
var (
controlExample = `
# Scan the 'privileged container' control
kubescape scan control "privileged container"
# Scan list of controls separated with a comma
kubescape scan control "privileged container","allowed hostpath"
# Scan list of controls using the control ID separated with a comma
kubescape scan control C-0058,C-0057
Run 'kubescape list controls' for the list of supported controls
Control documentation:
https://hub.armo.cloud/docs/controls
`
)
// controlCmd represents the control command
var controlCmd = &cobra.Command{
Use: "control <control name>/<control id>",
Short: fmt.Sprintf("The control you wish to use for scan. It must be present in at least one of the folloiwng frameworks: %s", clihandler.ValidFrameworks),
Use: "control <control names list>/<control ids list>",
Short: "The controls you wish to use. Run 'kubescape list controls' for the list of supported controls",
Example: controlExample,
Args: func(cmd *cobra.Command, args []string) error {
if len(args) < 1 && !(cmd.Flags().Lookup("use-from").Changed) {
return fmt.Errorf("requires at least one argument")
if len(args) > 0 {
controls := strings.Split(args[0], ",")
if len(controls) > 1 {
if controls[1] == "" {
return fmt.Errorf("usage: <control-0>,<control-1>")
}
}
} else {
return fmt.Errorf("requires at least one control name")
}
return nil
},
RunE: func(cmd *cobra.Command, args []string) error {
flagValidationControl()
scanInfo.PolicyIdentifier = reporthandling.PolicyIdentifier{}
if !(cmd.Flags().Lookup("use-from").Changed) {
scanInfo.PolicyIdentifier.Name = strings.ToLower(args[0])
scanInfo.PolicyIdentifier = []reporthandling.PolicyIdentifier{}
if len(args) == 0 {
// scanInfo.SetPolicyIdentifiers(getter.NativeFrameworks, reporthandling.KindFramework)
scanInfo.ScanAll = true
} else { // expected a control or a comma-separated list of controls
// Read controls from input args
scanInfo.SetPolicyIdentifiers(strings.Split(args[0], ","), reporthandling.KindControl)
if len(args) > 1 {
if len(args[1:]) == 0 || args[1] != "-" {
scanInfo.InputPatterns = args[1:]
} else { // store stdin to file - do NOT move to separate function !!
tempFile, err := os.CreateTemp(".", "tmp-kubescape*.yaml")
if err != nil {
return err
}
defer os.Remove(tempFile.Name())
if _, err := io.Copy(tempFile, os.Stdin); err != nil {
return err
}
scanInfo.InputPatterns = []string{tempFile.Name()}
}
}
}
scanInfo.FrameworkScan = false
scanInfo.PolicyIdentifier.Kind = reporthandling.KindControl
scanInfo.Init()
cautils.SetSilentMode(scanInfo.Silent)
err := clihandler.CliSetup(scanInfo)
err := clihandler.ScanCliSetup(&scanInfo)
if err != nil {
fmt.Fprintf(os.Stderr, "error: %v\n", err)
os.Exit(1)
@@ -51,3 +101,22 @@ func flagValidationControl() {
os.Exit(1)
}
}
func setScanForFirstControl(controls []string) []reporthandling.PolicyIdentifier {
newPolicy := reporthandling.PolicyIdentifier{}
newPolicy.Kind = reporthandling.KindControl
newPolicy.Name = controls[0]
scanInfo.PolicyIdentifier = append(scanInfo.PolicyIdentifier, newPolicy)
return scanInfo.PolicyIdentifier
}
func SetScanForGivenControls(controls []string) []reporthandling.PolicyIdentifier {
for _, control := range controls {
control := strings.TrimLeft(control, " ")
newPolicy := reporthandling.PolicyIdentifier{}
newPolicy.Kind = reporthandling.KindControl
newPolicy.Name = control
scanInfo.PolicyIdentifier = append(scanInfo.PolicyIdentifier, newPolicy)
}
return scanInfo.PolicyIdentifier
}

View File

@@ -2,66 +2,48 @@ package cmd
import (
"fmt"
"os"
"strings"
"github.com/armosec/kubescape/cautils"
"github.com/armosec/kubescape/cautils/getter"
"github.com/armosec/kubescape/clihandler"
"github.com/spf13/cobra"
)
var downloadInfo cautils.DownloadInfo
var downloadInfo = cautils.DownloadInfo{}
var downloadCmd = &cobra.Command{
Use: fmt.Sprintf("download framework/control <framework-name>/<control-name> [flags]\nSupported frameworks: %s", clihandler.ValidFrameworks),
Short: "Download framework/control",
Use: "download <policy> <policy name>",
Short: fmt.Sprintf("Download %s", strings.Join(clihandler.DownloadSupportCommands(), "/")),
Long: ``,
Args: func(cmd *cobra.Command, args []string) error {
if len(args) != 2 {
return fmt.Errorf("requires two arguments : framework/control <framework-name>/<control-name>")
supported := strings.Join(clihandler.DownloadSupportCommands(), ",")
if len(args) < 1 {
return fmt.Errorf("policy type requeued, supported: %v", supported)
}
if !strings.EqualFold(args[0], "framework") && !strings.EqualFold(args[0], "control") {
return fmt.Errorf("invalid parameter '%s'. Supported parameters: framework, control", args[0])
if cautils.StringInSlice(clihandler.DownloadSupportCommands(), args[0]) == cautils.ValueNotFound {
return fmt.Errorf("invalid parameter '%s'. Supported parameters: %s", args[0], supported)
}
return nil
},
RunE: func(cmd *cobra.Command, args []string) error {
if strings.EqualFold(args[0], "framework") {
downloadInfo.FrameworkName = strings.ToLower(args[1])
g := getter.NewDownloadReleasedPolicy()
if downloadInfo.Path == "" {
downloadInfo.Path = getter.GetDefaultPath(downloadInfo.FrameworkName + ".json")
}
frameworks, err := g.GetFramework(downloadInfo.FrameworkName)
if err != nil {
return err
}
err = getter.SaveFrameworkInFile(frameworks, downloadInfo.Path)
if err != nil {
return err
}
} else if strings.EqualFold(args[0], "control") {
downloadInfo.ControlName = strings.ToLower(args[1])
g := getter.NewDownloadReleasedPolicy()
if downloadInfo.Path == "" {
downloadInfo.Path = getter.GetDefaultPath(downloadInfo.ControlName + ".json")
}
controls, err := g.GetControl(downloadInfo.ControlName)
if err != nil {
return err
}
err = getter.SaveControlInFile(controls, downloadInfo.Path)
if err != nil {
return err
}
downloadInfo.Target = args[0]
if len(args) >= 2 {
downloadInfo.Name = args[1]
}
if err := clihandler.CliDownload(&downloadInfo); err != nil {
fmt.Fprintf(os.Stderr, "error: %v\n", err)
os.Exit(1)
}
return nil
},
}
func init() {
// cobra.OnInitialize(initConfig)
rootCmd.AddCommand(downloadCmd)
downloadInfo = cautils.DownloadInfo{}
downloadCmd.Flags().StringVarP(&downloadInfo.Path, "output", "o", "", "Output file. If not specified, the result is saved to `~/.kubescape/<framework name>.json`")
downloadCmd.Flags().StringVarP(&downloadInfo.Path, "output", "o", "", "Output file. If not specified, the result is saved to `~/.kubescape/<policy name>.json`")
downloadCmd.PersistentFlags().StringVarP(&downloadInfo.Account, "account", "", "", "Armo portal account ID. Default will load account ID from configMap or config file")
}

View File

@@ -9,76 +9,118 @@ import (
"github.com/armosec/kubescape/cautils"
"github.com/armosec/kubescape/clihandler"
"github.com/armosec/opa-utils/reporthandling"
"github.com/spf13/cobra"
)
var frameworkCmd = &cobra.Command{
var (
frameworkExample = `
# Scan all frameworks and submit the results
kubescape scan --submit
# Scan the NSA framework
kubescape scan framework nsa
# Scan the NSA and MITRE framework
kubescape scan framework nsa,mitre
# Scan all frameworks
kubescape scan framework all
Use: fmt.Sprintf("framework <framework name> [`<glob pattern>`/`-`] [flags]\nSupported frameworks: %s", clihandler.ValidFrameworks),
Short: fmt.Sprintf("The framework you wish to use. Supported frameworks: %s", strings.Join(clihandler.SupportedFrameworks, ", ")),
Long: "Execute a scan on a running Kubernetes cluster or `yaml`/`json` files (use glob) or `-` for stdin",
ValidArgs: clihandler.SupportedFrameworks,
# Scan kubernetes YAML manifest files
kubescape scan framework nsa *.yaml
# Scan and save the results in JSON format
kubescape scan --format json --output results.json
# Display all resources
kubescape scan --verbose
Run 'kubescape list frameworks' for the list of supported frameworks
`
)
var frameworkCmd = &cobra.Command{
Use: "framework <framework names list> [`<glob pattern>`/`-`] [flags]",
Short: "The framework you wish to use. Run 'kubescape list frameworks' for the list of supported frameworks",
Example: frameworkExample,
Long: "Execute a scan on a running Kubernetes cluster or `yaml`/`json` files (use glob) or `-` for stdin",
// ValidArgs: getter.NativeFrameworks,
Args: func(cmd *cobra.Command, args []string) error {
if len(args) < 1 && !(cmd.Flags().Lookup("use-from").Changed) {
return fmt.Errorf("requires at least one argument")
} else if len(args) > 0 {
if !isValidFramework(strings.ToLower(args[0])) {
return fmt.Errorf(fmt.Sprintf("supported frameworks: %s", strings.Join(clihandler.SupportedFrameworks, ", ")))
if len(args) > 0 {
frameworks := strings.Split(args[0], ",")
if len(frameworks) > 1 {
if frameworks[1] == "" {
return fmt.Errorf("usage: <framework-0>,<framework-1>")
}
}
} else {
return fmt.Errorf("requires at least one framework name")
}
return nil
},
RunE: func(cmd *cobra.Command, args []string) error {
scanInfo.PolicyIdentifier = reporthandling.PolicyIdentifier{}
scanInfo.PolicyIdentifier.Kind = reporthandling.KindFramework
flagValidationFramework()
if !(cmd.Flags().Lookup("use-from").Changed) {
scanInfo.PolicyIdentifier.Name = strings.ToLower(args[0])
}
if len(args) > 0 {
if len(args[1:]) == 0 || args[1] != "-" {
scanInfo.InputPatterns = args[1:]
} else { // store stdin to file
tempFile, err := os.CreateTemp(".", "tmp-kubescape*.yaml")
if err != nil {
return err
}
defer os.Remove(tempFile.Name())
var frameworks []string
if _, err := io.Copy(tempFile, os.Stdin); err != nil {
return err
if len(args) == 0 { // scan all frameworks
// frameworks = getter.NativeFrameworks
scanInfo.ScanAll = true
} else {
// Read frameworks from input args
frameworks = strings.Split(args[0], ",")
if cautils.StringInSlice(frameworks, "all") != cautils.ValueNotFound {
scanInfo.ScanAll = true
frameworks = []string{}
}
if len(args) > 1 {
if len(args[1:]) == 0 || args[1] != "-" {
scanInfo.InputPatterns = args[1:]
} else { // store stdin to file - do NOT move to separate function !!
tempFile, err := os.CreateTemp(".", "tmp-kubescape*.yaml")
if err != nil {
return err
}
defer os.Remove(tempFile.Name())
if _, err := io.Copy(tempFile, os.Stdin); err != nil {
return err
}
scanInfo.InputPatterns = []string{tempFile.Name()}
}
scanInfo.InputPatterns = []string{tempFile.Name()}
}
}
scanInfo.FrameworkScan = true
scanInfo.SetPolicyIdentifiers(frameworks, reporthandling.KindFramework)
scanInfo.Init()
cautils.SetSilentMode(scanInfo.Silent)
err := clihandler.CliSetup(scanInfo)
err := clihandler.ScanCliSetup(&scanInfo)
if err != nil {
fmt.Fprintf(os.Stderr, "error: %v\n", err)
fmt.Fprintf(os.Stderr, "error: %v\n\n", err)
os.Exit(1)
}
return nil
},
}
func isValidFramework(framework string) bool {
return cautils.StringInSlice(clihandler.SupportedFrameworks, framework) != cautils.ValueNotFound
}
func init() {
scanCmd.AddCommand(frameworkCmd)
scanInfo = cautils.ScanInfo{}
scanInfo.FrameworkScan = true
frameworkCmd.Flags().BoolVarP(&scanInfo.Submit, "submit", "", false, "Send the scan results to Armo management portal where you can see the results in a user-friendly UI, choose your preferred compliance framework, check risk results history and trends, manage exceptions, get remediation recommendations and much more. By default the results are not submitted")
frameworkCmd.Flags().BoolVarP(&scanInfo.Local, "keep-local", "", false, "If you do not want your Kubescape results reported to Armo backend. Use this flag if you ran with the '--submit' flag in the past and you do not want to submit your current scan results")
frameworkCmd.Flags().StringVarP(&scanInfo.Account, "account", "", "", "Armo portal account ID. Default will load account ID from configMap or config file")
}
func flagValidationFramework() {
// func SetScanForFirstFramework(frameworks []string) []reporthandling.PolicyIdentifier {
// newPolicy := reporthandling.PolicyIdentifier{}
// newPolicy.Kind = reporthandling.KindFramework
// newPolicy.Name = frameworks[0]
// scanInfo.PolicyIdentifier = append(scanInfo.PolicyIdentifier, newPolicy)
// return scanInfo.PolicyIdentifier
// }
func flagValidationFramework() {
if scanInfo.Submit && scanInfo.Local {
fmt.Println("You can use `keep-local` or `submit`, but not both")
os.Exit(1)
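The `Args` validator and `RunE` above accept a comma-separated list of framework names and treat the literal `all` as a request to scan every framework. A minimal sketch of that parsing rule (a hypothetical helper, not part of the codebase; it assumes the standard `strings` package):

```
// parseFrameworkArg mirrors the RunE logic above: it splits the
// comma-separated argument and reports whether "all" was requested.
func parseFrameworkArg(arg string) (frameworks []string, scanAll bool) {
	for _, name := range strings.Split(arg, ",") {
		if strings.EqualFold(name, "all") {
			return nil, true
		}
		frameworks = append(frameworks, name)
	}
	return frameworks, false
}
```

For example, `parseFrameworkArg("nsa,mitre")` yields `["nsa", "mitre"], false`, while `parseFrameworkArg("all")` yields `nil, true`.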

66
clihandler/cmd/list.go Normal file
View File

@@ -0,0 +1,66 @@
package cmd
import (
"fmt"
"os"
"strings"
"github.com/armosec/kubescape/cautils"
"github.com/armosec/kubescape/clihandler"
"github.com/spf13/cobra"
)
var (
listExample = `
# List default supported framework names
kubescape list frameworks
# List all supported framework names
kubescape list frameworks --account <account id>
# List all supported control names
kubescape list controls
# List all supported control IDs
kubescape list controls --id
Control documentation:
https://hub.armo.cloud/docs/controls
`
)
var listPolicies = cautils.ListPolicies{}
var listCmd = &cobra.Command{
Use: "list <policy> [flags]",
Short: "List frameworks/controls will list the supported frameworks and controls",
Long: ``,
Example: listExample,
Args: func(cmd *cobra.Command, args []string) error {
supported := strings.Join(clihandler.ListSupportCommands(), ",")
if len(args) < 1 {
return fmt.Errorf("policy type requeued, supported: %s", supported)
}
if cautils.StringInSlice(clihandler.ListSupportCommands(), args[0]) == cautils.ValueNotFound {
return fmt.Errorf("invalid parameter '%s'. Supported parameters: %s", args[0], supported)
}
return nil
},
RunE: func(cmd *cobra.Command, args []string) error {
listPolicies.Target = args[0]
if err := clihandler.CliList(&listPolicies); err != nil {
fmt.Fprintf(os.Stderr, "error: %v\n", err)
os.Exit(1)
}
return nil
},
}
func init() {
// cobra.OnInitialize(initConfig)
rootCmd.AddCommand(listCmd)
listCmd.PersistentFlags().StringVarP(&listPolicies.Account, "account", "", "", "Armo portal account ID. Default will load account ID from configMap or config file")
listCmd.PersistentFlags().BoolVarP(&listPolicies.ListIDs, "id", "", false, "List control IDs instead of control names")
}
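Both `list` and `download` validate their first argument with `cautils.StringInSlice`, comparing the result against `cautils.ValueNotFound`. A plausible reconstruction of that helper's contract (a hypothetical sketch, not the actual `cautils` source):

```
// ValueNotFound is the sentinel returned when the string is absent.
const ValueNotFound = -1

// StringInSlice returns the index of str in strSlice, or ValueNotFound.
func StringInSlice(strSlice []string, str string) int {
	for i := range strSlice {
		if strSlice[i] == str {
			return i
		}
	}
	return ValueNotFound
}
```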

53
clihandler/cmd/rbac.go Normal file
View File

@@ -0,0 +1,53 @@
package cmd
import (
"fmt"
"os"
"github.com/armosec/k8s-interface/k8sinterface"
"github.com/armosec/kubescape/cautils"
"github.com/armosec/kubescape/clihandler"
"github.com/armosec/kubescape/clihandler/cliinterfaces"
reporterv1 "github.com/armosec/kubescape/resultshandling/reporter/v1"
"github.com/armosec/rbac-utils/rbacscanner"
"github.com/spf13/cobra"
)
// rbacCmd represents the RBAC command
var rbacCmd = &cobra.Command{
Use: "rbac \nExample:\n$ kubescape submit rbac",
Short: "Submit cluster's Role-Based Access Control (RBAC)",
Long: ``,
RunE: func(cmd *cobra.Command, args []string) error {
k8s := k8sinterface.NewKubernetesApi()
// get config
clusterConfig, err := getSubmittedClusterConfig(k8s)
if err != nil {
return err
}
// list RBAC
rbacObjects := cautils.NewRBACObjects(rbacscanner.NewRbacScannerFromK8sAPI(k8s, clusterConfig.GetCustomerGUID(), clusterConfig.GetClusterName()))
// submit resources
r := reporterv1.NewReportEventReceiver(clusterConfig.GetConfigObj())
submitInterfaces := cliinterfaces.SubmitInterfaces{
ClusterConfig: clusterConfig,
SubmitObjects: rbacObjects,
Reporter: r,
}
if err := clihandler.Submit(submitInterfaces); err != nil {
fmt.Println(err)
os.Exit(1)
}
return nil
},
}
func init() {
submitCmd.AddCommand(rbacCmd)
}

107
clihandler/cmd/results.go Normal file
View File

@@ -0,0 +1,107 @@
package cmd
import (
"encoding/json"
"fmt"
"os"
"time"
"github.com/armosec/k8s-interface/k8sinterface"
"github.com/armosec/k8s-interface/workloadinterface"
"github.com/armosec/kubescape/clihandler"
"github.com/armosec/kubescape/clihandler/cliinterfaces"
reporterv1 "github.com/armosec/kubescape/resultshandling/reporter/v1"
"github.com/armosec/opa-utils/reporthandling"
uuid "github.com/satori/go.uuid"
"github.com/spf13/cobra"
)
type ResultsObject struct {
filePath string
customerGUID string
clusterName string
}
func NewResultsObject(customerGUID, clusterName, filePath string) *ResultsObject {
return &ResultsObject{
filePath: filePath,
customerGUID: customerGUID,
clusterName: clusterName,
}
}
func (resultsObject *ResultsObject) SetResourcesReport() (*reporthandling.PostureReport, error) {
// load framework results from json file
frameworkReports, err := loadResultsFromFile(resultsObject.filePath)
if err != nil {
return nil, err
}
return &reporthandling.PostureReport{
FrameworkReports: frameworkReports,
ReportID: uuid.NewV4().String(),
ReportGenerationTime: time.Now().UTC(),
CustomerGUID: resultsObject.customerGUID,
ClusterName: resultsObject.clusterName,
}, nil
}
func (resultsObject *ResultsObject) ListAllResources() (map[string]workloadinterface.IMetadata, error) {
return map[string]workloadinterface.IMetadata{}, nil
}
var resultsCmd = &cobra.Command{
Use: "results <json file>\nExample:\n$ kubescape submit results path/to/results.json",
Short: "Submit a pre scanned results file. The file must be in json format",
Long: ``,
RunE: func(cmd *cobra.Command, args []string) error {
if len(args) == 0 {
return fmt.Errorf("missing results file")
}
k8s := k8sinterface.NewKubernetesApi()
// get config
clusterConfig, err := getSubmittedClusterConfig(k8s)
if err != nil {
return err
}
resultsObjects := NewResultsObject(clusterConfig.GetCustomerGUID(), clusterConfig.GetClusterName(), args[0])
// submit resources
r := reporterv1.NewReportEventReceiver(clusterConfig.GetConfigObj())
submitInterfaces := cliinterfaces.SubmitInterfaces{
ClusterConfig: clusterConfig,
SubmitObjects: resultsObjects,
Reporter: r,
}
if err := clihandler.Submit(submitInterfaces); err != nil {
fmt.Println(err)
os.Exit(1)
}
return nil
},
}
func init() {
submitCmd.AddCommand(resultsCmd)
}
func loadResultsFromFile(filePath string) ([]reporthandling.FrameworkReport, error) {
frameworkReports := []reporthandling.FrameworkReport{}
f, err := os.ReadFile(filePath)
if err != nil {
return nil, err
}
if err = json.Unmarshal(f, &frameworkReports); err != nil {
frameworkReport := reporthandling.FrameworkReport{}
if err = json.Unmarshal(f, &frameworkReport); err != nil {
return frameworkReports, err
}
frameworkReports = append(frameworkReports, frameworkReport)
}
return frameworkReports, nil
}

View File

@@ -31,17 +31,13 @@ func Execute() {
}
func init() {
rootCmd.PersistentFlags().StringVarP(&scanInfo.Account, "account", "", "", "Armo portal account ID. Default will load account ID from configMap or config file")
flag.CommandLine.StringVar(&armoBEURLs, "environment", "", envFlagUsage)
rootCmd.PersistentFlags().StringVar(&armoBEURLs, "environment", "", envFlagUsage)
rootCmd.PersistentFlags().MarkHidden("environment")
cobra.OnInitialize(initConfig)
}
// initConfig reads in config file and ENV variables if set.
func initConfig() {
}
func InitArmoBEConnector() {
urlSlices := strings.Split(armoBEURLs, ",")
if len(urlSlices) > 3 {

View File

@@ -4,6 +4,7 @@ import (
"fmt"
"strings"
"github.com/armosec/k8s-interface/k8sinterface"
"github.com/armosec/kubescape/cautils"
"github.com/spf13/cobra"
)
@@ -16,26 +17,47 @@ var scanCmd = &cobra.Command{
Short: "Scan the current running cluster or yaml files",
Long: `The action you want to perform`,
Args: func(cmd *cobra.Command, args []string) error {
if len(args) == 0 {
return fmt.Errorf("requires one argument: framework/control")
}
if !strings.EqualFold(args[0], "framework") && !strings.EqualFold(args[0], "control") {
return fmt.Errorf("invalid parameter '%s'. Supported parameters: framework, control", args[0])
if len(args) > 0 {
if !strings.EqualFold(args[0], "framework") && !strings.EqualFold(args[0], "control") {
return fmt.Errorf("invalid parameter '%s'. Supported parameters: framework, control", args[0])
}
}
return nil
},
Run: func(cmd *cobra.Command, args []string) {
if len(args) == 0 {
scanInfo.ScanAll = true
// frameworks := getter.NativeFrameworks
// frameworkArgs := []string{strings.Join(frameworks, ",")}
frameworkCmd.RunE(cmd, []string{"all"})
}
},
}
func init() {
rootCmd.AddCommand(scanCmd)
scanCmd.PersistentFlags().StringVarP(&scanInfo.ExcludedNamespaces, "exclude-namespaces", "e", "", "Namespaces to exclude from scanning. Recommended: kube-system, kube-public")
scanCmd.PersistentFlags().StringVarP(&scanInfo.Format, "format", "f", "pretty-printer", `Output format. Supported formats: "pretty-printer"/"json"/"junit"`)
scanCmd.PersistentFlags().StringVarP(&scanInfo.Output, "output", "o", "", "Output file. Print output to file and not stdout")
scanCmd.PersistentFlags().BoolVarP(&scanInfo.Silent, "silent", "s", false, "Silent progress messages")
scanCmd.PersistentFlags().Uint16VarP(&scanInfo.FailThreshold, "fail-threshold", "t", 0, "Failure threshold is the percent below which the command fails and returns exit code 1")
scanCmd.PersistentFlags().StringVar(&scanInfo.UseFrom, "use-from", "", "Load local framework object from specified path. If not used will download latest")
scanCmd.PersistentFlags().BoolVar(&scanInfo.UseDefault, "use-default", false, "Load local framework object from default path. If not used will download latest")
scanCmd.PersistentFlags().StringVar(&scanInfo.UseExceptions, "exceptions", "", "Path to an exceptions obj. If not set will download exceptions from Armo management portal")
func frameworkInitConfig() {
k8sinterface.SetClusterContextName(scanInfo.ClusterName)
}
func init() {
cobra.OnInitialize(frameworkInitConfig)
rootCmd.AddCommand(scanCmd)
rootCmd.PersistentFlags().StringVarP(&scanInfo.ClusterName, "cluster", "", "", "Cluster name. Default will use the current-context")
scanCmd.PersistentFlags().StringVar(&scanInfo.ControlsInputs, "controls-config", "", "Path to an controls-config obj. If not set will download controls-config from ARMO management portal")
scanCmd.PersistentFlags().StringVar(&scanInfo.UseExceptions, "exceptions", "", "Path to an exceptions obj. If not set will download exceptions from ARMO management portal")
scanCmd.PersistentFlags().StringVarP(&scanInfo.ExcludedNamespaces, "exclude-namespaces", "e", "", "Namespaces to exclude from scanning. Recommended: kube-system,kube-public")
scanCmd.PersistentFlags().Uint16VarP(&scanInfo.FailThreshold, "fail-threshold", "t", 100, "Failure threshold is the percent above which the command fails and returns exit code 1")
scanCmd.PersistentFlags().StringVarP(&scanInfo.Format, "format", "f", "pretty-printer", `Output format. Supported formats: "pretty-printer"/"json"/"junit"/"prometheus"`)
scanCmd.PersistentFlags().StringVar(&scanInfo.IncludeNamespaces, "include-namespaces", "", "scan specific namespaces. e.g: --include-namespaces ns-a,ns-b")
scanCmd.PersistentFlags().BoolVarP(&scanInfo.Local, "keep-local", "", false, "If you do not want your Kubescape results reported to Armo backend. Use this flag if you ran with the '--submit' flag in the past and you do not want to submit your current scan results")
scanCmd.PersistentFlags().StringVarP(&scanInfo.Output, "output", "o", "", "Output file. Print output to file and not stdout")
scanCmd.PersistentFlags().BoolVar(&scanInfo.VerboseMode, "verbose", false, "Display all of the input resources and not only failed resources")
scanCmd.PersistentFlags().BoolVar(&scanInfo.UseDefault, "use-default", false, "Load local policy object from default path. If not used will download latest")
scanCmd.PersistentFlags().StringSliceVar(&scanInfo.UseFrom, "use-from", nil, "Load local policy object from specified path. If not used will download latest")
scanCmd.PersistentFlags().BoolVarP(&scanInfo.Silent, "silent", "s", false, "Silent progress messages")
scanCmd.PersistentFlags().BoolVarP(&scanInfo.Submit, "submit", "", false, "Send the scan results to Armo management portal where you can see the results in a user-friendly UI, choose your preferred compliance framework, check risk results history and trends, manage exceptions, get remediation recommendations and much more. By default the results are not submitted")
hostF := scanCmd.PersistentFlags().VarPF(&scanInfo.HostSensor, "enable-host-scan", "", "Deploy the ARMO K8s host-sensor daemonset in the scanned cluster. It is deleted right after the data is collected. Required to collect valuable data from cluster nodes for certain controls")
hostF.NoOptDefVal = "true"
hostF.DefValue = "false, for no TTY in stdin"
}

31
clihandler/cmd/submit.go Normal file
View File

@@ -0,0 +1,31 @@
package cmd
import (
"github.com/armosec/k8s-interface/k8sinterface"
"github.com/armosec/kubescape/cautils"
"github.com/armosec/kubescape/cautils/getter"
"github.com/spf13/cobra"
)
var submitCmd = &cobra.Command{
Use: "submit <command>",
Short: "Submit an object to the Kubescape SaaS version",
Long: ``,
Run: func(cmd *cobra.Command, args []string) {
},
}
func init() {
rootCmd.AddCommand(submitCmd)
}
func getSubmittedClusterConfig(k8s *k8sinterface.KubernetesApi) (*cautils.ClusterConfig, error) {
clusterConfig := cautils.NewClusterConfig(k8s, getter.GetArmoAPIConnector(), scanInfo.Account, scanInfo.ClusterName) // TODO - support non-cluster env submit
if clusterConfig.GetCustomerGUID() != "" {
if err := clusterConfig.SetTenant(); err != nil {
return clusterConfig, err
}
}
return clusterConfig, nil
}

View File

@@ -1,49 +1,24 @@
package cmd
import (
"encoding/json"
"fmt"
"io"
"net/http"
"github.com/armosec/kubescape/cautils"
"github.com/spf13/cobra"
)
var BuildNumber string
var versionCmd = &cobra.Command{
Use: "version",
Short: "Get current version",
Long: ``,
RunE: func(cmd *cobra.Command, args []string) error {
fmt.Println("Your current version is: " + BuildNumber)
v := cautils.NewIVersionCheckHandler()
v.CheckLatestVersion(cautils.NewVersionCheckRequest(cautils.BuildNumber, "", "", "version"))
fmt.Println("Your current version is: " + cautils.BuildNumber)
return nil
},
}
func GetLatestVersion() (string, error) {
latestVersion := "https://api.github.com/repos/armosec/kubescape/releases/latest"
resp, err := http.Get(latestVersion)
if err != nil {
return "unknown", fmt.Errorf("failed to get latest releases from '%s', reason: %s", latestVersion, err.Error())
}
defer resp.Body.Close()
if resp.StatusCode < 200 || 301 < resp.StatusCode {
return "unknown", fmt.Errorf("failed to download file, status code: %s", resp.Status)
}
body, err := io.ReadAll(resp.Body)
if err != nil {
return "unknown", fmt.Errorf("failed to read response body from '%s', reason: %s", latestVersion, err.Error())
}
var data map[string]interface{}
err = json.Unmarshal(body, &data)
if err != nil {
return "unknown", fmt.Errorf("failed to unmarshal response body from '%s', reason: %s", latestVersion, err.Error())
}
return fmt.Sprintf("%v", data["tag_name"]), nil
}
func init() {
rootCmd.AddCommand(versionCmd)
}

View File

@@ -2,48 +2,132 @@ package clihandler
import (
"fmt"
"io/fs"
"os"
"strings"
"github.com/armosec/k8s-interface/k8sinterface"
"github.com/armosec/kubescape/resultshandling/printer"
printerv1 "github.com/armosec/kubescape/resultshandling/printer/v1"
// printerv2 "github.com/armosec/kubescape/resultshandling/printer/v2"
"github.com/armosec/armoapi-go/armotypes"
"github.com/armosec/kubescape/cautils"
"github.com/armosec/kubescape/cautils/getter"
"github.com/armosec/kubescape/clihandler/cliinterfaces"
"github.com/armosec/kubescape/hostsensorutils"
"github.com/armosec/kubescape/opaprocessor"
"github.com/armosec/kubescape/policyhandler"
"github.com/armosec/kubescape/resourcehandler"
"github.com/armosec/kubescape/resultshandling"
"github.com/armosec/kubescape/resultshandling/printer"
"github.com/armosec/kubescape/resultshandling/reporter"
"github.com/armosec/opa-utils/reporthandling"
"github.com/mattn/go-isatty"
)
type CLIHandler struct {
policyHandler *policyhandler.PolicyHandler
scanInfo *cautils.ScanInfo
type componentInterfaces struct {
tenantConfig cautils.ITenantConfig
resourceHandler resourcehandler.IResourceHandler
report reporter.IReport
printerHandler printer.IPrinter
hostSensorHandler hostsensorutils.IHostSensor
}
var SupportedFrameworks = []string{"nsa", "mitre"}
var ValidFrameworks = strings.Join(SupportedFrameworks, ", ")
func getInterfaces(scanInfo *cautils.ScanInfo) componentInterfaces {
func CliSetup(scanInfo cautils.ScanInfo) error {
var k8s *k8sinterface.KubernetesApi
if scanInfo.GetScanningEnvironment() == cautils.ScanCluster {
k8s = getKubernetesApi()
if k8s == nil {
fmt.Println("Failed connecting to Kubernetes cluster")
os.Exit(1)
}
}
clusterConfig, k8s := scanInfo.SetClusterConfig()
tenantConfig := getTenantConfig(scanInfo.Account, scanInfo.ClusterName, k8s)
// Set submit behavior AFTER loading tenant config
setSubmitBehavior(scanInfo, tenantConfig)
hostSensorHandler := getHostSensorHandler(scanInfo, k8s)
if err := hostSensorHandler.Init(); err != nil {
errMsg := "failed to init host sensor"
if scanInfo.VerboseMode {
errMsg = fmt.Sprintf("%s: %v", errMsg, err)
}
cautils.ErrorDisplay(errMsg)
hostSensorHandler = &hostsensorutils.HostSensorHandlerMock{}
}
// excluding hostsensor namespace
if len(scanInfo.IncludeNamespaces) == 0 && hostSensorHandler.GetNamespace() != "" {
scanInfo.ExcludedNamespaces = fmt.Sprintf("%s,%s", scanInfo.ExcludedNamespaces, hostSensorHandler.GetNamespace())
}
resourceHandler := getResourceHandler(scanInfo, tenantConfig, k8s, hostSensorHandler)
// reporting behavior - setup reporter
reportHandler := getReporter(tenantConfig, scanInfo.Submit)
v := cautils.NewIVersionCheckHandler()
v.CheckLatestVersion(cautils.NewVersionCheckRequest(cautils.BuildNumber, policyIdentifierNames(scanInfo.PolicyIdentifier), "", scanInfo.GetScanningEnvironment()))
// setup printer
printerHandler := printerv1.GetPrinter(scanInfo.Format, scanInfo.VerboseMode)
// printerHandler = printerv2.GetPrinter(scanInfo.Format, scanInfo.VerboseMode)
printerHandler.SetWriter(scanInfo.Output)
return componentInterfaces{
tenantConfig: tenantConfig,
resourceHandler: resourceHandler,
report: reportHandler,
printerHandler: printerHandler,
hostSensorHandler: hostSensorHandler,
}
}
func ScanCliSetup(scanInfo *cautils.ScanInfo) error {
cautils.ScanStartDisplay()
interfaces := getInterfaces(scanInfo)
// setPolicyGetter(scanInfo, interfaces.clusterConfig.GetCustomerGUID())
processNotification := make(chan *cautils.OPASessionObj)
reportResults := make(chan *cautils.OPASessionObj)
// policy handler setup
policyHandler := policyhandler.NewPolicyHandler(&processNotification, k8s)
cautils.ClusterName = interfaces.tenantConfig.GetClusterName() // TODO - Deprecated
cautils.CustomerGUID = interfaces.tenantConfig.GetCustomerGUID() // TODO - Deprecated
interfaces.report.SetClusterName(interfaces.tenantConfig.GetClusterName())
interfaces.report.SetCustomerGUID(interfaces.tenantConfig.GetCustomerGUID())
if err := clusterConfig.SetConfig(scanInfo.Account); err != nil {
fmt.Println(err)
downloadReleasedPolicy := getter.NewDownloadReleasedPolicy() // download config inputs from github release
// set policy getter only after setting the customerGUID
scanInfo.Getters.PolicyGetter = getPolicyGetter(scanInfo.UseFrom, interfaces.tenantConfig.GetCustomerGUID(), scanInfo.FrameworkScan, downloadReleasedPolicy)
scanInfo.Getters.ControlsInputsGetter = getConfigInputsGetter(scanInfo.ControlsInputs, interfaces.tenantConfig.GetCustomerGUID(), downloadReleasedPolicy)
scanInfo.Getters.ExceptionsGetter = getExceptionsGetter(scanInfo.UseExceptions)
// TODO - list supported frameworks/controls
if scanInfo.ScanAll {
scanInfo.SetPolicyIdentifiers(listFrameworksNames(scanInfo.Getters.PolicyGetter), reporthandling.KindFramework)
}
cautils.ClusterName = clusterConfig.GetClusterName()
cautils.CustomerGUID = clusterConfig.GetCustomerGUID()
//
defer func() {
if err := interfaces.hostSensorHandler.TearDown(); err != nil {
errMsg := "failed to tear down host sensor"
if scanInfo.VerboseMode {
errMsg = fmt.Sprintf("%s: %v", errMsg, err)
}
cautils.ErrorDisplay(errMsg)
}
}()
// cli handler setup
go func() {
cli := NewCLIHandler(policyHandler, scanInfo)
if err := cli.Scan(); err != nil {
// policy handler setup
policyHandler := policyhandler.NewPolicyHandler(&processNotification, interfaces.resourceHandler)
if err := Scan(policyHandler, scanInfo); err != nil {
fmt.Println(err)
os.Exit(1)
}
@@ -55,41 +139,28 @@ func CliSetup(scanInfo cautils.ScanInfo) error {
opaprocessorObj.ProcessRulesListenner()
}()
resultsHandling := resultshandling.NewResultsHandler(&reportResults, reporter.NewReportEventReceiver(), printer.NewPrinter(scanInfo.Format, scanInfo.Output))
resultsHandling := resultshandling.NewResultsHandler(&reportResults, interfaces.report, interfaces.printerHandler)
score := resultsHandling.HandleResults(scanInfo)
// print report url
if scanInfo.FrameworkScan {
clusterConfig.GenerateURL()
}
interfaces.report.DisplayReportURL()
adjustedFailThreshold := float32(scanInfo.FailThreshold) / 100
if score < adjustedFailThreshold {
return fmt.Errorf("Scan score is bellow threshold")
if score > float32(scanInfo.FailThreshold) {
return fmt.Errorf("scan risk-score %.2f is above permitted threshold %d", score, scanInfo.FailThreshold)
}
return nil
}
func NewCLIHandler(policyHandler *policyhandler.PolicyHandler, scanInfo cautils.ScanInfo) *CLIHandler {
return &CLIHandler{
scanInfo: &scanInfo,
policyHandler: policyHandler,
}
}
func (clihandler *CLIHandler) Scan() error {
cautils.ScanStartDisplay()
func Scan(policyHandler *policyhandler.PolicyHandler, scanInfo *cautils.ScanInfo) error {
policyNotification := &reporthandling.PolicyNotification{
NotificationType: reporthandling.TypeExecPostureScan,
Rules: []reporthandling.PolicyIdentifier{
clihandler.scanInfo.PolicyIdentifier,
},
Designators: armotypes.PortalDesignator{},
Rules: scanInfo.PolicyIdentifier,
Designators: armotypes.PortalDesignator{},
}
switch policyNotification.NotificationType {
case reporthandling.TypeExecPostureScan:
if err := clihandler.policyHandler.HandleNotificationRequest(policyNotification, clihandler.scanInfo); err != nil {
if err := policyHandler.HandleNotificationRequest(policyNotification, scanInfo); err != nil {
return err
}
@@ -98,3 +169,46 @@ func (clihandler *CLIHandler) Scan() error {
}
return nil
}
func Submit(submitInterfaces cliinterfaces.SubmitInterfaces) error {
// list resources
postureReport, err := submitInterfaces.SubmitObjects.SetResourcesReport()
if err != nil {
return err
}
allresources, err := submitInterfaces.SubmitObjects.ListAllResources()
if err != nil {
return err
}
// report
if err := submitInterfaces.Reporter.ActionSendReport(&cautils.OPASessionObj{PostureReport: postureReport, AllResources: allresources}); err != nil {
return err
}
fmt.Printf("\nData has been submitted successfully")
submitInterfaces.Reporter.DisplayReportURL()
return nil
}
func askUserForHostSensor() bool {
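// NOTE: the interactive prompt below is currently disabled - the unconditional return makes the rest of the function unreachable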
return false
if !isatty.IsTerminal(os.Stdin.Fd()) {
return false
}
if stat, err := os.Stdin.Stat(); err == nil {
// fmt.Printf("Found stdin type: %s\n", stat.Mode().Type())
if stat.Mode().Type()&(fs.ModeDevice|fs.ModeCharDevice) > 0 { // has TTY
fmt.Printf("Would you like to scan K8s nodes? [y/N]. This is required to collect valuable data for certain controls\n")
fmt.Printf("Use --enable-host-scan flag to suppress this message\n")
var b []byte = make([]byte, 1)
if n, err := os.Stdin.Read(b); err == nil {
if n > 0 && len(b) > 0 && (b[0] == 'y' || b[0] == 'Y') {
return true
}
}
}
}
return false
}

219
clihandler/initcliutils.go Normal file
View File

@@ -0,0 +1,219 @@
package clihandler
import (
"fmt"
"os"
"github.com/armosec/k8s-interface/k8sinterface"
"github.com/armosec/kubescape/cautils"
"github.com/armosec/kubescape/cautils/getter"
"github.com/armosec/kubescape/hostsensorutils"
"github.com/armosec/kubescape/resourcehandler"
"github.com/armosec/kubescape/resultshandling/reporter"
reporterv1 "github.com/armosec/kubescape/resultshandling/reporter/v1"
"github.com/armosec/opa-utils/reporthandling"
"github.com/armosec/rbac-utils/rbacscanner"
// reporterv2 "github.com/armosec/kubescape/resultshandling/reporter/v2"
)
// getKubernetesApi returns a Kubernetes API client, or nil when not connected to a cluster
func getKubernetesApi() *k8sinterface.KubernetesApi {
if !k8sinterface.IsConnectedToCluster() {
return nil
}
return k8sinterface.NewKubernetesApi()
}
func getTenantConfig(Account, clusterName string, k8s *k8sinterface.KubernetesApi) cautils.ITenantConfig {
if !k8sinterface.IsConnectedToCluster() || k8s == nil {
return cautils.NewLocalConfig(getter.GetArmoAPIConnector(), Account, clusterName)
}
return cautils.NewClusterConfig(k8s, getter.GetArmoAPIConnector(), Account, clusterName)
}
func getExceptionsGetter(useExceptions string) getter.IExceptionsGetter {
if useExceptions != "" {
// load exceptions from file
return getter.NewLoadPolicy([]string{useExceptions})
} else {
return getter.GetArmoAPIConnector()
}
}
func getRBACHandler(tenantConfig cautils.ITenantConfig, k8s *k8sinterface.KubernetesApi, submit bool) *cautils.RBACObjects {
if submit {
return cautils.NewRBACObjects(rbacscanner.NewRbacScannerFromK8sAPI(k8s, tenantConfig.GetCustomerGUID(), tenantConfig.GetClusterName()))
}
return nil
}
func getReporter(tenantConfig cautils.ITenantConfig, submit bool) reporter.IReport {
if submit {
return reporterv1.NewReportEventReceiver(tenantConfig.GetConfigObj())
}
return reporterv1.NewReportMock()
}
func getResourceHandler(scanInfo *cautils.ScanInfo, tenantConfig cautils.ITenantConfig, k8s *k8sinterface.KubernetesApi, hostSensorHandler hostsensorutils.IHostSensor) resourcehandler.IResourceHandler {
if len(scanInfo.InputPatterns) > 0 || k8s == nil {
return resourcehandler.NewFileResourceHandler(scanInfo.InputPatterns)
}
rbacObjects := getRBACHandler(tenantConfig, k8s, scanInfo.Submit)
return resourcehandler.NewK8sResourceHandler(k8s, getFieldSelector(scanInfo), hostSensorHandler, rbacObjects)
}
func getHostSensorHandler(scanInfo *cautils.ScanInfo, k8s *k8sinterface.KubernetesApi) hostsensorutils.IHostSensor {
if !k8sinterface.IsConnectedToCluster() || k8s == nil {
return &hostsensorutils.HostSensorHandlerMock{}
}
hasHostSensorControls := true
// we need to determine which controls need the host sensor
if scanInfo.HostSensor.Get() == nil && hasHostSensorControls {
scanInfo.HostSensor.SetBool(askUserForHostSensor())
cautils.WarningDisplay(os.Stderr, "Warning: Kubernetes cluster nodes scanning is disabled. This is required to collect valuable data for certain controls. You can enable it using the --enable-host-scan flag\n")
}
if hostSensorVal := scanInfo.HostSensor.Get(); hostSensorVal != nil && *hostSensorVal {
hostSensorHandler, err := hostsensorutils.NewHostSensorHandler(k8s)
if err != nil {
cautils.WarningDisplay(os.Stderr, fmt.Sprintf("Warning: failed to create host sensor: %v\n", err.Error()))
return &hostsensorutils.HostSensorHandlerMock{}
}
return hostSensorHandler
}
return &hostsensorutils.HostSensorHandlerMock{}
}
func getFieldSelector(scanInfo *cautils.ScanInfo) resourcehandler.IFieldSelector {
if scanInfo.IncludeNamespaces != "" {
return resourcehandler.NewIncludeSelector(scanInfo.IncludeNamespaces)
}
if scanInfo.ExcludedNamespaces != "" {
return resourcehandler.NewExcludeSelector(scanInfo.ExcludedNamespaces)
}
return &resourcehandler.EmptySelector{}
}
func policyIdentifierNames(pi []reporthandling.PolicyIdentifier) string {
policiesNames := ""
for i := range pi {
policiesNames += pi[i].Name
if i+1 < len(pi) {
policiesNames += ","
}
}
if policiesNames == "" {
policiesNames = "all"
}
return policiesNames
}
// setSubmitBehavior - set up the desired cluster behavior regarding submission to the Armo BE
func setSubmitBehavior(scanInfo *cautils.ScanInfo, tenantConfig cautils.ITenantConfig) {
/*
If "First run (local config not found)" -
Default/keep-local - Do not send report
Submit - Create tenant & Submit report
If "Submitted" -
keep-local - Do not send report
Default/Submit - Submit report
*/
// do not submit control scanning
if !scanInfo.FrameworkScan {
scanInfo.Submit = false
return
}
if tenantConfig.IsConfigFound() { // config found in cache (submitted)
if !scanInfo.Local {
// Submit report
scanInfo.Submit = true
}
} else { // config not found in cache (not submitted)
if scanInfo.Submit {
// submit - Create tenant & Submit report
if err := tenantConfig.SetTenant(); err != nil {
fmt.Println(err)
}
}
}
}
// getPolicyGetter returns the policy getter - local file/github release/Armo API
func getPolicyGetter(loadPoliciesFromFile []string, accountID string, frameworkScope bool, downloadReleasedPolicy *getter.DownloadReleasedPolicy) getter.IPolicyGetter {
if len(loadPoliciesFromFile) > 0 {
return getter.NewLoadPolicy(loadPoliciesFromFile)
}
if accountID != "" && frameworkScope {
g := getter.GetArmoAPIConnector() // download policy from ARMO backend
g.SetCustomerGUID(accountID)
return g
}
if downloadReleasedPolicy == nil {
downloadReleasedPolicy = getter.NewDownloadReleasedPolicy()
}
return getDownloadReleasedPolicy(downloadReleasedPolicy)
}
// func setGetArmoAPIConnector(scanInfo *cautils.ScanInfo, customerGUID string) {
// g := getter.GetArmoAPIConnector() // download policy from ARMO backend
// g.SetCustomerGUID(customerGUID)
// scanInfo.PolicyGetter = g
// if scanInfo.ScanAll {
// frameworks, err := g.ListCustomFrameworks(customerGUID)
// if err != nil {
// glog.Error("failed to get custom frameworks") // handle error
// return
// }
// scanInfo.SetPolicyIdentifiers(frameworks, reporthandling.KindFramework)
// }
// }
// getConfigInputsGetter returns the config inputs getter - local file/github release/Armo API
func getConfigInputsGetter(ControlsInputs string, accountID string, downloadReleasedPolicy *getter.DownloadReleasedPolicy) getter.IControlsInputsGetter {
if len(ControlsInputs) > 0 {
return getter.NewLoadPolicy([]string{ControlsInputs})
}
if accountID != "" {
g := getter.GetArmoAPIConnector() // download config from ARMO backend
g.SetCustomerGUID(accountID)
return g
}
if downloadReleasedPolicy == nil {
downloadReleasedPolicy = getter.NewDownloadReleasedPolicy()
}
if err := downloadReleasedPolicy.SetRegoObjects(); err != nil { // if failed to pull config inputs, fallback to BE
cautils.WarningDisplay(os.Stderr, "Warning: failed to get config inputs from github release, this may affect the scanning results\n")
}
return downloadReleasedPolicy
}
func getDownloadReleasedPolicy(downloadReleasedPolicy *getter.DownloadReleasedPolicy) getter.IPolicyGetter {
if err := downloadReleasedPolicy.SetRegoObjects(); err != nil { // if failed to pull policy, fallback to cache
cautils.WarningDisplay(os.Stderr, "Warning: failed to get policies from github release, loading policies from cache\n")
return getter.NewLoadPolicy(getDefaultFrameworksPaths())
} else {
return downloadReleasedPolicy
}
}
func getDefaultFrameworksPaths() []string {
fwPaths := []string{}
for i := range getter.NativeFrameworks {
fwPaths = append(fwPaths, getter.GetDefaultPath(getter.NativeFrameworks[i]))
}
return fwPaths
}
func listFrameworksNames(policyGetter getter.IPolicyGetter) []string {
fw, err := policyGetter.ListFrameworks()
if err != nil {
fw = getDefaultFrameworksPaths()
}
return fw
}

View File

@@ -0,0 +1,117 @@
# Container image vulnerability adaptor interface proposal
## Rationale
source #287
### Big picture
* The Kubescape team is planning to create controls which take image vulnerabilities into account, for example: looking for public internet-facing workloads with critical vulnerabilities. These seriously affect the security health of a cluster, and therefore we think it is important to cover them. We believe that most container registries support or will support image scanning (like Harbor), and therefore the ability to get information from them is important.
* There is information in the image repository which is important for existing controls as well; they are incomplete without it (for example, see this issue: Non-root containers check is broken #19). This is not necessarily vulnerability related: it can be information in the image manifest (as in the issue above), or it can be related to the image BOM.
### Relation to this proposal
Multiple changes and design decisions need to be made before Kubescape supports the controls outlined above. However, a focal point of the whole picture is the ability to access vulnerability databases of container images. We anticipate that most container image repositories will support image vulnerability scanning; some major players already do. Since there is no single API which all of these data sources support, it is important to create an adaptation layer within Kubescape so different data sources can serve Kubescape's goals.
## High level design of Kubescape
### Layers
* Controls and Rules: the actual control logic implementation, the "tests" themselves. Implemented in Rego
* OPA engine: the [OPA](https://github.com/open-policy-agent/opa) rego interpreter
* Rules processor: Kubescape component; it enumerates and runs the controls while also preparing all the input data that the controls need to run
* Data sources: set of different modules providing data to the Rules processor so it can run the controls with them. Examples: Kubernetes objects, cloud vendor API objects and, added in this proposal, the vulnerability information
* Cloud Image Vulnerability adaption interface: the subject of this proposal, it gives a common interface for different registry/vulnerability vendors to adapt to.
* CIV adaptors: specific implementations of the CIV interface, for example a Harbor adaptor
```
-----------------------
| Controls/Rules (rego) |
-----------------------
|
-----------------------
| OPA engine |
-----------------------
|
-----------------------
| Rules processor |
-----------------------
|
-----------------------
| Data sources |
-----------------------
|
=======================
| CIV adaption interface| <- Adding this layer in this proposal
=======================
|
-----------------------
| Specific CIV adaptors | <- will be implemented based on this proposal
-----------------------
```
## Functionalities to cover
The interface needs to cover the following functionalities:
* Authentication against the information source (abstracted login)
* Triggering image scan (if applicable, the source might store vulnerabilities for images but cannot scan alone)
* Reading image scan status (with last scan date, etc.)
* Getting vulnerability information for a given image
* Getting image information
* Image manifests
* Image BOMs (bill of material)
## Go API proposal
```
/*type ContainerImageRegistryCredentials struct {
map[string]string
Password string
Tag string
Hash string
}*/
type ContainerImageIdentifier struct {
Registry string
Repository string
Tag string
Hash string
}
type ContainerImageScanStatus struct {
ImageID ContainerImageIdentifier
IsScanAvailable bool
IsBomAvailable bool
LastScanDate time.Time
}
type ContainerImageVulnerability struct {
ImageID ContainerImageIdentifier
// TBD
}
type ContainerImageInformation struct {
ImageID ContainerImageIdentifier
Bom []string
ImageManifest Manifest // will use here Docker package definition
}
type IContainerImageVulnerabilityAdaptor interface {
// Credentials come from user input (CLI or configuration file) and are abstracted at the string-to-string map level,
// so an example use would be a registry like "simpledockerregistry:80" with credentials like {"username":"joedoe","password":"abcd1234"}
Login(registry string, credentials map[string]string) error
// For "help" purposes
DescribeAdaptor() string
GetImagesScanStatus(imageIDs []ContainerImageIdentifier) ([]ContainerImageScanStatus, error)
GetImagesVulnerabilities(imageIDs []ContainerImageIdentifier) ([]ContainerImageVulnerability, error)
GetImagesInformation(imageIDs []ContainerImageIdentifier) ([]ContainerImageInformation, error)
}
```
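To illustrate how a vendor-specific adaptor could satisfy this interface, here is a minimal, hypothetical in-memory implementation — a testing sketch, not part of the proposal; it assumes the types above plus the standard `fmt` and `time` packages:

```
// mockCIVAdaptor serves canned results; a real adaptor (e.g. for Harbor)
// would translate each call into registry API requests.
type mockCIVAdaptor struct {
	registry string
	scans    map[ContainerImageIdentifier][]ContainerImageVulnerability
}

func (m *mockCIVAdaptor) Login(registry string, credentials map[string]string) error {
	if credentials["username"] == "" {
		return fmt.Errorf("missing username for registry %s", registry)
	}
	m.registry = registry
	return nil
}

func (m *mockCIVAdaptor) DescribeAdaptor() string {
	return "mock adaptor: returns canned scan results for testing"
}

func (m *mockCIVAdaptor) GetImagesScanStatus(imageIDs []ContainerImageIdentifier) ([]ContainerImageScanStatus, error) {
	statuses := make([]ContainerImageScanStatus, 0, len(imageIDs))
	for _, id := range imageIDs {
		_, scanned := m.scans[id]
		statuses = append(statuses, ContainerImageScanStatus{
			ImageID:         id,
			IsScanAvailable: scanned,
			LastScanDate:    time.Now(),
		})
	}
	return statuses, nil
}

func (m *mockCIVAdaptor) GetImagesVulnerabilities(imageIDs []ContainerImageIdentifier) ([]ContainerImageVulnerability, error) {
	vulns := []ContainerImageVulnerability{}
	for _, id := range imageIDs {
		vulns = append(vulns, m.scans[id]...)
	}
	return vulns, nil
}

func (m *mockCIVAdaptor) GetImagesInformation(imageIDs []ContainerImageIdentifier) ([]ContainerImageInformation, error) {
	infos := make([]ContainerImageInformation, 0, len(imageIDs))
	for _, id := range imageIDs {
		infos = append(infos, ContainerImageInformation{ImageID: id})
	}
	return infos, nil
}
```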

View File

@@ -13,7 +13,7 @@ kubescape scan framework nsa --exclude-namespaces kube-system,kube-public
| --- | --- | --- | --- |
| `-e`/`--exclude-namespaces` | Scan all namespaces | Namespaces to exclude from scanning. Recommended to exclude `kube-system` and `kube-public` namespaces |
| `-s`/`--silent` | Display progress messages | Silent progress messages |
| `-t`/`--fail-threshold` | `0` (do not fail) | fail command (return exit code 1) if result bellow threshold| `0` -> `100` |
| `-t`/`--fail-threshold` | `0` (do not fail) | fail command (return exit code 1) if result is below threshold| `0` -> `100` |
| `-f`/`--format` | `pretty-printer` | Output format | `pretty-printer`/`json`/`junit` |
| `-o`/`--output` | print to stdout | Save scan result in file |
| `--use-from` | | Load local framework object from specified path. If not used will download latest |
@@ -25,7 +25,7 @@ kubescape scan framework nsa --exclude-namespaces kube-system,kube-public
### Examples
* Scan a running Kubernetes cluster with [`nsa`](https://www.nsa.gov/News-Features/Feature-Stories/Article-View/Article/2716980/nsa-cisa-release-kubernetes-hardening-guidance/) framework
* Scan a running Kubernetes cluster with [`nsa`](https://www.nsa.gov/Press-Room/News-Highlights/Article/Article/2716980/nsa-cisa-release-kubernetes-hardening-guidance/) framework
```
kubescape scan framework nsa --exclude-namespaces kube-system,kube-public
```
@@ -85,5 +85,3 @@ kubescape scan framework nsa --use-from nsa.json
```
Kubescape is an open source project, and we welcome your feedback and ideas for improvement. We're also aiming to collaborate with the Kubernetes community to help make the tests themselves more robust and complete as Kubernetes develops.

Binary image file changed (62 KiB → 60 KiB); not shown.

View File

@@ -0,0 +1,85 @@
# Periodically Kubescape Scanning
You can scan your cluster periodically by adding a `CronJob` that will repeatedly trigger kubescape
* Setup [scanning & submitting](#scanning-and-submitting)
* Setup [scanning without submitting](#scanning-without-submitting)
## Scanning And Submitting
If you wish to periodically scan and submit the results to the [Kubescape SaaS version](https://portal.armo.cloud/), where you can benefit from the features the SaaS version provides, follow these instructions:
1. Apply kubescape namespace
```
kubectl apply -f ks-namespace.yaml
```
2. Apply serviceAccount and roles
```
kubectl apply -f ks-serviceAccount.yaml
```
3. Setup and apply configMap
Before you apply the configMap you need to set the account ID and cluster name in the `ks-configMap.yaml` file.
* Set cluster name:
Run `kubectl config current-context` and set the result in the `data.clusterName` field
* Set account ID:
1. Navigate to the [Kubescape SaaS version](https://portal.armo.cloud/) and login/sign up for free
2. Click the `Add Cluster` button on the top right of the page
<br/>
<img src="screenshots/add-cluster.png" alt="add-cluster">
3. Copy the value of `--account` and set it in the `data.customerGUID` field
<br/>
<img src="screenshots/account.png" alt="account">
Make sure the configMap looks as follows:
```
kind: ConfigMap
apiVersion: v1
metadata:
name: kubescape
labels:
app: kubescape
namespace: kubescape
data:
config.json: |
{
"customerGUID": "XXXXXXXX-XXXX-XXXX-XXXXXXXXXXXX",
"clusterName": "my-awesome-cluster-name"
}
```
Finally, apply the configMap
```
kubectl apply -f ks-configMap.yaml
```
4. Apply CronJob
Before you apply the cronJob, make sure the scanning frequency suits your needs
```
kubectl apply -f ks-cronJob-submit.yaml
```
## Scanning Without Submitting
If you wish to periodically scan but not submit the scan results, follow these instructions:
1. Apply kubescape namespace
```
kubectl apply -f ks-namespace.yaml
```
2. Apply serviceAccount and roles
```
kubectl apply -f ks-serviceAccount.yaml
```
3. Apply CronJob
Before you apply the cronJob, make sure the scanning frequency suits your needs
```
kubectl apply -f ks-cronJob-non-submit.yaml
```
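Once applied, you can verify the CronJob and trigger a one-off run manually (standard `kubectl` commands; they assume the manifests above were applied to the `kubescape` namespace):
```
kubectl -n kubescape get cronjobs
kubectl -n kubescape create job kubescape-manual --from=cronjob/kubescape
kubectl -n kubescape logs job/kubescape-manual
```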

View File

@@ -0,0 +1,14 @@
# ------------------- Kubescape User/Customer ID ------------------- #
kind: ConfigMap
apiVersion: v1
metadata:
name: kubescape
labels:
app: kubescape
namespace: kubescape
data:
config.json: |
{
"customerGUID": "<ID>",
"clusterName": "<cluster name>"
}

View File

@@ -0,0 +1,32 @@
apiVersion: batch/v1
kind: CronJob
metadata:
name: kubescape
labels:
app: kubescape
namespace: kubescape
spec:
# ┌────────────────── timezone (optional)
# | ┌───────────── minute (0 - 59)
# | │ ┌───────────── hour (0 - 23)
# | │ │ ┌───────────── day of the month (1 - 31)
# | │ │ │ ┌───────────── month (1 - 12)
# | │ │ │ │ ┌───────────── day of the week (0 - 6) (Sunday to Saturday;
# | │ │ │ │ │ 7 is also Sunday on some systems)
# | │ │ │ │ │
# | │ │ │ │ │
# CRON_TZ=UTC * * * * *
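# NOTE: the schedule below fires at 00:00 on the 1st day of every month - adjust it to your needs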
schedule: "0 0 1 * *"
jobTemplate:
spec:
template:
spec:
containers:
- name: kubescape
image: quay.io/armosec/kubescape:latest
imagePullPolicy: IfNotPresent
command: ["/bin/sh","-c"]
args:
- kubescape scan framework nsa
restartPolicy: OnFailure
serviceAccountName: kubescape-discovery

View File

@@ -0,0 +1,40 @@
apiVersion: batch/v1
kind: CronJob
metadata:
name: kubescape
labels:
app: kubescape
namespace: kubescape
spec:
# ┌────────────────── timezone (optional)
# | ┌───────────── minute (0 - 59)
# | │ ┌───────────── hour (0 - 23)
# | │ │ ┌───────────── day of the month (1 - 31)
# | │ │ │ ┌───────────── month (1 - 12)
# | │ │ │ │ ┌───────────── day of the week (0 - 6) (Sunday to Saturday;
# | │ │ │ │ │ 7 is also Sunday on some systems)
# | │ │ │ │ │
# | │ │ │ │ │
# CRON_TZ=UTC * * * * *
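# NOTE: the schedule below fires at 00:00 on the 1st day of every month - adjust it to your needs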
schedule: "0 0 1 * *"
jobTemplate:
spec:
template:
spec:
containers:
- name: kubescape
image: quay.io/armosec/kubescape:latest
imagePullPolicy: IfNotPresent
command: ["/bin/sh","-c"]
args:
- kubescape scan framework nsa --submit
volumeMounts:
- name: kubescape-config-volume
mountPath: /root/.kubescape/config.json
subPath: config.json
restartPolicy: OnFailure
serviceAccountName: kubescape-discovery
volumes:
- name: kubescape-config-volume
configMap:
name: kubescape

View File

@@ -0,0 +1,7 @@
# ------------------- Kubescape Namespace ------------------- #
kind: Namespace
apiVersion: v1
metadata:
name: kubescape
labels:
app: kubescape

View File

@@ -0,0 +1,61 @@
---
# ------------------- Kubescape Service Account ------------------- #
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app: kubescape
name: kubescape-discovery
namespace: kubescape
---
# ------------------- Kubescape Role & Role Binding ------------------- #
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubescape-discovery-role
namespace: kubescape
rules:
- apiGroups: ["*"]
resources: ["*"]
verbs: ["get", "list", "describe"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: kubescape-discovery-binding
namespace: kubescape
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubescape-discovery-role
subjects:
- kind: ServiceAccount
name: kubescape-discovery
---
# ------------------- Kubescape Cluster Role & Cluster Role Binding ------------------- #
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubescape-discovery-clusterroles
# "namespace" omitted since ClusterRoles are not namespaced
rules:
- apiGroups: ["*"]
resources: ["*"]
verbs: ["get", "list", "describe"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubescape-discovery-role-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubescape-discovery-clusterroles
subjects:
- kind: ServiceAccount
name: kubescape-discovery
namespace: kubescape

Binary image file added (41 KiB); not shown.

Binary image file added (120 KiB); not shown.

View File

@@ -1,3 +1,10 @@
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
#
# This file is DEPRECATED, please navigate to the official docs ->
# https://github.com/armosec/kubescape/tree/master/examples/cronJob-support/README.md
#
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
---
# ------------------- Kubescape Service Account ------------------- #
apiVersion: v1

View File

@@ -0,0 +1,179 @@
# Kubescape Exceptions
Kubescape Exceptions is the proper way of excluding failed resources from affecting the risk score.
E.g., when a `kube-system` resource fails a control and that is acceptable, simply add the resource to the exceptions configuration.
## Definitions
* `name`- Exception name - unique name representing the exception
* `policyType`- Do not change
* `actions`- List of available actions. Currently only `alertOnly` is supported
* `resources`- List of resources to apply this exception on
* `designatorType: Attributes`- An attribute-based declaration {key: value}
Supported keys:
* `name`: k8s resource name (case-sensitive, regex supported)
* `kind`: k8s resource kind (case-sensitive, regex supported)
* `namespace`: k8s resource namespace (case-sensitive, regex supported)
* `cluster`: k8s cluster name (usually it is the `current-context`) (case-sensitive, regex supported)
* resource labels as key value (case-sensitive, regex NOT supported)
* `posturePolicies`- An attribute-based declaration {key: value}
* `frameworkName` - Framework names can be found [here](https://github.com/armosec/regolibrary/tree/master/frameworks)
* `controlName` - Control names can be found [here](https://github.com/armosec/regolibrary/tree/master/controls)
* `controlID` - Not yet supported
* `ruleName` - Rule names can be found [here](https://github.com/armosec/regolibrary/tree/master/rules)
## Usage
The `resources` list and the `posturePolicies` list are designed to be a combination of the resources and policies to exclude
> You must declare at least one resource and one policy
e.g. If you wish to exclude all namespaces with the label `"environment": "dev"`, the resources list should look as follows:
```
"resources": [
{
"designatorType": "Attributes",
"attributes": {
"namespace": ".*",
"environment": "dev"
}
}
]
```
But if you wish to exclude all namespaces **OR** any resource with the label `"environment": "dev"`, the resources list should look as follows:
```
"resources": [
{
"designatorType": "Attributes",
"attributes": {
"namespace": ".*"
}
},
{
"designatorType": "Attributes",
"attributes": {
"environment": "dev"
}
}
]
```
The same logic works with the `posturePolicies` list:
e.g. If you wish to exclude the resources declared in the `resources` list that failed when scanning the `NSA` framework **AND** failed the `Allowed hostPath` control, the `posturePolicies` list should look as follows:
```
"posturePolicies": [
{
"frameworkName": "NSA",
"controlName": "Allowed hostPath"
}
]
```
But if you wish to exclude the resources declared in the `resources` list that failed when scanning the `NSA` framework **OR** failed the `Allowed hostPath` control, the `posturePolicies` list should look as follows:
```
"posturePolicies": [
{
"frameworkName": "NSA"
},
{
"controlName": "Allowed hostPath"
}
]
```
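To make the combination rules concrete, here is a conceptual sketch of the matching semantics in Go — not Kubescape's actual implementation. Per the definitions above, resource labels are matched literally; for brevity this sketch treats every attribute as a regular expression:
```
package exceptions

import "regexp"

// Hypothetical, simplified types mirroring the JSON structure above.
type Designator struct {
	Attributes map[string]string // e.g. {"namespace": ".*", "kind": "Deployment"}
}

type PosturePolicy struct {
	FrameworkName string
	ControlName   string
}

type Exception struct {
	Resources       []Designator
	PosturePolicies []PosturePolicy
}

// matchesDesignator: every declared attribute must match
// (AND inside a single designator).
func matchesDesignator(d Designator, attrs map[string]string) bool {
	for key, pattern := range d.Attributes {
		value, ok := attrs[key]
		if !ok {
			return false
		}
		if matched, err := regexp.MatchString(pattern, value); err != nil || !matched {
			return false
		}
	}
	return true
}

// matchesPolicy: empty fields act as wildcards, so {"frameworkName": "NSA"}
// matches every control inside the NSA framework.
func matchesPolicy(p PosturePolicy, framework, control string) bool {
	if p.FrameworkName != "" && p.FrameworkName != framework {
		return false
	}
	if p.ControlName != "" && p.ControlName != control {
		return false
	}
	return true
}

// Applies reports whether the exception excludes a failed result:
// OR across resources, OR across posturePolicies, AND between the two lists.
func (e Exception) Applies(attrs map[string]string, framework, control string) bool {
	resourceMatch, policyMatch := false, false
	for _, d := range e.Resources {
		if matchesDesignator(d, attrs) {
			resourceMatch = true
			break
		}
	}
	for _, p := range e.PosturePolicies {
		if matchesPolicy(p, framework, control) {
			policyMatch = true
			break
		}
	}
	return resourceMatch && policyMatch
}
```
An exceptions file is passed to a scan with the `--exceptions` flag, e.g. `kubescape scan framework nsa --exceptions exceptions.json`.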
## Examples
Here are some examples demonstrating the different ways the exceptions file can be configured
### Exclude control
Exclude the ["Allowed hostPath" control](https://github.com/armosec/regolibrary/blob/master/controls/allowedhostpath.json#L2) by declaring the control in the `"posturePolicies"` section.
The `resources` list matches every resource kind (`".*"`), so the exception applies cluster-wide:
```
[
{
"name": "exclude-allowed-hostPath-control",
"policyType": "postureExceptionPolicy",
"actions": [
"alertOnly"
],
"resources": [
{
"designatorType": "Attributes",
"attributes": {
"kind": ".*"
}
}
],
"posturePolicies": [
{
"controlName": "Allowed hostPath"
}
]
}
]
```
### Exclude deployments in the default namespace that failed the "Allowed hostPath" control
```
[
{
"name": "exclude-deployments-in-ns-default",
"policyType": "postureExceptionPolicy",
"actions": [
"alertOnly"
],
"resources": [
{
"designatorType": "Attributes",
"attributes": {
"namespace": "default",
"kind": "Deployment"
}
}
],
"posturePolicies": [
{
"controlName": "Allowed hostPath"
}
]
}
]
```
### Exclude resources with label "app=nginx" running in a minikube cluster that failed the "NSA" or "MITRE" framework
```
[
{
"name": "exclude-nginx-minikube",
"policyType": "postureExceptionPolicy",
"actions": [
"alertOnly"
],
"resources": [
{
"designatorType": "Attributes",
"attributes": {
"cluster": "minikube",
"app": "nginx"
}
}
],
"posturePolicies": [
{
"frameworkName": "NSA"
},
{
"frameworkName": "MITRE"
}
]
}
]
```

View File

@@ -0,0 +1,22 @@
[
{
"name": "exclude-allowed-hostPath-control",
"policyType": "postureExceptionPolicy",
"actions": [
"alertOnly"
],
"resources": [
{
"designatorType": "Attributes",
"attributes": {
"kind": ".*"
}
}
],
"posturePolicies": [
{
"controlName": "Allowed hostPath"
}
]
}
]

View File

@@ -0,0 +1,23 @@
[
{
"name": "exclude-deployments-in-ns-default",
"policyType": "postureExceptionPolicy",
"actions": [
"alertOnly"
],
"resources": [
{
"designatorType": "Attributes",
"attributes": {
"namespace": "default",
"kind": "Deployment"
}
}
],
"posturePolicies": [
{
"controlName": "Allowed hostPath"
}
]
}
]

View File

@@ -28,6 +28,12 @@
"posturePolicies": [
{
"frameworkName": "NSA"
},
{
"frameworkName": "MITRE"
},
{
"frameworkName": "ArmoBest"
}
]
}

View File

@@ -0,0 +1,26 @@
[
{
"name": "exclude-nginx-in-minikube",
"policyType": "postureExceptionPolicy",
"actions": [
"alertOnly"
],
"resources": [
{
"designatorType": "Attributes",
"attributes": {
"cluster": "minikube",
"app": "nginx"
}
}
],
"posturePolicies": [
{
"frameworkName": "NSA"
},
{
"frameworkName": "MITRE"
}
]
}
]

View File

@@ -0,0 +1,29 @@
apiVersion: v2
name: kubescape
description:
Kubescape is the first open-source tool for testing whether Kubernetes is deployed securely according to multiple frameworks:
regulatory frameworks, customized company policies and DevSecOps best practices, such as the [NSA-CISA](https://www.armosec.io/blog/kubernetes-hardening-guidance-summary-by-armo) and the [MITRE ATT&CK®](https://www.microsoft.com/security/blog/2021/03/23/secure-containerized-environments-with-updated-threat-matrix-for-kubernetes/) .
Kubescape scans K8s clusters, YAML files, and HELM charts, detects misconfigurations and software vulnerabilities at early stages of the CI/CD pipeline, and provides an instant risk score and risk trends over time.
Kubescape integrates natively with other DevOps tools, including Jenkins, CircleCI and Github workflows.
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 1.0.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "v1.0.128"

View File

@@ -0,0 +1,27 @@
# kubescape
![Version: 1.0.0](https://img.shields.io/badge/Version-1.0.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.0.128](https://img.shields.io/badge/AppVersion-v1.0.128-informational?style=flat-square)
Kubescape is the first open-source tool for testing whether Kubernetes is deployed securely according to multiple frameworks: regulatory frameworks, customized company policies and DevSecOps best practices, such as the [NSA-CISA](https://www.armosec.io/blog/kubernetes-hardening-guidance-summary-by-armo) and the [MITRE ATT&CK®](https://www.microsoft.com/security/blog/2021/03/23/secure-containerized-environments-with-updated-threat-matrix-for-kubernetes/) . Kubescape scans K8s clusters, YAML files, and HELM charts, detects misconfigurations and software vulnerabilities at early stages of the CI/CD pipeline, and provides an instant risk score and risk trends over time. Kubescape integrates natively with other DevOps tools, including Jenkins, CircleCI and Github workflows.
## Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| affinity | object | `{}` | |
| configMap | object | `{"create":true,"params":{"clusterName":"<MyK8sClusterName>","customerGUID":"<MyGUID>"}}` | ARMO customer information |
| fullnameOverride | string | `""` | |
| image | object | `{"imageName":"kubescape","pullPolicy":"IfNotPresent","repository":"quay.io/armosec","tag":"latest"}` | Image and version to deploy |
| imagePullSecrets | list | `[]` | |
| nameOverride | string | `""` | |
| nodeSelector | object | `{}` | |
| podAnnotations | object | `{}` | |
| podSecurityContext | object | `{}` | |
| resources | object | `{"limits":{"cpu":"500m","memory":"512Mi"},"requests":{"cpu":"200m","memory":"256Mi"}}` | Default resources for running the service in cluster |
| schedule | string | `"0 0 * * *"` | Frequency of running the scan |
| securityContext | object | `{}` | |
| serviceAccount | object | `{"annotations":{},"create":true,"name":"kubescape-discovery"}` | Service account that runs the scan and has permissions to view the cluster |
| tolerations | list | `[]` | |
----------------------------------------------
Autogenerated from chart metadata using [helm-docs v1.5.0](https://github.com/norwoodj/helm-docs/releases/v1.5.0)

View File

@@ -0,0 +1,62 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "kubescape.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "kubescape.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "kubescape.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "kubescape.labels" -}}
helm.sh/chart: {{ include "kubescape.chart" . }}
{{ include "kubescape.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "kubescape.selectorLabels" -}}
app.kubernetes.io/name: {{ include "kubescape.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "kubescape.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "kubescape.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
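
The helpers above encode the 63-character limit that Kubernetes imposes on DNS label names. As a rough Go sketch of the same truncation rule (illustrative only, not project code; the template additionally falls back to the chart name when no override is set):

package main

import (
	"fmt"
	"strings"
)

// fullname mirrors the kubescape.fullname helper: prefer an explicit
// override, otherwise join release and chart name, then truncate to the
// 63-character DNS label limit and trim any trailing "-".
func fullname(override, release, chart string) string {
	name := override
	if name == "" {
		if strings.Contains(release, chart) {
			name = release
		} else {
			name = release + "-" + chart
		}
	}
	if len(name) > 63 {
		name = name[:63]
	}
	return strings.TrimSuffix(name, "-")
}

func main() {
	fmt.Println(fullname("", "my-release", "kubescape")) // my-release-kubescape
}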

View File

@@ -0,0 +1,11 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: {{ include "kubescape.fullname" . }}
  labels:
    {{- include "kubescape.labels" . | nindent 4 }}
rules:
  - apiGroups: ["*"]
    resources: ["*"]
    verbs: ["get", "list", "describe"]

View File

@@ -0,0 +1,16 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: {{ include "kubescape.fullname" . }}
  labels:
    {{- include "kubescape.labels" . | nindent 4 }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: {{ include "kubescape.fullname" . }}
subjects:
  - kind: ServiceAccount
    name: {{ include "kubescape.serviceAccountName" . }}
    namespace: {{ .Release.Namespace | quote }}

View File

@@ -0,0 +1,14 @@
{{- if .Values.configMap.create -}}
kind: ConfigMap
apiVersion: v1
metadata:
  name: {{ include "kubescape.fullname" . }}-configmap
  labels:
    {{- include "kubescape.labels" . | nindent 4 }}
data:
  config.json: |
    {
      "customerGUID": "{{ .Values.configMap.params.customerGUID }}",
      "clusterName": "{{ .Values.configMap.params.clusterName }}"
    }
{{- end }}
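
The rendered config.json carries only the two ARMO identifiers set in values.yaml. A hypothetical Go struct that would parse it (field and type names are illustrative; this is not kubescape's actual loader):

package main

import (
	"encoding/json"
	"fmt"
)

// clusterConfig mirrors the keys of the rendered config.json above.
type clusterConfig struct {
	CustomerGUID string `json:"customerGUID"`
	ClusterName  string `json:"clusterName"`
}

func main() {
	raw := []byte(`{"customerGUID": "<MyGUID>", "clusterName": "<MyK8sClusterName>"}`)
	var cfg clusterConfig
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg)
}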

View File

@@ -0,0 +1,28 @@
apiVersion: batch/v1
kind: CronJob
metadata:
  name: {{ include "kubescape.fullname" . }}
  labels:
    {{- include "kubescape.labels" . | nindent 4 }}
spec:
  schedule: "{{ .Values.schedule }}"
  jobTemplate:
    spec:
      template:
        spec:
          containers:
            - name: {{ .Chart.Name }}
              image: "{{ .Values.image.repository }}/{{ .Values.image.imageName }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
              imagePullPolicy: {{ .Values.image.pullPolicy }}
              command: ["/bin/sh", "-c"]
              args: ["kubescape scan framework nsa --submit"]
              volumeMounts:
                - name: kubescape-config-volume
                  mountPath: /root/.kubescape/config.json
                  subPath: config.json
          restartPolicy: OnFailure
          serviceAccountName: {{ include "kubescape.serviceAccountName" . }}
          volumes:
            - name: kubescape-config-volume
              configMap:
                name: {{ include "kubescape.fullname" . }}-configmap

View File

@@ -0,0 +1,11 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: {{ include "kubescape.fullname" . }}
  labels:
    {{- include "kubescape.labels" . | nindent 4 }}
rules:
  - apiGroups: ["*"]
    resources: ["*"]
    verbs: ["get", "list", "describe"]

View File

@@ -0,0 +1,16 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: {{ include "kubescape.fullname" . }}
  labels:
    {{- include "kubescape.labels" . | nindent 4 }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: {{ include "kubescape.fullname" . }}
subjects:
  - kind: ServiceAccount
    name: {{ include "kubescape.serviceAccountName" . }}
    namespace: {{ .Release.Namespace | quote }}

View File

@@ -0,0 +1,12 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "kubescape.serviceAccountName" . }}
  labels:
    {{- include "kubescape.labels" . | nindent 4 }}
  {{- with .Values.serviceAccount.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
{{- end }}

View File

@@ -0,0 +1,74 @@
# Default values for kubescape.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
# -- Frequency of running the scan
#  ┌───────────── timezone (optional)
#  │   ┌───────────── minute (0 - 59)
#  │   │ ┌───────────── hour (0 - 23)
#  │   │ │ ┌───────────── day of the month (1 - 31)
#  │   │ │ │ ┌───────────── month (1 - 12)
#  │   │ │ │ │ ┌───────────── day of the week (0 - 6) (Sunday to Saturday;
#  │   │ │ │ │ │               7 is also Sunday on some systems)
#  │   │ │ │ │ │
#  UTC * * * * *
schedule: "* * 1 * *"

# -- Image and version to deploy
image:
  repository: quay.io/armosec
  imageName: kubescape
  pullPolicy: Always
  # Overrides the image tag whose default is the chart appVersion.
  tag: latest

imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""

# -- Service account that runs the scan and has permissions to view the cluster
serviceAccount:
  # Specifies whether a service account should be created
  create: true
  # Annotations to add to the service account
  annotations: {}
  # The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  name: "kubescape-discovery"

# -- ARMO customer information
configMap:
  create: false
  params:
    customerGUID: <MyGUID>
    clusterName: <MyK8sClusterName>

podAnnotations: {}

podSecurityContext: {}
  # fsGroup: 2000

securityContext: {}
  # capabilities:
  #   drop:
  #     - ALL
  # readOnlyRootFilesystem: true
  # runAsNonRoot: true
  # runAsUser: 1000

# -- Default resources for running the service in cluster
resources:
  limits:
    cpu: 500m
    memory: 512Mi
  requests:
    cpu: 200m
    memory: 256Mi

nodeSelector: {}

tolerations: []

affinity: {}
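
Note that "* * 1 * *" runs every minute on the first day of each month, while the chart README documents "0 0 * * *" (daily at midnight) as the default; Kubernetes only rejects an invalid expression at apply time. A quick pre-install sanity check is possible with the third-party robfig/cron parser (an assumed illustration, not a dependency of this repository):

package main

import (
	"fmt"
	"time"

	"github.com/robfig/cron/v3" // assumed third-party parser, used for illustration
)

func main() {
	// "0 0 * * *" fires daily at midnight; "* * 1 * *" fires every
	// minute on the first day of each month.
	for _, expr := range []string{"0 0 * * *", "* * 1 * *"} {
		sched, err := cron.ParseStandard(expr)
		if err != nil {
			fmt.Printf("invalid schedule %q: %v\n", expr, err)
			continue
		}
		fmt.Printf("%q next fires at %s\n", expr, sched.Next(time.Now().UTC()))
	}
}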

File diff suppressed because it is too large

File diff suppressed because it is too large

go.mod
View File

@@ -3,11 +3,12 @@ module github.com/armosec/kubescape
go 1.17
require (
github.com/armosec/armoapi-go v0.0.8
github.com/armosec/k8s-interface v0.0.5
github.com/armosec/opa-utils v0.0.13
github.com/armosec/armoapi-go v0.0.40
github.com/armosec/k8s-interface v0.0.54
github.com/armosec/opa-utils v0.0.92
github.com/armosec/rbac-utils v0.0.11
github.com/armosec/utils-go v0.0.3
github.com/briandowns/spinner v1.16.0
github.com/briandowns/spinner v1.18.0
github.com/enescakir/emoji v1.0.0
github.com/fatih/color v1.13.0
github.com/gofrs/uuid v4.1.0+incompatible
@@ -17,10 +18,12 @@ require (
github.com/open-policy-agent/opa v0.33.1
github.com/satori/go.uuid v1.2.0
github.com/spf13/cobra v1.2.1
github.com/stretchr/testify v1.7.0
gopkg.in/yaml.v2 v2.4.0
k8s.io/api v0.22.2
k8s.io/apimachinery v0.22.2
k8s.io/client-go v0.22.2
sigs.k8s.io/yaml v1.2.0
)
require (
@@ -33,6 +36,7 @@ require (
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
github.com/OneOfOne/xxhash v1.2.8 // indirect
github.com/armosec/utils-k8s-go v0.0.1 // indirect
github.com/aws/aws-sdk-go v1.41.11 // indirect
github.com/coreos/go-oidc v2.2.1+incompatible // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/docker/docker v20.10.9+incompatible // indirect
@@ -45,30 +49,35 @@ require (
github.com/go-logr/logr v0.4.0 // indirect
github.com/gobwas/glob v0.2.3 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/google/go-cmp v0.5.5 // indirect
github.com/google/gofuzz v1.1.0 // indirect
github.com/googleapis/gax-go/v2 v2.0.5 // indirect
github.com/googleapis/gnostic v0.5.5 // indirect
github.com/imdario/mergo v0.3.12 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/json-iterator/go v1.1.11 // indirect
github.com/mattn/go-colorable v0.1.9 // indirect
github.com/mattn/go-runewidth v0.0.9 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.1 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.0.1 // indirect
github.com/opencontainers/image-spec v1.0.2 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/pquerna/cachecontrol v0.1.0 // indirect
github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/yashtewari/glob-intersection v0.0.0-20180916065949-5c77d914dd0b // indirect
go.opencensus.io v0.23.0 // indirect
go.uber.org/atomic v1.7.0 // indirect
go.uber.org/multierr v1.6.0 // indirect
go.uber.org/zap v1.19.1 // indirect
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83 // indirect
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97 // indirect
golang.org/x/net v0.0.0-20210825183410-e898025ed96a // indirect
golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1 // indirect
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf // indirect
@@ -76,7 +85,10 @@ require (
golang.org/x/text v0.3.6 // indirect
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
gonum.org/v1/gonum v0.9.1 // indirect
google.golang.org/api v0.44.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c // indirect
google.golang.org/grpc v1.38.0 // indirect
google.golang.org/protobuf v1.27.1 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/square/go-jose.v2 v2.6.0 // indirect
@@ -85,5 +97,4 @@ require (
k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a // indirect
sigs.k8s.io/controller-runtime v0.10.2 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.1.2 // indirect
sigs.k8s.io/yaml v1.2.0 // indirect
)

go.sum
View File

@@ -84,13 +84,20 @@ github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/armosec/armoapi-go v0.0.2/go.mod h1:vIK17yoKbJRQyZXWWLe3AqfqCRITxW8qmSkApyq5xFs=
github.com/armosec/armoapi-go v0.0.7/go.mod h1:iaVVGyc23QGGzAdv4n+szGQg3Rbpixn9yQTU3qWRpaw=
github.com/armosec/armoapi-go v0.0.8 h1:JPa9rZynuE2RucamDh6dsy/sjCScmWDsyt1zagJFCDo=
github.com/armosec/armoapi-go v0.0.8/go.mod h1:iaVVGyc23QGGzAdv4n+szGQg3Rbpixn9yQTU3qWRpaw=
github.com/armosec/k8s-interface v0.0.5 h1:DWQXZNMSsYQeLQ6xpB21ueFMR9oFnz28iWQTNn31TAk=
github.com/armosec/k8s-interface v0.0.5/go.mod h1:xxS+V5QT3gVQTwZyAMMDrYLWGrfKOpiJ7Jfhfa0w9sM=
github.com/armosec/opa-utils v0.0.13 h1:QkmmYX0lzC7ZNGetyD8ysRKQHgJhjMfvRUW2cp+hz2o=
github.com/armosec/opa-utils v0.0.13/go.mod h1:E0mFTVx+4BYAVvO2hxWnIniv/IZIogRCak8BkKd7KK4=
github.com/armosec/armoapi-go v0.0.23/go.mod h1:iaVVGyc23QGGzAdv4n+szGQg3Rbpixn9yQTU3qWRpaw=
github.com/armosec/armoapi-go v0.0.40 h1:KQRJXFqw95s6cV7HoGgw1x8qrRZ9eNVze//yQbo24Lk=
github.com/armosec/armoapi-go v0.0.40/go.mod h1:iaVVGyc23QGGzAdv4n+szGQg3Rbpixn9yQTU3qWRpaw=
github.com/armosec/k8s-interface v0.0.8/go.mod h1:xxS+V5QT3gVQTwZyAMMDrYLWGrfKOpiJ7Jfhfa0w9sM=
github.com/armosec/k8s-interface v0.0.37/go.mod h1:vHxGWqD/uh6+GQb9Sqv7OGMs+Rvc2dsFVc0XtgRh1ZU=
github.com/armosec/k8s-interface v0.0.50/go.mod h1:vHxGWqD/uh6+GQb9Sqv7OGMs+Rvc2dsFVc0XtgRh1ZU=
github.com/armosec/k8s-interface v0.0.54 h1:1sQeoEZA5bgpXVibXhEiTSeLd3GKY5NkTOeewdgR0Bs=
github.com/armosec/k8s-interface v0.0.54/go.mod h1:vHxGWqD/uh6+GQb9Sqv7OGMs+Rvc2dsFVc0XtgRh1ZU=
github.com/armosec/opa-utils v0.0.64/go.mod h1:6tQP8UDq2EvEfSqh8vrUdr/9QVSCG4sJfju1SXQOn4c=
github.com/armosec/opa-utils v0.0.92 h1:RzzORhfLx9Evc2ceFtNRoehxUFzwlvK5iMtR6fLWzZc=
github.com/armosec/opa-utils v0.0.92/go.mod h1:ZOXYVTtuyrV4TldcfbzgRqP6F9Drlf4hB0zr210OXgM=
github.com/armosec/rbac-utils v0.0.1/go.mod h1:pQ8CBiij8kSKV7aeZm9FMvtZN28VgA7LZcYyTWimq40=
github.com/armosec/rbac-utils v0.0.11 h1:SCiVLqUeV+WGpUsWbOBt6jKkFAd62jztuzB6PIgHz7w=
github.com/armosec/rbac-utils v0.0.11/go.mod h1:Ex/IdGWhGv9HZq6Hs8N/ApzCKSIvpNe/ETqDfnuyah0=
github.com/armosec/utils-go v0.0.2/go.mod h1:itWmRLzRdsnwjpEOomL0mBWGnVNNIxSjDAdyc+b0iUo=
github.com/armosec/utils-go v0.0.3 h1:uyQI676yRciQM0sSN9uPoqHkbspTxHO0kmzXhBeE/xU=
github.com/armosec/utils-go v0.0.3/go.mod h1:itWmRLzRdsnwjpEOomL0mBWGnVNNIxSjDAdyc+b0iUo=
@@ -98,6 +105,8 @@ github.com/armosec/utils-k8s-go v0.0.1 h1:Ay3y7fW+4+FjVc0+obOWm8YsnEvM31vPAVoKTy
github.com/armosec/utils-k8s-go v0.0.1/go.mod h1:qrU4pmY2iZsOb39Eltpm0sTTNM3E4pmeyWx4dgDUC2U=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/aws/aws-sdk-go v1.41.1/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
github.com/aws/aws-sdk-go v1.41.11 h1:QLouWsiYQ8i22kD8k58Dpdhio1A0MpT7bg9ZNXqEjuI=
github.com/aws/aws-sdk-go v1.41.11/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
@@ -110,8 +119,8 @@ github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqO
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
github.com/briandowns/spinner v1.16.0 h1:DFmp6hEaIx2QXXuqSJmtfSBSAjRmpGiKG6ip2Wm/yOs=
github.com/briandowns/spinner v1.16.0/go.mod h1:QOuQk7x+EaDASo80FEXwlwiA+j/PPIcX3FScO+3/ZPQ=
github.com/briandowns/spinner v1.18.0 h1:SJs0maNOs4FqhBwiJ3Gr7Z1D39/rukIVGQvpNZVHVcM=
github.com/briandowns/spinner v1.18.0/go.mod h1:QOuQk7x+EaDASo80FEXwlwiA+j/PPIcX3FScO+3/ZPQ=
github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
github.com/bytecodealliance/wasmtime-go v0.30.0 h1:WfYpr4WdqInt8m5/HvYinf+HrSEAIhItKIcth+qb1h4=
github.com/bytecodealliance/wasmtime-go v0.30.0/go.mod h1:q320gUxqyI8yB+ZqRuaJOEnGkAnHh6WtJjMaT2CW4wI=
@@ -329,9 +338,11 @@ github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLe
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go v2.0.0+incompatible h1:j0GKcs05QVmm7yesiZq2+9cxHkNK9YM6zKx4D2qucQU=
github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw=
@@ -376,7 +387,9 @@ github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NH
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
@@ -481,8 +494,9 @@ github.com/open-policy-agent/opa v0.33.1 h1:EJe00U5H82iMsemgxcNm9RFwjW8zPyRMvL+0
github.com/open-policy-agent/opa v0.33.1/go.mod h1:Zb+IdRe0s7M++Rv/KgyuB0qvxO3CUpQ+ZW5v+w/cRUo=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
@@ -683,8 +697,9 @@ golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83 h1:/ZScEX8SfEmUGRHs0gxpqteO5nfNW6axyZbBdw9A12g=
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97 h1:/UOmuWzQfxxo9UtlXMwuQU8CMgg1eZXqTRwkSQJWKOI=
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -880,6 +895,7 @@ golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210817190340-bfb29a6856f2/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -1006,6 +1022,7 @@ google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34q
google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
google.golang.org/api v0.44.0 h1:URs6qR1lAxDsqWITsQXI4ZkGiYJ5dHtRNiCpfs2OeKA=
google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -1063,6 +1080,7 @@ google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6D
google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c h1:wtujag7C+4D6KMoulW9YauvK2lgdvCMS260jsqqBXr0=
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
@@ -1087,6 +1105,7 @@ google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/grpc v1.38.0 h1:/9BgsAsa5nWe26HqOlvlgJnqBuktYOLCgjCPqsa56W0=
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=

View File

@@ -0,0 +1,208 @@
package hostsensorutils
import (
"fmt"
"io"
"strings"
"sync"
"time"
"github.com/armosec/k8s-interface/k8sinterface"
"github.com/armosec/kubescape/cautils"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/yaml"
"k8s.io/apimachinery/pkg/watch"
appsapplyv1 "k8s.io/client-go/applyconfigurations/apps/v1"
coreapplyv1 "k8s.io/client-go/applyconfigurations/core/v1"
)
type HostSensorHandler struct {
HostSensorPort int32
HostSensorPodNames map[string]string //map from pod names to node names
IsReady <-chan bool //readonly chan
k8sObj *k8sinterface.KubernetesApi
DaemonSet *appsv1.DaemonSet
podListLock sync.RWMutex
gracePeriod int64
}
func NewHostSensorHandler(k8sObj *k8sinterface.KubernetesApi) (*HostSensorHandler, error) {
if k8sObj == nil {
return nil, fmt.Errorf("nil k8s interface received")
}
hsh := &HostSensorHandler{
k8sObj: k8sObj,
HostSensorPodNames: map[string]string{},
gracePeriod: int64(15),
}
// Don't deploy on a cluster with no nodes. Some cloud providers prevent termination of K8s objects on clusters with no nodes!
if nodeList, err := k8sObj.KubernetesClient.CoreV1().Nodes().List(k8sObj.Context, metav1.ListOptions{}); err != nil || len(nodeList.Items) == 0 {
if err == nil {
err = fmt.Errorf("no nodes to scan")
}
return hsh, fmt.Errorf("in NewHostSensorHandler, failed to get nodes list: %v", err)
}
return hsh, nil
}
func (hsh *HostSensorHandler) Init() error {
// deploy the YAML
// store namespace + port
// store pod names
// make sure all pods are running; after X seconds, treat them as running anyway and log an error for the pods that are not yet running
cautils.ProgressTextDisplay("Installing host sensor")
cautils.StartSpinner()
defer cautils.StopSpinner()
if err := hsh.applyYAML(); err != nil {
return fmt.Errorf("in HostSensorHandler init failed to apply YAML: %v", err)
}
hsh.populatePodNamesToNodeNames()
if err := hsh.checkPodForEachNode(); err != nil {
fmt.Printf("failed to validate host-sensor pods status: %v", err)
}
return nil
}
func (hsh *HostSensorHandler) applyYAML() error {
dec := yaml.NewDocumentDecoder(io.NopCloser(strings.NewReader(hostSensorYAML)))
// apply namespace
singleYAMLBytes := make([]byte, 4096)
if readLen, err := dec.Read(singleYAMLBytes); err != nil {
return fmt.Errorf("failed to read YAML of namespace: %v", err)
} else {
singleYAMLBytes = singleYAMLBytes[:readLen]
}
namespaceAC := &coreapplyv1.NamespaceApplyConfiguration{}
if err := yaml.Unmarshal(singleYAMLBytes, namespaceAC); err != nil {
return fmt.Errorf("failed to Unmarshal YAML of namespace: %v", err)
}
namespaceName := ""
if ns, err := hsh.k8sObj.KubernetesClient.CoreV1().Namespaces().Apply(hsh.k8sObj.Context, namespaceAC, metav1.ApplyOptions{
FieldManager: "kubescape",
}); err != nil {
return fmt.Errorf("failed to apply YAML of namespace: %v", err)
} else {
namespaceName = ns.Name
}
// apply DaemonSet
daemonAC := &appsapplyv1.DaemonSetApplyConfiguration{}
singleYAMLBytes = make([]byte, 4096)
if readLen, err := dec.Read(singleYAMLBytes); err != nil {
if erra := hsh.tearDownNamespace(namespaceName); erra != nil {
err = fmt.Errorf("%v; in addition: %v", err, erra)
}
return fmt.Errorf("failed to read YAML of DaemonSet: %v", err)
} else {
singleYAMLBytes = singleYAMLBytes[:readLen]
}
if err := yaml.Unmarshal(singleYAMLBytes, daemonAC); err != nil {
if erra := hsh.tearDownNamespace(namespaceName); erra != nil {
err = fmt.Errorf("%v; in addition: %v", err, erra)
}
return fmt.Errorf("failed to Unmarshal YAML of DaemonSet: %v", err)
}
daemonAC.Namespace = &namespaceName
if ds, err := hsh.k8sObj.KubernetesClient.AppsV1().DaemonSets(namespaceName).Apply(hsh.k8sObj.Context, daemonAC, metav1.ApplyOptions{
FieldManager: "kubescape",
}); err != nil {
if erra := hsh.tearDownNamespace(namespaceName); erra != nil {
err = fmt.Errorf("%v; in addition: %v", err, erra)
}
return fmt.Errorf("failed to apply YAML of DaemonSet: %v", err)
} else {
hsh.HostSensorPort = ds.Spec.Template.Spec.Containers[0].Ports[0].ContainerPort
hsh.DaemonSet = ds
}
return nil
}
func (hsh *HostSensorHandler) checkPodForEachNode() error {
deadline := time.Now().Add(time.Second * 100)
for {
nodesList, err := hsh.k8sObj.KubernetesClient.CoreV1().Nodes().List(hsh.k8sObj.Context, metav1.ListOptions{})
if err != nil {
return fmt.Errorf("in checkPodsForEveryNode, failed to get nodes list: %v", nodesList)
}
hsh.podListLock.RLock()
podsNum := len(hsh.HostSensorPodNames)
hsh.podListLock.RUnlock()
if len(nodesList.Items) == podsNum {
break
}
if time.Now().After(deadline) {
return fmt.Errorf("host-sensor pods number (%d) differ than nodes number (%d) after deadline exceded", podsNum, len(nodesList.Items))
}
time.Sleep(100 * time.Millisecond)
}
return nil
}
// initiating routine to keep pod list updated
func (hsh *HostSensorHandler) populatePodNamesToNodeNames() {
go func() {
watchRes, err := hsh.k8sObj.KubernetesClient.CoreV1().Pods(hsh.DaemonSet.Namespace).Watch(hsh.k8sObj.Context, metav1.ListOptions{
Watch: true,
LabelSelector: fmt.Sprintf("name=%s", hsh.DaemonSet.Spec.Template.Labels["name"]),
})
if err != nil {
fmt.Printf("failed to watch over daemonset pods: %v\n", err)
return
}
for eve := range watchRes.ResultChan() {
pod, ok := eve.Object.(*corev1.Pod)
if !ok {
continue
}
go hsh.updatePodInListAtomic(eve.Type, pod)
}
}()
}
func (hsh *HostSensorHandler) updatePodInListAtomic(eventType watch.EventType, podObj *corev1.Pod) {
hsh.podListLock.Lock()
defer hsh.podListLock.Unlock()
switch eventType {
case watch.Added, watch.Modified:
if podObj.Status.Phase == corev1.PodRunning {
hsh.HostSensorPodNames[podObj.ObjectMeta.Name] = podObj.Spec.NodeName
} else {
delete(hsh.HostSensorPodNames, podObj.ObjectMeta.Name)
}
default:
delete(hsh.HostSensorPodNames, podObj.ObjectMeta.Name)
}
}
func (hsh *HostSensorHandler) tearDownNamespace(namespace string) error {
if err := hsh.k8sObj.KubernetesClient.CoreV1().Namespaces().Delete(hsh.k8sObj.Context, namespace, metav1.DeleteOptions{GracePeriodSeconds: &hsh.gracePeriod}); err != nil {
return fmt.Errorf("failed to delete host-sensor namespace: %v", err)
}
return nil
}
func (hsh *HostSensorHandler) TearDown() error {
namespace := hsh.GetNamespace()
if err := hsh.k8sObj.KubernetesClient.AppsV1().DaemonSets(hsh.GetNamespace()).Delete(hsh.k8sObj.Context, hsh.DaemonSet.Name, metav1.DeleteOptions{GracePeriodSeconds: &hsh.gracePeriod}); err != nil {
return fmt.Errorf("failed to delete host-sensor daemonset: %v", err)
}
if err := hsh.tearDownNamespace(namespace); err != nil {
return fmt.Errorf("failed to delete host-sensor namespace: %v", err)
}
// TODO: wait for termination? may take up to 120 seconds!!!
return nil
}
func (hsh *HostSensorHandler) GetNamespace() string {
if hsh.DaemonSet == nil {
return ""
}
return hsh.DaemonSet.Namespace
}
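
A minimal usage sketch tying the lifecycle together (not part of the package; it assumes a configured *k8sinterface.KubernetesApi): construct the handler, deploy the DaemonSet, and always tear it down.

package hostsensorutils

import (
	"fmt"

	"github.com/armosec/k8s-interface/k8sinterface"
	"github.com/armosec/opa-utils/objectsenvelopes/hostsensor"
)

// collectHostData is an illustrative helper showing the intended call order.
func collectHostData(k8sObj *k8sinterface.KubernetesApi) ([]hostsensor.HostSensorDataEnvelope, error) {
	hsh, err := NewHostSensorHandler(k8sObj)
	if err != nil {
		return nil, fmt.Errorf("failed to set up host sensor: %v", err)
	}
	if err := hsh.Init(); err != nil {
		return nil, err
	}
	// TearDown deletes the DaemonSet and its namespace; the sketch ignores
	// its error for brevity.
	defer hsh.TearDown()
	return hsh.CollectResources()
}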

View File

@@ -0,0 +1,198 @@
package hostsensorutils
import (
"encoding/json"
"fmt"
"sync"
"github.com/armosec/k8s-interface/k8sinterface"
"github.com/armosec/kubescape/cautils"
"github.com/armosec/opa-utils/objectsenvelopes/hostsensor"
"sigs.k8s.io/yaml"
)
func (hsh *HostSensorHandler) getPodList() (res map[string]string, err error) {
hsh.podListLock.RLock()
jsonBytes, err := json.Marshal(hsh.HostSensorPodNames)
hsh.podListLock.RUnlock()
if err != nil {
return res, fmt.Errorf("failed to marshal pod list: %v", err)
}
err = json.Unmarshal(jsonBytes, &res)
if err != nil {
return res, fmt.Errorf("failed to unmarshal pod list: %v", err)
}
return res, nil
}
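
getPodList deep-copies the map through a JSON round-trip while holding the read lock. An equivalent plain copy (a sketch, not the committed code) avoids the marshal/unmarshal cost:

// getPodListDirect is an illustrative equivalent of getPodList that copies
// the map directly under the read lock.
func (hsh *HostSensorHandler) getPodListDirect() map[string]string {
	hsh.podListLock.RLock()
	defer hsh.podListLock.RUnlock()
	res := make(map[string]string, len(hsh.HostSensorPodNames))
	for podName, nodeName := range hsh.HostSensorPodNames {
		res[podName] = nodeName
	}
	return res
}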
func (hsh *HostSensorHandler) HTTPGetToPod(podName, path string) ([]byte, error) {
// send the request to the port
restProxy := hsh.k8sObj.KubernetesClient.CoreV1().Pods(hsh.DaemonSet.Namespace).ProxyGet("http", podName, fmt.Sprintf("%d", hsh.HostSensorPort), path, map[string]string{})
return restProxy.DoRaw(hsh.k8sObj.Context)
}
func (hsh *HostSensorHandler) ForwardToPod(podName, path string) ([]byte, error) {
// NOT IN USE:
// ---
// spawn port forwarding
// req := hsh.k8sObj.KubernetesClient.CoreV1().RESTClient().Post()
// req = req.Name(podName)
// req = req.Namespace(hsh.DaemonSet.Namespace)
// req = req.Resource("pods")
// req = req.SubResource("portforward")
// ----
// https://github.com/gianarb/kube-port-forward
// fullPath := fmt.Sprintf("/api/v1/namespaces/%s/pods/%s/portforward",
// hsh.DaemonSet.Namespace, podName)
// transport, upgrader, err := spdy.RoundTripperFor(hsh.k8sObj.KubernetesClient.config)
// if err != nil {
// return nil, err
// }
// hostIP := strings.TrimLeft(req.RestConfig.Host, "htps:/")
// dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, http.MethodPost, &url.URL{Scheme: "http", Path: path, Host: hostIP})
return nil, nil
}
// sendAllPodsHTTPGETRequest fills the raw byte response in the envelope and the node name, but not the GroupVersionKind,
// so the caller is responsible for converting the raw data to structured data and adding the GroupVersionKind details
func (hsh *HostSensorHandler) sendAllPodsHTTPGETRequest(path, requestKind string) ([]hostsensor.HostSensorDataEnvelope, error) {
podList, err := hsh.getPodList()
if err != nil {
return nil, fmt.Errorf("failed to sendAllPodsHTTPGETRequest: %v", err)
}
res := make([]hostsensor.HostSensorDataEnvelope, 0, len(podList))
resLock := sync.Mutex{}
wg := sync.WaitGroup{}
wg.Add(len(podList))
for podName := range podList {
go func(podName, path string) {
defer wg.Done()
resBytes, err := hsh.HTTPGetToPod(podName, path)
if err != nil {
fmt.Printf("In sendAllPodsHTTPGETRequest failed to get data '%s' from pod '%s': %v", path, podName, err)
} else {
resLock.Lock()
defer resLock.Unlock()
hostSensorDataEnvelope := hostsensor.HostSensorDataEnvelope{}
hostSensorDataEnvelope.SetApiVersion(k8sinterface.JoinGroupVersion(hostsensor.GroupHostSensor, hostsensor.Version))
hostSensorDataEnvelope.SetKind(requestKind)
hostSensorDataEnvelope.SetName(podList[podName])
hostSensorDataEnvelope.SetData(resBytes)
res = append(res, hostSensorDataEnvelope)
}
}(podName, path)
}
wg.Wait()
return res, nil
}
// return list of OpenPortsList
func (hsh *HostSensorHandler) GetOpenPortsList() ([]hostsensor.HostSensorDataEnvelope, error) {
// loop over pods and port-forward it to each of them
return hsh.sendAllPodsHTTPGETRequest("/openedPorts", "OpenPortsList")
}
// return list of LinuxSecurityHardeningStatus
func (hsh *HostSensorHandler) GetLinuxSecurityHardeningStatus() ([]hostsensor.HostSensorDataEnvelope, error) {
// loop over pods and port-forward it to each of them
return hsh.sendAllPodsHTTPGETRequest("/linuxSecurityHardening", "LinuxSecurityHardeningStatus")
}
// return list of KubeletCommandLine
func (hsh *HostSensorHandler) GetKubeletCommandLine() ([]hostsensor.HostSensorDataEnvelope, error) {
// loop over pods and port-forward it to each of them
resps, err := hsh.sendAllPodsHTTPGETRequest("/kubeletCommandLine", "KubeletCommandLine")
if err != nil {
return resps, err
}
for resp := range resps {
var data = make(map[string]interface{})
data["fullCommand"] = string(resps[resp].Data)
resBytesMarshal, err := json.Marshal(data)
// TODO catch error
if err == nil {
resps[resp].Data = json.RawMessage(resBytesMarshal)
}
}
return resps, nil
}
// return list of KernelVersion
func (hsh *HostSensorHandler) GetKernelVersion() ([]hostsensor.HostSensorDataEnvelope, error) {
// loop over pods and port-forward it to each of them
return hsh.sendAllPodsHTTPGETRequest("/kernelVersion", "KernelVersion")
}
// return list of OsReleaseFile
func (hsh *HostSensorHandler) GetOsReleaseFile() ([]hostsensor.HostSensorDataEnvelope, error) {
// loop over pods and port-forward it to each of them
return hsh.sendAllPodsHTTPGETRequest("/osRelease", "OsReleaseFile")
}
// return list of KubeletConfiguration
func (hsh *HostSensorHandler) GetKubeletConfigurations() ([]hostsensor.HostSensorDataEnvelope, error) {
// loop over pods and port-forward it to each of them
res, err := hsh.sendAllPodsHTTPGETRequest("/kubeletConfigurations", "KubeletConfiguration")
for resIdx := range res {
jsonBytes, err := yaml.YAMLToJSON(res[resIdx].Data)
if err != nil {
fmt.Printf("In GetKubeletConfigurations failed to YAMLToJSON: %v;\n%v", err, res[resIdx])
continue
}
res[resIdx].SetData(jsonBytes)
}
return res, err
}
func (hsh *HostSensorHandler) CollectResources() ([]hostsensor.HostSensorDataEnvelope, error) {
res := make([]hostsensor.HostSensorDataEnvelope, 0)
if hsh.DaemonSet == nil {
return res, nil
}
cautils.ProgressTextDisplay("Accessing host sensor")
cautils.StartSpinner()
defer cautils.StopSpinner()
kcData, err := hsh.GetKubeletConfigurations()
if err != nil {
return kcData, err
}
res = append(res, kcData...)
//
kcData, err = hsh.GetKubeletCommandLine()
if err != nil {
return kcData, err
}
res = append(res, kcData...)
//
kcData, err = hsh.GetOsReleaseFile()
if err != nil {
return kcData, err
}
res = append(res, kcData...)
//
kcData, err = hsh.GetKernelVersion()
if err != nil {
return kcData, err
}
res = append(res, kcData...)
//
kcData, err = hsh.GetLinuxSecurityHardeningStatus()
if err != nil {
return kcData, err
}
res = append(res, kcData...)
//
kcData, err = hsh.GetOpenPortsList()
if err != nil {
return kcData, err
}
res = append(res, kcData...)
// finish
cautils.SuccessTextDisplay("Read host information from host sensor")
return res, nil
}
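
The six collection steps differ only in the getter being called. A table-driven variant (an illustrative sketch, not the committed code) keeps the same return-on-first-error behavior:

// collectAll is an illustrative rewrite of the body of CollectResources.
func (hsh *HostSensorHandler) collectAll() ([]hostsensor.HostSensorDataEnvelope, error) {
	collectors := []func() ([]hostsensor.HostSensorDataEnvelope, error){
		hsh.GetKubeletConfigurations,
		hsh.GetKubeletCommandLine,
		hsh.GetOsReleaseFile,
		hsh.GetKernelVersion,
		hsh.GetLinuxSecurityHardeningStatus,
		hsh.GetOpenPortsList,
	}
	res := make([]hostsensor.HostSensorDataEnvelope, 0)
	for _, collect := range collectors {
		data, err := collect()
		if err != nil {
			return data, err
		}
		res = append(res, data...)
	}
	return res, nil
}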

View File

@@ -0,0 +1,10 @@
package hostsensorutils
import "github.com/armosec/opa-utils/objectsenvelopes/hostsensor"
type IHostSensor interface {
Init() error
TearDown() error
CollectResources() ([]hostsensor.HostSensorDataEnvelope, error)
GetNamespace() string
}
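
Both the real handler and the mock below satisfy this interface. Hypothetical compile-time assertions (not in the source) make that explicit and fail the build if a method is dropped:

var (
	_ IHostSensor = (*HostSensorHandler)(nil)
	_ IHostSensor = (*HostSensorHandlerMock)(nil)
)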

View File

@@ -0,0 +1,24 @@
package hostsensorutils
import (
"github.com/armosec/opa-utils/objectsenvelopes/hostsensor"
)
type HostSensorHandlerMock struct {
}
func (hshm *HostSensorHandlerMock) Init() error {
return nil
}
func (hshm *HostSensorHandlerMock) TearDown() error {
return nil
}
func (hshm *HostSensorHandlerMock) CollectResources() ([]hostsensor.HostSensorDataEnvelope, error) {
return []hostsensor.HostSensorDataEnvelope{}, nil
}
func (hshm *HostSensorHandlerMock) GetNamespace() string {
return ""
}

View File

@@ -0,0 +1,65 @@
package hostsensorutils
const hostSensorYAML = `apiVersion: v1
kind: Namespace
metadata:
  labels:
    app: host-sensor
    kubernetes.io/metadata.name: armo-kube-host-sensor
    tier: armo-kube-host-sensor-control-plane
  name: armo-kube-host-sensor
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: host-sensor
  namespace: armo-kube-host-sensor
  labels:
    k8s-app: armo-kube-host-sensor
spec:
  selector:
    matchLabels:
      name: host-sensor
  template:
    metadata:
      labels:
        name: host-sensor
    spec:
      tolerations:
        # this toleration is to have the daemonset runnable on master nodes
        # remove it if your masters can't run pods
        - key: node-role.kubernetes.io/master
          operator: Exists
          effect: NoSchedule
      containers:
        - name: host-sensor
          image: quay.io/armosec/kube-host-sensor:latest
          securityContext:
            privileged: true
            readOnlyRootFilesystem: true
            procMount: Unmasked
          ports:
            - name: http
              hostPort: 7888
              containerPort: 7888
          resources:
            limits:
              cpu: 1m
              memory: 200Mi
            requests:
              cpu: 1m
              memory: 200Mi
          volumeMounts:
            - mountPath: /host_fs
              name: host-filesystem
      terminationGracePeriodSeconds: 120
      dnsPolicy: ClusterFirstWithHostNet
      automountServiceAccountToken: false
      volumes:
        - hostPath:
            path: /
            type: Directory
          name: host-filesystem
      hostNetwork: true
      hostPID: true
      hostIPC: true`
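
applyYAML, earlier in this package, consumes this manifest one document per Read through yaml.NewDocumentDecoder with a fixed 4096-byte buffer. A standalone sketch of that splitting step (illustrative, not the committed code):

package hostsensorutils

import (
	"io"
	"strings"

	"k8s.io/apimachinery/pkg/util/yaml"
)

// splitDocuments returns each YAML document of a multi-document manifest;
// every Read on the document decoder yields the next document.
func splitDocuments(manifest string) ([][]byte, error) {
	dec := yaml.NewDocumentDecoder(io.NopCloser(strings.NewReader(manifest)))
	defer dec.Close()
	var docs [][]byte
	for {
		buf := make([]byte, 4096)
		n, err := dec.Read(buf)
		if n > 0 {
			docs = append(docs, buf[:n])
		}
		if err == io.EOF {
			return docs, nil
		}
		if err != nil {
			// io.ErrShortBuffer here would mean a document larger than the buffer.
			return docs, err
		}
	}
}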

View File

@@ -53,6 +53,6 @@ echo -e "\033[0m"
$KUBESCAPE_EXEC version
echo
echo -e "\033[35mUsage: $ $KUBESCAPE_EXEC scan framework nsa"
echo -e "\033[35mUsage: $ $KUBESCAPE_EXEC scan --submit"
echo -e "\033[0m"

main.go
View File

@@ -1,23 +1,9 @@
package main
import (
"fmt"
"os"
"github.com/armosec/kubescape/clihandler/cmd"
)
func main() {
CheckLatestVersion()
cmd.Execute()
}
func CheckLatestVersion() {
latest, err := cmd.GetLatestVersion()
if err != nil {
fmt.Fprintf(os.Stderr, "error: %v\n", err)
} else if latest != cmd.BuildNumber {
fmt.Println("Warning: You are not updated to the latest release: " + latest)
}
}

mocks/loadmocks.go (new file)

File diff suppressed because one or more lines are too long

View File

@@ -6,52 +6,50 @@ import (
"time"
"github.com/armosec/kubescape/cautils"
"github.com/armosec/opa-utils/exceptions"
"github.com/armosec/opa-utils/objectsenvelopes"
"github.com/armosec/opa-utils/reporthandling"
"github.com/armosec/opa-utils/score"
"github.com/armosec/opa-utils/reporthandling/apis"
"github.com/armosec/opa-utils/reporthandling/results/v1/resourcesresults"
"github.com/open-policy-agent/opa/storage"
"github.com/golang/glog"
"github.com/armosec/k8s-interface/k8sinterface"
"github.com/armosec/k8s-interface/workloadinterface"
"github.com/armosec/opa-utils/resources"
"github.com/golang/glog"
"github.com/open-policy-agent/opa/ast"
"github.com/open-policy-agent/opa/rego"
"github.com/open-policy-agent/opa/storage"
uuid "github.com/satori/go.uuid"
)
const ScoreConfigPath = "/resources/config"
var RegoK8sCredentials storage.Store
type OPAProcessorHandler struct {
processedPolicy *chan *cautils.OPASessionObj
reportResults *chan *cautils.OPASessionObj
// componentConfig cautils.ComponentConfig
processedPolicy *chan *cautils.OPASessionObj
reportResults *chan *cautils.OPASessionObj
regoDependenciesData *resources.RegoDependenciesData
}
type OPAProcessor struct {
*cautils.OPASessionObj
regoDependenciesData *resources.RegoDependenciesData
}
func NewOPAProcessor(sessionObj *cautils.OPASessionObj) *OPAProcessor {
func NewOPAProcessor(sessionObj *cautils.OPASessionObj, regoDependenciesData *resources.RegoDependenciesData) *OPAProcessor {
if regoDependenciesData != nil && sessionObj != nil {
regoDependenciesData.PostureControlInputs = sessionObj.RegoInputData.PostureControlInputs
}
return &OPAProcessor{
OPASessionObj: sessionObj,
OPASessionObj: sessionObj,
regoDependenciesData: regoDependenciesData,
}
}
func NewOPAProcessorHandler(processedPolicy, reportResults *chan *cautils.OPASessionObj) *OPAProcessorHandler {
regoDependenciesData := resources.NewRegoDependenciesData(k8sinterface.GetK8sConfig(), cautils.ClusterName)
store, err := regoDependenciesData.TOStorage()
if err != nil {
panic(err)
}
RegoK8sCredentials = store
return &OPAProcessorHandler{
processedPolicy: processedPolicy,
reportResults: reportResults,
processedPolicy: processedPolicy,
reportResults: reportResults,
regoDependenciesData: resources.NewRegoDependenciesData(k8sinterface.GetK8sConfig(), cautils.ClusterName),
}
}
@@ -59,166 +57,212 @@ func (opaHandler *OPAProcessorHandler) ProcessRulesListenner() {
for {
opaSessionObj := <-*opaHandler.processedPolicy
opap := NewOPAProcessor(opaSessionObj)
opap := NewOPAProcessor(opaSessionObj, opaHandler.regoDependenciesData)
policies := ConvertFrameworksToPolicies(opap.Frameworks, cautils.BuildNumber)
ConvertFrameworksToSummaryDetails(&opap.Report.SummaryDetails, opap.Frameworks, policies)
// process
if err := opap.Process(); err != nil {
fmt.Println(err)
if err := opap.Process(policies); err != nil {
// fmt.Println(err)
}
// edit results
opap.updateResults()
// update score
// opap.updateScore()
// report
*opaHandler.reportResults <- opaSessionObj
}
}
func (opap *OPAProcessor) Process() error {
func (opap *OPAProcessor) Process(policies *cautils.Policies) error {
// glog.Infof(fmt.Sprintf("Starting 'Process'. reportID: %s", opap.PostureReport.ReportID))
cautils.ProgressTextDisplay(fmt.Sprintf("Scanning cluster %s", cautils.ClusterName))
cautils.StartSpinner()
frameworkReports := []reporthandling.FrameworkReport{}
var errs error
for i := range opap.Frameworks {
frameworkReport, err := opap.processFramework(&opap.Frameworks[i])
for _, control := range policies.Controls {
resourcesAssociatedControl, err := opap.processControl(&control)
if err != nil {
errs = fmt.Errorf("%v\n%s", errs, err.Error())
appendError(&errs, err)
}
// update resources with latest results
if len(resourcesAssociatedControl) != 0 {
for resourceID, controlResult := range resourcesAssociatedControl {
if _, ok := opap.ResourcesResult[resourceID]; !ok {
opap.ResourcesResult[resourceID] = resourcesresults.Result{ResourceID: resourceID}
}
t := opap.ResourcesResult[resourceID]
t.AssociatedControls = append(t.AssociatedControls, controlResult)
opap.ResourcesResult[resourceID] = t
}
}
frameworkReports = append(frameworkReports, *frameworkReport)
}
opap.PostureReport.FrameworkReports = frameworkReports
opap.PostureReport.ReportID = uuid.NewV4().String()
opap.PostureReport.ReportGenerationTime = time.Now().UTC()
// glog.Infof(fmt.Sprintf("Done 'Process'. reportID: %s", opap.PostureReport.ReportID))
opap.Report.ReportGenerationTime = time.Now().UTC()
cautils.StopSpinner()
cautils.SuccessTextDisplay(fmt.Sprintf("Done scanning cluster %s", cautils.ClusterName))
return errs
}
func (opap *OPAProcessor) processFramework(framework *reporthandling.Framework) (*reporthandling.FrameworkReport, error) {
var errs error
frameworkReport := reporthandling.FrameworkReport{}
frameworkReport.Name = framework.Name
controlReports := []reporthandling.ControlReport{}
for i := range framework.Controls {
controlReport, err := opap.processControl(&framework.Controls[i])
if err != nil {
errs = fmt.Errorf("%v\n%s", errs, err.Error())
}
if controlReport != nil {
controlReports = append(controlReports, *controlReport)
}
func appendError(errs *error, err error) {
if err == nil {
return
}
if *errs == nil {
*errs = err
} else {
*errs = fmt.Errorf("%v\n%s", *errs, err.Error())
}
frameworkReport.ControlReports = controlReports
return &frameworkReport, errs
}
func (opap *OPAProcessor) processControl(control *reporthandling.Control) (*reporthandling.ControlReport, error) {
func (opap *OPAProcessor) processControl(control *reporthandling.Control) (map[string]resourcesresults.ResourceAssociatedControl, error) {
var errs error
controlReport := reporthandling.ControlReport{}
controlReport.PortalBase = control.PortalBase
controlReport.ControlID = control.ControlID
controlReport.Control_ID = control.Control_ID // TODO: delete when 'id' is deprecated
resourcesAssociatedControl := make(map[string]resourcesresults.ResourceAssociatedControl)
controlReport.Name = control.Name
controlReport.Description = control.Description
controlReport.Remediation = control.Remediation
ruleReports := []reporthandling.RuleReport{}
// ruleResults := make(map[string][]resourcesresults.ResourceAssociatedRule)
for i := range control.Rules {
ruleReport, err := opap.processRule(&control.Rules[i])
resourceAssociatedRule, err := opap.processRule(&control.Rules[i])
if err != nil {
errs = fmt.Errorf("%v\n%s", errs, err.Error())
appendError(&errs, err)
continue
}
if ruleReport != nil {
ruleReports = append(ruleReports, *ruleReport)
// append failed rules to controls
if len(resourceAssociatedRule) != 0 {
for resourceID, ruleResponse := range resourceAssociatedRule {
controlResult := resourcesresults.ResourceAssociatedControl{}
controlResult.SetID(control.ControlID)
controlResult.SetName(control.Name)
if _, ok := resourcesAssociatedControl[resourceID]; ok {
controlResult.ResourceAssociatedRules = resourcesAssociatedControl[resourceID].ResourceAssociatedRules
}
if ruleResponse != nil {
controlResult.ResourceAssociatedRules = append(controlResult.ResourceAssociatedRules, *ruleResponse)
}
resourcesAssociatedControl[resourceID] = controlResult
}
}
}
if len(ruleReports) == 0 {
return nil, nil
}
controlReport.RuleReports = ruleReports
return &controlReport, errs
return resourcesAssociatedControl, errs
}
func (opap *OPAProcessor) processRule(rule *reporthandling.PolicyRule) (*reporthandling.RuleReport, error) {
if ruleWithArmoOpaDependency(rule.Attributes) {
return nil, nil
}
k8sObjects := getKubernetesObjects(opap.K8SResources, rule.Match)
ruleReport, err := opap.runOPAOnSingleRule(rule, k8sObjects)
func (opap *OPAProcessor) processRule(rule *reporthandling.PolicyRule) (map[string]*resourcesresults.ResourceAssociatedRule, error) {
postureControlInputs := opap.regoDependenciesData.GetFilteredPostureControlInputs(rule.ConfigInputs) // get store
inputResources, err := reporthandling.RegoResourcesAggregator(rule, getAllSupportedObjects(opap.K8SResources, opap.AllResources, rule))
if err != nil {
ruleReport.RuleStatus.Status = "failure"
ruleReport.RuleStatus.Message = err.Error()
return nil, fmt.Errorf("error getting aggregated k8sObjects: %s", err.Error())
}
if len(inputResources) == 0 {
return nil, nil // no resources found for testing
}
inputRawResources := workloadinterface.ListMetaToMap(inputResources)
resources := map[string]*resourcesresults.ResourceAssociatedRule{}
// the failed resources are a subset of the enumeratedData, so we store the enumeratedData as if it were the input data
enumeratedData, err := opap.enumerateData(rule, inputRawResources)
if err != nil {
return nil, err
}
inputResources = objectsenvelopes.ListMapToMeta(enumeratedData)
for i := range inputResources {
resources[inputResources[i].GetID()] = &resourcesresults.ResourceAssociatedRule{
Name: rule.Name,
ControlConfigurations: postureControlInputs,
Status: apis.StatusPassed,
}
opap.AllResources[inputResources[i].GetID()] = inputResources[i]
}
ruleResponses, err := opap.runOPAOnSingleRule(rule, inputRawResources, ruleData, postureControlInputs)
if err != nil {
// TODO - Handle error
glog.Error(err)
} else {
ruleReport.RuleStatus.Status = "success"
// ruleResponse to ruleResult
for i := range ruleResponses {
failedResources := objectsenvelopes.ListMapToMeta(ruleResponses[i].GetFailedResources())
for j := range failedResources {
ruleResult := &resourcesresults.ResourceAssociatedRule{}
if r, k := resources[failedResources[j].GetID()]; k {
ruleResult = r
}
ruleResult.Status = apis.StatusFailed
for j := range ruleResponses[i].FailedPaths {
ruleResult.Paths = append(ruleResult.Paths, resourcesresults.Path{FailedPath: ruleResponses[i].FailedPaths[j]})
}
resources[failedResources[j].GetID()] = ruleResult
}
}
}
ruleReport.ListInputResources = k8sObjects
return &ruleReport, err
return resources, err
}
func (opap *OPAProcessor) runOPAOnSingleRule(rule *reporthandling.PolicyRule, k8sObjects []map[string]interface{}) (reporthandling.RuleReport, error) {
func (opap *OPAProcessor) runOPAOnSingleRule(rule *reporthandling.PolicyRule, k8sObjects []map[string]interface{}, getRuleData func(*reporthandling.PolicyRule) string, postureControlInputs map[string][]string) ([]reporthandling.RuleResponse, error) {
switch rule.RuleLanguage {
case reporthandling.RegoLanguage, reporthandling.RegoLanguage2:
return opap.runRegoOnK8s(rule, k8sObjects)
return opap.runRegoOnK8s(rule, k8sObjects, getRuleData, postureControlInputs)
default:
return reporthandling.RuleReport{}, fmt.Errorf("rule: '%s', language '%v' not supported", rule.Name, rule.RuleLanguage)
return nil, fmt.Errorf("rule: '%s', language '%v' not supported", rule.Name, rule.RuleLanguage)
}
}
func (opap *OPAProcessor) runRegoOnK8s(rule *reporthandling.PolicyRule, k8sObjects []map[string]interface{}) (reporthandling.RuleReport, error) {
func (opap *OPAProcessor) runRegoOnK8s(rule *reporthandling.PolicyRule, k8sObjects []map[string]interface{}, getRuleData func(*reporthandling.PolicyRule) string, postureControlInputs map[string][]string) ([]reporthandling.RuleResponse, error) {
var errs error
ruleReport := reporthandling.RuleReport{
Name: rule.Name,
}
// compile modules
modules, err := getRuleDependencies()
if err != nil {
return ruleReport, fmt.Errorf("rule: '%s', %s", rule.Name, err.Error())
return nil, fmt.Errorf("rule: '%s', %s", rule.Name, err.Error())
}
modules[rule.Name] = rule.Rule
modules[rule.Name] = getRuleData(rule)
compiled, err := ast.CompileModules(modules)
if err != nil {
return ruleReport, fmt.Errorf("in 'runRegoOnSingleRule', failed to compile rule, name: %s, reason: %s", rule.Name, err.Error())
return nil, fmt.Errorf("in 'runRegoOnSingleRule', failed to compile rule, name: %s, reason: %s", rule.Name, err.Error())
}
store, err := resources.TOStorage(postureControlInputs)
if err != nil {
return nil, err
}
// Eval
results, err := opap.regoEval(k8sObjects, compiled)
results, err := opap.regoEval(k8sObjects, compiled, &store)
if err != nil {
errs = fmt.Errorf("rule: '%s', %s", rule.Name, err.Error())
}
if results != nil {
ruleReport.RuleResponses = append(ruleReport.RuleResponses, results...)
}
return ruleReport, errs
return results, errs
}
func (opap *OPAProcessor) regoEval(inputObj []map[string]interface{}, compiledRego *ast.Compiler) ([]reporthandling.RuleResponse, error) {
func (opap *OPAProcessor) regoEval(inputObj []map[string]interface{}, compiledRego *ast.Compiler, store *storage.Store) ([]reporthandling.RuleResponse, error) {
// opap.regoDependenciesData.PostureControlInputs
rego := rego.New(
rego.Query("data.armo_builtins"), // get package name from rule
rego.Compiler(compiledRego),
rego.Input(inputObj),
rego.Store(RegoK8sCredentials),
rego.Store(*store),
)
// Run evaluation
resultSet, err := rego.Eval(context.Background())
if err != nil {
return nil, fmt.Errorf("in 'regoEval', failed to evaluate rule, reason: %s", err.Error())
return nil, err
}
results, err := reporthandling.ParseRegoResult(&resultSet)
// results, err := ParseRegoResult(&resultSet)
if err != nil {
return results, err
}
@@ -226,38 +270,20 @@ func (opap *OPAProcessor) regoEval(inputObj []map[string]interface{}, compiledRe
return results, nil
}
func (opap *OPAProcessor) updateScore() {
func (opap *OPAProcessor) enumerateData(rule *reporthandling.PolicyRule, k8sObjects []map[string]interface{}) ([]map[string]interface{}, error) {
if !k8sinterface.ConnectedToCluster {
return
if ruleEnumeratorData(rule) == "" {
return k8sObjects, nil
}
postureControlInputs := opap.regoDependenciesData.GetFilteredPostureControlInputs(rule.ConfigInputs)
// calculate score
s := score.NewScore(k8sinterface.NewKubernetesApi(), ScoreConfigPath)
s.Calculate(opap.PostureReport.FrameworkReports)
}
func (opap *OPAProcessor) updateResults() {
for f := range opap.PostureReport.FrameworkReports {
// set exceptions
exceptions.SetFrameworkExceptions(&opap.PostureReport.FrameworkReports[f], opap.Exceptions, cautils.ClusterName)
// set counters
reporthandling.SetUniqueResourcesCounter(&opap.PostureReport.FrameworkReports[f])
// set default score
reporthandling.SetDefaultScore(&opap.PostureReport.FrameworkReports[f])
// edit results - remove data
// TODO - move function to pkg - use RemoveData
for c := range opap.PostureReport.FrameworkReports[f].ControlReports {
for r, ruleReport := range opap.PostureReport.FrameworkReports[f].ControlReports[c].RuleReports {
// editing the responses -> removing duplications, clearing secret data, etc.
opap.PostureReport.FrameworkReports[f].ControlReports[c].RuleReports[r].RuleResponses = editRuleResponses(ruleReport.RuleResponses)
}
}
ruleResponse, err := opap.runOPAOnSingleRule(rule, k8sObjects, ruleEnumeratorData, postureControlInputs)
if err != nil {
return nil, err
}
failedResources := []map[string]interface{}{}
for _, ruleResponse := range ruleResponse {
failedResources = append(failedResources, ruleResponse.GetFailedResources()...)
}
return failedResources, nil
}

View File

@@ -3,10 +3,16 @@ package opaprocessor
import (
"testing"
"github.com/armosec/armoapi-go/armotypes"
"github.com/armosec/kubescape/cautils"
"github.com/armosec/kubescape/mocks"
"github.com/armosec/opa-utils/objectsenvelopes"
"github.com/armosec/opa-utils/reporthandling"
"github.com/armosec/opa-utils/resources"
"github.com/stretchr/testify/assert"
"github.com/armosec/k8s-interface/k8sinterface"
"github.com/armosec/k8s-interface/workloadinterface"
// _ "k8s.io/client-go/plugin/pkg/client/auth"
)
@@ -17,15 +23,23 @@ func TestProcess(t *testing.T) {
// set k8s
k8sResources := make(cautils.K8SResources)
k8sResources["/v1/pods"] = k8sinterface.ConvertUnstructuredSliceToMap(k8sinterface.V1KubeSystemNamespaceMock().Items)
allResources := make(map[string]workloadinterface.IMetadata)
imetaObj := objectsenvelopes.ListMapToMeta(k8sinterface.ConvertUnstructuredSliceToMap(k8sinterface.V1KubeSystemNamespaceMock().Items))
for i := range imetaObj {
allResources[imetaObj[i].GetID()] = imetaObj[i]
}
k8sResources["/v1/pods"] = workloadinterface.ListMetaIDs(imetaObj)
// set opaSessionObj
opaSessionObj := cautils.NewOPASessionObjMock()
opaSessionObj.Frameworks = []reporthandling.Framework{*reporthandling.MockFrameworkA()}
opaSessionObj.K8SResources = &k8sResources
policies := ConvertFrameworksToPolicies(opaSessionObj.Frameworks, "")
opap := NewOPAProcessor(opaSessionObj)
opap.Process()
opaSessionObj.K8SResources = &k8sResources
opaSessionObj.AllResources = allResources
opap := NewOPAProcessor(opaSessionObj, resources.NewRegoDependenciesDataMock())
opap.Process(policies)
opap.updateResults()
for _, f := range opap.PostureReport.FrameworkReports {
for _, c := range f.ControlReports {
@@ -41,3 +55,85 @@ func TestProcess(t *testing.T) {
}
}
func TestProcessResourcesResult(t *testing.T) {
// set k8s
k8sResources := make(cautils.K8SResources)
deployment := mocks.MockDevelopmentWithHostpath()
frameworks := []reporthandling.Framework{*mocks.MockFramework_0006_0013()}
k8sResources["apps/v1/deployments"] = workloadinterface.ListMetaIDs([]workloadinterface.IMetadata{deployment})
// set opaSessionObj
opaSessionObj := cautils.NewOPASessionObjMock()
opaSessionObj.Frameworks = frameworks
policies := ConvertFrameworksToPolicies(opaSessionObj.Frameworks, "")
ConvertFrameworksToSummaryDetails(&opaSessionObj.Report.SummaryDetails, opaSessionObj.Frameworks, policies)
opaSessionObj.K8SResources = &k8sResources
opaSessionObj.AllResources[deployment.GetID()] = deployment
opap := NewOPAProcessor(opaSessionObj, resources.NewRegoDependenciesDataMock())
opap.Process(policies)
assert.Equal(t, 1, len(opaSessionObj.ResourcesResult))
res := opaSessionObj.ResourcesResult[deployment.GetID()]
assert.Equal(t, 2, len(res.ListControlsIDs(nil).All()))
assert.Equal(t, 1, len(res.ListControlsIDs(nil).Failed()))
assert.Equal(t, 1, len(res.ListControlsIDs(nil).Passed()))
assert.True(t, res.GetStatus(nil).IsFailed())
assert.False(t, res.GetStatus(nil).IsPassed())
assert.Equal(t, deployment.GetID(), opaSessionObj.ResourcesResult[deployment.GetID()].ResourceID)
opap.updateResults()
res = opaSessionObj.ResourcesResult[deployment.GetID()]
assert.Equal(t, 2, len(res.ListControlsIDs(nil).All()))
assert.Equal(t, 1, len(res.ListControlsIDs(nil).Failed()))
assert.Equal(t, 1, len(res.ListControlsIDs(nil).Passed()))
assert.True(t, res.GetStatus(nil).IsFailed())
assert.False(t, res.GetStatus(nil).IsPassed())
assert.Equal(t, deployment.GetID(), opaSessionObj.ResourcesResult[deployment.GetID()].ResourceID)
// test resource counters
summaryDetails := opaSessionObj.Report.SummaryDetails
assert.Equal(t, 1, summaryDetails.NumberOfResources().All())
assert.Equal(t, 1, summaryDetails.NumberOfResources().Failed())
assert.Equal(t, 0, summaryDetails.NumberOfResources().Excluded())
assert.Equal(t, 0, summaryDetails.NumberOfResources().Passed())
// test resource listing
assert.Equal(t, 1, len(summaryDetails.ListResourcesIDs().All()))
assert.Equal(t, 1, len(summaryDetails.ListResourcesIDs().Failed()))
assert.Equal(t, 0, len(summaryDetails.ListResourcesIDs().Excluded()))
assert.Equal(t, 0, len(summaryDetails.ListResourcesIDs().Passed()))
// test control listing
assert.Equal(t, len(res.ListControlsIDs(nil).All()), len(summaryDetails.ListControls().All()))
assert.Equal(t, len(res.ListControlsIDs(nil).Passed()), len(summaryDetails.ListControls().Passed()))
assert.Equal(t, len(res.ListControlsIDs(nil).Failed()), len(summaryDetails.ListControls().Failed()))
assert.Equal(t, len(res.ListControlsIDs(nil).Excluded()), len(summaryDetails.ListControls().Excluded()))
assert.True(t, summaryDetails.GetStatus().IsFailed())
opaSessionObj.Exceptions = []armotypes.PostureExceptionPolicy{*mocks.MockExceptionAllKinds(&armotypes.PosturePolicy{FrameworkName: frameworks[0].Name})}
opap.updateResults()
res = opaSessionObj.ResourcesResult[deployment.GetID()]
assert.Equal(t, 2, len(res.ListControlsIDs(nil).All()))
assert.Equal(t, 1, len(res.ListControlsIDs(nil).Excluded()))
assert.Equal(t, 1, len(res.ListControlsIDs(nil).Passed()))
assert.True(t, res.GetStatus(nil).IsExcluded())
assert.False(t, res.GetStatus(nil).IsPassed())
assert.False(t, res.GetStatus(nil).IsFailed())
assert.Equal(t, deployment.GetID(), opaSessionObj.ResourcesResult[deployment.GetID()].ResourceID)
// test resource listing
summaryDetails = opaSessionObj.Report.SummaryDetails
assert.Equal(t, 1, len(summaryDetails.ListResourcesIDs().All()))
assert.Equal(t, 1, len(summaryDetails.ListResourcesIDs().Failed()))
assert.Equal(t, 0, len(summaryDetails.ListResourcesIDs().Excluded()))
assert.Equal(t, 0, len(summaryDetails.ListResourcesIDs().Passed()))
}

View File

@@ -11,11 +11,66 @@ import (
resources "github.com/armosec/opa-utils/resources"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
func getKubernetesObjects(k8sResources *cautils.K8SResources, match []reporthandling.RuleMatchObjects) []map[string]interface{} {
k8sObjects := []map[string]interface{}{}
// updateResults updates the results objects and report objects. This is a critical function - DO NOT CHANGE
/*
- remove sensitive data
- apply exceptions
- summarize results
*/
func (opap *OPAProcessor) updateResults() {
// remove data from all objects
for i := range opap.AllResources {
removeData(opap.AllResources[i])
}
// set exceptions
for i := range opap.ResourcesResult {
t := opap.ResourcesResult[i]
// first set exceptions
if resource, ok := opap.AllResources[i]; ok {
t.SetExceptions(resource, opap.Exceptions, cautils.ClusterName)
}
// summarize the resources
opap.Report.AppendResourceResultToSummary(&t)
// Add score
// TODO
// save changes
opap.ResourcesResult[i] = t
}
// set result summary
opap.Report.SummaryDetails.InitResourcesSummary()
// for f := range opap.PostureReport.FrameworkReports {
// // set exceptions
// exceptions.SetFrameworkExceptions(&opap.PostureReport.FrameworkReports[f], opap.Exceptions, cautils.ClusterName)
// // set counters
// reporthandling.SetUniqueResourcesCounter(&opap.PostureReport.FrameworkReports[f])
// // set default score
// // reporthandling.SetDefaultScore(&opap.PostureReport.FrameworkReports[f])
// }
}
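For orientation, a minimal sketch of the intended call order, mirroring the tests above (NewRegoDependenciesDataMock is the test helper used there; a real run would pass real rego dependencies):

    policies := ConvertFrameworksToPolicies(opaSessionObj.Frameworks, "")
    opap := NewOPAProcessor(opaSessionObj, resources.NewRegoDependenciesDataMock())
    opap.Process(policies)   // evaluate the rules against the collected resources
    opap.updateResults()     // scrub sensitive data, apply exceptions, summarize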
func getAllSupportedObjects(k8sResources *cautils.K8SResources, allResources map[string]workloadinterface.IMetadata, rule *reporthandling.PolicyRule) []workloadinterface.IMetadata {
k8sObjects := []workloadinterface.IMetadata{}
k8sObjects = append(k8sObjects, getKubernetesObjects(k8sResources, allResources, rule.Match)...)
k8sObjects = append(k8sObjects, getKubernetesObjects(k8sResources, allResources, rule.DynamicMatch)...)
return k8sObjects
}
func getKubernetesObjects(k8sResources *cautils.K8SResources, allResources map[string]workloadinterface.IMetadata, match []reporthandling.RuleMatchObjects) []workloadinterface.IMetadata {
k8sObjects := []workloadinterface.IMetadata{}
for m := range match {
for _, groups := range match[m].APIGroups {
for _, version := range match[m].APIVersions {
@@ -24,15 +79,11 @@ func getKubernetesObjects(k8sResources *cautils.K8SResources, match []reporthand
for _, groupResource := range groupResources {
if k8sObj, ok := (*k8sResources)[groupResource]; ok {
if k8sObj == nil {
continue
// glog.Errorf("Resource '%s' is nil, probably failed to pull the resource", groupResource)
} else if v, k := k8sObj.([]map[string]interface{}); k {
k8sObjects = append(k8sObjects, v...)
} else if v, k := k8sObj.(map[string]interface{}); k {
k8sObjects = append(k8sObjects, v)
} else if v, k := k8sObj.([]unstructured.Unstructured); k {
k8sObjects = append(k8sObjects, k8sinterface.ConvertUnstructuredSliceToMap(v)...)
} else {
glog.Errorf("In 'getKubernetesObjects' resource '%s' unknown type", groupResource)
}
for i := range k8sObj {
k8sObjects = append(k8sObjects, allResources[k8sObj[i]])
}
}
}
@@ -41,9 +92,33 @@ func getKubernetesObjects(k8sResources *cautils.K8SResources, match []reporthand
}
}
return k8sObjects
return filterOutChildResources(k8sObjects, match)
}
// filterOutChildResources filters out child resources whose parent (owner) kind is already in the match list
func filterOutChildResources(objects []workloadinterface.IMetadata, match []reporthandling.RuleMatchObjects) []workloadinterface.IMetadata {
response := []workloadinterface.IMetadata{}
owners := []string{}
for m := range match {
for i := range match[m].Resources {
owners = append(owners, match[m].Resources[i])
}
}
for i := range objects {
if !k8sinterface.IsTypeWorkload(objects[i].GetObject()) {
response = append(response, objects[i])
continue
}
w := workloadinterface.NewWorkloadObj(objects[i].GetObject())
ownerReferences, err := w.GetOwnerReferences()
if err != nil || len(ownerReferences) == 0 {
response = append(response, w)
} else if !k8sinterface.IsStringInSlice(owners, ownerReferences[0].Kind) {
response = append(response, w)
}
}
return response
}
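A rough in-package sketch of the filter (the object shape is assumed from the calls above, not taken from the repository):

    pod := objectsenvelopes.NewObject(map[string]interface{}{
        "apiVersion": "v1",
        "kind":       "Pod",
        "metadata": map[string]interface{}{
            "name": "web-abc12",
            "ownerReferences": []interface{}{
                map[string]interface{}{"kind": "ReplicaSet", "name": "web"},
            },
        },
    })
    match := []reporthandling.RuleMatchObjects{{Resources: []string{"ReplicaSet", "Pod"}}}
    filtered := filterOutChildResources([]workloadinterface.IMetadata{pod}, match)
    // len(filtered) == 0 - the Pod's first owner kind ("ReplicaSet") is already matched by the rule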
func getRuleDependencies() (map[string]string, error) {
modules := resources.LoadRegoModules()
if len(modules) == 0 {
@@ -52,28 +127,6 @@ func getRuleDependencies() (map[string]string, error) {
return modules, nil
}
// editRuleResponses edits the responses - removing duplications, clearing secret data, etc.
func editRuleResponses(ruleResponses []reporthandling.RuleResponse) []reporthandling.RuleResponse {
lenRuleResponses := len(ruleResponses)
for i := 0; i < lenRuleResponses; i++ {
for j := range ruleResponses[i].AlertObject.K8SApiObjects {
w := workloadinterface.NewWorkloadObj(ruleResponses[i].AlertObject.K8SApiObjects[j])
if w == nil {
continue
}
cleanRuleResponses(w)
ruleResponses[i].AlertObject.K8SApiObjects[j] = w.GetWorkload()
}
}
return ruleResponses
}
func cleanRuleResponses(workload k8sinterface.IWorkload) {
if workload.GetKind() == "Secret" {
workload.RemoveSecretData()
}
}
func ruleWithArmoOpaDependency(annotations map[string]interface{}) bool {
if annotations == nil {
return false
@@ -83,3 +136,88 @@ func ruleWithArmoOpaDependency(annotations map[string]interface{}) bool {
}
return false
}
// isRuleKubescapeVersionCompatible checks that the kubescape version is within the rule's supported range.
// In a local build (BuildNumber == ""):
// returns true only if the rule does not have the "until" attribute
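// NOTE: the checks below use Go string comparison, i.e. lexicographic order;
// this assumes build numbers share the same "vX.Y.ZZZ" shape and width.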
func isRuleKubescapeVersionCompatible(rule *reporthandling.PolicyRule) bool {
if from, ok := rule.Attributes["useFromKubescapeVersion"]; ok {
if cautils.BuildNumber != "" {
if from.(string) > cautils.BuildNumber {
return false
}
}
}
if until, ok := rule.Attributes["useUntilKubescapeVersion"]; ok {
if cautils.BuildNumber != "" {
if until.(string) <= cautils.BuildNumber {
return false
}
} else {
return false
}
}
return true
}
func removeData(obj workloadinterface.IMetadata) {
if !k8sinterface.IsTypeWorkload(obj.GetObject()) {
return // remove data only from kubernetes objects
}
workload := workloadinterface.NewWorkloadObj(obj.GetObject())
switch workload.GetKind() {
case "Secret":
removeSecretData(workload)
case "ConfigMap":
removeConfigMapData(workload)
default:
removePodData(workload)
}
}
func removeConfigMapData(workload workloadinterface.IWorkload) {
workload.RemoveAnnotation("kubectl.kubernetes.io/last-applied-configuration")
workloadinterface.RemoveFromMap(workload.GetObject(), "metadata", "managedFields")
overrideSensitiveData(workload)
}
func overrideSensitiveData(workload workloadinterface.IWorkload) {
dataInterface, ok := workloadinterface.InspectMap(workload.GetObject(), "data")
if ok {
data, ok := dataInterface.(map[string]interface{})
if ok {
for key := range data {
workloadinterface.SetInMap(workload.GetObject(), []string{"data"}, key, "XXXXXX")
}
}
}
}
func removeSecretData(workload workloadinterface.IWorkload) {
workload.RemoveAnnotation("kubectl.kubernetes.io/last-applied-configuration")
workloadinterface.RemoveFromMap(workload.GetObject(), "metadata", "managedFields")
overrideSensitiveData(workload)
}
func removePodData(workload workloadinterface.IWorkload) {
workload.RemoveAnnotation("kubectl.kubernetes.io/last-applied-configuration")
workloadinterface.RemoveFromMap(workload.GetObject(), "metadata", "managedFields")
containers, err := workload.GetContainers()
if err != nil || len(containers) == 0 {
return
}
for i := range containers {
for j := range containers[i].Env {
containers[i].Env[j].Value = "XXXXXX"
}
}
workloadinterface.SetInMap(workload.GetObject(), workloadinterface.PodSpec(workload.GetKind()), "containers", containers)
}
func ruleData(rule *reporthandling.PolicyRule) string {
return rule.Rule
}
func ruleEnumeratorData(rule *reporthandling.PolicyRule) string {
return rule.ResourceEnumerator
}

View File

@@ -2,7 +2,71 @@ package opaprocessor
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/armosec/armoapi-go/armotypes"
"github.com/armosec/k8s-interface/workloadinterface"
"github.com/armosec/kubescape/cautils"
"github.com/armosec/opa-utils/reporthandling"
)
func TestGetKubernetesObjects(t *testing.T) {
}
var rule_v1_0_131 = &reporthandling.PolicyRule{PortalBase: armotypes.PortalBase{
Attributes: map[string]interface{}{"useUntilKubescapeVersion": "v1.0.132"}}}
var rule_v1_0_132 = &reporthandling.PolicyRule{PortalBase: armotypes.PortalBase{
Attributes: map[string]interface{}{"useFromKubescapeVersion": "v1.0.132", "useUntilKubescapeVersion": "v1.0.133"}}}
var rule_v1_0_133 = &reporthandling.PolicyRule{PortalBase: armotypes.PortalBase{
Attributes: map[string]interface{}{"useFromKubescapeVersion": "v1.0.133", "useUntilKubescapeVersion": "v1.0.134"}}}
var rule_v1_0_134 = &reporthandling.PolicyRule{PortalBase: armotypes.PortalBase{
Attributes: map[string]interface{}{"useFromKubescapeVersion": "v1.0.134"}}}
func TestIsRuleKubescapeVersionCompatible(t *testing.T) {
// local build - no build number
// should use only rules that don't have the "until" attribute
cautils.BuildNumber = ""
if isRuleKubescapeVersionCompatible(rule_v1_0_131) {
t.Error("error in isRuleKubescapeVersionCompatible")
}
if isRuleKubescapeVersionCompatible(rule_v1_0_132) {
t.Error("error in isRuleKubescapeVersionCompatible")
}
if isRuleKubescapeVersionCompatible(rule_v1_0_133) {
t.Error("error in isRuleKubescapeVersionCompatible")
}
if !isRuleKubescapeVersionCompatible(rule_v1_0_134) {
t.Error("error in isRuleKubescapeVersionCompatible")
}
// should use only rules whose version range includes this build number
cautils.BuildNumber = "v1.0.133"
if isRuleKubescapeVersionCompatible(rule_v1_0_131) {
t.Error("error in isRuleKubescapeVersionCompatible")
}
if isRuleKubescapeVersionCompatible(rule_v1_0_132) {
t.Error("error in isRuleKubescapeVersionCompatible")
}
if !isRuleKubescapeVersionCompatible(rule_v1_0_133) {
t.Error("error in isRuleKubescapeVersionCompatible")
}
if isRuleKubescapeVersionCompatible(rule_v1_0_134) {
t.Error("error in isRuleKubescapeVersionCompatible")
}
}
func TestRemoveData(t *testing.T) {
w := `{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"name":"demoservice-server"},"spec":{"replicas":1,"selector":{"matchLabels":{"app":"demoservice-server"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app":"demoservice-server"}},"spec":{"containers":[{"env":[{"name":"SERVER_PORT","value":"8089"},{"name":"SLEEP_DURATION","value":"1"},{"name":"DEMO_FOLDERS","value":"/app"},{"name":"ARMO_TEST_NAME","value":"auto_attach_deployment"},{"name":"CAA_ENABLE_CRASH_REPORTER","value":"1"}],"image":"quay.io/armosec/demoservice:v25","imagePullPolicy":"IfNotPresent","name":"demoservice","ports":[{"containerPort":8089,"protocol":"TCP"}],"resources":{},"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File"}],"dnsPolicy":"ClusterFirst","restartPolicy":"Always","schedulerName":"default-scheduler","securityContext":{},"terminationGracePeriodSeconds":30}}}}`
obj, _ := workloadinterface.NewWorkload([]byte(w))
removeData(obj)
workload := workloadinterface.NewWorkloadObj(obj.GetObject())
c, _ := workload.GetContainers()
for i := range c {
for _, e := range c[i].Env {
assert.Equal(t, "XXXXXX", e.Value)
}
}
}

opaprocessor/utils.go (new file, 45 lines)
View File

@@ -0,0 +1,45 @@
package opaprocessor
import (
"github.com/armosec/kubescape/cautils"
"github.com/armosec/opa-utils/reporthandling"
"github.com/armosec/opa-utils/reporthandling/results/v1/reportsummary"
)
// ConvertFrameworksToPolicies converts a list of frameworks to a policies object
func ConvertFrameworksToPolicies(frameworks []reporthandling.Framework, version string) *cautils.Policies {
policies := cautils.NewPolicies()
policies.Set(frameworks, version)
return policies
}
// ConvertFrameworksToSummaryDetails initializes the summary details for the report object
func ConvertFrameworksToSummaryDetails(summaryDetails *reportsummary.SummaryDetails, frameworks []reporthandling.Framework, policies *cautils.Policies) {
if summaryDetails.Controls == nil {
summaryDetails.Controls = make(map[string]reportsummary.ControlSummary)
}
for i := range frameworks {
controls := map[string]reportsummary.ControlSummary{}
for j := range frameworks[i].Controls {
id := frameworks[i].Controls[j].ControlID
if _, ok := policies.Controls[id]; ok {
c := reportsummary.ControlSummary{
Name: frameworks[i].Controls[j].Name,
ControlID: id,
ScoreFactor: frameworks[i].Controls[j].BaseScore,
Description: frameworks[i].Controls[j].Description,
Remediation: frameworks[i].Controls[j].Remediation,
}
controls[frameworks[i].Controls[j].ControlID] = c
summaryDetails.Controls[id] = c
}
}
if cautils.StringInSlice(policies.Frameworks, frameworks[i].Name) != cautils.ValueNotFound {
summaryDetails.Frameworks = append(summaryDetails.Frameworks, reportsummary.FrameworkSummary{
Name: frameworks[i].Name,
Controls: controls,
})
}
}
}

View File

@@ -0,0 +1,30 @@
package opaprocessor
import (
"testing"
"github.com/armosec/kubescape/mocks"
"github.com/stretchr/testify/assert"
"github.com/armosec/opa-utils/reporthandling"
"github.com/armosec/opa-utils/reporthandling/results/v1/reportsummary"
)
func TestConvertFrameworksToPolicies(t *testing.T) {
fw0 := mocks.MockFramework_0006_0013()
fw1 := mocks.MockFramework_0044()
policies := ConvertFrameworksToPolicies([]reporthandling.Framework{*fw0, *fw1}, "")
assert.Equal(t, 2, len(policies.Frameworks))
assert.Equal(t, 3, len(policies.Controls))
}
func TestInitializeSummaryDetails(t *testing.T) {
fw0 := mocks.MockFramework_0006_0013()
fw1 := mocks.MockFramework_0044()
summaryDetails := reportsummary.SummaryDetails{}
frameworks := []reporthandling.Framework{*fw0, *fw1}
policies := ConvertFrameworksToPolicies([]reporthandling.Framework{*fw0, *fw1}, "")
ConvertFrameworksToSummaryDetails(&summaryDetails, frameworks, policies)
assert.Equal(t, 2, len(summaryDetails.Frameworks))
assert.Equal(t, 3, len(summaryDetails.Controls))
}

View File

@@ -4,30 +4,23 @@ import (
"fmt"
"github.com/armosec/kubescape/cautils"
"github.com/armosec/kubescape/resourcehandler"
"github.com/armosec/opa-utils/reporthandling"
"github.com/armosec/armoapi-go/armotypes"
"github.com/armosec/k8s-interface/k8sinterface"
)
var supportedFrameworks = []reporthandling.PolicyIdentifier{
{Kind: "Framework", Name: "nsa"},
{Kind: "Framework", Name: "mitre"},
}
// PolicyHandler handles the scan policies and resources
type PolicyHandler struct {
k8s *k8sinterface.KubernetesApi
resourceHandler resourcehandler.IResourceHandler
// we listen on this chan in opaprocessor/processorhandler.go, in the ProcessRulesListenner func
processPolicy *chan *cautils.OPASessionObj
getters *cautils.Getters
}
// NewPolicyHandler creates a PolicyHandler object
func NewPolicyHandler(processPolicy *chan *cautils.OPASessionObj, k8s *k8sinterface.KubernetesApi) *PolicyHandler {
func NewPolicyHandler(processPolicy *chan *cautils.OPASessionObj, resourceHandler resourcehandler.IResourceHandler) *PolicyHandler {
return &PolicyHandler{
k8s: k8s,
processPolicy: processPolicy,
resourceHandler: resourceHandler,
processPolicy: processPolicy,
}
}
@@ -38,60 +31,33 @@ func (policyHandler *PolicyHandler) HandleNotificationRequest(notification *repo
policyHandler.getters = &scanInfo.Getters
// get policies
frameworks, exceptions, err := policyHandler.getPolicies(notification)
if err != nil {
if err := policyHandler.getPolicies(notification, opaSessionObj); err != nil {
return err
}
if len(frameworks) == 0 {
return fmt.Errorf("empty list of frameworks")
}
opaSessionObj.Frameworks = frameworks
opaSessionObj.Exceptions = exceptions
k8sResources, err := policyHandler.getResources(notification, opaSessionObj, scanInfo)
err := policyHandler.getResources(notification, opaSessionObj, scanInfo)
if err != nil {
return err
}
if k8sResources == nil || len(*k8sResources) == 0 {
if opaSessionObj.K8SResources == nil || len(*opaSessionObj.K8SResources) == 0 {
return fmt.Errorf("empty list of resources")
}
opaSessionObj.K8SResources = k8sResources
// update channel
*policyHandler.processPolicy <- opaSessionObj
return nil
}
func (policyHandler *PolicyHandler) getPolicies(notification *reporthandling.PolicyNotification) ([]reporthandling.Framework, []armotypes.PostureExceptionPolicy, error) {
func (policyHandler *PolicyHandler) getResources(notification *reporthandling.PolicyNotification, opaSessionObj *cautils.OPASessionObj, scanInfo *cautils.ScanInfo) error {
cautils.ProgressTextDisplay("Downloading/Loading policy definitions")
frameworks, exceptions, err := policyHandler.GetPoliciesFromBackend(notification)
opaSessionObj.PostureReport.ClusterAPIServerInfo = policyHandler.resourceHandler.GetClusterAPIServerInfo()
resourcesMap, allResources, err := policyHandler.resourceHandler.GetResources(opaSessionObj.Frameworks, &notification.Designators)
if err != nil {
return frameworks, exceptions, err
return err
}
if len(frameworks) == 0 {
err := fmt.Errorf("could not download any policies, please check previous logs")
return frameworks, exceptions, err
}
//if notification.Rules
cautils.SuccessTextDisplay("Downloaded/Loaded policy")
opaSessionObj.K8SResources = resourcesMap
opaSessionObj.AllResources = allResources
return frameworks, exceptions, nil
}
func (policyHandler *PolicyHandler) getResources(notification *reporthandling.PolicyNotification, opaSessionObj *cautils.OPASessionObj, scanInfo *cautils.ScanInfo) (*cautils.K8SResources, error) {
var k8sResources *cautils.K8SResources
var err error
if k8sinterface.ConnectedToCluster { // TODO - use interface
if opaSessionObj.PostureReport.ClusterAPIServerInfo, err = policyHandler.k8s.KubernetesClient.Discovery().ServerVersion(); err != nil {
cautils.ErrorDisplay(fmt.Sprintf("Failed to discover API server inforamtion: %v", err))
}
k8sResources, err = policyHandler.getK8sResources(opaSessionObj.Frameworks, &notification.Designators, scanInfo.ExcludedNamespaces)
} else {
k8sResources, err = policyHandler.loadResources(opaSessionObj.Frameworks, scanInfo)
}
return k8sResources, err
return nil
}

View File

@@ -4,88 +4,78 @@ import (
"fmt"
"strings"
"github.com/armosec/armoapi-go/armotypes"
"github.com/armosec/kubescape/cautils"
"github.com/armosec/opa-utils/reporthandling"
)
func (policyHandler *PolicyHandler) GetPoliciesFromBackend(notification *reporthandling.PolicyNotification) ([]reporthandling.Framework, []armotypes.PostureExceptionPolicy, error) {
var errs error
func (policyHandler *PolicyHandler) getPolicies(notification *reporthandling.PolicyNotification, policiesAndResources *cautils.OPASessionObj) error {
cautils.ProgressTextDisplay("Downloading/Loading policy definitions")
frameworks, err := policyHandler.getScanPolicies(notification)
if err != nil {
return err
}
if len(frameworks) == 0 {
return fmt.Errorf("failed to download policies: '%s'. Make sure the policy exist and you spelled it correctly. For more information, please feel free to contact ARMO team", strings.Join(policyIdentifierToSlice(notification.Rules), ","))
}
policiesAndResources.Frameworks = frameworks
// get exceptions
exceptionPolicies, err := policyHandler.getters.ExceptionsGetter.GetExceptions(cautils.ClusterName)
if err == nil {
policiesAndResources.Exceptions = exceptionPolicies
}
// get account configuration
controlsInputs, err := policyHandler.getters.ControlsInputsGetter.GetControlsInputs(cautils.ClusterName)
if err == nil {
policiesAndResources.RegoInputData.PostureControlInputs = controlsInputs
}
cautils.SuccessTextDisplay("Downloaded/Loaded policy")
return nil
}
func (policyHandler *PolicyHandler) getScanPolicies(notification *reporthandling.PolicyNotification) ([]reporthandling.Framework, error) {
frameworks := []reporthandling.Framework{}
exceptionPolicies := []armotypes.PostureExceptionPolicy{}
// Get - cacli opa get
for _, rule := range notification.Rules {
switch rule.Kind {
case reporthandling.KindFramework:
receivedFramework, recExceptionPolicies, err := policyHandler.getFrameworkPolicies(rule.Name)
switch getScanKind(notification) {
case reporthandling.KindFramework: // Download frameworks
for _, rule := range notification.Rules {
receivedFramework, err := policyHandler.getters.PolicyGetter.GetFramework(rule.Name)
if err != nil {
return frameworks, policyDownloadError(err)
}
if receivedFramework != nil {
frameworks = append(frameworks, *receivedFramework)
if recExceptionPolicies != nil {
exceptionPolicies = append(exceptionPolicies, recExceptionPolicies...)
}
} else if err != nil {
if strings.Contains(err.Error(), "unsupported protocol scheme") {
err = fmt.Errorf("failed to download from GitHub release, try running with `--use-default` flag")
}
return nil, nil, fmt.Errorf("kind: %v, name: %s, error: %s", rule.Kind, rule.Name, err.Error())
}
case reporthandling.KindControl:
receivedControls, recExceptionPolicies, err := policyHandler.getControl(rule.Name)
if receivedControls != nil {
f := reporthandling.Framework{
Controls: receivedControls,
}
frameworks = append(frameworks, f)
if recExceptionPolicies != nil {
exceptionPolicies = append(exceptionPolicies, recExceptionPolicies...)
}
} else if err != nil {
if strings.Contains(err.Error(), "unsupported protocol scheme") {
err = fmt.Errorf("failed to download from GitHub release, try running with `--use-default` flag")
}
return nil, nil, fmt.Errorf("error: %s", err.Error())
}
// TODO: add case for control from file
default:
err := fmt.Errorf("missing rule kind, expected: %s", reporthandling.KindFramework)
errs = fmt.Errorf("%s", err.Error())
}
case reporthandling.KindControl: // Download controls
f := reporthandling.Framework{}
var receivedControl *reporthandling.Control
var err error
for _, rule := range notification.Rules {
receivedControl, err = policyHandler.getters.PolicyGetter.GetControl(rule.Name)
if err != nil {
return frameworks, policyDownloadError(err)
}
if receivedControl != nil {
f.Controls = append(f.Controls, *receivedControl)
}
}
frameworks = append(frameworks, f)
// TODO: add case for control from file
default:
return frameworks, fmt.Errorf("unknown policy kind")
}
return frameworks, exceptionPolicies, errs
return frameworks, nil
}
func (policyHandler *PolicyHandler) getFrameworkPolicies(policyName string) (*reporthandling.Framework, []armotypes.PostureExceptionPolicy, error) {
receivedFramework, err := policyHandler.getters.PolicyGetter.GetFramework(policyName)
if err != nil {
return nil, nil, err
}
receivedException, err := policyHandler.getters.ExceptionsGetter.GetExceptions(cautils.CustomerGUID, cautils.ClusterName)
if err != nil {
return receivedFramework, nil, err
}
return receivedFramework, receivedException, nil
}
// Get control by name
func (policyHandler *PolicyHandler) getControl(policyName string) ([]reporthandling.Control, []armotypes.PostureExceptionPolicy, error) {
controls := []reporthandling.Control{}
control, err := policyHandler.getters.PolicyGetter.GetControl(policyName)
if err != nil {
return nil, nil, err
}
if control == nil {
return nil, nil, fmt.Errorf("control not found")
}
controls = append(controls, *control)
exceptions, err := policyHandler.getters.ExceptionsGetter.GetExceptions(cautils.CustomerGUID, cautils.ClusterName)
if err != nil {
return controls, nil, err
}
return controls, exceptions, nil
}
func policyIdentifierToSlice(rules []reporthandling.PolicyIdentifier) []string {
s := []string{}
for i := range rules {
s = append(s, fmt.Sprintf("%s: %s", rules[i].Kind, rules[i].Name))
}
return s
}
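For example (the output shape follows the fmt.Sprintf above):

    ids := policyIdentifierToSlice([]reporthandling.PolicyIdentifier{{Kind: "Framework", Name: "nsa"}})
    // ids == []string{"Framework: nsa"} - joined with "," in the download error message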

View File

@@ -0,0 +1,21 @@
package policyhandler
import (
"fmt"
"strings"
"github.com/armosec/opa-utils/reporthandling"
)
func getScanKind(notification *reporthandling.PolicyNotification) reporthandling.NotificationPolicyKind {
if len(notification.Rules) > 0 {
return notification.Rules[0].Kind
}
return "unknown"
}
func policyDownloadError(err error) error {
if strings.Contains(err.Error(), "unsupported protocol scheme") {
err = fmt.Errorf("failed to download from GitHub release, try running with `--use-default` flag")
}
return err
}
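A rough illustration of the rewrite (the wrapped error text is hypothetical; assumes errors and fmt are imported):

    err := errors.New(`Get "http://...": unsupported protocol scheme ""`)
    fmt.Println(policyDownloadError(err))
    // failed to download from GitHub release, try running with `--use-default` flag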

View File

@@ -1,107 +0,0 @@
package policyhandler
import (
"fmt"
"strings"
"github.com/armosec/kubescape/cautils"
"github.com/armosec/opa-utils/reporthandling"
"github.com/armosec/k8s-interface/k8sinterface"
"github.com/armosec/armoapi-go/armotypes"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
k8slabels "k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic"
)
const SelectAllResources = "*"
func (policyHandler *PolicyHandler) getK8sResources(frameworks []reporthandling.Framework, designator *armotypes.PortalDesignator, excludedNamespaces string) (*cautils.K8SResources, error) {
// get k8s resources
cautils.ProgressTextDisplay("Accessing Kubernetes objects")
// build resources map
k8sResourcesMap := setResourceMap(frameworks)
// get namespace and labels from designator (ignore cluster labels)
_, namespace, labels := armotypes.DigestPortalDesignator(designator)
// pull k8s resources
if err := policyHandler.pullResources(k8sResourcesMap, namespace, labels, excludedNamespaces); err != nil {
return k8sResourcesMap, err
}
cautils.SuccessTextDisplay("Accessed successfully to Kubernetes objects, lets start!!!")
return k8sResourcesMap, nil
}
func (policyHandler *PolicyHandler) pullResources(k8sResources *cautils.K8SResources, namespace string, labels map[string]string, excludedNamespaces string) error {
var errs error
for groupResource := range *k8sResources {
apiGroup, apiVersion, resource := k8sinterface.StringToResourceGroup(groupResource)
gvr := schema.GroupVersionResource{Group: apiGroup, Version: apiVersion, Resource: resource}
result, err := policyHandler.pullSingleResource(&gvr, namespace, labels, excludedNamespaces)
if err != nil {
// handle error
if errs == nil {
errs = err
} else {
errs = fmt.Errorf("%s\n%s", errs, err.Error())
}
} else {
// store result as []map[string]interface{}
(*k8sResources)[groupResource] = k8sinterface.ConvertUnstructuredSliceToMap(k8sinterface.FilterOutOwneredResources(result))
}
}
return errs
}
func (policyHandler *PolicyHandler) pullSingleResource(resource *schema.GroupVersionResource, namespace string, labels map[string]string, excludedNamespaces string) ([]unstructured.Unstructured, error) {
// set labels
listOptions := metav1.ListOptions{}
if excludedNamespaces != "" {
setFieldSelector(&listOptions, resource, excludedNamespaces)
}
if len(labels) > 0 {
set := k8slabels.Set(labels)
listOptions.LabelSelector = set.AsSelector().String()
}
// set dynamic object
var clientResource dynamic.ResourceInterface
if namespace != "" && k8sinterface.IsNamespaceScope(resource.Group, resource.Resource) {
clientResource = policyHandler.k8s.DynamicClient.Resource(*resource).Namespace(namespace)
} else {
clientResource = policyHandler.k8s.DynamicClient.Resource(*resource)
}
// list resources
result, err := clientResource.List(policyHandler.k8s.Context, listOptions)
if err != nil {
return nil, fmt.Errorf("failed to get resource: %v, namespace: %s, labelSelector: %v, reason: %s", resource, namespace, listOptions.LabelSelector, err.Error())
}
return result.Items, nil
}
func setFieldSelector(listOptions *metav1.ListOptions, resource *schema.GroupVersionResource, excludedNamespaces string) {
fieldSelector := "metadata."
if resource.Resource == "namespaces" {
fieldSelector += "name"
} else if k8sinterface.IsNamespaceScope(resource.Group, resource.Resource) {
fieldSelector += "namespace"
} else {
return
}
excludedNamespacesSlice := strings.Split(excludedNamespaces, ",")
for _, excludedNamespace := range excludedNamespacesSlice {
listOptions.FieldSelector += fmt.Sprintf("%s!=%s,", fieldSelector, excludedNamespace)
}
}

View File

@@ -0,0 +1,69 @@
package resourcehandler
import (
"fmt"
"strings"
"github.com/armosec/k8s-interface/k8sinterface"
"k8s.io/apimachinery/pkg/runtime/schema"
)
type IFieldSelector interface {
GetNamespacesSelectors(*schema.GroupVersionResource) []string
}
type EmptySelector struct {
}
func (es *EmptySelector) GetNamespacesSelectors(resource *schema.GroupVersionResource) []string {
return []string{""} //
}
type ExcludeSelector struct {
namespace string
}
func NewExcludeSelector(ns string) *ExcludeSelector {
return &ExcludeSelector{namespace: ns}
}
type IncludeSelector struct {
namespace string
}
func NewIncludeSelector(ns string) *IncludeSelector {
return &IncludeSelector{namespace: ns}
}
func (es *ExcludeSelector) GetNamespacesSelectors(resource *schema.GroupVersionResource) []string {
fieldSelectors := ""
for _, n := range strings.Split(es.namespace, ",") {
if n != "" {
fieldSelectors += getNamespacesSelector(resource, n, "!=") + ","
}
}
return []string{fieldSelectors}
}
func (is *IncludeSelector) GetNamespacesSelectors(resource *schema.GroupVersionResource) []string {
fieldSelectors := []string{}
for _, n := range strings.Split(is.namespace, ",") {
if n != "" {
fieldSelectors = append(fieldSelectors, getNamespacesSelector(resource, n, "=="))
}
}
return fieldSelectors
}
func getNamespacesSelector(resource *schema.GroupVersionResource, ns, operator string) string {
fieldSelector := "metadata."
if resource.Resource == "namespaces" {
fieldSelector += "name"
} else if k8sinterface.IsResourceInNamespaceScope(resource.Resource) {
fieldSelector += "namespace"
} else {
return ""
}
return fmt.Sprintf("%s%s%s", fieldSelector, operator, ns)
}

View File

@@ -0,0 +1,43 @@
package resourcehandler
import (
"testing"
"github.com/armosec/k8s-interface/k8sinterface"
"github.com/stretchr/testify/assert"
"k8s.io/apimachinery/pkg/runtime/schema"
)
func TestGetNamespacesSelector(t *testing.T) {
k8sinterface.InitializeMapResourcesMock()
assert.Equal(t, "metadata.namespace==default", getNamespacesSelector(&schema.GroupVersionResource{Version: "v1", Resource: "pods"}, "default", "=="))
assert.Equal(t, "", getNamespacesSelector(&schema.GroupVersionResource{Version: "v1", Resource: "nodes"}, "default", "=="))
}
func TestExcludedNamespacesSelectors(t *testing.T) {
k8sinterface.InitializeMapResourcesMock()
es := NewExcludeSelector("default,ingress")
selectors := es.GetNamespacesSelectors(&schema.GroupVersionResource{Resource: "pods"})
assert.Equal(t, 1, len(selectors))
assert.Equal(t, "metadata.namespace!=default,metadata.namespace!=ingress,", selectors[0])
selectors2 := es.GetNamespacesSelectors(&schema.GroupVersionResource{Resource: "namespaces"})
assert.Equal(t, 1, len(selectors2))
assert.Equal(t, "metadata.name!=default,metadata.name!=ingress,", selectors2[0])
}
func TestIncludeNamespacesSelectors(t *testing.T) {
k8sinterface.InitializeMapResourcesMock()
is := NewIncludeSelector("default,ingress")
selectors := is.GetNamespacesSelectors(&schema.GroupVersionResource{Resource: "pods"})
assert.Equal(t, 2, len(selectors))
assert.Equal(t, "metadata.namespace==default", selectors[0])
assert.Equal(t, "metadata.namespace==ingress", selectors[1])
selectors2 := is.GetNamespacesSelectors(&schema.GroupVersionResource{Resource: "namespaces"})
assert.Equal(t, 2, len(selectors2))
assert.Equal(t, "metadata.name==default", selectors2[0])
assert.Equal(t, "metadata.name==ingress", selectors2[1])
}

View File

@@ -1,4 +1,4 @@
package policyhandler
package resourcehandler
import (
"bytes"
@@ -8,10 +8,13 @@ import (
"path/filepath"
"strings"
"github.com/armosec/armoapi-go/armotypes"
"github.com/armosec/k8s-interface/workloadinterface"
"k8s.io/apimachinery/pkg/version"
"github.com/armosec/k8s-interface/k8sinterface"
"github.com/armosec/kubescape/cautils"
"github.com/armosec/opa-utils/objectsenvelopes"
"github.com/armosec/opa-utils/reporthandling"
"gopkg.in/yaml.v2"
@@ -29,50 +32,73 @@ const (
JSON_FILE_FORMAT FileFormat = "json"
)
func (policyHandler *PolicyHandler) loadResources(frameworks []reporthandling.Framework, scanInfo *cautils.ScanInfo) (*cautils.K8SResources, error) {
workloads := []k8sinterface.IWorkload{}
// FileResourceHandler handle resources from files and URLs
type FileResourceHandler struct {
inputPatterns []string
}
func NewFileResourceHandler(inputPatterns []string) *FileResourceHandler {
k8sinterface.InitializeMapResourcesMock() // initialize the resource map
return &FileResourceHandler{
inputPatterns: inputPatterns,
}
}
func (fileHandler *FileResourceHandler) GetResources(frameworks []reporthandling.Framework, designator *armotypes.PortalDesignator) (*cautils.K8SResources, map[string]workloadinterface.IMetadata, error) {
// build resources map
// map resources based on framework required resources: map["/group/version/kind"][]<k8s workloads ids>
k8sResources := setResourceMap(frameworks)
allResources := map[string]workloadinterface.IMetadata{}
workloads := []workloadinterface.IMetadata{}
// load resource from local file system
w, err := loadResourcesFromFiles(scanInfo.InputPatterns)
w, err := loadResourcesFromFiles(fileHandler.inputPatterns)
if err != nil {
return nil, err
return nil, allResources, err
}
if w != nil {
workloads = append(workloads, w...)
}
// load resources from url
w, err = loadResourcesFromUrl(scanInfo.InputPatterns)
w, err = loadResourcesFromUrl(fileHandler.inputPatterns)
if err != nil {
return nil, err
return nil, allResources, err
}
if w != nil {
workloads = append(workloads, w...)
}
if len(workloads) == 0 {
return nil, fmt.Errorf("empty list of workloads - no workloads found")
return nil, allResources, fmt.Errorf("empty list of workloads - no workloads found")
}
// map all resources: map["/group/version/kind"][]<k8s workloads>
allResources := mapResources(workloads)
// build resources map
// map resources based on framework required resources: map["/group/version/kind"][]<k8s workloads>
k8sResources := setResourceMap(frameworks)
mappedResources := mapResources(workloads)
// save only relevant resources
for i := range allResources {
for i := range mappedResources {
if _, ok := (*k8sResources)[i]; ok {
(*k8sResources)[i] = allResources[i]
ids := []string{}
for j := range mappedResources[i] {
ids = append(ids, mappedResources[i][j].GetID())
allResources[mappedResources[i][j].GetID()] = mappedResources[i][j]
}
(*k8sResources)[i] = ids
}
}
return k8sResources, nil
return k8sResources, allResources, nil
}
func loadResourcesFromFiles(inputPatterns []string) ([]k8sinterface.IWorkload, error) {
func (fileHandler *FileResourceHandler) GetClusterAPIServerInfo() *version.Info {
return nil
}
func loadResourcesFromFiles(inputPatterns []string) ([]workloadinterface.IMetadata, error) {
files, errs := listFiles(inputPatterns)
if len(errs) > 0 {
cautils.ErrorDisplay(fmt.Sprintf("%v", errs)) // TODO - print error
@@ -89,32 +115,36 @@ func loadResourcesFromFiles(inputPatterns []string) ([]k8sinterface.IWorkload, e
}
// build resources map
func mapResources(workloads []k8sinterface.IWorkload) map[string][]map[string]interface{} {
allResources := map[string][]map[string]interface{}{}
func mapResources(workloads []workloadinterface.IMetadata) map[string][]workloadinterface.IMetadata {
allResources := map[string][]workloadinterface.IMetadata{}
for i := range workloads {
groupVersionResource, err := k8sinterface.GetGroupVersionResource(workloads[i].GetKind())
if err != nil {
// TODO - print warning
continue
}
if groupVersionResource.Group != workloads[i].GetGroup() || groupVersionResource.Version != workloads[i].GetVersion() {
// TODO - print warning
continue
if k8sinterface.IsTypeWorkload(workloads[i].GetObject()) {
w := workloadinterface.NewWorkloadObj(workloads[i].GetObject())
if groupVersionResource.Group != w.GetGroup() || groupVersionResource.Version != w.GetVersion() {
// TODO - print warning
continue
}
}
resourceTriplets := k8sinterface.JoinResourceTriplets(groupVersionResource.Group, groupVersionResource.Version, groupVersionResource.Resource)
if r, ok := allResources[resourceTriplets]; ok {
r = append(r, workloads[i].GetWorkload())
allResources[resourceTriplets] = r
allResources[resourceTriplets] = append(r, workloads[i])
} else {
allResources[resourceTriplets] = []map[string]interface{}{workloads[i].GetWorkload()}
allResources[resourceTriplets] = []workloadinterface.IMetadata{workloads[i]}
}
}
return allResources
}
func loadFiles(filePaths []string) ([]k8sinterface.IWorkload, []error) {
workloads := []k8sinterface.IWorkload{}
func loadFiles(filePaths []string) ([]workloadinterface.IMetadata, []error) {
workloads := []workloadinterface.IMetadata{}
errs := []error{}
for i := range filePaths {
f, err := loadFile(filePaths[i])
@@ -134,7 +164,7 @@ func loadFiles(filePaths []string) ([]k8sinterface.IWorkload, []error) {
func loadFile(filePath string) ([]byte, error) {
return os.ReadFile(filePath)
}
func readFile(fileContent []byte, fileFormat FileFormat) ([]k8sinterface.IWorkload, []error) {
func readFile(fileContent []byte, fileFormat FileFormat) ([]workloadinterface.IMetadata, []error) {
switch fileFormat {
case YAML_FILE_FORMAT:
@@ -168,12 +198,12 @@ func listFiles(patterns []string) ([]string, []error) {
return files, errs
}
func readYamlFile(yamlFile []byte) ([]k8sinterface.IWorkload, []error) {
func readYamlFile(yamlFile []byte) ([]workloadinterface.IMetadata, []error) {
errs := []error{}
r := bytes.NewReader(yamlFile)
dec := yaml.NewDecoder(r)
yamlObjs := []k8sinterface.IWorkload{}
yamlObjs := []workloadinterface.IMetadata{}
var t interface{}
for dec.Decode(&t) == nil {
@@ -182,7 +212,13 @@ func readYamlFile(yamlFile []byte) ([]k8sinterface.IWorkload, []error) {
continue
}
if obj, ok := j.(map[string]interface{}); ok {
yamlObjs = append(yamlObjs, workloadinterface.NewWorkloadObj(obj))
if o := objectsenvelopes.NewObject(obj); o != nil {
if o.GetKind() == "List" {
yamlObjs = append(yamlObjs, handleListObject(o)...)
} else {
yamlObjs = append(yamlObjs, o)
}
}
} else {
errs = append(errs, fmt.Errorf("failed to convert yaml file to map[string]interface, file content: %v", j))
}
@@ -191,8 +227,8 @@ func readYamlFile(yamlFile []byte) ([]k8sinterface.IWorkload, []error) {
return yamlObjs, errs
}
func readJsonFile(jsonFile []byte) ([]k8sinterface.IWorkload, []error) {
workloads := []k8sinterface.IWorkload{}
func readJsonFile(jsonFile []byte) ([]workloadinterface.IMetadata, []error) {
workloads := []workloadinterface.IMetadata{}
var jsonObj interface{}
if err := json.Unmarshal(jsonFile, &jsonObj); err != nil {
return workloads, []error{err}
@@ -202,11 +238,13 @@ func readJsonFile(jsonFile []byte) ([]k8sinterface.IWorkload, []error) {
return workloads, nil
}
func convertJsonToWorkload(jsonObj interface{}, workloads *[]k8sinterface.IWorkload) {
func convertJsonToWorkload(jsonObj interface{}, workloads *[]workloadinterface.IMetadata) {
switch x := jsonObj.(type) {
case map[string]interface{}:
(*workloads) = append(*workloads, workloadinterface.NewWorkloadObj(x))
if o := objectsenvelopes.NewObject(x); o != nil {
(*workloads) = append(*workloads, o)
}
case []interface{}:
for i := range x {
convertJsonToWorkload(x[i], workloads)
@@ -269,3 +307,20 @@ func getFileFormat(filePath string) FileFormat {
return FileFormat(filePath)
}
}
// handleListObject handles a List manifest by unwrapping its items into individual objects
func handleListObject(obj workloadinterface.IMetadata) []workloadinterface.IMetadata {
yamlObjs := []workloadinterface.IMetadata{}
if i, ok := workloadinterface.InspectMap(obj.GetObject(), "items"); ok && i != nil {
if items, ok := i.([]interface{}); ok && items != nil {
for item := range items {
if m, ok := items[item].(map[string]interface{}); ok && m != nil {
if o := objectsenvelopes.NewObject(m); o != nil {
yamlObjs = append(yamlObjs, o)
}
}
}
}
}
return yamlObjs
}
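A rough in-package sketch of the unwrapping (object shape assumed):

    list := objectsenvelopes.NewObject(map[string]interface{}{
        "apiVersion": "v1",
        "kind":       "List",
        "items": []interface{}{
            map[string]interface{}{"apiVersion": "v1", "kind": "Pod", "metadata": map[string]interface{}{"name": "a"}},
            map[string]interface{}{"apiVersion": "v1", "kind": "Service", "metadata": map[string]interface{}{"name": "b"}},
        },
    })
    objs := handleListObject(list)
    // objs holds one IMetadata per item; nested List items are not recursed into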

View File

@@ -1,4 +1,4 @@
package policyhandler
package resourcehandler
import (
"fmt"
@@ -41,7 +41,7 @@ func TestLoadFile(t *testing.T) {
t.Errorf("%v", err)
}
}
func TestLoadResources(t *testing.T) {
func TestMapResources(t *testing.T) {
// policyHandler := &PolicyHandler{}
// k8sResources, err := policyHandler.loadResources(opaSessionObj.Frameworks, scanInfo)
// files, _ := listFiles([]string{onlineBoutiquePath()})

View File

@@ -0,0 +1,217 @@
package resourcehandler
import (
"context"
"fmt"
"os"
"strings"
"github.com/armosec/kubescape/cautils"
"github.com/armosec/kubescape/hostsensorutils"
"github.com/armosec/opa-utils/objectsenvelopes"
"github.com/armosec/opa-utils/reporthandling"
"github.com/armosec/k8s-interface/cloudsupport"
"github.com/armosec/k8s-interface/k8sinterface"
"github.com/armosec/k8s-interface/workloadinterface"
"github.com/armosec/armoapi-go/armotypes"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
k8slabels "k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/version"
"k8s.io/client-go/dynamic"
)
type K8sResourceHandler struct {
k8s *k8sinterface.KubernetesApi
hostSensorHandler hostsensorutils.IHostSensor
fieldSelector IFieldSelector
rbacObjectsAPI *cautils.RBACObjects
}
func NewK8sResourceHandler(k8s *k8sinterface.KubernetesApi, fieldSelector IFieldSelector, hostSensorHandler hostsensorutils.IHostSensor, rbacObjects *cautils.RBACObjects) *K8sResourceHandler {
return &K8sResourceHandler{
k8s: k8s,
fieldSelector: fieldSelector,
hostSensorHandler: hostSensorHandler,
rbacObjectsAPI: rbacObjects,
}
}
func (k8sHandler *K8sResourceHandler) GetResources(frameworks []reporthandling.Framework, designator *armotypes.PortalDesignator) (*cautils.K8SResources, map[string]workloadinterface.IMetadata, error) {
allResources := map[string]workloadinterface.IMetadata{}
// get k8s resources
cautils.ProgressTextDisplay("Accessing Kubernetes objects")
cautils.StartSpinner()
// build resources map
// map resources based on framework required resources: map["/group/version/kind"][]<k8s workloads ids>
k8sResourcesMap := setResourceMap(frameworks)
// get namespace and labels from designator (ignore cluster labels)
_, namespace, labels := armotypes.DigestPortalDesignator(designator)
// pull k8s resources
if err := k8sHandler.pullResources(k8sResourcesMap, allResources, namespace, labels); err != nil {
return k8sResourcesMap, allResources, err
}
if err := k8sHandler.collectHostResources(allResources, k8sResourcesMap); err != nil {
cautils.WarningDisplay(os.Stderr, "Warning: failed to collect host sensor resources\n")
}
if err := k8sHandler.collectRbacResources(allResources); err != nil {
cautils.WarningDisplay(os.Stderr, "Warning: failed to collect rbac resources\n")
}
if err := getCloudProviderDescription(allResources, k8sResourcesMap); err != nil {
cautils.WarningDisplay(os.Stderr, fmt.Sprintf("Warning: %v\n", err.Error()))
}
cautils.StopSpinner()
cautils.SuccessTextDisplay("Accessed successfully to Kubernetes objects")
return k8sResourcesMap, allResources, nil
}
func (k8sHandler *K8sResourceHandler) GetClusterAPIServerInfo() *version.Info {
clusterAPIServerInfo, err := k8sHandler.k8s.DiscoveryClient.ServerVersion()
if err != nil {
cautils.ErrorDisplay(fmt.Sprintf("Failed to discover API server information: %v", err))
return nil
}
return clusterAPIServerInfo
}
func (k8sHandler *K8sResourceHandler) pullResources(k8sResources *cautils.K8SResources, allResources map[string]workloadinterface.IMetadata, namespace string, labels map[string]string) error {
var errs error
for groupResource := range *k8sResources {
apiGroup, apiVersion, resource := k8sinterface.StringToResourceGroup(groupResource)
gvr := schema.GroupVersionResource{Group: apiGroup, Version: apiVersion, Resource: resource}
result, err := k8sHandler.pullSingleResource(&gvr, namespace, labels)
if err != nil {
if !strings.Contains(err.Error(), "the server could not find the requested resource") {
// handle error
if errs == nil {
errs = err
} else {
errs = fmt.Errorf("%s\n%s", errs, err.Error())
}
}
continue
}
// convert the pulled resources to metadata objects and index them by ID
metaObjs := ConvertMapListToMeta(k8sinterface.ConvertUnstructuredSliceToMap(result))
for i := range metaObjs {
allResources[metaObjs[i].GetID()] = metaObjs[i]
}
(*k8sResources)[groupResource] = workloadinterface.ListMetaIDs(metaObjs)
}
return errs
}
func (k8sHandler *K8sResourceHandler) pullSingleResource(resource *schema.GroupVersionResource, namespace string, labels map[string]string) ([]unstructured.Unstructured, error) {
resourceList := []unstructured.Unstructured{}
// set labels
listOptions := metav1.ListOptions{}
fieldSelectors := k8sHandler.fieldSelector.GetNamespacesSelectors(resource)
for i := range fieldSelectors {
listOptions.FieldSelector = fieldSelectors[i]
if len(labels) > 0 {
set := k8slabels.Set(labels)
listOptions.LabelSelector = set.AsSelector().String()
}
// set dynamic object
var clientResource dynamic.ResourceInterface
if namespace != "" && k8sinterface.IsNamespaceScope(resource) {
clientResource = k8sHandler.k8s.DynamicClient.Resource(*resource).Namespace(namespace)
} else {
clientResource = k8sHandler.k8s.DynamicClient.Resource(*resource)
}
// list resources
result, err := clientResource.List(context.Background(), listOptions)
if err != nil || result == nil {
return nil, fmt.Errorf("failed to get resource: %v, namespace: %s, labelSelector: %v, reason: %v", resource, namespace, listOptions.LabelSelector, err)
}
resourceList = append(resourceList, result.Items...)
}
return resourceList, nil
}
func ConvertMapListToMeta(resourceMap []map[string]interface{}) []workloadinterface.IMetadata {
workloads := []workloadinterface.IMetadata{}
for i := range resourceMap {
if w := objectsenvelopes.NewObject(resourceMap[i]); w != nil {
workloads = append(workloads, w)
}
}
return workloads
}
func (k8sHandler *K8sResourceHandler) collectHostResources(allResources map[string]workloadinterface.IMetadata, resourcesMap *cautils.K8SResources) error {
hostResources, err := k8sHandler.hostSensorHandler.CollectResources()
if err != nil {
return err
}
for rscIdx := range hostResources {
group, version := getGroupNVersion(hostResources[rscIdx].GetApiVersion())
groupResource := k8sinterface.JoinResourceTriplets(group, version, hostResources[rscIdx].GetKind())
allResources[hostResources[rscIdx].GetID()] = &hostResources[rscIdx]
grpResourceList, ok := (*resourcesMap)[groupResource]
if !ok {
grpResourceList = make([]string, 0)
}
(*resourcesMap)[groupResource] = append(grpResourceList, hostResources[rscIdx].GetID())
}
return nil
}
func (k8sHandler *K8sResourceHandler) collectRbacResources(allResources map[string]workloadinterface.IMetadata) error {
if k8sHandler.rbacObjectsAPI == nil {
return nil
}
allRbacResources, err := k8sHandler.rbacObjectsAPI.ListAllResources()
if err != nil {
return err
}
for k, v := range allRbacResources {
allResources[k] = v
}
return nil
}
func getCloudProviderDescription(allResources map[string]workloadinterface.IMetadata, k8sResourcesMap *cautils.K8SResources) error {
if cloudsupport.IsRunningInCloudProvider() {
wl, err := cloudsupport.GetDescriptiveInfoFromCloudProvider()
if err != nil {
cluster := k8sinterface.GetCurrentContext().Cluster
provider := cloudsupport.GetCloudProvider(cluster)
// Return error with useful info on how to configure credentials for getting cloud provider info
switch provider {
case "gke":
return fmt.Errorf("could not get descriptive information about gke cluster: %s using sdk client. See https://developers.google.com/accounts/docs/application-default-credentials for more information", cluster)
case "eks":
return fmt.Errorf("could not get descriptive information about eks cluster: %s using sdk client. Check out how to configure credentials in https://docs.aws.amazon.com/sdk-for-go/api/", cluster)
case "aks":
return fmt.Errorf("could not get descriptive information about aks cluster: %s. %v", cluster, err.Error())
}
return err
}
allResources[wl.GetID()] = wl
(*k8sResourcesMap)[fmt.Sprintf("%s/%s", wl.GetApiVersion(), wl.GetKind())] = []string{wl.GetID()}
}
return nil
}

View File

@@ -1,6 +1,8 @@
package policyhandler
package resourcehandler
import (
"strings"
"github.com/armosec/kubescape/cautils"
"github.com/armosec/opa-utils/reporthandling"
@@ -66,3 +68,15 @@ func insertK8sResources(k8sResources map[string]map[string]map[string]interface{
}
}
}
func getGroupNVersion(apiVersion string) (string, string) {
gv := strings.Split(apiVersion, "/")
group, version := "", ""
if len(gv) >= 1 {
group = gv[0]
}
if len(gv) >= 2 {
version = gv[1]
}
return group, version
}
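For example (note the core-group case, where apiVersion contains no slash):

    g, v := getGroupNVersion("apps/v1") // g == "apps", v == "v1"
    g, v = getGroupNVersion("v1")       // g == "v1", v == "" - core apiVersions are not split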

View File

@@ -1,4 +1,4 @@
package policyhandler
package resourcehandler
import (
"github.com/armosec/k8s-interface/k8sinterface"
@@ -11,6 +11,7 @@ func TestGetK8sResources(t *testing.T) {
// getK8sResources
}
func TestSetResourceMap(t *testing.T) {
k8sinterface.InitializeMapResourcesMock()
framework := reporthandling.MockFrameworkA()
k8sResources := setResourceMap([]reporthandling.Framework{*framework})
resources := k8sinterface.ResourceGroupToString("*", "v1", "Pod")

Some files were not shown because too many files have changed in this diff Show More