Compare commits

...

834 Commits

Author SHA1 Message Date
dwertent
e685fe2b7d update download readme 2022-02-10 11:42:42 +02:00
dwertent
4cda32771b fixed url scanning 2022-02-10 09:24:46 +02:00
dwertent
f896b65a87 fixed eks cluster name 2022-02-10 08:55:28 +02:00
dwertent
2380317953 fixed Set image version 2022-02-09 14:16:32 +02:00
dwertent
659d3533ee disable cosign 2022-02-09 13:29:46 +02:00
Rotem Refael
37c242576e Merge pull request #373 from Bezbran/dev
[housekeeper] add readiness checks
2022-02-09 09:45:54 +02:00
Bezalel Brandwine
e9a22a23e7 [housekeeper] add readiness checks 2022-02-09 09:35:39 +02:00
David Wertenteil
ae3816c1e0 [host sensor] fix name of log field 2022-02-09 09:00:57 +02:00
Bezalel Brandwine
e4661a5ae2 [host sensor] fix name of log field 2022-02-09 08:50:47 +02:00
Bezbran
539d1889fe Merge pull request #15 from armosec/dev
Dev
2022-02-09 08:23:54 +02:00
David Wertenteil
2dd5f05f1a Replace call to fmt.print to logger call 2022-02-08 22:10:07 +02:00
dwertent
60c9b38de4 replace print by logger 2022-02-08 22:08:41 +02:00
dwertent
8b66b068ea remove publish image file 2022-02-08 15:55:53 +02:00
dwertent
1507bc3f04 update helm readme 2022-02-08 15:54:05 +02:00
David Wertenteil
1e0baba919 fixed dev version 2022-02-08 15:15:29 +02:00
dwertent
4c9f47b1e1 fixed dev version 2022-02-08 15:14:10 +02:00
David Wertenteil
b66446b7eb Update auth urls 2022-02-08 14:47:28 +02:00
dwertent
f1726e21ae ignore cosign 2022-02-08 14:45:23 +02:00
dwertent
8d48f8ad86 adding logger 2022-02-08 14:23:54 +02:00
dwertent
8b280f272e update auth url 2022-02-08 14:21:39 +02:00
David Wertenteil
b92d4256ad Fixed build.yaml and do not push docker in other repos 2022-02-08 14:13:10 +02:00
dwertent
914a04a386 fixed build dev 2022-02-08 14:02:53 +02:00
dwertent
12f3dd7db6 update build 2022-02-08 13:45:31 +02:00
dwertent
427032ab94 testing workflow 2022-02-08 11:27:41 +02:00
David Wertenteil
b55aaaa34d chore: sign container image with cosign 2022-02-08 11:00:45 +02:00
David Wertenteil
7cde877452 Minor fixes 2022-02-07 17:41:25 +02:00
dwertent
e399012f73 fixed glob files 2022-02-07 17:36:47 +02:00
dwertent
fe1d2646bd fixed glob files 2022-02-07 16:08:57 +02:00
dwertent
ea98bfbe9a Merge remote-tracking branch 'upstream/dev' 2022-02-07 15:38:11 +02:00
dwertent
7bc3277634 support scanning all with yaml files 2022-02-07 15:37:57 +02:00
dwertent
22e94c5a29 typo 2022-02-07 15:37:30 +02:00
dwertent
aa8cf0ff15 fixed python build 2022-02-07 11:18:23 +02:00
dwertent
a22f97bd13 fixed python build 2022-02-07 10:56:39 +02:00
David Wertenteil
3fd2d1629d Adding logger and submitting exceptions 2022-02-06 19:05:00 +02:00
dwertent
cd04204a5c fixed url 2022-02-06 18:19:35 +02:00
dwertent
eee55376e7 Merge remote-tracking branch 'upstream/dev' 2022-02-06 17:14:28 +02:00
dwertent
d3c0972d70 submit exceptions 2022-02-06 17:12:59 +02:00
David Wertenteil
e3f5fa8e35 update version of rbac pkg - show highest parent in SAID2WLID map 2022-02-06 16:36:20 +02:00
dwertent
03c540b68c support armosec login 2022-02-06 16:35:39 +02:00
yiscah
7f2f53b06c update version of rbac pkg - show highest parent in SAID2WLID map 2022-02-06 13:50:48 +02:00
dwertent
8064826b53 init logger 2022-02-06 12:01:26 +02:00
dwertent
8bdff31693 replace print with logger 2022-02-06 11:13:33 +02:00
Batuhan Apaydın
6f05b4137b chore: sign container image with cosign
Signed-off-by: Batuhan Apaydın <batuhan.apaydin@trendyol.com>
2022-02-04 12:16:07 +03:00
dwertent
4207f3d6d1 using logger 2022-02-03 18:12:35 +02:00
dwertent
98dbda696d update exception support 2022-02-03 16:44:57 +02:00
dwertent
7a34c94542 adding logger 2022-02-03 15:42:37 +02:00
David Wertenteil
789b93776d support fixPaths 2022-02-03 15:28:24 +02:00
yiscah
c93ee64630 allow case where both failpath and fixpath exist 2022-02-03 13:13:18 +02:00
yiscah
f54c3ad85c Merge branch 'dev' of ssh://github.com/armosec/kubescape into dev 2022-02-03 13:03:27 +02:00
David Wertenteil
a9fcd00723 Support image-vulnerabilities related controls 2022-02-03 12:48:41 +02:00
yiscah
fb7cc4284e support fixPaths 2022-02-03 12:29:28 +02:00
dwertent
e7a0755c25 report ClusterAPIServerInfo 2022-02-03 10:32:00 +02:00
dwertent
d1e02dc298 update config command 2022-02-02 14:24:53 +02:00
dwertent
69814039ca using accountID instead of customerGUID 2022-02-02 08:12:41 +02:00
dwertent
2ffb7fcdb4 support view/set/delete commands 2022-02-01 17:16:27 +02:00
dwertent
d1695b7f10 support vuln scan 2022-01-31 18:41:45 +02:00
raziel
5f0f9a9eae correct nil dereference 2022-01-31 17:13:48 +02:00
dwertent
aa2dedb76f Merge branch 'master' into vuln-support 2022-01-30 14:03:46 +02:00
David Wertenteil
407e35c9d8 Merge pull request #356 from dwertent/master
load cloud provider from env
2022-01-30 11:09:45 +02:00
dwertent
bb7f38ce31 load cloud provider from env 2022-01-30 11:09:02 +02:00
David Wertenteil
1ffb2d360a Merge pull request #351 from wim-de-groot/master
Fixes(#333) Added length check to prevent panic
2022-01-30 08:43:36 +02:00
David Wertenteil
5cbadc02c5 Minor fixes 2022-01-30 08:17:43 +02:00
dwertent
9aa8d9edf0 Merge remote-tracking branch 'upstream/dev' 2022-01-30 08:14:31 +02:00
dwertent
db84380844 print summary and score 2022-01-30 08:14:10 +02:00
dwertent
bbb0d2154f fixed version print 2022-01-30 08:06:50 +02:00
Wim de Groot
2a937ac7c0 Merge branch 'dev' into master 2022-01-27 09:36:52 +01:00
Wim de Groot
a96652094e Added length check to prevent panic (#333)
Signed-off-by: Wim de Groot <34519486+degrootwim@users.noreply.github.com>
2022-01-26 14:57:19 +01:00
David Wertenteil
dbf3de57f6 Prevent index out of range [0] when control report does not have rules 2022-01-26 10:19:04 +02:00
Rotem Refael
c00bc0ebbb Merge pull request #348 from armosec/dev
Fixing null pointer crash in GKE
2022-01-24 17:15:30 +02:00
Ben Hirschberg
b08e5a2c32 Merge pull request #347 from slashben/dev
Fixing null pointer exception in case of in-cluster installation
2022-01-24 16:47:54 +02:00
Ben Hirschberg
09db5d94e1 Fixing null pointer exception in case of in-cluster installation 2022-01-24 16:47:12 +02:00
Rotem Refael
033e8f6b44 Merge pull request #346 from armosec/dev
returning rbac submit code + fix crash
2022-01-24 12:40:33 +02:00
Ben Hirschberg
bef40f0e6c Merge pull request #344 from slashben/dev
returning RBAC submit table
2022-01-24 09:13:28 +02:00
Ben Hirschberg
aa2f69125f returning rbac submit code 2022-01-24 09:12:13 +02:00
Ben Hirschberg
d30f3960a7 Merge pull request #341 from slashben/dev
fixing the crash around submit rbac
2022-01-23 21:28:10 +02:00
Ben Hirschberg
5f43da94ba fixing the crash around submit rbac 2022-01-23 20:17:03 +02:00
Rotem Refael
2aa8a0c935 Merge pull request #340 from Anthirian/dev
Fix typo in namespace
2022-01-23 13:43:42 +02:00
Ben Hirschberg
c02f8c6cb5 Merge pull request #338 from vfarcic/dev
Video
2022-01-23 08:18:44 +02:00
Geert Smelt
aa0be474e2 Fix typo in namespace 2022-01-21 15:40:00 +01:00
Viktor Farcic
c0161c9b33 Video 2022-01-20 19:54:57 +01:00
Quirino Gervacio
71404f2205 Prevent index out of range [0] when control report does not have a rule report 2022-01-20 04:51:04 +08:00
Rotem Refael
514da1e2db Merge pull request #332 from armosec/dev
Emergency fix: checking kube context before use
2022-01-19 16:53:14 +02:00
Ben Hirschberg
75dfceb5da Merge pull request #331 from slashben/dev
checking if local context exists before using
2022-01-19 16:38:22 +02:00
Ben Hirschberg
1ae76b4377 checking if context exists 2022-01-19 16:37:32 +02:00
Rotem Refael
b6f90cba8e Merge pull request #330 from armosec/dev
New kubescape major version
2022-01-19 15:21:52 +02:00
rrefael
62af441a1d Increase release & tag to major 2022-01-19 09:19:16 +02:00
Rotem Refael
228b8957d3 Merge pull request #317 from armosec/report-refactor
- Support report v2 (pagination)
- Scan control only once
- Support download of exceptions, control-configuration, artifacts -> kubescape download
- Support listing frameworks and controls -> kubescape list
- Adding scan framework all for scanning yaml files
- Scan other kubernetes contexts by adding the --kube-context flag
2022-01-18 21:02:56 +02:00
YiscahLevySilas1
b4ce999ab3 Merge pull request #324 from YiscahLevySilas1/dev
added download + load artifacts to readme
2022-01-18 18:02:34 +02:00
yiscah
cc06a414fe added examples of download + load artifacts 2022-01-18 17:50:28 +02:00
YiscahLevySilas1
d3c37c4e5f Merge branch 'armosec:dev' into dev 2022-01-18 17:30:36 +02:00
YiscahLevySilas1
3b448b62b1 Merge pull request #323 from YiscahLevySilas1/dev
support load artifacts from local path
2022-01-18 17:28:08 +02:00
YiscahLevySilas1
6a3f5658b1 Merge branch 'report-refactor' into dev 2022-01-18 17:19:03 +02:00
yiscah
f65e791522 fix parsing local path to artifacts 2022-01-18 17:01:53 +02:00
yiscah
d91304f9ad setUseArtifactsFrom only when flag is set 2022-01-18 16:32:01 +02:00
yiscah
61ce00108e fixed download to local path 2022-01-18 15:29:35 +02:00
Ben Hirschberg
a4eb773eee Merge pull request #322 from slashben/dev
Adding examples for cloud integrations
2022-01-18 15:05:47 +02:00
Ben Hirschberg
cfc69f5a0f adding access to container registry 2022-01-18 14:41:07 +02:00
Ben Hirschberg
a44823c3ed example cloud integration scripts 2022-01-17 13:55:43 +02:00
Ben Hirschberg
8a166e5ba5 typo fix 2022-01-17 11:54:02 +02:00
rrefael
9a7aeff870 update overview text- README 2022-01-16 15:40:34 +02:00
Lior Alafi
cb3bdb9df2 supporting both structures for score calculation 2022-01-14 17:54:39 +02:00
rrefael
0be8d57eaa add --enable-host-scan flag 2022-01-13 15:44:21 +02:00
dwertent
79b9cbf1d6 Merge branch 'report-refactor' 2022-01-13 15:16:15 +02:00
dwertent
500df8737e send small clusters once 2022-01-13 15:03:23 +02:00
David Wertenteil
b8acbd1bee fixed flag and cloud env support 2022-01-13 14:19:26 +02:00
dwertent
0bde8a65ba fixed context flag 2022-01-13 14:16:36 +02:00
dwertent
d2884b8936 update failure message 2022-01-13 13:07:29 +02:00
dwertent
e692359b47 Merge remote-tracking branch 'dwertent/master' into report-refactor 2022-01-13 12:52:34 +02:00
dwertent
473746eab0 merged with Daniel's branch 2022-01-13 12:50:50 +02:00
dwertent
050878cbd6 Merge remote-tracking branch 'dwertent/master' into report-refactor 2022-01-13 11:48:13 +02:00
dwertent
e100f18bb0 fixed tests 2022-01-13 11:44:46 +02:00
dwertent
05c82fc166 Merge remote-tracking branch 'dwertent/master' into dev 2022-01-13 11:36:15 +02:00
dwertent
839c3e261f swap result and resource 2022-01-13 11:04:09 +02:00
dwertent
95b579d191 fixed resource list 2022-01-12 14:03:12 +02:00
Bezbran
8656715753 Merge pull request #314 from alegrey91/master
move kube-host-sensor manifest to independent yaml file with embed capability
2022-01-12 09:39:26 +02:00
dwertent
05b6394c5c send report to v2 2022-01-11 14:21:16 +02:00
dwertent
72860deb0f update struct, adding mock struct 2022-01-11 10:43:53 +02:00
Rotem Refael
d3bdbf31ac Merge pull request #315 from armosec/dev
Typo fixes
Update Kubescape logo
Adding contributors

Issues:
- Closes Spelling error Namescape should be Namespace #296
- Closes Add contributors to Readme #307
2022-01-11 09:58:36 +02:00
David Wertenteil
639c694c13 Merge pull request #5 from slashben/master
Armo interface
2022-01-11 09:35:12 +02:00
Ben Hirschberg
f34f6dc51e GetImageVulnerabilty is working 2022-01-11 00:42:26 +02:00
Ben Hirschberg
b93e7b9abf take only the first result 2022-01-10 22:48:00 +02:00
yiscah
995f615b10 support load artifacts from local path 2022-01-10 20:00:42 +02:00
Ben Hirschberg
39b95eff4f got full auth cycle 2022-01-10 19:50:07 +02:00
alegrey91
392625b774 style(host-sensor): move kube-host-sensor manifest to independent yaml file 2022-01-10 18:39:06 +01:00
dwertent
306b9d28ca ignore skipped resources 2022-01-10 15:21:31 +02:00
dwertent
6fe87bba20 Merge branch 'dev' of github.com:dwertent/kubescape into dev 2022-01-10 13:46:19 +02:00
David Wertenteil
c0d534072d print downloaded artifacts 2022-01-10 13:45:43 +02:00
dwertent
009221aa98 report 2022-01-10 10:21:42 +02:00
yiscah
46e5aff5f9 handle print for each artifact separately 2022-01-10 09:07:06 +02:00
YiscahLevySilas1
59498361e7 Merge pull request #3 from dwertent/dev
Dev
2022-01-10 08:35:40 +02:00
dwertent
c652da130d adding download examples 2022-01-10 08:13:44 +02:00
Ben Hirschberg
83246a1802 generalizing url settings 2022-01-10 08:10:40 +02:00
Ben Hirschberg
f255df0198 login first part 2022-01-10 07:39:54 +02:00
David Wertenteil
9e524ffc34 support download artifacts 2022-01-09 17:37:26 +02:00
David Wertenteil
004cc0c469 Merge branch 'dev' into dev 2022-01-09 17:37:12 +02:00
Ben Hirschberg
52b78a7e73 Merge branch 'armosec:master' into master 2022-01-09 16:47:41 +02:00
dwertent
bd089d76af adding cluster flag - support submitting yaml file 2022-01-09 16:13:15 +02:00
yiscah
d5025b54bf handle error for each artifact download separately 2022-01-09 15:12:03 +02:00
dwertent
740497047d cli print support v2 2022-01-09 10:33:47 +02:00
Ben Hirschberg
3f6cbd57b2 Merge pull request #311 from saiyam1814/patch-1
Adding Kubescape katacoda playground
2022-01-08 18:16:50 +02:00
Saiyam Pathak
2c9524ed45 Adding Kubescape katacoda playground 2022-01-08 19:21:33 +05:30
Ben Hirschberg
384922680a Merge pull request #309 from saiyam1814/master
Adding Contributors
2022-01-07 15:41:53 +02:00
David Wertenteil
d2e9f8f4f8 Fixes #296 Spelling error Namescape should be Namespace 2022-01-07 14:25:24 +02:00
Saiyam Pathak
b4f10f854e Adding contributors 2022-01-07 11:27:32 +05:30
Clint Modien
8ce64d2a7f Fixes #296 Spelling error Namescape should be Namespace 2022-01-06 09:45:11 -08:00
yiscah
d917e21364 support download artifacts 2022-01-06 17:44:50 +02:00
YiscahLevySilas1
32cedaf565 Merge branch 'armosec:dev' into dev 2022-01-06 17:44:23 +02:00
dwertent
4c2a5e9a11 support scan all 2022-01-06 16:21:41 +02:00
dwertent
a41d2a46ff cli support list 2022-01-06 15:28:01 +02:00
dwertent
4794cbfb36 update opa version 2022-01-06 14:31:46 +02:00
rrefael
d021217cf7 add new logo 2022-01-06 14:19:13 +02:00
dwertent
4573d83831 fixed counters and skipped ctr 2022-01-06 13:05:51 +02:00
Ben Hirschberg
2bb612ca3f Merge pull request #303 from armosec/slashben-roadmap-branch
Create roadmap.md
2022-01-06 11:12:48 +02:00
Ben Hirschberg
35534112c6 Create MAINTAINERS.md 2022-01-06 11:10:21 +02:00
Ben Hirschberg
f51e531f3a fixing typos 2022-01-06 11:04:28 +02:00
Ben Hirschberg
2490856ccb Update roadmap.md 2022-01-06 11:02:37 +02:00
Ben Hirschberg
9a5a87b027 Create CODE_OF_CONDUCT.md 2022-01-06 10:44:28 +02:00
Ben Hirschberg
45b8c89865 Create roadmap.md 2022-01-06 10:30:08 +02:00
YiscahLevySilas1
e68e6dcd3d Merge pull request #1 from dwertent/master
Download policies support
2022-01-06 08:51:16 +02:00
dwertent
670ff4a15d support download 2022-01-05 20:46:56 +02:00
dwertent
b616a37800 fixed test 2022-01-05 16:45:50 +02:00
dwertent
ce488a3645 update latest fixes 2022-01-05 16:45:02 +02:00
dwertent
fb47a9c742 support v2 printing 2022-01-05 16:38:37 +02:00
David Wertenteil
80ace81a12 Fixing typo in the ActionSendReport error message 2022-01-05 16:16:52 +02:00
yiscah
1efdae5197 begin download config + download exceptions 2022-01-05 15:56:38 +02:00
yiscah
a4c88edfca begin download config + download exceptions 2022-01-05 15:56:24 +02:00
YiscahLevySilas1
8f38c2f627 Merge branch 'armosec:dev' into dev 2022-01-05 15:10:49 +02:00
Jonas Kint
bbf68d4ce8 Fixing typo in the ActionSendReport error message 2022-01-05 13:49:26 +01:00
dwertent
e1eec47a22 fixed report 2022-01-04 18:28:48 +02:00
Rotem Refael
fc05075817 Merge pull request #294 from armosec/dev
Minor features and improvements
2022-01-04 15:29:55 +02:00
dwertent
5bb64b634a support loading ks config in env 2022-01-04 14:42:25 +02:00
dwertent
7bc2c2be13 filter out resources based on owners 2022-01-03 13:36:29 +02:00
yiscah
27e2c044da update rbac-utils version for SAID2WLIDmap obj 2022-01-03 09:59:34 +02:00
dwertent
1213e8d6ac convert reports 2022-01-02 21:46:09 +02:00
dwertent
3f58d68d2a mocks 2021-12-30 17:48:42 +02:00
Rotem Refael
803e62020e add devopsbest framework 2021-12-30 16:40:07 +02:00
dwertent
fde437312f report v1 to v2 2021-12-30 11:52:17 +02:00
Ben Hirschberg
18425c915b Merge pull request #291 from slashben/dev
adding container image vulnerability adaptor proposal
2021-12-30 10:44:57 +02:00
Benyamin Hirschberg
0de6892ddd adding container image vulnerability adaptor proposal 2021-12-30 10:44:08 +02:00
David Wertenteil
dfb92ffec3 Remove RBAC deprecated objects 2021-12-29 17:49:52 +02:00
Ben Hirschberg
b7842f98f0 Merge branch 'armosec:master' into master 2021-12-29 16:28:35 +02:00
yiscah
85317f1ee1 Merge branch 'dev' of https://github.com/YiscahLevySilas1/kubescape into dev 2021-12-29 16:23:29 +02:00
yiscah
f22f60508f rbacTable and rbac struct deprecated 2021-12-29 16:23:14 +02:00
dwertent
716bdaaf38 support kind List 2021-12-29 12:06:48 +02:00
dwertent
1b0e2b87de Handle all resources failure 2021-12-28 10:47:12 +02:00
David Wertenteil
2c57b809d2 show warnings for host sensor and send kubelet cmd 2021-12-28 10:42:26 +02:00
David Wertenteil
d9c96db212 Merge branch 'dev' into master 2021-12-28 10:41:39 +02:00
Daniel-GrunbergerCA
5f7391a76b stdout to stderr 2021-12-28 09:20:05 +02:00
Daniel-GrunbergerCA
accd80eda8 rm cmdline map 2021-12-28 09:07:50 +02:00
Daniel-GrunbergerCA
e49499f085 use regoes from master 2021-12-27 08:45:50 +02:00
David Delarosa
521f8930d7 Merge branch 'dev' into dev 2021-12-26 14:43:06 +02:00
David Wertenteil
11b9a8eb6e fix ControlsInputsGetter init 2021-12-23 17:39:49 +02:00
yiscah
0d4350ae24 fix ControlsInputsGetter init 2021-12-23 17:31:23 +02:00
David Wertenteil
62a6a25aa1 support pulling config inputs from git 2021-12-23 16:48:29 +02:00
yiscah
14a74e7312 support pulling config inputs from git 2021-12-23 10:33:23 +02:00
Rotem Refael
3fad2f3430 Merge pull request #279 from armosec/dev
Cli improvements
2021-12-22 21:16:54 +02:00
David Delarosa
c35d1e8791 Use stderr
By using stderr fd we can separate the information logs from the
application output
2021-12-22 20:27:38 +02:00
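
The commit above separates informational logs from application output by writing logs to the stderr file descriptor. A minimal Go sketch of that idea (illustrative only, not kubescape's actual logging code):

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// Informational logs go to the stderr fd, so they can be silenced or
	// redirected independently of the application output.
	fmt.Fprintln(os.Stderr, "scanning cluster...")

	// The application output (e.g. a JSON report) goes to stdout, so it can
	// be piped cleanly into other tools without log lines mixed in.
	fmt.Println(`{"score": 42}`)
}
```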
David Wertenteil
0367255a2a cli improvement
* Support risk-score calculation
* Update spinner support
* Update url display
* Adding control url to each control
2021-12-22 16:56:11 +02:00
dwertent
f5f5552ecd support threshold 2021-12-22 16:48:18 +02:00
dwertent
046a22bd2b print errors to stderr 2021-12-22 12:59:46 +02:00
Daniel-GrunbergerCA
ad94ac7595 rm json print 2021-12-22 08:29:35 +02:00
Daniel-GrunbergerCA
cfa3993b79 print json 2021-12-21 20:31:12 +02:00
Daniel-GrunbergerCA
972793b98a print json 2021-12-21 20:27:23 +02:00
Daniel-GrunbergerCA
35682bf5b8 pull regoes from dev 2021-12-21 19:02:16 +02:00
Daniel-GrunbergerCA
b023f592aa Merge remote-tracking branch 'upstream/dev' 2021-12-21 13:37:32 +02:00
Daniel-GrunbergerCA
a1c34646f1 warning for host sensor 2021-12-21 13:34:31 +02:00
David Wertenteil
9ac3768f1d Merge pull request #277 from dwertent/master
Fixed host sensor ignore ns
2021-12-21 13:23:20 +02:00
dwertent
ff7881130f fixed host sensor issue 2021-12-21 13:09:31 +02:00
dwertent
37effda7c5 update pkg 2021-12-21 11:04:18 +02:00
Daniel-GrunbergerCA
0cac7cb1a5 fix kubeletcmd for marshalling 2021-12-21 09:23:38 +02:00
David Wertenteil
8d41d11ca3 Merge pull request #275 from dwertent/master
CLI improvements
2021-12-20 18:26:55 +02:00
dwertent
0ef516d147 support printing sensor and cloud resources 2021-12-20 18:22:08 +02:00
dwertent
f57a30898c print skipped 2021-12-20 13:32:14 +02:00
dwertent
a10c67555d adding spinner when sending report 2021-12-20 11:14:31 +02:00
dwertent
14d0df3926 update generated url 2021-12-20 11:11:31 +02:00
dwertent
c085aeaa68 adding control url to results 2021-12-20 10:53:40 +02:00
David Wertenteil
8543afccca Update objects grouping and kinds 2021-12-19 23:34:41 +02:00
dwertent
61b5603a3b support host sensor and cloud description 2021-12-19 23:21:05 +02:00
dwertent
e3efffb2ec Merge remote-tracking branch 'upstream/dev' 2021-12-19 15:07:48 +02:00
dwertent
fe9a342b42 update submit testing 2021-12-19 15:06:30 +02:00
Rotem Refael
c7668b4436 Merge pull request #272 from LiorAlafiArmo/dev
ARMO risk-score
2021-12-16 14:33:53 +02:00
Rotem Refael
ccdf6b227f Merge pull request #271 from LiorAlafiArmo/master
ARMO risk-score
2021-12-16 14:32:13 +02:00
Lior Alafi
0aea384f41 changed prettyprint to work with risk-score 2021-12-15 19:04:40 +02:00
Lior Alafi
467059cd26 adding score 2021-12-15 18:13:58 +02:00
David Wertenteil
f41af36ea9 Adding cloudSupport objects to interface 2021-12-14 20:10:45 +02:00
dwertent
e2f8902222 use objectsenvelopes pkg 2021-12-14 20:06:38 +02:00
David Wertenteil
52bfd4cadc Submit configmap and env vars keys 2021-12-14 18:13:42 +02:00
Daniel-GrunbergerCA
7cdc556292 go mod 2021-12-14 14:16:50 +02:00
Daniel-GrunbergerCA
039bda9eaf Merge remote-tracking branch 'upstream/dev' into send_keys 2021-12-14 14:05:13 +02:00
Daniel-GrunbergerCA
a6d73d6f8b send keys for configmaps and env vars 2021-12-14 14:05:07 +02:00
Ben Hirschberg
8e5af59153 Merge pull request #267 from armosec/dev
Hot fix - Download command
2021-12-14 12:16:24 +02:00
dwertent
278467518e call SetRegoObjects when downloading 2021-12-14 12:07:07 +02:00
dwertent
a7080a5778 remove data from resource after saving in list 2021-12-14 11:33:33 +02:00
Daniel-GrunbergerCA
6a71ef6745 Merge remote-tracking branch 'upstream/dev' 2021-12-13 15:44:44 +02:00
Rotem Refael
10eb576260 Merge pull request #264 from armosec/dev
Hot fix - all resource list missing some of the failed resources
2021-12-13 14:35:26 +02:00
David Wertenteil
f14acb79bf Merge pull request #263 from dwertent/master
Hot fix - adding failed resources to the all-list of resources
2021-12-13 14:18:55 +02:00
dwertent
b8e011bd27 Merge remote-tracking branch 'upstream/dev' 2021-12-13 14:06:23 +02:00
dwertent
f6295308cd hot-fix eks failing resources 2021-12-13 14:06:07 +02:00
Rotem Refael
f981675850 Merge pull request #262 from armosec/dev
New features and bug fixing - new release
2021-12-12 18:06:25 +02:00
dwertent
93bb7610e6 update summary table 2021-12-12 17:43:34 +02:00
Rotem Refael
23975ee359 change dev-ui link 2021-12-12 13:50:06 +02:00
David Wertenteil
14eaedf375 revert prometheus output
* Revert prometheus output
* Revert sensor behavior
2021-12-12 11:37:50 +02:00
dwertent
ced0b741b9 Do not ask user about host sensor 2021-12-12 11:36:35 +02:00
dwertent
13e805b213 remove passed resources 2021-12-12 10:50:22 +02:00
dwertent
c424c1e394 revert prometheus output 2021-12-12 10:41:09 +02:00
David Wertenteil
77d68bdc73 version and submit improvements 2021-12-09 15:10:50 +02:00
dwertent
a1555bb9cd after merge with dev branch 2021-12-09 14:10:44 +02:00
dwertent
3ca61b218e execute the ResourceEnumerator 2021-12-09 13:41:19 +02:00
dwertent
e7917277e7 move rbac objects to cautils 2021-12-09 13:27:23 +02:00
dwertent
aa18be17fa remove rego from armo, support release fallback 2021-12-09 11:54:25 +02:00
Daniel-GrunbergerCA
39c7af5f8d Merge remote-tracking branch 'upstream/dev' 2021-12-08 13:36:51 +02:00
David Wertenteil
a5f7f8bbe4 Merge pull request #258 from Bezbran/dev
take nodes list from corev1 API.
2021-12-08 11:04:20 +02:00
Bezalel Brandwine
420e491963 add some more host sensor data 2021-12-08 08:58:07 +02:00
Bezbran
36f2ff997a Merge pull request #13 from armosec/dev
Dev
2021-12-08 08:44:04 +02:00
YiscahLevySilas1
c33807d052 Merge pull request #257 from YiscahLevySilas1/dev
add apiVersion to rbac obj
2021-12-07 20:13:00 +02:00
yiscah
fb3946b64f Merge branch 'dev' of https://github.com/YiscahLevySilas1/kubescape into dev 2021-12-07 19:50:31 +02:00
yiscah
51322e7270 add apiVersion to rbac objs 2021-12-07 19:50:14 +02:00
David Wertenteil
3f084d8525 Merge pull request #256 from dwertent/master
* Fixed url scanning 
* Support preRun rego
2021-12-07 17:15:06 +02:00
David Wertenteil
b1f4002036 Merge pull request #255 from AlexsJones/dev
spelling mistake on clihandler/cmd/control.go:19
2021-12-07 16:51:15 +02:00
dwertent
bb1cbe0902 fixed url scanning, support preRun rego 2021-12-07 16:50:43 +02:00
Alex Jones
a095634755 spelling mistake on clihandler/cmd/control.go:19 2021-12-07 13:03:44 +00:00
Daniel-GrunbergerCA
1b9ff074af run workflow 2021-12-07 14:51:34 +02:00
Daniel-GrunbergerCA
f8361446a4 integrate cloud provider description 2021-12-07 14:49:56 +02:00
Bezalel Brandwine
5713490f14 Merge branch 'dev' of github.com:Bezbran/kubescape into dev 2021-12-07 12:48:51 +02:00
Bezalel Brandwine
1ceac2a0a0 take node list from core v1 2021-12-07 12:48:45 +02:00
Daniel-GrunbergerCA
8a2967a0db Merge remote-tracking branch 'upstream/dev' 2021-12-07 10:49:23 +02:00
David Wertenteil
86297720d5 Merge pull request #254 from dwertent/master
store data only once
2021-12-07 10:33:15 +02:00
dwertent
1aeb2b96e2 store data only once 2021-12-07 10:30:03 +02:00
Bezbran
4ee8b9d7f6 Merge pull request #12 from armosec/dev
Dev
2021-12-06 10:18:33 +02:00
David Wertenteil
1d208ed5ec Merge pull request #252 from Bezbran/dev
Merge host-sensor capability
2021-12-05 17:32:57 +02:00
Bezalel Brandwine
3883aaabab fault tolerance for host sensor installation failures 2021-12-05 16:01:38 +02:00
Bezalel Brandwine
6fb3c070d0 don't print skipping node scanning 2021-12-05 15:42:07 +02:00
Bezalel Brandwine
d8d8b4ed73 add k8s resources map all in all to all resources report 2021-12-05 15:40:21 +02:00
David Wertenteil
907f46769f Merge pull request #251 from dwertent/master
return list of strings
2021-12-05 15:14:31 +02:00
dwertent
1ffdb717f7 return list of strings 2021-12-05 15:11:44 +02:00
Bezalel Brandwine
9080603bce integrate host sensor into k8s IMetadata resource map 2021-12-05 15:08:05 +02:00
David Wertenteil
5796ae9084 supporting list of include-namespaces
Fixed issue #247
2021-12-05 13:54:23 +02:00
dwertent
50636e3a7e supporting list of include-namespaces 2021-12-05 13:52:06 +02:00
David Wertenteil
501d4c9dfc Update k8s-interface version 2021-12-05 13:12:46 +02:00
dwertent
84cbc4ae04 Update version 2021-12-05 12:42:07 +02:00
Bezalel Brandwine
cbb2a3e46f go get + build after merge from armosec 2021-12-05 12:28:28 +02:00
Bezbran
493197c073 Merge pull request #11 from armosec/dev
Dev
2021-12-05 12:24:29 +02:00
Bezbran
31a2952101 Merge branch 'dev' into dev 2021-12-05 12:24:10 +02:00
Bezalel Brandwine
acaccc23e8 merge conflicts 1 2021-12-05 12:15:55 +02:00
David Wertenteil
70e339164d Separate offline behavior from yaml input 2021-12-05 09:57:54 +02:00
dwertent
0de5d72d75 Merge remote-tracking branch 'upstream/dev' 2021-12-05 09:54:57 +02:00
dwertent
d604cc7faf update k8sinterface version 2021-12-05 09:50:56 +02:00
dwertent
d843a3e359 set tenant if config not found 2021-12-02 19:18:35 +02:00
dwertent
37586662b3 handle yaml files and armo api behavior 2021-12-02 19:13:05 +02:00
David Wertenteil
193687418f RBAC object sent using pagination mechanism 2021-12-02 10:13:04 +02:00
yiscah
72e6bb9537 send rbac objs in all resources 2021-12-01 21:30:04 +02:00
David Wertenteil
d69e790c61 Supporting verbose flag 2021-12-01 14:49:53 +02:00
dwertent
01d41520d4 initialize mock resourceMap when scanning yamls 2021-12-01 14:12:00 +02:00
dwertent
aea9eb9e01 use mapResource mock when testing 2021-12-01 12:35:09 +02:00
dwertent
26717b13e9 supporting verbose flag 2021-12-01 12:30:16 +02:00
dwertent
5f36417bd9 update ver 2021-11-30 17:06:20 +02:00
dwertent
021ea34814 update k8s package 2021-11-30 15:47:56 +02:00
David Wertenteil
4a08fbdf28 Adding pagination to report 2021-11-30 10:59:30 +02:00
dwertent
268753091d fixed test 2021-11-30 10:49:05 +02:00
dwertent
ec688829b5 support report pagination 2021-11-29 17:24:50 +02:00
Rotem Refael
ec5bf58b0f Merge pull request #242 from YiscahLevySilas1/dev
add comment for isRuleKubescapeVersionCompatible()
2021-11-29 13:06:05 +02:00
Bezalel Brandwine
f877d821f0 in the middle of refactoring 2021-11-28 16:47:44 +02:00
Bezalel Brandwine
6c22cfef1e in the middle of refactoring 2021-11-28 16:47:24 +02:00
Bezalel Brandwine
05305d858b host sensor flag + user input asking 2021-11-28 12:35:21 +02:00
yiscah
e094237bbf add comment for isRuleKubescapeVersionCompatible() 2021-11-28 10:23:47 +02:00
David Wertenteil
77eb52bc51 Working with IMetadata interface 2021-11-28 08:14:00 +02:00
dwertent
c79834cec7 working with IMetadata interface 2021-11-26 00:52:03 +02:00
Bezalel Brandwine
aefc5fded7 kubelet configuration is in kubescape 2021-11-25 17:54:26 +02:00
Bezalel Brandwine
5fd5a5d4fa [host-sensor] first integration in kubescape 2021-11-25 17:43:21 +02:00
David Wertenteil
0368ecf7f3 External object support
* Aggregate rego input
* Display `User` and `Group` in output
* Update dependencies
2021-11-25 14:33:28 +02:00
yiscah
d9ec5dcb56 rule version - kubescape version check 2021-11-25 12:30:02 +02:00
yiscah
030bc6c6b6 handle pretty print external objects 2021-11-25 12:29:05 +02:00
yiscah
c1dd2fe0f4 print warning in local build to work with latest version 2021-11-25 12:27:28 +02:00
Bezalel Brandwine
4e0851868e initial host sensor deployment stage 2021-11-24 15:23:50 +02:00
Bezbran
276178c27c Merge pull request #10 from armosec/dev
Dev
2021-11-23 10:53:41 +02:00
yiscah
3006e6bcbf add isKindToBeGrouped 2021-11-23 09:07:37 +02:00
yiscah
3a50c5686e print externalObjects with their relatedObjects 2021-11-22 20:34:29 +02:00
yiscah
f8eea4d082 use inputaggregator on k8sresources, don't use v0 rules 2021-11-22 11:06:29 +02:00
YiscahLevySilas1
8a42d77990 Merge branch 'armosec:dev' into dev 2021-11-22 11:05:07 +02:00
Rotem Refael
3980d1a9b0 Merge pull request #235 from armosec/dev
* Hot fixes
* Smoke tests
* Update documentation
2021-11-21 10:22:32 +02:00
Rotem Refael
53741ec26e Update README.md 2021-11-21 10:13:31 +02:00
David Wertenteil
c398cf46c9 Comment out policy version check 2021-11-21 09:23:21 +02:00
dwertent
e869ce4a64 comment out policy version check 2021-11-21 08:49:24 +02:00
YiscahLevySilas1
4064be6577 Merge branch 'armosec:dev' into dev 2021-11-18 16:47:58 +02:00
David Wertenteil
1f00cf4151 Fixed stdin support
* Fixed stdin
* Adding smoke tests
2021-11-16 17:30:06 +02:00
dwertent
bae0ca62b8 update smoke testing 2021-11-16 16:03:31 +02:00
dwertent
b7a51a2495 fixed stdin support 2021-11-16 15:57:23 +02:00
Rotem Refael
4f6a3e39d0 Update SAAS link 2021-11-15 21:59:30 +02:00
David Wertenteil
528f6b7402 Fix broken links
#231
2021-11-14 16:42:40 +02:00
David Wertenteil
c252f29e6d Adding basic exceptions documentation
#232 
#80
2021-11-14 14:46:31 +02:00
dwertent
fea84c9652 update opa-utils pkg version 2021-11-14 14:42:10 +02:00
dwertent
9b9940f708 adding exceptions docs 2021-11-14 14:31:53 +02:00
Thibault Le Reste
a34ab17307 fix Kubernetes Hardening Guidance broken links 2021-11-14 13:16:28 +01:00
yiscah
477a3e7263 update nsa url in readme 2021-11-14 08:59:34 +02:00
Rotem Refael
f94c9496df Merge pull request #223 from armosec/dev
Adding features and fixing bugs
2021-11-11 15:08:38 +02:00
lalafi@cyberarmor.io
1c31281b7b add baseScore to controlReport 2021-11-11 13:57:52 +02:00
dwertent
0e5204ecb4 support custom frameworks 2021-11-11 11:15:33 +02:00
David Wertenteil
f3dc6235d7 Merge pull request #225 from Daniel-GrunbergerCA/master
Supporting custom frameworks
2021-11-11 09:43:40 +02:00
Daniel-GrunbergerCA
37cdf1a19e erase repetitive frameworks 2021-11-10 18:57:05 +02:00
Daniel-GrunbergerCA
1fb642c777 scan with custom framework 2021-11-10 18:30:03 +02:00
dwertent
8f791ceb12 Improve readme 2021-11-10 09:38:34 +02:00
dwertent
f40eaa0f56 Merge branch 'dev' 2021-11-10 08:17:51 +02:00
dwertent
cb34d17ba1 fixed merge 2021-11-10 08:01:33 +02:00
dwertent
328ba82007 Merge branch 'master' of github.com:armosec/kubescape 2021-11-10 07:59:05 +02:00
David Wertenteil
010ed1b047 Merge pull request #222 from dwertent/master
Adding json to http headers
2021-11-10 07:55:38 +02:00
dwertent
5a81a77d92 adding json to http headers 2021-11-10 07:53:35 +02:00
David Wertenteil
c7ea10d206 Merge pull request #221 from dwertent/master
Checking latest version
2021-11-09 17:09:01 +02:00
dwertent
a37d00b40a checking latest version 2021-11-09 17:07:06 +02:00
David Wertenteil
0168b768d2 Merge pull request #214 from mboersma/fix-spelling-fail-threshold
Fix spelling in --fail-threshold description
2021-11-09 15:10:17 +02:00
David Wertenteil
9a85b57ba4 Merge pull request #201 from Joibel/fix/spelling
Minor spelling fixes
2021-11-09 15:10:02 +02:00
dwertent
eafece6497 update helm command in readme 2021-11-09 11:18:30 +02:00
dwertent
8f08271664 update cronjob configmap 2021-11-09 11:16:13 +02:00
David Wertenteil
da0271e624 Merge pull request #218 from yonahd/helm_chart
Helm chart for kubescape
2021-11-08 13:17:37 +02:00
Yonah Dissen
94f52fb4ac Documentation for running with docker 2021-11-08 11:52:04 +02:00
David Wertenteil
524c2922a4 Merge pull request #219 from Daniel-GrunbergerCA/master
Support scanning multiple controls
2021-11-08 11:29:50 +02:00
Daniel-GrunbergerCA
0891d64654 comment to run workflow 2021-11-08 10:48:39 +02:00
Daniel-GrunbergerCA
d1c23f7442 scan multiple controls 2021-11-08 10:39:31 +02:00
Daniel-GrunbergerCA
8cbbe35f24 Merge remote-tracking branch 'upstream/dev' 2021-11-08 08:53:27 +02:00
Yonah Dissen
a21e9d706e small changes in helm chart 2021-11-07 21:23:57 +02:00
Yonah Dissen
57160c4d04 add helm chart to deploy kubescape in cluster 2021-11-07 21:17:45 +02:00
Yonah Dissen
8b46a49e23 add helm chart to deploy kubescape in cluster 2021-11-07 21:09:30 +02:00
David Wertenteil
c11ebb49f7 Merge pull request #217 from dwertent/master
Fixed include namespaces
2021-11-07 13:56:56 +02:00
dwertent
e4c3935a1b fixed include ns 2021-11-07 13:50:46 +02:00
David Wertenteil
ade062fdd3 Merge pull request #216 from dwertent/master
support armoBest framework name
2021-11-07 09:23:27 +02:00
dwertent
b0f6357482 support armoBest 2021-11-07 09:13:51 +02:00
Matt Boersma
38a9c11286 Fix spelling in --fail-threshold description 2021-11-05 10:33:06 -06:00
David Wertenteil
0d95f02e60 Merge pull request #213 from dwertent/master
support include namespaces
2021-11-04 12:09:27 +02:00
dwertent
1c30528eea support include ns 2021-11-04 12:06:34 +02:00
David Wertenteil
d1b116d314 Merge pull request #210 from dwertent/master
Submit support
2021-11-03 17:31:23 +02:00
dwertent
9d20fd41a8 fixed rbac submit 2021-11-03 17:28:37 +02:00
dwertent
54648bb973 update opa pkg 2021-11-03 15:28:42 +02:00
dwertent
fc4edb12f9 adding stdout to smoke tests 2021-11-02 18:59:34 +02:00
dwertent
9a1b8d7ce2 support submit 2021-11-02 16:14:09 +02:00
dwertent
6909975503 controls support yaml inputs 2021-11-02 10:14:39 +02:00
David Wertenteil
5d94bd990a Merge pull request #208 from dwertent/master
Adding smoke testing and support inputs for controls
2021-11-01 17:22:31 +02:00
Ben Hirschberg
1b2514e3ec git push origin master; Merge branch 'armosec-master' 2021-11-01 16:02:51 +02:00
Ben Hirschberg
0da4f40b48 merge 2021-11-01 16:02:23 +02:00
dwertent
67c8719f34 adding smoke tests to PR 2021-11-01 14:04:20 +02:00
dwertent
d5b60c6ac8 update config api 2021-11-01 13:52:40 +02:00
dwertent
a99d2e9e26 remove scan 2021-11-01 12:41:54 +02:00
dwertent
5c7d89cb9e use command 2021-11-01 11:55:54 +02:00
dwertent
ae7810f0d3 support input from file 2021-11-01 11:44:07 +02:00
Yonah Dissen
5a90dc46f0 fix version for cli in docker image 2021-11-01 09:17:46 +02:00
Yonah Dissen
294f886588 fix version for cli in docker image 2021-10-31 17:50:55 +02:00
dwertent
17aec665cf updated tests 2021-10-31 15:31:23 +02:00
dwertent
959b25e8b7 adding smoke tests 2021-10-31 15:05:22 +02:00
dwertent
9fd2bf3480 Merge remote-tracking branch 'upstream/dev' 2021-10-31 14:38:36 +02:00
dwertent
7b061a4e51 update opa pkg 2021-10-31 14:38:13 +02:00
David Wertenteil
4fcd89390b Merge pull request #206 from Joibel/feature/prometheus
Add a prometheus output format
2021-10-31 10:57:49 +02:00
dwertent
667ffe9cd3 Merge remote-tracking branch 'prometheus/feature/prometheus' 2021-10-31 08:58:57 +02:00
dwertent
6f4086cd8c Merge branch 'master' of github.com:armosec/kubescape 2021-10-31 08:58:44 +02:00
dwertent
2a45a1a400 support controls input 2021-10-28 16:29:28 +03:00
David Wertenteil
eee201de1e Merge pull request #205 from dwertent/master
Adding cronJob support doc
2021-10-28 11:30:17 +03:00
dwertent
6be24bd22a change repeatedly to periodically 2021-10-28 10:21:25 +03:00
dwertent
ca927dec30 update naming convention 2021-10-28 10:05:30 +03:00
dwertent
3a78ef46a3 Merge remote-tracking branch 'upstream/dev' 2021-10-28 09:40:26 +03:00
David Wertenteil
bdb1cd0905 Merge pull request #199 from Daniel-GrunbergerCA/master
Scan with multiple frameworks/control support
2021-10-28 09:35:07 +03:00
dwertent
ffb556a637 update readme 2021-10-28 09:15:22 +03:00
dwertent
40acfb5e9d Adding cronJob doc 2021-10-28 09:10:37 +03:00
Daniel-GrunbergerCA
de8bcfa0d2 enhance help msgs 2021-10-27 14:44:25 +03:00
Daniel-GrunbergerCA
9439f407da add env var to not check latest release 2021-10-27 13:39:03 +03:00
Daniel-GrunbergerCA
5095e62961 support scanning multiple frameworks from multiple files 2021-10-27 13:02:45 +03:00
Daniel-GrunbergerCA
3301907864 print only one table for controls & enhance help msg 2021-10-27 10:25:26 +03:00
Daniel-GrunbergerCA
151175c40f read single control from framework file 2021-10-27 08:49:40 +03:00
Daniel-GrunbergerCA
234d4fa537 Merge remote-tracking branch 'upstream/dev' 2021-10-27 08:27:21 +03:00
Rotem Refael
f384e8a6e3 Merge pull request #203 from armosec/cluster-name-issue
adopt cluster name (HotFix)
2021-10-26 20:51:42 +03:00
dwertent
66068757e1 update cluster name in mock struct 2021-10-26 20:39:18 +03:00
dwertent
8a7cda5dd1 adopt cluster name 2021-10-26 20:27:33 +03:00
Alan Clucas
8e67104ba4 Add prometheus to readme 2021-10-26 16:23:05 +01:00
Alan Clucas
0c9da9ddc8 Add a prometheus metrics style output
Output per-control results and also per-object counts

This can lead to running this as a service that prometheus can collect from
2021-10-26 16:19:35 +01:00
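
The commit above adds a Prometheus-style output that reports per-control and per-object counts. A hedged Go sketch of emitting Prometheus text-exposition lines per control; the metric and label names, and the control struct, are invented for illustration and are not taken from the actual output format:

```go
package main

import "fmt"

// control is a hypothetical per-control summary; the real report structures
// used by the tool are not reproduced here.
type control struct {
	name   string
	failed int
	passed int
}

func main() {
	controls := []control{
		{name: "Allowed hostPath", failed: 3, passed: 12},
		{name: "Privileged container", failed: 1, passed: 14},
	}
	// Prometheus text exposition format: one `metric{labels} value` sample
	// per line, which a Prometheus server can scrape when this runs as a service.
	for _, c := range controls {
		fmt.Printf("kubescape_control_failed_resources{control=%q} %d\n", c.name, c.failed)
		fmt.Printf("kubescape_control_passed_resources{control=%q} %d\n", c.name, c.passed)
	}
}
```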
Alan Clucas
a5ef6aa126 Minor spelling fixes 2021-10-26 15:19:05 +01:00
Rotem Refael
c133b7a2c2 Merge pull request #191 from armosec/dev
Hot fixes related to submit & account options
2021-10-26 14:10:23 +03:00
Daniel-GrunbergerCA
a0ca68cc41 update json and junit for multiple frameworks 2021-10-26 13:55:12 +03:00
Daniel-GrunbergerCA
41cae0bc93 Merge remote-tracking branch 'upstream/dev' 2021-10-26 13:23:00 +03:00
David Wertenteil
b4198fde8c Merge pull request #198 from dwertent/master
update pkg tag
2021-10-26 12:28:02 +03:00
dwertent
bd24f35738 update tag 2021-10-26 12:26:44 +03:00
Daniel-GrunbergerCA
6fcbb757b5 Merge remote-tracking branch 'upstream/dev' 2021-10-25 17:41:15 +03:00
Daniel-GrunbergerCA
3b8825e5d2 scan multiple frameworks and controls 2021-10-25 17:41:04 +03:00
Rotem Refael
5cf3244918 Merge pull request #192 from dwertent/master
Update multiple score
2021-10-25 17:40:19 +03:00
dwertent
934c9ccc8b fixed lowest 2021-10-25 15:51:23 +03:00
dwertent
41dfdfd1e8 support more than score 2021-10-25 15:14:31 +03:00
David Wertenteil
427fb59c99 Merge pull request #190 from dwertent/master
Fixed submit and url
2021-10-25 12:08:23 +03:00
David Wertenteil
ae825800f6 Merge pull request #189 from Daniel-GrunbergerCA/master
Update tag for newest release of k8s-interface
2021-10-25 12:08:06 +03:00
dwertent
d72700acf6 update submit 2021-10-25 12:05:51 +03:00
dwertent
3310a6a26f Merge remote-tracking branch 'upstream/dev' 2021-10-25 11:55:30 +03:00
dwertent
740b5aa772 add full url 2021-10-25 11:55:08 +03:00
Daniel-GrunbergerCA
04b55e764a fix k8s-interface pkg tag 2021-10-25 10:44:43 +03:00
Daniel-GrunbergerCA
beb4062bb1 update tag 2021-10-25 09:29:43 +03:00
David Wertenteil
5d4cd4acdc Merge pull request #188 from dwertent/master
Use interfaces
2021-10-25 09:21:43 +03:00
dwertent
aec8198131 adding score to interface 2021-10-25 08:41:15 +03:00
dwertent
0a850e47df use interfaces 2021-10-24 17:51:03 +03:00
Rotem Refael
5544820c5e Merge pull request #187 from armosec/dev
Fixed junit counter
2021-10-21 16:23:09 +03:00
dwertent
4f466d517a fixed junit counter 2021-10-21 16:05:51 +03:00
Bezbran
cd0f20ca2f Merge pull request #9 from armosec/master
Dev
2021-10-21 14:38:17 +03:00
Ben Hirschberg
b3661848dc Merge pull request #186 from armosec/dev
merge readme & install fix
2021-10-21 14:08:57 +03:00
Rotem Refael
548201c256 Merge pull request #185 from dwertent/master
Update readme with new features
2021-10-21 13:46:33 +03:00
dwertent
a54e5d9f8b update readme with new features 2021-10-21 13:25:41 +03:00
dwertent
536257afa1 fixed version in build.py 2021-10-21 12:48:52 +03:00
dwertent
5a71c3270a Merge branch 'master' of github.com:armosec/kubescape 2021-10-21 11:32:40 +03:00
dwertent
d194dd173f fixed image and entrypoint 2021-10-21 11:32:20 +03:00
Ben Hirschberg
be03a9e984 Merge pull request #173 from armosec/dev
Dev
2021-10-21 10:33:26 +03:00
dwertent
a90177e7c0 update go mod file 2021-10-21 09:56:05 +03:00
Rotem Refael
be9e8ca47d Merge pull request #184 from Daniel-GrunbergerCA/master
Implementing single control scan (from file and from regolibrary) & download single control
2021-10-21 08:33:26 +03:00
Daniel-GrunbergerCA
eb9fe85c75 add error handling 2021-10-20 17:45:11 +03:00
Daniel-GrunbergerCA
47183c405f add some comments 2021-10-20 17:29:25 +03:00
Daniel-GrunbergerCA
2725923b9b case insensitive 2021-10-20 16:58:09 +03:00
Daniel-GrunbergerCA
f6c03ed7a2 fix offline support 2021-10-20 16:50:59 +03:00
Daniel-GrunbergerCA
76b5548216 Merge remote-tracking branch 'upstream/dev' 2021-10-20 16:29:56 +03:00
Daniel-GrunbergerCA
cc57a34a32 run control scan form file 2021-10-20 16:18:53 +03:00
Bezbran
f7099b62e6 Merge pull request #182 from Bezbran/dev
Add API version to report structure
2021-10-20 16:05:46 +03:00
Bezalel Brandwine
093ee8916e Add API version to report structure 2021-10-20 16:04:10 +03:00
David Wertenteil
ac0259157b Merge pull request #181 from dwertent/master
Update junit results
2021-10-20 15:57:34 +03:00
dwertent
9cb937798f update readme 2021-10-20 14:53:19 +03:00
dwertent
11d4926c85 update junit results 2021-10-20 14:49:27 +03:00
Daniel-GrunbergerCA
836211ae2b update go mod 2021-10-20 14:39:33 +03:00
Daniel-GrunbergerCA
fddf3d3f58 Merge remote-tracking branch 'upstream/dev' 2021-10-20 14:15:46 +03:00
Daniel-GrunbergerCA
b036d1079e support control scan with new api 2021-10-20 14:15:27 +03:00
Rotem Refael
137c39e918 Merge pull request #180 from AvnerTzurArmo/dev
add cronjob sample for kubescape
2021-10-20 13:57:26 +03:00
Avner Tzur
8778d022cf add cronjob sample for kubescape 2021-10-20 10:59:30 +03:00
Bezbran
043bdbacec Merge pull request #8 from armosec/dev
Dev
2021-10-20 10:49:28 +03:00
Daniel-GrunbergerCA
e0e19b0258 Merge remote-tracking branch 'upstream/dev' 2021-10-20 09:05:37 +03:00
David Wertenteil
c72e0f790a Merge pull request #179 from dwertent/master
fallback get customer guid from configMap
2021-10-19 16:30:47 +03:00
dwertent
87e79110a2 fallback customer guid from configMap 2021-10-19 16:25:54 +03:00
David Wertenteil
faeae1af60 Merge pull request #178 from dwertent/master
adding summary changes
2021-10-19 15:09:05 +03:00
dwertent
b371fbad01 adding summary 2021-10-19 15:08:00 +03:00
David Wertenteil
90831e153d Merge pull request #177 from dwertent/master
Distinct exclude and failed resources
2021-10-19 14:24:10 +03:00
dwertent
009d8275c1 Distinct exclude and failed resources 2021-10-19 14:22:09 +03:00
Daniel-GrunbergerCA
05c88e0ffc Merge remote-tracking branch 'upstream/dev' 2021-10-18 17:40:11 +03:00
Daniel-GrunbergerCA
7d12552932 add option to run single control 2021-10-18 17:39:35 +03:00
Rotem Refael
b761505bb1 Merge pull request #172 from Moshe-Rappaport-CA/master
Support scanning yamls from gitHub repo
2021-10-18 14:51:42 +03:00
moshep
63367f4f31 support yamls from repo 2021-10-18 14:29:44 +03:00
Rotem Refael
6f9d6b4af3 Update README.md 2021-10-18 14:16:29 +03:00
Rotem Refael
6ed8287b01 Update README.md 2021-10-18 14:15:45 +03:00
Rotem Refael
d948e20682 Change readme text 2021-10-18 14:12:57 +03:00
Rotem Refael
42929dac58 Merge pull request #171 from armosec/dev
Fix workflow for building image
2021-10-18 11:59:29 +03:00
Rotem Refael
74449c64a2 Merge pull request #170 from Daniel-GrunbergerCA/master
Fix env var name
2021-10-18 11:26:18 +03:00
Daniel-GrunbergerCA
0bd164c69e fix workflow for dev 2021-10-18 11:15:55 +03:00
Daniel-GrunbergerCA
d44c082134 fix env var name 2021-10-18 11:11:32 +03:00
Daniel-GrunbergerCA
d756b9bfe4 check repo name 2021-10-18 11:04:57 +03:00
Daniel-GrunbergerCA
6144050212 echo repo 2021-10-18 11:02:54 +03:00
Daniel-GrunbergerCA
b5fe456b0d fix test 2021-10-18 10:59:41 +03:00
Daniel-GrunbergerCA
37791ff391 testing workflow output 2021-10-18 10:57:04 +03:00
Daniel-GrunbergerCA
c2d99163a6 test output for workflow 2021-10-18 10:55:13 +03:00
Daniel-GrunbergerCA
d948353b99 test env var output for build 2021-10-18 10:51:50 +03:00
Daniel-GrunbergerCA
2649cb75f6 Revert "check env var output"
This reverts commit 7c8da4a4b9.
2021-10-18 10:51:28 +03:00
Daniel-GrunbergerCA
7c8da4a4b9 check env var output 2021-10-18 10:49:53 +03:00
Rotem Refael
b72f5d75f7 Merge pull request #167 from armosec/dev
Update resource count
2021-10-18 10:34:45 +03:00
Rotem Refael
947826a764 Merge pull request #168 from Daniel-GrunbergerCA/master
Build docker image only for original repo (not for forks)
2021-10-18 10:31:22 +03:00
David Wertenteil
906c69a86d Merge pull request #169 from armosec/rotemamsa-patch-1
Delete backendconnectormethods.go
2021-10-18 10:28:19 +03:00
Rotem Refael
9c65aadcc7 Delete backendconnectormethods.go 2021-10-18 10:26:59 +03:00
dwertent
70a9a7bbbd Update resource count 2021-10-18 10:23:06 +03:00
Daniel-GrunbergerCA
09879e00ba build image only for original repo 2021-10-18 10:14:42 +03:00
Daniel-GrunbergerCA
5cbb7d940d Revert "fix build again"
This reverts commit 050976d7a3.
2021-10-18 10:12:42 +03:00
Daniel-GrunbergerCA
050976d7a3 fix build again 2021-10-18 10:05:58 +03:00
Daniel-GrunbergerCA
6a96d9f8b5 fix build 2021-10-18 09:59:03 +03:00
Daniel-GrunbergerCA
8c55dfbcf6 fix workflow 2021-10-18 09:52:26 +03:00
Daniel-GrunbergerCA
78c5b49b5a create image only for original repo 2021-10-18 09:49:50 +03:00
dwertent
c33ca04a4f update dev build workflow 2021-10-18 09:06:14 +03:00
David Wertenteil
f12e66d315 Merge pull request #166 from dwertent/master
update package version
2021-10-18 09:02:25 +03:00
David Wertenteil
8e4bb36df8 Merge pull request #165 from AvnerTzurArmo/dev
Add Docker image builds
2021-10-18 09:00:27 +03:00
dwertent
034412f6fe update package version 2021-10-18 08:59:41 +03:00
Avner Tzur
88d83ba72d fix typo 2021-10-18 08:26:15 +03:00
Avner Tzur
829e7a33aa fix typo 2021-10-18 08:25:25 +03:00
Avner Tzur
1dcde1538e fix typo 2021-10-17 16:50:59 +03:00
Avner Tzur
b876cd0975 use armosec repo name 2021-10-17 16:49:01 +03:00
Avner Tzur
a06d11dc17 fix repo env usage 2021-10-17 16:38:00 +03:00
Avner Tzur
b1f5cd45c4 use repo as secret 2021-10-17 16:13:54 +03:00
Avner Tzur
2c44c0e1f0 fix Docker build and tags 2021-10-17 15:59:47 +03:00
Avner Tzur
37c429b264 add Docker container build 2021-10-17 15:55:06 +03:00
Avner Tzur
82994ce754 fix typo 2021-10-17 15:43:38 +03:00
Avner Tzur
621f64c363 fix image name 2021-10-17 15:25:50 +03:00
Benyamin Hirschberg
5591bf09d9 Merge branch 'armosec-master' 2021-10-15 22:24:04 +03:00
Benyamin Hirschberg
da94651656 merged original armosec 2021-10-15 22:23:47 +03:00
David Wertenteil
419a77f144 Merge pull request #164 from dwertent/master
using public packages
2021-10-14 17:36:37 +03:00
dwertent
f043358a59 merging to dev 2021-10-14 17:32:56 +03:00
dwertent
24b17a8c27 update counters 2021-10-14 17:12:10 +03:00
Rotem Refael
26a64b788b Merge pull request #163 from AvnerTzurArmo/dev
update Dockerfile using Python build script
2021-10-14 15:54:56 +03:00
Rotem Refael
89a05d247b Merge pull request #159 from armosec/dev
test help message after build
2021-10-14 15:43:28 +03:00
Avner Tzur
718d549bb3 update Dockerfile using Python build script 2021-10-14 15:23:35 +03:00
Bezbran
54a6a8324a Merge pull request #7 from armosec/dev
Dev
2021-10-14 14:54:14 +03:00
Bezbran
7faf24cf88 build python3 --version && python3 build.py 2021-10-14 14:48:47 +03:00
Bezbran
7bebc7a814 master PR python 3 for build 2021-10-14 14:48:16 +03:00
Bezbran
8647a087dd Merge pull request #162 from Bezbran/dev
python3 for build
2021-10-14 14:40:25 +03:00
Bezalel Brandwine
78670665c4 build.py supports only python 3.7 and above 2021-10-14 14:39:43 +03:00
Bezalel Brandwine
1dd587cd83 try python3 2021-10-14 14:30:58 +03:00
Bezalel Brandwine
d69cccf821 print python version 2021-10-14 14:24:49 +03:00
Bezbran
8ae3b9c28f Merge pull request #161 from Bezbran/dev
adapt test_cli_prints to windows
2021-10-14 14:17:42 +03:00
Bezalel Brandwine
9b6ad102b1 adapt test_cli_prints to windows 2021-10-14 14:16:12 +03:00
Bezbran
e6787b77fb Merge pull request #160 from Bezbran/dev
files tests passed locally on windows
2021-10-14 14:10:35 +03:00
Bezalel Brandwine
46449045a6 files tests passed locally on windows 2021-10-14 14:04:55 +03:00
Bezalel Brandwine
d81984b4c6 try to adapt path to windows 2021-10-14 13:28:10 +03:00
Bezbran
22b463f306 Merge pull request #158 from Bezbran/dev
test help message after build
2021-10-14 12:13:32 +03:00
Bezalel Brandwine
475e45b848 add CLI prints checks skeleton 2021-10-14 12:07:51 +03:00
Rotem Refael
32fac97d21 Merge pull request #157 from armosec/dev
fix  --help flag
2021-10-14 11:47:22 +03:00
Bezalel Brandwine
f9f32a1062 comment in file loading tests 2021-10-14 11:43:11 +03:00
Bezbran
c90c3bbd05 test help message 2021-10-14 11:32:59 +03:00
dwertent
ea42d9a061 remove exceptions from pkg 2021-10-14 11:25:53 +03:00
Bezbran
cbae9a087b Merge pull request #6 from armosec/dev
Dev
2021-10-14 10:56:39 +03:00
Bezbran
4da199c43e Merge pull request #156 from Bezbran/dev
fix  --help flag
2021-10-14 10:55:43 +03:00
Bezalel Brandwine
00d8660a91 fix --help flag 2021-10-14 10:46:42 +03:00
Rotem Refael
76c2f6afe0 Merge pull request #155 from armosec/dev
Fixed #148
2021-10-14 10:20:35 +03:00
dwertent
efa53bd83c fixed loop 2021-10-14 09:13:07 +03:00
dwertent
01f6a1e1c0 fixed counters 2021-10-13 20:41:30 +03:00
dwertent
1d1344ebc1 fixed warning counter 2021-10-13 20:33:26 +03:00
Rotem Refael
9f78703dee Merge pull request #154 from armosec/dev
Support for --environment
2021-10-13 17:05:52 +03:00
Bezbran
1a0e96338e Merge pull request #5 from armosec/dev
Merge back pull request #153 from Bezbran/dev
2021-10-13 16:58:31 +03:00
Bezbran
eff4690e0e Merge pull request #153 from Bezbran/dev
add support for --environment=dev or customized URLs
2021-10-13 16:57:39 +03:00
Bezbran
266480c234 Merge branch 'dev' into dev 2021-10-13 16:57:14 +03:00
Bezalel Brandwine
a922d01005 add support for --environment=dev or customized URLs 2021-10-13 16:45:06 +03:00
dwertent
b053b84197 use packages 2021-10-13 15:56:43 +03:00
David Wertenteil
0d9711c8bb Merge pull request #151 from armosec/dev
Fixed issues
2021-10-13 13:02:52 +03:00
David Wertenteil
edab68f4fb Merge pull request #152 from Bezbran/dev
fix download framework command
2021-10-13 12:35:26 +03:00
Bezalel Brandwine
08f04e19ef fix download framework (create dir if necessary, lower case "download") 2021-10-13 12:24:21 +03:00
David Wertenteil
e62234a6ac Merge pull request #150 from dwertent/master
Fixed issues #149 #76
2021-10-13 12:07:12 +03:00
dwertent
5499c7a96f fixed #149 2021-10-13 12:03:57 +03:00
dwertent
7f9c5c25ae fixed #76 2021-10-13 11:58:22 +03:00
Bezbran
b1276d56f7 Merge pull request #4 from armosec/dev
Dev
2021-10-13 09:11:24 +03:00
Rotem Refael
b53bf320a6 Merge pull request #141 from armosec/dev
Update default upload of results to be opt-in
2021-10-12 18:31:16 +03:00
Daniel Grunberger
81a4c168ed Merge pull request #147 from Daniel-GrunbergerCA/master
fix warning numbers of resources
2021-10-12 18:13:51 +03:00
Daniel-GrunbergerCA
512a1a806e fix warning numbers of resources 2021-10-12 18:12:49 +03:00
David Wertenteil
c95ef05177 Merge pull request #146 from dwertent/master
update flag to keep-local
2021-10-12 18:06:02 +03:00
Daniel Grunberger
563bd8a6a3 Merge pull request #145 from Daniel-GrunbergerCA/master
fix number of resources
2021-10-12 17:41:48 +03:00
dwertent
b444542f4d update flag to keep-local 2021-10-12 17:40:56 +03:00
Daniel-GrunbergerCA
6eded41eee fix number of resources 2021-10-12 17:08:03 +03:00
YiscahLevySilas1
de91ce182d Merge pull request #144 from YiscahLevySilas1/dev
add controlID field, 'id' to be deprecated
2021-10-12 15:07:10 +03:00
dwertent
afc7f85460 adding demo link to readme 2021-10-12 12:39:26 +03:00
yiscah
c1b4d7de39 add controlID field, 'id' to be deprecated 2021-10-12 10:48:46 +03:00
dwertent
cde5b83bca update summary 2021-10-12 10:26:30 +03:00
Bezbran
f00106a502 Merge pull request #143 from Bezbran/dev
Run release workflow just for merged PRs to master
2021-10-12 10:17:26 +03:00
Bezalel Brandwine
3ae2742717 remove workflow dependency 2021-10-12 10:05:55 +03:00
David Wertenteil
8deac19945 Merge pull request #142 from dwertent/master
Update default unregistered behavior
2021-10-12 10:00:46 +03:00
dwertent
2ad469a5f4 update summary image 2021-10-12 09:57:46 +03:00
dwertent
c67b111c77 do not submit unregistered user 2021-10-12 09:51:14 +03:00
Bezalel Brandwine
ee770e7429 add github actions workflow for opened PRs to master 2021-10-12 09:49:26 +03:00
Bezalel Brandwine
197a3adf6a trigger release only for push to master to avoid release on closed PRs without merging 2021-10-12 09:38:52 +03:00
Bezbran
269d39497b Merge pull request #1 from armosec/dev
Dev
2021-10-12 09:29:28 +03:00
David Wertenteil
0c3a7ac02b Merge pull request #140 from dwertent/master
split setCustomer func
2021-10-12 08:10:02 +03:00
dwertent
07443548c9 split setCustomer func 2021-10-12 07:59:41 +03:00
Ben Hirschberg
e561f78ada Merge pull request #133 from clfs/clfs/bugfixes
Add missing return
2021-10-11 23:22:29 +03:00
Ben Hirschberg
bb88272251 Merge pull request #129 from ferhaty/dev
removed references to go mod tidy
2021-10-11 23:21:08 +03:00
Ferhat Yildiz
ce2f3dafae removed references to go mod tidy since there is a go.sum file now 2021-10-11 17:02:21 +02:00
David Wertenteil
488c67056c Merge pull request #138 from dwertent/master
Update readme
2021-10-11 16:32:25 +03:00
dwertent
351e51208d Merge remote-tracking branch 'upstream/dev' 2021-10-11 16:29:43 +03:00
dwertent
0c79df7299 restore id after local run, update readme 2021-10-11 16:29:26 +03:00
David Wertenteil
b9a5c5af35 Merge pull request #137 from dwertent/master
Update default upload of results to be opt-in
2021-10-11 15:04:07 +03:00
dwertent
59741b84d1 Update default upload of results to be opt-in 2021-10-11 15:00:23 +03:00
David Wertenteil
25efcdf750 Merge pull request #135 from Daniel-GrunbergerCA/master
Improve help msgs
2021-10-10 18:50:46 +03:00
Daniel-GrunbergerCA
49dfa6c2a6 Merge remote-tracking branch 'upstream/dev' 2021-10-10 15:50:44 +03:00
Daniel-GrunbergerCA
659647353e fix return code in help msg 2021-10-10 15:43:08 +03:00
Daniel-GrunbergerCA
0eba51c80b beautify help msgs 2021-10-10 14:38:14 +03:00
David Wertenteil
35996d11f4 Merge pull request #134 from Daniel-GrunbergerCA/master
fix TestConvertLabelsToString test
2021-10-10 14:06:03 +03:00
Daniel-GrunbergerCA
e0e0a811eb Add supported frameworks to help msg 2021-10-10 14:01:35 +03:00
Daniel-GrunbergerCA
76f400d0aa fix TestConvertLabelsToString test 2021-10-10 13:06:51 +03:00
Calvin Figuereo-Supraner
8e950e0f54 Add missing return 2021-10-09 21:25:01 -07:00
Rotem Refael
0adb9dd540 Merge pull request #126 from armosec/dev 2021-10-07 19:18:10 +03:00
David Wertenteil
5825555534 Merge pull request #127 from dwertent/master
Adding unittest to workflow
2021-10-07 18:24:56 +03:00
dwertent
306a8c6201 Merge remote-tracking branch 'upstream/dev' 2021-10-07 18:19:00 +03:00
dwertent
123b620085 Merge branch 'master' of github.com:armosec/kubescape into dev 2021-10-07 18:14:29 +03:00
dwertent
831e7814be ignore files 2021-10-07 18:12:21 +03:00
Ben Hirschberg
7c85199ac2 Merge pull request #106 from Juneezee/go1.17
build: upgrade to Go 1.17
2021-10-07 18:05:42 +03:00
dwertent
efec8e4f2f adding unittest to workflow 2021-10-07 18:02:44 +03:00
Avner Tzur
af4faef9cf Add macOS brew installation 2021-10-07 17:30:12 +03:00
dwertent
90052ad9e3 Merge branch 'master' of github.com:armosec/kubescape into dev 2021-10-07 15:46:50 +03:00
Rotem Refael
35c7b16e4a Update build.yaml
change report link
2021-10-07 15:18:37 +03:00
David Wertenteil
7ac75acfc2 Merge pull request #125 from dwertent/master
Update display color
2021-10-07 15:11:43 +03:00
dwertent
22662fddcd update display color 2021-10-07 15:03:48 +03:00
David Wertenteil
6df1cdf5d8 Merge pull request #124 from dwertent/master
Changing warning to excluded in results
2021-10-07 14:00:19 +03:00
dwertent
2287c51d73 changing warning to excluded 2021-10-07 13:59:20 +03:00
David Wertenteil
372942a14f Merge pull request #122 from dwertent/master
Adding go sum
2021-10-07 10:18:23 +03:00
dwertent
6362246da4 Merge remote-tracking branch 'upstream/dev' 2021-10-07 10:11:41 +03:00
dwertent
9986d69215 adding go sum 2021-10-07 10:11:14 +03:00
Eng Zer Jun
21644e5cba refactor: move from io/ioutil to io and os package
The io/ioutil package has been deprecated as of Go 1.16, see
https://golang.org/doc/go1.16#ioutil. This commit replaces the existing
io/ioutil functions with their new definitions in io and os packages.

Signed-off-by: Eng Zer Jun <engzerjun@gmail.com>
2021-10-07 00:23:09 +08:00
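A minimal, hypothetical Go sketch of the migration this commit describes (the file name and contents below are illustrative, not taken from the Kubescape sources): the deprecated io/ioutil helpers map one-to-one onto functions that now live in the os and io packages.
```go
package main

import (
	"fmt"
	"log"
	"os"
)

func main() {
	// Formerly ioutil.WriteFile; identical signature, now in the os package.
	if err := os.WriteFile("example.txt", []byte("hello"), 0o644); err != nil {
		log.Fatal(err)
	}

	// Formerly ioutil.ReadFile.
	data, err := os.ReadFile("example.txt")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(data))

	// Other common replacements: ioutil.TempDir -> os.MkdirTemp,
	// ioutil.TempFile -> os.CreateTemp, ioutil.Discard -> io.Discard.
}
```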
Eng Zer Jun
ad93217bf6 build: upgrade to Go 1.17
Signed-off-by: Eng Zer Jun <engzerjun@gmail.com>
2021-10-07 00:23:04 +08:00
Ben Hirschberg
ca49bc1ddd Merge pull request #121 from armosec/dev
Fixed issues, update windows support
2021-10-06 18:20:37 +03:00
David Wertenteil
1229c73ddc Merge pull request #120 from dwertent/master
Fixing exception cluster name support
2021-10-06 18:16:51 +03:00
dwertent
7416202555 adding mitre example 2021-10-06 18:14:24 +03:00
dwertent
a0ba683eea Merge remote-tracking branch 'upstream/dev' 2021-10-06 18:12:48 +03:00
dwertent
89654eb26f update exception cluster name compare 2021-10-06 18:12:33 +03:00
Bezbran
9d1736a141 Typo in readme built.py -->> build.py 2021-10-06 15:22:38 +03:00
David Wertenteil
eaa4ed3da5 Merge pull request #118 from xdavidel/master
Update windows support in build
2021-10-06 15:10:02 +03:00
David Delarosa
0db3f65312 Merge remote-tracking branch 'upstream/dev' 2021-10-06 15:05:07 +03:00
David Wertenteil
1ea0a3ccc5 Merge pull request #117 from dwertent/master
load configMap configuration before file configuration
2021-10-06 14:24:30 +03:00
dwertent
16cd30bea8 load configMap before file 2021-10-06 14:06:36 +03:00
David Delarosa
075ba4c603 Comment out this windows exe
The build workflow relay on the same binary name - so we won't change
that right now.
2021-10-06 11:23:56 +03:00
David Delarosa
2d898822df Merge branch 'dev-win' 2021-10-06 10:36:11 +03:00
David Delarosa
25b8ec82e8 Cannot use both 'uses' and 'run' 2021-10-06 10:21:29 +03:00
David Delarosa
44b74e2681 Change workflow to use build.py script 2021-10-05 17:28:38 +03:00
David Wertenteil
485e171008 Merge pull request #116 from dwertent/master
Revert python script build
2021-10-05 17:17:13 +03:00
dwertent
c12eb83b4b remove comment 2021-10-05 17:12:43 +03:00
dwertent
84060e7823 revert python build 2021-10-05 17:07:30 +03:00
David Wertenteil
d80d50b59d Merge pull request #115 from dwertent/master
fixed in cluster crash - #114
2021-10-05 16:56:00 +03:00
dwertent
f11f054fea offline not new feature 2021-10-05 16:51:03 +03:00
dwertent
4c1d491d5a Merge remote-tracking branch 'upstream/dev' 2021-10-05 16:43:54 +03:00
dwertent
2b67cc520c windows install support 2021-10-05 16:43:05 +03:00
dwertent
2a5712bd3c fixed in cluster crash 2021-10-05 16:11:41 +03:00
Ben Hirschberg
ccbc11408b Merge pull request #109 from armosec/dev
Update master with fixed issues #95 #96
2021-10-05 09:25:35 +03:00
Bezbran
22bae315be Merge pull request #112 from Bezbran/dev
Add discord using github pages
2021-10-05 09:23:17 +03:00
Bezalel Brandwine
740ab7cb46 point readme discord to github pages 2021-10-05 09:21:58 +03:00
Bezalel Brandwine
cdaa9aa1b0 discord UI beautify 2021-10-05 09:18:39 +03:00
Bezbran
875f82dcbb Merge pull request #4 from armosec/dev
Dev from org
2021-10-05 09:18:03 +03:00
Ben Hirschberg
e8f6bdd64a Merge pull request #108 from YiscahLevySilas1/master
added mitre to supported frameworks
2021-10-04 17:06:00 +03:00
yiscah
25247491ee add mitre to supportedFrameworks, accept upper/lowercase "MITRE" 2021-10-04 16:56:18 +03:00
Ben Hirschberg
57ae3dc3a7 Merge pull request #107 from dwertent/master
Fixed #95, #96
2021-10-04 16:53:48 +03:00
dwertent
27d00b58d7 Adding star to readme, support wild labels 2021-10-04 15:15:18 +03:00
Bezalel Brandwine
263821ce67 landing page in docs dir 2021-10-04 14:57:11 +03:00
Bezalel Brandwine
3c12247b00 add index html as GitHub pages landing page 2021-10-04 14:54:04 +03:00
Bezbran
56d41596f6 Merge pull request #3 from armosec/dev
Dev merge
2021-10-04 14:32:51 +03:00
Avner Tzur
f0cd1965b4 update git repo URL using https 2021-10-04 13:51:47 +03:00
Ben Hirschberg
86b6a1d88a complete zip release and install with hash 2021-10-04 08:36:53 +03:00
Ben Hirschberg
f903e13d7b adding hash to release zip 2021-10-04 08:19:44 +03:00
Ben Hirschberg
015206a760 release id 2021-10-03 22:21:54 +03:00
Ben Hirschberg
0aff119260 fix bad reference 2021-10-03 22:13:00 +03:00
Ben Hirschberg
ddb8608501 moving up the publishing step 2021-10-03 22:11:20 +03:00
Ben Hirschberg
0d75a273f0 post release event 2021-10-03 22:06:27 +03:00
Ben Hirschberg
4f07d23dd6 moving to post release 2021-10-03 21:50:35 +03:00
Ben Hirschberg
79baa0d66e hashing 2021-10-03 21:36:37 +03:00
Ben Hirschberg
d5ca49ef9b removing not needed plugin 2021-10-03 21:23:00 +03:00
Ben Hirschberg
536d7fb3c5 try another release plugin 2021-10-03 21:20:25 +03:00
Ben Hirschberg
f66fd1f38c hash for release 2021-10-03 21:09:53 +03:00
dwertent
3a4c06a818 fixed issue #95 2021-10-03 17:22:50 +03:00
David Wertenteil
fd8dd7ab8a Merge pull request #100 from slashben/master
Adding discord links to readme
2021-10-03 15:26:39 +03:00
Ben Hirschberg
0434f6a935 Merge pull request #99 from taylor/patch-1
Fix link to hardening guide in README
2021-10-03 13:39:05 +03:00
Benyamin Hirschberg
b51a26442c returning examples to the main readme 2021-10-03 09:16:18 +03:00
Benyamin Hirschberg
6462ac0f0e moving options 2021-10-03 09:08:03 +03:00
Benyamin Hirschberg
fc01dbbac9 discord banner 2021-10-03 08:10:29 +03:00
Benyamin Hirschberg
df8576c066 Use a better discord logo 2021-10-03 08:07:42 +03:00
Benyamin Hirschberg
ca8adf28cf Discord invitation 2021-10-03 08:03:17 +03:00
Taylor Carpenter
f70c6c566e Fix link to hardening guide
Signed-off-by: Taylor Carpenter <taylor@vulk.coop>

Updating the link to the Kubernetes Hardening Guidance by NSA and CISA

Ref: https://github.com/armosec/kubescape/issues/97
2021-09-30 10:10:51 -05:00
Benyamin Hirschberg
056d4411b7 Merge pull request #92 from BenHirschbergCa/master
adding CONTRIBUTING.md
2021-09-27 13:55:13 +03:00
Ben Hirschberg
602b83b0e5 adding CONTRIBUTING.md 2021-09-27 13:53:54 +03:00
Benyamin Hirschberg
1cfcc6d930 Merge pull request #90 from BenHirschbergCa/master
installing kubescape in home directory if possible
2021-09-23 21:05:31 +03:00
Ben Hirschberg
8c6f618743 installing kubescape in home directory if possible 2021-09-23 17:54:05 +00:00
Benyamin Hirschberg
6adf1c3162 Merge pull request #88 from BenHirschbergCa/master
Fixing sudo missing issue and returning build without C runtime dependency
2021-09-21 23:59:30 +03:00
Ben Hirschberg
9f5f4f1832 removing useless command in install 2021-09-21 23:48:26 +03:00
Ben Hirschberg
9f49cc83e9 Fixing install.sh to work in environments where there is no sudo && enabling static executable build 2021-09-21 23:45:35 +03:00
Benyamin Hirschberg
67972199ce Merge pull request #86 from armosec/Daniel-GrunbergerCA-patch-1
Update README.md
2021-09-21 23:05:25 +03:00
Daniel Grunberger
9d5db86bf3 Update README.md 2021-09-19 17:42:59 +03:00
Benyamin Hirschberg
39efed5fc1 Merge pull request #85 from armosec/dev
Delivering signup to master
2021-09-19 17:34:31 +03:00
Benyamin Hirschberg
21c2bf22dd Update scaninfo.go 2021-09-19 17:28:46 +03:00
Benyamin Hirschberg
6c94b3a423 Merge branch 'master' into dev 2021-09-19 17:24:14 +03:00
Benyamin Hirschberg
683248db0b Merge pull request #83 from Daniel-GrunbergerCA/master
Finish config local flags
2021-09-19 17:08:56 +03:00
Daniel Grunberger
9032400528 Fix permission denied 2021-09-19 16:19:25 +03:00
Bezbran
1a925b1acf Merge pull request #84 from Bezbran/dev
attributes as lower
2021-09-19 15:09:01 +03:00
Bezalel Brandwine
dadc8c2c60 exception attributes to lower 2021-09-19 15:07:39 +03:00
Daniel-GrunbergerCA
01c1b44bfc finish config local flags 2021-09-19 14:53:04 +03:00
Bezbran
a394a99d8f Merge pull request #2 from armosec/dev
Dev from armosec
2021-09-19 13:39:31 +03:00
Benyamin Hirschberg
4213707b7f Merge pull request #82 from brsolomon-deloitte/bugfix-issue-81-output-ext
Bugfix: correctly compare filepath.Ext() result
2021-09-18 21:26:39 +03:00
Brad Solomon
d53b8272ee Bugfix: correctly compare filepath.Ext() result
Closes #81.

setOutputFile() will incorrectly append .json to an --output
value that already has it. This is because
https://pkg.go.dev/path/filepath#Ext result includes
the ., whereas the current logic only tests against
json, not .json.
2021-09-16 11:57:58 -04:00
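For context, a small hypothetical Go sketch of the comparison described above (the function body is an illustration, not the actual Kubescape code): filepath.Ext returns the extension including its leading dot, so the guard has to compare against ".json" rather than "json".
```go
package main

import (
	"fmt"
	"path/filepath"
)

// setOutputFile appends a .json extension only when it is missing.
// The pre-fix logic compared filepath.Ext(output) against "json" (no dot),
// which never matches, so ".json" was appended even when already present.
func setOutputFile(output string) string {
	if filepath.Ext(output) != ".json" {
		output += ".json"
	}
	return output
}

func main() {
	fmt.Println(setOutputFile("results.json")) // results.json
	fmt.Println(setOutputFile("results"))      // results.json
}
```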
Daniel-GrunbergerCA
fdd688ac68 Merge remote-tracking branch 'upstream/dev' 2021-09-14 17:53:35 +03:00
Daniel-GrunbergerCA
5bb961bdc6 start get/set for config.json 2021-09-14 17:53:21 +03:00
Daniel-GrunbergerCA
9e7cc06f97 give higher priority to config.json 2021-09-14 17:14:21 +03:00
Benyamin Hirschberg
1d184d9000 Merge pull request #78 from YiscahLevySilas1/master
remove redundant responses
2021-09-14 16:52:05 +03:00
Benyamin Hirschberg
a5e2ebf647 Update datastructuresmethods.go 2021-09-14 16:51:16 +03:00
Benyamin Hirschberg
29eb573de5 Merge pull request #79 from BenHirschbergCa/dev
Add build for dev branch
2021-09-14 16:49:45 +03:00
Ben Hirschberg
ec6c3da5ec no needs :) 2021-09-14 16:40:12 +03:00
yiscah
e2d4f8961e Merge remote-tracking branch 'upstream/dev' 2021-09-14 16:38:13 +03:00
Ben Hirschberg
a48c680201 build dev branch 2021-09-14 16:36:57 +03:00
yiscah
c869f2c962 append to msg, loop backward on ruleresponses, don't check redundant role/clusterrole k8sresources 2021-09-14 16:36:57 +03:00
Benyamin Hirschberg
f77fc9a06d Merge branch 'armosec:dev' into dev 2021-09-14 16:01:59 +03:00
Daniel Grunberger
e12eae93b9 Merge pull request #77 from Daniel-GrunbergerCA/master
Add  config clsuter & fix github workflow
2021-09-14 15:27:09 +03:00
Daniel-GrunbergerCA
d92fb32574 fix build.yaml 2021-09-14 15:14:26 +03:00
Daniel-GrunbergerCA
541dba3d79 update build.yaml 2021-09-14 15:06:18 +03:00
yiscah
033ed17125 controlReport status is passed only if ALL ruleReports passed 2021-09-14 14:58:10 +03:00
Daniel-GrunbergerCA
aaeb663d15 remove env vars 2021-09-14 14:02:15 +03:00
Daniel-GrunbergerCA
c337005985 update build.yaml 2021-09-14 13:55:53 +03:00
Daniel-GrunbergerCA
192eeee348 update build.yaml for testing 2021-09-14 13:47:34 +03:00
yiscah
27c97684b9 delete redundant rule responses 2021-09-14 13:38:53 +03:00
Daniel-GrunbergerCA
41d5fa70ed Merge remote-tracking branch 'upstream/dev' 2021-09-14 13:21:25 +03:00
Bezalel Brandwine
4206e9c175 add caution for URLs changes 2021-09-14 12:52:35 +03:00
Daniel-GrunbergerCA
8658bb05dd change token to invitation in configmap 2021-09-14 12:23:28 +03:00
Daniel-GrunbergerCA
9b707016a9 Add set/get key-value option 2021-09-14 11:50:53 +03:00
Bezalel Brandwine
4b02826883 invitation token as param in configmap 2021-09-14 10:36:51 +03:00
Bezalel Brandwine
b29774ea71 some outputs refining 2021-09-14 09:58:17 +03:00
Benyamin Hirschberg
bf68e90a8e Merge pull request #8 from armosec/dev
Dev
2021-09-13 17:07:58 +03:00
Benyamin Hirschberg
cc5cdcd831 Merge branch 'dev' into dev 2021-09-13 17:07:51 +03:00
lalafi@cyberarmor.io
07f23ff7d9 Merge branch 'dev' of ssh://github.com/armosec/kubescape into dev 2021-09-13 16:33:32 +03:00
lalafi@cyberarmor.io
2985da6dc9 remove redundant field 2021-09-13 16:33:25 +03:00
Bezalel Brandwine
1523973749 change consts to var so ldflag -s will work 2021-09-13 15:26:29 +03:00
Bezalel Brandwine
ccafd78a14 change URLs to production at build time 2021-09-13 15:10:43 +03:00
Bezalel Brandwine
4f71fe0d55 support rerun on the same cluster flows 2021-09-13 14:25:09 +03:00
David Wertenteil
7bd6b6b4d1 Merge pull request #75 from dwertent/master
Handle download framework error
2021-09-13 11:31:25 +03:00
dwertent
2b976489a2 handle download framework error 2021-09-13 11:21:57 +03:00
dwertent
1440f20f95 merged from master 2021-09-13 11:01:49 +03:00
dwertent
941e7e27c0 Merge branch 'master' into dev 2021-09-13 11:00:30 +03:00
dwertent
5428c6ab2f merged from dev 2021-09-13 10:57:34 +03:00
David Wertenteil
851bb65d17 Merge pull request #74 from dwertent/master
* Update exception support
* Update installation script
2021-09-13 10:53:10 +03:00
dwertent
a3ce04b7e8 update install script 2021-09-13 10:47:29 +03:00
dwertent
4d68ca6aa2 update exceptions support 2021-09-13 10:24:24 +03:00
Ben Hirschberg
29c6767d3c add run locally to readme 2021-09-13 09:41:16 +03:00
Benyamin Hirschberg
d8f5f7975c Merge pull request #73 from BenHirschbergCa/dev
Inverting posture score
2021-09-13 08:34:22 +03:00
Ben Hirschberg
4cd8476837 Inverting posture score 2021-09-13 08:31:13 +03:00
Benyamin Hirschberg
112257449f Merge pull request #72 from BenHirschbergCa/dev
fixing API path
2021-09-13 08:23:22 +03:00
Ben Hirschberg
ff9cf4adf0 fixing API path 2021-09-13 08:22:25 +03:00
Benyamin Hirschberg
f2d387bc9c Merge pull request #71 from Daniel-GrunbergerCA/master
Support score, cluster name and results flag
2021-09-12 20:53:49 +03:00
lalafi@cyberarmor.io
2ceb5150e2 adding control id 2021-09-12 19:28:11 +03:00
Daniel-GrunbergerCA
00006ec721 rm omit empty 2021-09-12 18:49:13 +03:00
Daniel-GrunbergerCA
00aa6948ab Merge remote-tracking branch 'upstream/dev' 2021-09-12 18:47:31 +03:00
lalafi@cyberarmor.io
aad32ec965 added controlID 2021-09-12 18:43:45 +03:00
Daniel-GrunbergerCA
bd24ed3af7 add framework score 2021-09-12 18:37:50 +03:00
Daniel-GrunbergerCA
9fc455bcec Update posture report flag and cluster name 2021-09-12 18:13:24 +03:00
dwertent
0f8ba1e7e8 Merge branches 'master' and 'master' of github.com:armosec/kubescape 2021-09-12 17:45:32 +03:00
David Wertenteil
d3137af3d7 skip score updating 2021-09-12 17:36:23 +03:00
dwertent
775dd037d6 Merge branch 'dev' 2021-09-12 17:34:27 +03:00
dwertent
49cbfe130c do not calculate score 2021-09-12 17:34:00 +03:00
David Wertenteil
9cd61dd996 Merge pull request #70 from Moshe-Rappaport-CA/master
Support control IDs
2021-09-12 17:30:27 +03:00
moshep
292c4aa060 support id 2021-09-12 16:13:33 +03:00
moshep
56a265930d Merge remote-tracking branch 'upstream/dev' 2021-09-12 14:21:19 +03:00
Daniel-GrunbergerCA
4171d110a4 separa interfaces 2021-09-12 11:06:43 +03:00
Benyamin Hirschberg
2568241ef8 Merge pull request #69 from pettersolberg88/master
fix: Fixed Docker build not working
2021-09-12 10:19:18 +03:00
Daniel-GrunbergerCA
d0775565e9 Merge remote-tracking branch 'upstream/dev' 2021-09-12 09:40:17 +03:00
Petter Solberg
3c6b2db919 fix: Fixed Docker build not working
Building the docker image does currently not work because go.mod does not exist.
By running: `docker build -t kubescape -f build/Dockerfile .`
It fails:
```
Step 7/10 : RUN GOOS=linux CGO_ENABLED=0 go build -ldflags="-s -w " -installsuffix cgo  -o kubescape .
 ---> Running in 3e7d4a124446
cautils/k8sinterface/cloudvendorregistrycreds.go:14:2: missing go.sum entry for module providing package github.com/aws/aws-sdk-go/aws (imported by github.com/armosec/kubescape/cautils/k8sinterface); to add:
        go get github.com/armosec/kubescape/cautils/k8sinterface
cautils/k8sinterface/cloudvendorregistrycreds.go:15:2: missing go.sum entry for module providing package github.com/aws/aws-sdk-go/aws/session (imported by github.com/armosec/kubescape/cautils/k8sinterface); to add:
        go get github.com/armosec/kubescape/cautils/k8sinterface
...
```
By changing mod download to go mod tidy, it creates go.sum and the docker build works.
2021-09-10 14:10:09 +02:00
David Wertenteil
ca4d4a096c Support self registration 2021-09-10 01:20:28 +03:00
dwertent
ff27db6b83 Merge remote-tracking branch 'upstream/dev' 2021-09-10 00:58:24 +03:00
Daniel Grunberger
c9ecb6c563 Add version handling (#67)
* start version handling

* add version handling

* update latest version check

* update build.yaml

* erase unused vars

* fix build.yaml

* fix var name

* handle error
2021-09-10 00:57:33 +03:00
dwertent
b9e5782264 support self registeration 2021-09-10 00:56:51 +03:00
Daniel-GrunbergerCA
d852f81cb0 handle error 2021-09-09 19:56:29 +03:00
dwertent
6137aa5d8e support fronegg 2021-09-09 17:15:55 +03:00
Daniel-GrunbergerCA
131b67ee83 fix var name 2021-09-09 16:26:42 +03:00
Daniel-GrunbergerCA
db6f00be08 fix build.yaml 2021-09-09 15:23:34 +03:00
Daniel-GrunbergerCA
2ecc80985a Merge remote-tracking branch 'upstream/dev' 2021-09-09 15:18:56 +03:00
Daniel-GrunbergerCA
93dbfd5110 erase unused vars 2021-09-09 15:15:36 +03:00
Daniel-GrunbergerCA
ae0c384c85 update build.yaml 2021-09-09 15:02:48 +03:00
Benyamin Hirschberg
f60ff1fb26 Merge pull request #63 from zc2638/feat/dockerfile
add docker build
2021-09-09 14:59:46 +03:00
Daniel-GrunbergerCA
08a81696a1 update latest version check 2021-09-09 14:58:52 +03:00
Daniel-GrunbergerCA
8375a8ae63 add version handling 2021-09-09 14:37:25 +03:00
Daniel-GrunbergerCA
31d8cf5118 start version handling 2021-09-09 12:29:42 +03:00
dwertent
597b967e55 Merge remote-tracking branch 'upstream/dev' 2021-09-09 11:43:44 +03:00
dwertent
679238ec13 download from release 2021-09-09 09:53:08 +03:00
dwertent
94884ac3d7 Merge branch 'master' into dev 2021-09-09 09:50:45 +03:00
Benyamin Hirschberg
0ef8f20c50 Merge pull request #65 from BenHirschbergCa/master
Cleanup in build file
2021-09-08 21:30:22 +03:00
Ben Hirschberg
82f3d62de5 clean up build file 2021-09-08 21:29:21 +03:00
Benyamin Hirschberg
46f1e6a83b Merge pull request #7 from armosec/master
rebase
2021-09-08 21:27:16 +03:00
Benyamin Hirschberg
65841a014f Merge branch 'master' into master 2021-09-08 21:27:02 +03:00
Benyamin Hirschberg
985c6868c1 Fixing URL typo 2021-09-08 21:21:25 +03:00
Shauli Rozen
fca862b2c7 Update README.md 2021-09-07 21:10:27 +03:00
zc
77a9956d91 add docker build 2021-09-07 14:21:39 +08:00
David Wertenteil
3a4a58fdd5 remove deffer func (#60) 2021-09-05 17:36:41 +03:00
dwertent
a1e639453d remove deffer func 2021-09-05 17:35:56 +03:00
dwertent
7da23c111e adding exceptions after merge 2021-09-05 17:29:53 +03:00
dwertent
768556251d support exceptions
use rego store
2021-09-05 17:22:47 +03:00
dwertent
00fcc565b5 ignore md5sum 2021-09-05 17:14:00 +03:00
Ben Hirschberg
9c74e5c93b Merge branch 'master' of github.com:BenHirschbergCa/kubescape 2021-09-05 17:00:37 +03:00
Ben Hirschberg
6a0ee6e0d7 specific upload files 2021-09-05 16:59:55 +03:00
Benyamin Hirschberg
93bb09d78e Merge pull request #6 from BenHirschbergCa/dev
removing unneeded fields
2021-09-05 16:51:40 +03:00
Ben Hirschberg
228e7703a8 removing unneeded fields 2021-09-05 16:51:00 +03:00
Benyamin Hirschberg
4b15a3b8e0 Merge pull request #5 from BenHirschbergCa/dev
moving to alexellis/upload-assets
2021-09-05 16:47:11 +03:00
Ben Hirschberg
80c5fd7439 moving to alexellis/upload-assets 2021-09-05 16:46:13 +03:00
Benyamin Hirschberg
504c4acc42 Merge pull request #4 from BenHirschbergCa/dev
returning master push run
2021-09-05 15:38:09 +03:00
Ben Hirschberg
573d85d770 returning master push run 2021-09-05 15:37:18 +03:00
Benyamin Hirschberg
4247f66378 Merge pull request #3 from BenHirschbergCa/dev
fixing upload file list
2021-09-05 15:34:37 +03:00
Benyamin Hirschberg
7d6a10e787 Merge pull request #59 from BenHirschbergCa/dev
Dev
2021-09-05 15:29:19 +03:00
Ben Hirschberg
bad303692e fixing upload file list 2021-09-05 15:28:33 +03:00
Benyamin Hirschberg
af3b33f7b0 Merge pull request #2 from BenHirschbergCa/dev
Dev
2021-09-05 15:23:12 +03:00
Ben Hirschberg
fd66b2eba5 build on pull requests only! 2021-09-05 15:22:02 +03:00
Ben Hirschberg
157ba1a08d ws 2021-09-05 15:20:13 +03:00
Benyamin Hirschberg
6b15e6575b Merge pull request #1 from BenHirschbergCa/dev
Dev
2021-09-05 15:18:48 +03:00
Ben Hirschberg
53f3229e9f adding m5sum 2021-09-05 15:17:55 +03:00
Ben Hirschberg
186435de69 test pinging 2021-09-05 15:03:39 +03:00
David Wertenteil
4d027d691f Support exceptions (#58)
* support exceptions

* update screenshot

* update summary
2021-09-05 14:44:55 +03:00
dwertent
3f84ee3fcc update summary 2021-09-05 14:42:49 +03:00
dwertent
38103ac90b update screenshot 2021-09-05 14:39:13 +03:00
dwertent
13d27697e1 update readme 2021-09-05 14:33:32 +03:00
dwertent
942f356d19 support exceptions 2021-09-05 14:21:51 +03:00
dwertent
b87b687e2f support exceptions 2021-09-02 17:41:03 +03:00
dwertent
2e313719bb adding scaore and excpetion to code 2021-09-02 14:54:04 +03:00
dwertent
0c5eb48fdb Merge remote-tracking branch 'upstream/master' 2021-09-02 13:21:13 +03:00
dwertent
2ae2c81e0b rm download 2021-09-02 13:18:23 +03:00
dwertent
222b154505 store file localy 2021-08-31 17:08:02 +03:00
dwertent
67c2de74f1 adding download script 2021-08-31 17:05:16 +03:00
dwertent
4a9b36807a remove sudo 2021-08-31 16:43:37 +03:00
dwertent
c6241fab38 remove sudo 2021-08-31 16:42:12 +03:00
dwertent
afbc69c6d2 Merge remote-tracking branch 'upstream/dev' 2021-08-31 16:41:44 +03:00
dwertent
2779cb4e25 update module 2021-08-31 11:47:44 +03:00
dwertent
f46ee93539 update modul name 2021-08-31 11:39:27 +03:00
dwertent
3eb087e5c1 Merge remote-tracking branch 'upstream/dev' 2021-08-31 11:38:14 +03:00
dwertent
59c935e723 update output f 2021-08-31 09:00:52 +03:00
dwertent
bae45d277f Merge remote-tracking branch 'upstream/dev' 2021-08-31 08:47:37 +03:00
dwertent
0b6dfa9cd0 Merge remote-tracking branch 'upstream/dev' 2021-08-30 18:47:07 +03:00
dwertent
1ff3a6c92c support output to file 2021-08-30 18:44:42 +03:00
dwertent
f75cee0d78 support stdin input 2021-08-30 14:54:01 +03:00
dwertent
229f16cb01 Merge remote-tracking branch 'upstream/dev' 2021-08-30 13:52:58 +03:00
dwertent
2c6b1a440f update glob function 2021-08-30 08:53:34 +03:00
dwertent
37afc1352f adding helm support to readme 2021-08-29 13:34:40 +03:00
dwertent
9943119033 recursive glob 2021-08-29 13:15:34 +03:00
dwertent
41457ff551 Merge remote-tracking branch 'upstream/dev' 2021-08-29 10:38:42 +03:00
dwertent
82b64b5828 Merge remote-tracking branch 'origin/dev' 2021-08-29 10:35:59 +03:00
dwertent
229e8acc74 Merge remote-tracking branch 'origin/yamlsupport' 2021-08-29 10:35:32 +03:00
David Wertenteil
30324e1c01 Merge branch 'dev' into yamlsupport 2021-08-29 10:19:09 +03:00
dwertent
8ca356eae7 Merge remote-tracking branch 'upstream/master' 2021-08-29 10:09:54 +03:00
dwertent
29f4ae368d support url input, update readme 2021-08-29 10:08:49 +03:00
dwertent
409080f51b update package name o kubescape 2021-08-29 08:17:09 +03:00
dwertent
0b24c46279 Merge remote-tracking branch 'upstream/dev' 2021-08-26 18:30:50 +03:00
dwertent
49596c5ac1 split to function 2021-08-26 18:29:32 +03:00
dwertent
9bf79db8f8 Merge branch 'Daniel-GrunbergerCA-master' into dev 2021-08-26 12:22:34 +03:00
254 changed files with 20135 additions and 8932 deletions

.github/workflows/build.yaml
@@ -3,10 +3,6 @@ name: build
on:
push:
branches: [ master ]
pull_request:
branches: [ master ]
types: [ closed ]
jobs:
once:
name: Create release
@@ -20,8 +16,8 @@ jobs:
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
tag_name: v1.0.${{ github.run_number }}
release_name: Release v1.0.${{ github.run_number }}
tag_name: v2.0.${{ github.run_number }}
release_name: Release v2.0.${{ github.run_number }}
draft: false
prerelease: false
build:
@@ -33,16 +29,31 @@ jobs:
os: [ubuntu-latest, macos-latest, windows-latest]
steps:
- uses: actions/checkout@v1
- name: Set up Go
uses: actions/setup-go@v2
with:
go-version: 1.16
go-version: 1.17
- name: Test
run: go test -v ./...
- name: Build
run: mkdir -p build/${{ matrix.os }} && go mod tidy && go build -ldflags "-w -s" -o build/${{ matrix.os }}/kubescape
env:
RELEASE: v2.0.${{ github.run_number }}
ArmoBEServer: api.armo.cloud
ArmoAuthServer: auth.armo.cloud
ArmoERServer: report.armo.cloud
ArmoWebsite: portal.armo.cloud
CGO_ENABLED: 0
run: python3 --version && python3 build.py
- name: Upload Release Asset
- name: Smoke Testing
env:
RELEASE: v2.0.${{ github.run_number }}
KUBESCAPE_SKIP_UPDATE_CHECK: "true"
run: python3 smoke_testing/init.py ${PWD}/build/${{ matrix.os }}/kubescape
- name: Upload Release binaries
id: upload-release-asset
uses: actions/upload-release-asset@v1
env:
@@ -52,3 +63,60 @@ jobs:
asset_path: build/${{ matrix.os }}/kubescape
asset_name: kubescape-${{ matrix.os }}
asset_content_type: application/octet-stream
build-docker:
name: Build docker container, tag and upload to registry
needs: build
runs-on: ubuntu-latest
if: ${{ github.repository == 'armosec/kubescape' }} # TODO
permissions:
id-token: write
packages: write
contents: read
steps:
- uses: actions/checkout@v2
- name: Set image version
id: image-version
run: echo '::set-output name=IMAGE_VERSION::v2.0.${{ github.run_number }}'
- name: Set image name
id: image-name
run: echo '::set-output name=IMAGE_NAME::quay.io/${{ github.repository_owner }}/kubescape'
- name: Build the Docker image
run: docker build . --file build/Dockerfile --tag ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }} --build-arg image_version=${{ steps.image-version.outputs.IMAGE_VERSION }}
- name: Re-Tag Image to latest
run: docker tag ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }} ${{ steps.image-name.outputs.IMAGE_NAME }}:latest
- name: Login to Quay.io
env: # Or as an environment variable
QUAY_PASSWORD: ${{ secrets.QUAYIO_REGISTRY_PASSWORD }}
QUAY_USERNAME: ${{ secrets.QUAYIO_REGISTRY_USERNAME }}
run: docker login -u="${QUAY_USERNAME}" -p="${QUAY_PASSWORD}" quay.io
# - name: Login to GitHub Container Registry
# uses: docker/login-action@v1
# with:
# registry: ghcr.io
# username: ${{ github.actor }}
# password: ${{ secrets.GITHUB_TOKEN }}
- name: Push Docker image
run: |
docker push ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }}
docker push ${{ steps.image-name.outputs.IMAGE_NAME }}:latest
# TODO - Wait for casign to support fixed tags -> https://github.com/sigstore/cosign/issues/1424
# - name: Install cosign
# uses: sigstore/cosign-installer@main
# with:
# cosign-release: 'v1.5.1' # optional
# - name: sign kubescape container image
# env:
# COSIGN_EXPERIMENTAL: "true"
# run: |
# cosign sign --force ${{ steps.image-name.outputs.IMAGE_NAME }}:latest
# cosign sign --force ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }}

.github/workflows/build_dev.yaml vendored Normal file (84 changed lines)
@@ -0,0 +1,84 @@
name: build-dev
on:
push:
branches: [ dev ]
jobs:
build:
name: Create cross-platform dev build
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ubuntu-latest, macos-latest, windows-latest]
steps:
- uses: actions/checkout@v1
- name: Set up Go
uses: actions/setup-go@v2
with:
go-version: 1.17
- name: Test
run: go test -v ./...
- name: Build
env:
RELEASE: v2.0.${{ github.run_number }}
ArmoBEServer: api.armo.cloud
ArmoAuthServer: auth.armo.cloud
ArmoERServer: report.armo.cloud
ArmoWebsite: portal.armo.cloud
CGO_ENABLED: 0
run: python3 --version && python3 build.py
- name: Smoke Testing
env:
RELEASE: v2.0.${{ github.run_number }}
KUBESCAPE_SKIP_UPDATE_CHECK: "true"
run: python3 smoke_testing/init.py ${PWD}/build/${{ matrix.os }}/kubescape
- name: Upload build artifacts
uses: actions/upload-artifact@v2
with:
name: kubescape-${{ matrix.os }}
path: build/${{ matrix.os }}/kubescape
build-docker:
name: Build docker container, tag and upload to registry
needs: build
if: ${{ github.repository == 'armosec/kubescape' }} # TODO
runs-on: ubuntu-latest
permissions:
id-token: write
packages: write
contents: read
steps:
- uses: actions/checkout@v2
- name: Set image version
id: image-version
run: echo '::set-output name=IMAGE_VERSION::dev-v2.0.${{ github.run_number }}'
- name: Set image name
id: image-name
run: echo '::set-output name=IMAGE_NAME::quay.io/${{ github.repository_owner }}/kubescape'
- name: Build the Docker image
run: docker build . --file build/Dockerfile --tag ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }} --build-arg image_version=${{ steps.image-version.outputs.IMAGE_VERSION }}
- name: Login to Quay.io
env:
QUAY_PASSWORD: ${{ secrets.QUAYIO_REGISTRY_PASSWORD }}
QUAY_USERNAME: ${{ secrets.QUAYIO_REGISTRY_USERNAME }}
run: docker login -u="${QUAY_USERNAME}" -p="${QUAY_PASSWORD}" quay.io
# - name: Login to GitHub Container Registry
# uses: docker/login-action@v1
# with:
# registry: ghcr.io
# username: ${{ github.actor }}
# password: ${{ secrets.GITHUB_TOKEN }}
- name: Push Docker image
run: |
docker push ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }}

.github/workflows/master_pr_checks.yaml vendored Normal file (40 changed lines)
@@ -0,0 +1,40 @@
name: master-pr
on:
pull_request:
branches: [ master ]
types: [ edited, opened, synchronize, reopened ]
jobs:
build:
name: Create cross-platform build
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ubuntu-latest, macos-latest, windows-latest]
steps:
- uses: actions/checkout@v1
- name: Set up Go
uses: actions/setup-go@v2
with:
go-version: 1.17
- name: Test
run: go test -v ./...
- name: Build
env:
RELEASE: v2.0.${{ github.run_number }}
ArmoBEServer: api.armo.cloud
ArmoAuthServer: auth.armo.cloud
ArmoERServer: report.armo.cloud
ArmoWebsite: portal.armo.cloud
CGO_ENABLED: 0
run: python3 --version && python3 build.py
- name: Smoke Testing
env:
RELEASE: v2.0.${{ github.run_number }}
KUBESCAPE_SKIP_UPDATE_CHECK: "true"
run: python3 smoke_testing/init.py ${PWD}/build/${{ matrix.os }}/kubescape

.github/workflows/post-release.yaml vendored Normal file (17 changed lines)
@@ -0,0 +1,17 @@
name: create release digests
on:
release:
types: [ published]
branches: [ master ]
jobs:
once:
name: Creating digests
runs-on: ubuntu-latest
steps:
- name: Digest
uses: MCJack123/ghaction-generate-release-hashes@v1
with:
hash-type: sha1
file-name: kubescape-release-digests

.gitignore vendored (7 changed lines)
@@ -1,4 +1,7 @@
*.vs*
*go.sum*
*kubescape*
*debug*
*debug*
*vender*
*.pyc*
.idea
ca.srl

CODE_OF_CONDUCT.md Normal file (127 changed lines)
@@ -0,0 +1,127 @@
# Contributor Covenant Code of Conduct
## Our Pledge
We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
## Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
overall community
Examples of unacceptable behavior include:
* The use of sexualized language or imagery, and sexual attention or
advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
## Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement [here](mailto:ben@armosec.io).
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
## Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
### 1. Correction
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
### 2. Warning
**Community Impact**: A violation through a single incident or series
of actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.
### 3. Temporary Ban
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within
the community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
Community Impact Guidelines were inspired by [Mozilla's code of conduct
enforcement ladder](https://github.com/mozilla/diversity).
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.

CONTRIBUTING.md Normal file (100 changed lines)
@@ -0,0 +1,100 @@
# Contributing
First, it is awesome that you are considering contributing to Kubescape! Contributing is important and fun and we welcome your efforts.
When contributing, we categorize contributions into two:
* Small code changes or fixes, whose scope are limited to a single or two files
* Complex features and improvements, whose are not limited
If you have a small change, feel free to fire up a Pull Request.
When planning a bigger change, please first discuss the change you wish to make via issue,
email, or any other method with the owners of this repository before making a change. Most likely your changes or features are great, but sometimes we might already going to this direction (or the exact opposite ;-) ) and we don't want to waste your time.
Please note we have a code of conduct, please follow it in all your interactions with the project.
## Pull Request Process
1. Ensure any install or build dependencies are removed before the end of the layer when doing a
build.
2. Update the README.md with details of changes to the interface, this includes new environment
variables, exposed ports, useful file locations and container parameters.
3. Open Pull Request to `dev` branch - we test the component before merging into the `master` branch
4. We will merge the Pull Request in once you have the sign-off.
## Code of Conduct
### Our Pledge
In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to making participation in our project and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, gender identity and expression, level of experience,
nationality, personal appearance, race, religion, or sexual identity and
orientation.
### Our Standards
Examples of behavior that contributes to creating a positive environment
include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or
advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic
address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
We will distance those who are constantly adhere to unacceptable behavior.
### Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.
### Scope
This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community. Examples of
representing a project or community include using an official project e-mail
address, posting via an official social media account, or acting as an appointed
representative at an online or offline event. Representation of a project may be
further defined and clarified by project maintainers.
### Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the project team at [INSERT EMAIL ADDRESS]. All
complaints will be reviewed and investigated and will result in a response that
is deemed necessary and appropriate to the circumstances. The project team is
obligated to maintain confidentiality with regard to the reporter of an incident.
Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership.
### Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
available at [http://contributor-covenant.org/version/1/4][version]
[homepage]: http://contributor-covenant.org
[version]: http://contributor-covenant.org/version/1/4/

MAINTAINERS.md Normal file (9 changed lines)
@@ -0,0 +1,9 @@
# Maintainers
The following table lists Kubescape project maintainers
| Name | GitHub | Email | Organization | Repositories/Area of Expertise | Added/Renewed On |
| --- | --- | --- | --- | --- | --- |
| Ben Hirschberg | @slashben | ben@armosec.io | ARMO | Kubescape CLI | 2021-09-01 |
| Rotem Refael | @rotemamsa | rrefael@armosec.io | ARMO | Kubescape CLI | 2021-10-11 |
| David Wertenteil | @dwertent | dwertent@armosec.io | ARMO | Kubescape CLI | 2021-09-01 |

README.md (304 changed lines)
@@ -1,136 +1,327 @@
<img src="docs/kubescape.png" width="300" alt="logo" align="center">
[![build](https://github.com/armosec/kubescape/actions/workflows/build.yaml/badge.svg)](https://github.com/armosec/kubescape/actions/workflows/build.yaml)
[![Github All Releases](https://img.shields.io/github/downloads/armosec/kubescape/total.svg)](https://github.com/armosec/kubescape)
[![Go Report Card](https://goreportcard.com/badge/github.com/armosec/kubescape)](https://goreportcard.com/report/github.com/armosec/kubescape)
Kubescape is the first tool for testing if Kubernetes is deployed securely as defined in [Kubernetes Hardening Guidance by NSA and CISA](https://www.nsa.gov/News-Features/Feature-Stories/Article-View/Article/2716980/nsa-cisa-release-kubernetes-hardening-guidance/)
Kubescape is a K8s open-source tool providing a multi-cloud K8s single pane of glass, including risk analysis, security compliance, RBAC visualizer and image vulnerabilities scanning.
Kubescape scans K8s clusters, YAML files, and HELM charts, detecting misconfigurations according to multiple frameworks (such as the [NSA-CISA](https://www.armosec.io/blog/kubernetes-hardening-guidance-summary-by-armo) , [MITRE ATT&CK®](https://www.microsoft.com/security/blog/2021/03/23/secure-containerized-environments-with-updated-threat-matrix-for-kubernetes/)), software vulnerabilities, and RBAC (role-based-access-control) violations at early stages of the CI/CD pipeline, calculates risk score instantly and shows risk trends over time.
It became one of the fastest-growing Kubernetes tools among developers due to its easy-to-use CLI interface, flexible output formats, and automated scanning capabilities, saving Kubernetes users and admins precious time, effort, and resources.
Kubescape integrates natively with other DevOps tools, including Jenkins, CircleCI, Github workflows, Prometheus, and Slack, and supports multi-cloud K8s deployments like EKS, GKE, and AKS.
Use Kubescape to test clusters or scan single YAML files and integrate it to your processes.
</br>
<img src="docs/demo.gif">
# TL;DR
## Install & Run
### Install:
## Install:
```
curl -s https://raw.githubusercontent.com/armosec/kubescape/master/install.sh | /bin/bash
```
### Run:
```
kubescape scan framework nsa --exclude-namespaces kube-system,kube-public
```
[Install on windows](#install-on-windows)
If you wish to scan all namespaces in your cluster, remove the `--exclude-namespaces` flag.
[Install on macOS](#install-on-macos)
## Run:
```
kubescape scan --submit --enable-host-scan
```
<img src="docs/summary.png">
</br>
### Flags
> Kubescape is an open source project, we welcome your feedback and ideas for improvement. We're also aiming to collaborate with the Kubernetes community to help make the tests themselves more robust and complete as Kubernetes develops.
</br>
### Click [👍](https://github.com/armosec/kubescape/stargazers) if you want us to continue to develop and improve Kubescape 😀
</br>
# Being part of the team
We invite you to our team! We are excited about this project and want to return the love we get.
Want to contribute? Want to discuss something? Have an issue?
* Open a issue, we are trying to respond within 48 hours
* [Join us](https://armosec.github.io/kubescape/) in a discussion on our discord server!
[<img src="docs/discord-banner.png" width="100" alt="logo" align="center">](https://armosec.github.io/kubescape/)
# Options and examples
## Playground
* [Kubescape playground](https://www.katacoda.com/pathaksaiyam/scenarios/kubescape)
## Tutorials
* [Overview](https://youtu.be/wdBkt_0Qhbg)
* [How To Secure Kubernetes Clusters With Kubescape And Armo](https://youtu.be/ZATGiDIDBQk)
* [Scanning Kubernetes YAML files](https://youtu.be/Ox6DaR7_4ZI)
* [Scan Kubescape on an air-gapped environment (offline support)](https://youtu.be/IGXL9s37smM)
* [Managing exceptions in the Kubescape SaaS version](https://youtu.be/OzpvxGmCR80)
## Install on Windows
**Requires powershell v5.0+**
``` powershell
iwr -useb https://raw.githubusercontent.com/armosec/kubescape/master/install.ps1 | iex
```
Note: if you get an error you might need to change the execution policy (i.e. enable Powershell) with
``` powershell
Set-ExecutionPolicy RemoteSigned -scope CurrentUser
```
## Install on macOS
1. ```
brew tap armosec/kubescape
```
2. ```
brew install kubescape
```
## Flags
| flag | default | description | options |
|-----------------------------|---------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------|
| `-e`/`--exclude-namespaces` | Scan all namespaces | Namespaces to exclude from scanning. Recommended to exclude `kube-system` and `kube-public` namespaces | |
| `--include-namespaces` | Scan all namespaces | Scan specific namespaces | |
| `-s`/`--silent` | Display progress messages | Silent progress messages | |
| `-t`/`--fail-threshold` | `100` (do not fail) | fail command (return exit code 1) if result is above threshold | `0` -> `100` |
| `-f`/`--format` | `pretty-printer` | Output format | `pretty-printer`/`json`/`junit`/`prometheus` |
| `-o`/`--output` | print to stdout | Save scan result in file | |
| `--use-from` | | Load local framework object from specified path. If not used will download latest |
| `--use-artifacts-from` | | Load artifacts (frameworks, control-config, exceptions) from local directory. If not used will download them | |
| `--use-default` | `false` | Load local framework object from default path. If not used will download latest | `true`/`false` |
| `--exceptions` | | Path to an exceptions obj, [examples](examples/exceptions/README.md). Default will download exceptions from Kubescape SaaS |
| `--controls-config` | | Path to a controls-config obj. If not set will download controls-config from ARMO management portal | |
| `--submit` | `false` | If set, Kubescape will send the scan results to Armo management portal where you can see the results in a user-friendly UI, choose your preferred compliance framework, check risk results history and trends, manage exceptions, get remediation recommendations and much more. By default the results are not sent | `true`/`false` |
| `--keep-local` | `false` | Kubescape will not send scan results to Armo management portal. Use this flag if you ran with the `--submit` flag in the past and you do not want to submit your current scan results | `true`/`false` |
| `--account` | | Armo portal account ID. Default will load account ID from configMap or config file | |
| `--kube-context` | current-context | Cluster context to scan | |
| `--verbose` | `false` | Display all of the input resources and not only failed resources | `true`/`false` |
| `--logger` | `info` | Set the logger level | `debug`/`info`/`success`/`warning`/`error`/`fatal` |
| flag | default | description | options |
| --- | --- | --- | --- |
| `-e`/`--exclude-namespaces` | Scan all namespaces | Namespaces to exclude from scanning. Recommended to exclude `kube-system` and `kube-public` namespaces |
| `-s`/`--silent` | Display progress messages | Silent progress messages |
| `-t`/`--fail-threshold` | `0` (do not fail) | fail command (return exit code 1) if result bellow threshold| `0` -> `100` |
| `-f`/`--format` | `pretty-printer` | Output format | `pretty-printer`/`json`/`junit` |
| `-o`/`--output` | print to stdout | Save scan result in file |
## Usage & Examples
### Examples
* Scan a running Kubernetes cluster with [`nsa`](https://www.nsa.gov/News-Features/Feature-Stories/Article-View/Article/2716980/nsa-cisa-release-kubernetes-hardening-guidance/) framework
#### Scan a running Kubernetes cluster and submit results to the [Kubescape SaaS version](https://portal.armo.cloud/)
```
kubescape scan framework nsa --exclude-namespaces kube-system,kube-public
kubescape scan --submit
```
* Scan local `yaml`/`json` files before deploying <img src="docs/new-feature.svg">
#### Scan a running Kubernetes cluster with [`nsa`](https://www.nsa.gov/Press-Room/News-Highlights/Article/Article/2716980/nsa-cisa-release-kubernetes-hardening-guidance/) framework and submit results to the [Kubescape SaaS version](https://portal.armo.cloud/)
```
kubescape scan framework nsa *.yaml
kubescape scan framework nsa --submit
```
* Scan `yaml`/`json` files from url <img src="docs/new-feature.svg">
#### Scan a running Kubernetes cluster with [`MITRE ATT&CK®`](https://www.microsoft.com/security/blog/2021/03/23/secure-containerized-environments-with-updated-threat-matrix-for-kubernetes/) framework and submit results to the [Kubescape SaaS version](https://portal.armo.cloud/)
```
kubescape scan framework nsa https://raw.githubusercontent.com/GoogleCloudPlatform/microservices-demo/master/release/kubernetes-manifests.yaml
kubescape scan framework mitre --submit
```
* Output in `json` format <img src="docs/new-feature.svg">
#### Scan a running Kubernetes cluster with a specific control using the control name or control ID. [List of controls](https://hub.armo.cloud/docs/controls)
```
kubescape scan framework nsa --exclude-namespaces kube-system,kube-public --format json --output results.json
kubescape scan control "Privileged container"
```
* Output in `junit xml` format <img src="docs/new-feature.svg">
#### Scan specific namespaces
```
kubescape scan framework nsa --exclude-namespaces kube-system,kube-public --format junit --output results.xml
kubescape scan --include-namespaces development,staging,production
```
### Helm Support
* Render the helm chart using [`helm template`](https://helm.sh/docs/helm/helm_template/) and pass to stdout <img src="docs/new-feature.svg">
#### Scan cluster and exclude some namespaces
```
helm template [NAME] [CHART] [flags] --dry-run | kubescape scan framework nsa -
kubescape scan --exclude-namespaces kube-system,kube-public
```
for example:
#### Scan local `yaml`/`json` files before deploying. [Take a look at the demonstration](https://youtu.be/Ox6DaR7_4ZI)
```
helm template bitnami/mysql --generate-name --dry-run | kubescape scan framework nsa -
kubescape scan *.yaml
```
### Offline Support <img src="docs/new-feature.svg">
#### Scan kubernetes manifest files from a public github repository
```
kubescape scan https://github.com/armosec/kubescape
```
#### Display all scanned resources (including the resources who passed)
```
kubescape scan --verbose
```
#### Output in `json` format
```
kubescape scan --format json --output results.json
```
#### Output in `junit xml` format
```
kubescape scan --format junit --output results.xml
```
#### Output in `prometheus` metrics format - Contributed by [@Joibel](https://github.com/Joibel)
```
kubescape scan --format prometheus
```
#### Scan with exceptions, objects with exceptions will be presented as `exclude` and not `fail`
[Full documentation](examples/exceptions/README.md)
```
kubescape scan --exceptions examples/exceptions/exclude-kube-namespaces.json
```
#### Scan Helm charts - Render the helm chart using [`helm template`](https://helm.sh/docs/helm/helm_template/) and pass to stdout
```
helm template [NAME] [CHART] [flags] --dry-run | kubescape scan -
```
e.g.
```
helm template bitnami/mysql --generate-name --dry-run | kubescape scan -
```
### Offline/Air-gaped Environment Support
[Video tutorial](https://youtu.be/IGXL9s37smM)
It is possible to run Kubescape offline!
#### Download all artifacts
First download the framework and then scan with `--use-from` flag
* Download and save in file, if file name not specified, will store save to `~/.kubescape/<framework name>.json`
1. Download and save in local directory, if path not specified, will save all in `~/.kubescape`
```
kubescape download framework nsa --output nsa.json
kubescape download artifacts --output path/to/local/dir
```
2. Copy the downloaded artifacts to the air-gaped/offline environment
3. Scan using the downloaded artifacts
```
kubescape scan --use-artifacts-from path/to/local/dir
```
* Scan using the downloaded framework
#### Download a single artifacts
You can also download a single artifacts and scan with the `--use-from` flag
1. Download and save in file, if file name not specified, will save in `~/.kubescape/<framework name>.json`
```
kubescape scan framework nsa --use-from nsa.json
kubescape download framework nsa --output /path/nsa.json
```
2. Copy the downloaded artifacts to the air-gaped/offline environment
3. Scan using the downloaded framework
```
kubescape scan framework nsa --use-from /path/nsa.json
```
# How to build
## Scan Periodically using Helm - Contributed by [@yonahd](https://github.com/yonahd)
[Please follow the instructions here](https://hub.armo.cloud/docs/installation-of-armo-in-cluster)
[helm chart repo](https://github.com/armosec/armo-helm)
Note: development (and the release process) is done with Go `1.16`
## Scan using docker image
Official Docker image `quay.io/armosec/kubescape`
```
docker run -v "$(pwd)/example.yaml:/app/example.yaml quay.io/armosec/kubescape scan /app/example.yaml
```
# Submit data manually
Use the `submit` command if you wish to submit data manually
## Submit scan results manually
First, scan your cluster using the `json` format flag: `kubescape scan framework <name> --format json --output path/to/results.json`.
Now you can submit the results to the Kubaescape SaaS version -
```
kubescape submit results path/to/results.json
```
# How to build
## Build using python (3.7^) script
Kubescape can be built using:
``` sh
python build.py
```
Note: In order to built using the above script, one must set the environment
variables in this script:
+ RELEASE
+ ArmoBEServer
+ ArmoERServer
+ ArmoWebsite
+ ArmoAuthServer
## Build using go
Note: development (and the release process) is done with Go `1.17`
1. Clone Project
```
git clone git@github.com:armosec/kubescape.git kubescape && cd "$_"
git clone https://github.com/armosec/kubescape.git kubescape && cd "$_"
```
2. Build
```
go mod tidy && go build -o kubescape .
go build -o kubescape .
```
3. Run
```
./kubescape scan framework nsa --exclude-namespaces kube-system,kube-public
./kubescape scan --submit --enable-host-scan
```
4. Enjoy :zany_face:
## Docker Build
### Build your own Docker image
1. Clone Project
```
git clone https://github.com/armosec/kubescape.git kubescape && cd "$_"
```
2. Build
```
docker build -t kubescape -f build/Dockerfile .
```
# Under the hood
## Tests
Kubescape is running the following tests according to what is defined by [Kubernetes Hardening Guidance by NSA and CISA](https://www.nsa.gov/News-Features/Feature-Stories/Article-View/Article/2716980/nsa-cisa-release-kubernetes-hardening-guidance/)
Kubescape is running the following tests according to what is defined by [Kubernetes Hardening Guidance by NSA and CISA](https://www.nsa.gov/Press-Room/News-Highlights/Article/Article/2716980/nsa-cisa-release-kubernetes-hardening-guidance/)
* Non-root containers
* Immutable container filesystem
* Privileged containers
* Immutable container filesystem
* Privileged containers
* hostPID, hostIPC privileges
* hostNetwork access
* allowedHostPaths field
* Protecting pod service account tokens
* Resource policies
* Control plane hardening
* Control plane hardening
* Exposed dashboard
* Allow privilege escalation
* Applications credentials in configuration files
@@ -142,16 +333,21 @@ Kubescape is running the following tests according to what is defined by [Kubern
* Ingress and Egress blocked
* Container hostPort
* Network policies
* Symlink Exchange Can Allow Host Filesystem Access (CVE-2021-25741)
## Technology
Kubescape based on OPA engine: https://github.com/open-policy-agent/opa and ARMO's posture controls.
Kubescape based on OPA engine: https://github.com/open-policy-agent/opa and ARMO's posture controls.
The tools retrieves Kubernetes objects from the API server and runs a set of [regos snippets](https://www.openpolicyagent.org/docs/latest/policy-language/) developed by [ARMO](https://www.armosec.io/).
The tools retrieves Kubernetes objects from the API server and runs a set of [regos snippets](https://www.openpolicyagent.org/docs/latest/policy-language/) developed by [ARMO](https://www.armosec.io/).
The results by default printed in a pretty "console friendly" manner, but they can be retrieved in JSON format for further processing.
Kubescape is an open source project, we welcome your feedback and ideas for improvement. We're also aiming to collaborate with the Kubernetes community to help make the tests themselves more robust and complete as Kubernetes develops.
## Thanks to all the contributors ❤️
<a href = "https://github.com/armosec/kubescape/graphs/contributors">
<img src = "https://contrib.rocks/image?repo=armosec/kubescape"/>
</a>

build.py Normal file (76 changed lines)
@@ -0,0 +1,76 @@
import os
import sys
import hashlib
import platform
import subprocess
BASE_GETTER_CONST = "github.com/armosec/kubescape/cautils/getter"
BE_SERVER_CONST = BASE_GETTER_CONST + ".ArmoBEURL"
ER_SERVER_CONST = BASE_GETTER_CONST + ".ArmoERURL"
WEBSITE_CONST = BASE_GETTER_CONST + ".ArmoFEURL"
AUTH_SERVER_CONST = BASE_GETTER_CONST + ".armoAUTHURL"
def checkStatus(status, msg):
if status != 0:
sys.stderr.write(msg)
sys.exit(status)
def getBuildDir():
currentPlatform = platform.system()
buildDir = "build/"
if currentPlatform == "Windows": buildDir += "windows-latest"
elif currentPlatform == "Linux": buildDir += "ubuntu-latest"
elif currentPlatform == "Darwin": buildDir += "macos-latest"
else: raise OSError("Platform %s is not supported!" % (currentPlatform))
return buildDir
def getPackageName():
packageName = "kubescape"
# if platform.system() == "Windows": packageName += ".exe"
return packageName
def main():
print("Building Kubescape")
# print environment variables
# print(os.environ)
# Set some variables
packageName = getPackageName()
buildUrl = "github.com/armosec/kubescape/cautils.BuildNumber"
releaseVersion = os.getenv("RELEASE")
ArmoBEServer = os.getenv("ArmoBEServer")
ArmoERServer = os.getenv("ArmoERServer")
ArmoWebsite = os.getenv("ArmoWebsite")
ArmoAuthServer = os.getenv("ArmoAuthServer")
# Create build directory
buildDir = getBuildDir()
if not os.path.isdir(buildDir):
os.makedirs(buildDir)
# Build kubescape
ldflags = "-w -s -X %s=%s -X %s=%s -X %s=%s -X %s=%s -X %s=%s" \
% (buildUrl, releaseVersion, BE_SERVER_CONST, ArmoBEServer,
ER_SERVER_CONST, ArmoERServer, WEBSITE_CONST, ArmoWebsite,
AUTH_SERVER_CONST, ArmoAuthServer)
status = subprocess.call(["go", "build", "-o", "%s/%s" % (buildDir, packageName), "-ldflags", ldflags])
checkStatus(status, "Failed to build kubescape")
sha1 = hashlib.sha1()
with open(buildDir + "/" + packageName, "rb") as kube:
sha1.update(kube.read())
with open(buildDir + "/" + packageName + ".sha1", "w") as kube_sha:
kube_sha.write(sha1.hexdigest())
print("Build Done")
if __name__ == "__main__":
main()
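For context, the `-X` flags assembled above only work because the target packages expose plain string variables that the linker can overwrite (for example `github.com/armosec/kubescape/cautils.BuildNumber`). The snippet below is an illustrative sketch of that pattern, not the actual kubescape source; it uses `main.BuildNumber` so it can be built on its own.
```
package main

import "fmt"

// BuildNumber is empty in the source; a release build injects it at link time, e.g.
//   go build -ldflags "-X main.BuildNumber=v2.0.150" .
var BuildNumber string

func main() {
	if BuildNumber == "" {
		fmt.Println("unknown build (no ldflags injected)")
		return
	}
	fmt.Println("kubescape build:", BuildNumber)
}
```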

build/Dockerfile Normal file (32 lines)

@@ -0,0 +1,32 @@
FROM golang:1.17-alpine as builder
#ENV GOPROXY=https://goproxy.io,direct
ARG image_version
ENV RELEASE=$image_version
ENV GO111MODULE=
ENV CGO_ENABLED=0
# Install required python/pip
ENV PYTHONUNBUFFERED=1
RUN apk add --update --no-cache python3 && ln -sf python3 /usr/bin/python
RUN python3 -m ensurepip
RUN pip3 install --no-cache --upgrade pip setuptools
WORKDIR /work
ADD . .
RUN python build.py
RUN ls -ltr build/ubuntu-latest
RUN cat /work/build/ubuntu-latest/kubescape.sha1
FROM alpine
COPY --from=builder /work/build/ubuntu-latest/kubescape /usr/bin/kubescape
# # Download the frameworks. Use the "--use-default" flag when running kubescape
# RUN kubescape download framework nsa && kubescape download framework mitre
ENTRYPOINT ["kubescape"]


@@ -1,101 +0,0 @@
package apis
import (
"bytes"
"fmt"
"io/ioutil"
"net/http"
)
// HTTPReqFunc allows you to insert query params and more into the aggregation message when using the update aggregator
type HTTPReqFunc func(req *http.Request, qryData interface{})
func BasicBEQuery(req *http.Request, qryData interface{}) {
q := req.URL.Query()
if notificationData, isok := qryData.(*LoginObject); isok {
q.Add("customerGUID", notificationData.GUID)
}
req.URL.RawQuery = q.Encode()
}
func EmptyQuery(req *http.Request, qryData interface{}) {
q := req.URL.Query()
req.URL.RawQuery = q.Encode()
}
func MapQuery(req *http.Request, qryData interface{}) {
q := req.URL.Query()
if qryMap, isok := qryData.(map[string]string); isok {
for k, v := range qryMap {
q.Add(k, v)
}
}
req.URL.RawQuery = q.Encode()
}
func BEHttpRequest(loginobj *LoginObject, beURL,
httpverb string,
endpoint string,
payload []byte,
f HTTPReqFunc,
qryData interface{}) ([]byte, error) {
client := &http.Client{}
beURL = fmt.Sprintf("%v/%v", beURL, endpoint)
req, err := http.NewRequest(httpverb, beURL, bytes.NewReader(payload))
if err != nil {
return nil, err
}
req.Header.Set("Authorization", loginobj.Authorization)
f(req, qryData)
for _, cookie := range loginobj.Cookies {
req.AddCookie(cookie)
}
resp, err := client.Do(req)
if err != nil {
return nil, err
}
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
fmt.Printf("req:\n%v\nresp:%v\n", req, resp)
return nil, fmt.Errorf("Error #%v Due to: %v", resp.StatusCode, resp.Status)
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
return body, nil
}
type BELoginResponse struct {
Name string `json:"name"`
PreferredUsername string `json:"preferred_username"`
Email string `json:"email"`
CustomerGuid string `json:"customerGuid"`
Expires string `json:"expires"`
Authorization string `json:"authorization"`
Cookies []*http.Cookie
}
func (r *BELoginResponse) ToLoginObject() *LoginObject {
l := &LoginObject{}
l.Authorization = r.Authorization
l.Cookies = r.Cookies
l.Expires = r.Expires
l.GUID = r.CustomerGuid
return l
}
type BackendConnector struct {
BaseURL string
BELoginResponse *BELoginResponse
Credentials *CustomerLoginDetails
HTTPClient *http.Client
}


@@ -1,128 +0,0 @@
package apis
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"strings"
)
func MakeBackendConnector(client *http.Client, baseURL string, loginDetails *CustomerLoginDetails) (*BackendConnector, error) {
if err := ValidateBEConnectorMakerInput(client, baseURL, loginDetails); err != nil {
return nil, err
}
conn := &BackendConnector{BaseURL: baseURL, Credentials: loginDetails, HTTPClient: client}
err := conn.Login()
return conn, err
}
func ValidateBEConnectorMakerInput(client *http.Client, baseURL string, loginDetails *CustomerLoginDetails) error {
if client == nil {
fmt.Errorf("You must provide an initialized httpclient")
}
if len(baseURL) == 0 {
return fmt.Errorf("you must provide a valid backend url")
}
if loginDetails == nil || (len(loginDetails.Email) == 0 && len(loginDetails.Password) == 0) {
return fmt.Errorf("you must provide valid login details")
}
return nil
}
func (r *BackendConnector) Login() error {
if !r.IsExpired() {
return nil
}
loginInfoBytes, err := json.Marshal(r.Credentials)
if err != nil {
return fmt.Errorf("unable to marshal credentials properly")
}
beURL := fmt.Sprintf("%v/%v", r.BaseURL, "login")
req, err := http.NewRequest("POST", beURL, bytes.NewReader(loginInfoBytes))
if err != nil {
return err
}
req.Header.Set("Referer", strings.Replace(beURL, "dashbe", "cpanel", 1))
resp, err := r.HTTPClient.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return fmt.Errorf("unable to read login response")
}
loginS := &BELoginResponse{}
json.Unmarshal(body, &loginS)
loginS.Cookies = resp.Cookies()
r.BELoginResponse = loginS
return nil
}
func (r *BackendConnector) IsExpired() bool {
return r.BELoginResponse == nil || r.BELoginResponse.ToLoginObject().IsExpired()
}
func (r *BackendConnector) GetBaseURL() string {
return r.BaseURL
}
func (r *BackendConnector) GetLoginObj() *LoginObject {
return r.BELoginResponse.ToLoginObject()
}
func (r *BackendConnector) GetClient() *http.Client {
return r.HTTPClient
}
func (r *BackendConnector) HTTPSend(httpverb string,
endpoint string,
payload []byte,
f HTTPReqFunc,
qryData interface{}) ([]byte, error) {
beURL := fmt.Sprintf("%v/%v", r.GetBaseURL(), endpoint)
req, err := http.NewRequest(httpverb, beURL, bytes.NewReader(payload))
if err != nil {
return nil, err
}
if r.IsExpired() {
r.Login()
}
loginobj := r.GetLoginObj()
req.Header.Set("Authorization", loginobj.Authorization)
f(req, qryData)
q := req.URL.Query()
q.Set("customerGUID", loginobj.GUID)
req.URL.RawQuery = q.Encode()
for _, cookie := range loginobj.Cookies {
req.AddCookie(cookie)
}
resp, err := r.GetClient().Do(req)
if err != nil {
return nil, err
}
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
fmt.Printf("req:\n%v\nresp:%v\n", req, resp)
return nil, fmt.Errorf("Error #%v Due to: %v", resp.StatusCode, resp.Status)
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
return body, nil
}
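A hedged usage sketch of the backend connector removed above: log in once via MakeBackendConnector and reuse HTTPSend with MapQuery for query parameters. The import path, endpoint, and credentials below are placeholders/assumptions, not real values.
```
package main

import (
	"fmt"
	"net/http"

	"github.com/armosec/kubescape/cautils/apis" // assumed import path for the removed package
)

func main() {
	client := &http.Client{}
	// Placeholder credentials, not real values.
	creds := &apis.CustomerLoginDetails{Email: "user@example.com", Password: "secret"}

	conn, err := apis.MakeBackendConnector(client, "https://backend.example.com", creds)
	if err != nil {
		panic(err)
	}

	// HTTPSend re-logs-in when the session has expired and adds the customerGUID query param.
	body, err := conn.HTTPSend("GET", "v1/microservicesOverview", nil,
		apis.MapQuery, map[string]string{"wlid": "wlid://cluster-demo/namespace-default/deployment-web"})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
}
```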


@@ -1,25 +0,0 @@
package apis
// WebsocketScanCommand api
const (
WebsocketScanCommandVersion string = "v1"
WebsocketScanCommandPath string = "scanImage"
)
// commands send via websocket
const (
UPDATE string = "update"
ATTACH string = "Attach"
REMOVE string = "remove"
DETACH string = "Detach"
INCOMPATIBLE string = "Incompatible"
REPLACE_HEADERS string = "ReplaceHeaders"
IMAGE_UNREACHABLE string = "ImageUnreachable"
SIGN string = "sign"
UNREGISTERED string = "unregistered"
INJECT string = "inject"
RESTART string = "restart"
ENCRYPT string = "encryptSecret"
DECRYPT string = "decryptSecret"
SCAN string = "scan"
)


@@ -1,78 +0,0 @@
package apis
import (
"encoding/json"
"fmt"
"net/http"
"github.com/docker/docker/api/types"
)
// WebsocketScanCommand triggers a scan through the websocket
type WebsocketScanCommand struct {
// CustomerGUID string `json:"customerGUID"`
ImageTag string `json:"imageTag"`
Wlid string `json:"wlid"`
IsScanned bool `json:"isScanned"`
ContainerName string `json:"containerName"`
JobID string `json:"jobID,omitempty"`
LastAction int `json:"actionIDN"`
// ImageHash string `json:"imageHash"`
Credentials *types.AuthConfig `json:"credentials,omitempty"`
}
//taken from BE
// ElasticRespTotal holds the total struct in Elastic array response
type ElasticRespTotal struct {
Value int `json:"value"`
Relation string `json:"relation"`
}
// V2ListResponse holds the response of some list request with some metadata
type V2ListResponse struct {
Total ElasticRespTotal `json:"total"`
Response interface{} `json:"response"`
// Cursor for quick access to the next page. Not supported yet
Cursor string `json:"cursor"`
}
// Oauth2Customer returns inside the "ca_groups" field in claims section of
// Oauth2 verification process
type Oauth2Customer struct {
CustomerName string `json:"customerName"`
CustomerGUID string `json:"customerGUID"`
}
type LoginObject struct {
Authorization string `json:"authorization"`
GUID string
Cookies []*http.Cookie
Expires string
}
type SafeMode struct {
Reporter string `json:"reporter"` // "Agent"
Action string `json:"action,omitempty"` // "action"
Wlid string `json:"wlid"` // CAA_WLID
PodName string `json:"podName"` // CAA_POD_NAME
InstanceID string `json:"instanceID"` // CAA_POD_NAME
ContainerName string `json:"containerName,omitempty"` // CAA_CONTAINER_NAME
ProcessName string `json:"processName,omitempty"`
ProcessID int `json:"processID,omitempty"`
ProcessCMD string `json:"processCMD,omitempty"`
ComponentGUID string `json:"componentGUID,omitempty"` // CAA_GUID
StatusCode int `json:"statusCode"` // 0/1/2
ProcessExitCode int `json:"processExitCode"` // 0 +
Timestamp int64 `json:"timestamp"`
Message string `json:"message,omitempty"` // any string
JobID string `json:"jobID,omitempty"` // any string
Compatible *bool `json:"compatible,omitempty"`
}
func (safeMode *SafeMode) Json() string {
b, err := json.Marshal(*safeMode)
if err != nil {
return ""
}
return fmt.Sprintf("%s", b)
}


@@ -1,26 +0,0 @@
package apis
// import (
// "fmt"
// "net/http"
// "testing"
// )
// func TestAuditStructure(t *testing.T) {
// c := http.Client{}
// be, err := MakeBackendConnector(&c, "https://dashbe.eudev3.cyberarmorsoft.com", &CustomerLoginDetails{Email: "lalafi@cyberarmor.io", Password: "*", CustomerName: "CyberArmorTests"})
// if err != nil {
// t.Errorf("sad1")
// }
// b, err := be.HTTPSend("GET", "v1/microservicesOverview", nil, MapQuery, map[string]string{"wlid": "wlid://cluster-childrenofbodom/namespace-default/deployment-pos"})
// if err != nil {
// t.Errorf("sad2")
// }
// fmt.Printf("%v", string(b))
// t.Errorf("sad")
// }


@@ -1,27 +0,0 @@
package apis
import (
"net/http"
)
// type Dashboard interface {
// OPAFRAMEWORKGet(string, bool) ([]opapolicy.Framework, error)
// }
// Connector - interface for any connector (BE/Portal and so on)
type Connector interface {
// may be used for a more generic HTTPSend interface-based method
GetBaseURL() string
GetLoginObj() *LoginObject
GetClient() *http.Client
Login() error
IsExpired() bool
HTTPSend(httpverb string,
endpoint string,
payload []byte,
f HTTPReqFunc,
qryData interface{}) ([]byte, error)
}


@@ -1,256 +0,0 @@
package apis
import (
"bytes"
"net/http"
"time"
"io/ioutil"
oidc "github.com/coreos/go-oidc"
uuid "github.com/satori/go.uuid"
// "go.uber.org/zap"
"context"
"encoding/json"
"fmt"
"strings"
"golang.org/x/oauth2"
)
func GetOauth2TokenURL() string {
return "https://idens.eudev3.cyberarmorsoft.com/auth/realms/CyberArmorSites"
}
func GetLoginStruct() (LoginAux, error) {
return LoginAux{Referer: "https://cpanel.eudev3.cyberarmorsoft.com/login", Url: "https://cpanel.eudev3.cyberarmorsoft.com/login"}, nil
}
func LoginWithKeycloak(loginDetails CustomerLoginDetails) ([]uuid.UUID, *oidc.IDToken, error) {
// var custGUID uuid.UUID
// config.Oauth2TokenURL
if GetOauth2TokenURL() == "" {
return nil, nil, fmt.Errorf("missing oauth2 token URL")
}
urlaux, _ := GetLoginStruct()
conf, err := getOauth2Config(urlaux)
if err != nil {
return nil, nil, err
}
ctx := context.Background()
provider, err := oidc.NewProvider(ctx, GetOauth2TokenURL())
if err != nil {
return nil, nil, err
}
// "Oauth2ClientID": "golang-client"
oidcConfig := &oidc.Config{
ClientID: "golang-client",
SkipClientIDCheck: true,
}
verifier := provider.Verifier(oidcConfig)
ouToken, err := conf.PasswordCredentialsToken(ctx, loginDetails.Email, loginDetails.Password)
if err != nil {
return nil, nil, err
}
// "Authorization",
authorization := fmt.Sprintf("%s %s", ouToken.Type(), ouToken.AccessToken)
// oidc.IDTokenVerifier
tkn, err := verifier.Verify(ctx, ouToken.AccessToken)
if err != nil {
return nil, tkn, err
}
tkn.Nonce = authorization
if loginDetails.CustomerName == "" {
customers, err := getCustomersNames(tkn)
if err != nil {
return nil, tkn, err
}
if len(customers) == 1 {
loginDetails.CustomerName = customers[0]
} else {
return nil, tkn, fmt.Errorf("login with one of the following customers: %v", customers)
}
}
custGUID, err := getCustomerGUID(tkn, &loginDetails)
if err != nil {
return nil, tkn, err
}
return []uuid.UUID{custGUID}, tkn, nil
}
func getOauth2Config(urlaux LoginAux) (*oauth2.Config, error) {
reURLSlices := strings.Split(urlaux.Referer, "/")
if len(reURLSlices) == 0 {
reURLSlices = strings.Split(urlaux.Url, "/")
}
// zapLogger.With(zap.Strings("referer", reURLSlices)).Info("Searching oauth2Config for")
if len(reURLSlices) < 3 {
reURLSlices = []string{reURLSlices[0], reURLSlices[0], reURLSlices[0]}
}
lg, _ := GetLoginStruct()
provider, _ := oidc.NewProvider(context.Background(), GetOauth2TokenURL())
//provider.Endpoint {"AuthURL":"https://idens.eudev3.cyberarmorsoft.com/auth/realms/CyberArmorSites/protocol/openid-connect/auth","TokenURL":"https://idens.eudev3.cyberarmorsoft.com/auth/realms/CyberArmorSites/protocol/openid-connect/token","AuthStyle":0}
conf := oauth2.Config{
ClientID: "golang-client",
ClientSecret: "4e33bad2-3491-41a6-b486-93c492cfb4a2",
RedirectURL: lg.Referer,
// Discovery returns the OAuth2 endpoints.
Endpoint: provider.Endpoint(),
// "openid" is a required scope for OpenID Connect flows.
Scopes: []string{oidc.ScopeOpenID, "profile", "email"},
}
return &conf, nil
// return nil, fmt.Errorf("canno't find oauth2Config for referer '%+v'.\nPlease set referer or origin headers", reURLSlices)
}
func getCustomersNames(oauth2Details *oidc.IDToken) ([]string, error) {
var claimsJSON Oauth2Claims
if err := oauth2Details.Claims(&claimsJSON); err != nil {
return nil, err
}
customersList := make([]string, 0, len(claimsJSON.CAGroups))
for _, v := range claimsJSON.CAGroups {
var caCustomer Oauth2Customer
if err := json.Unmarshal([]byte(v), &caCustomer); err == nil {
customersList = append(customersList, caCustomer.CustomerName)
}
}
return customersList, nil
}
func getCustomerGUID(tkn *oidc.IDToken, loginDetails *CustomerLoginDetails) (uuid.UUID, error) {
customers, err := getCustomersList(tkn)
if err != nil {
return uuid.UUID{}, err
}
// if customer name not provided - use default customer
if loginDetails.CustomerName == "" && len(customers) > 0 {
return uuid.FromString(customers[0].CustomerGUID)
}
for _, i := range customers {
if i.CustomerName == loginDetails.CustomerName {
return uuid.FromString(i.CustomerGUID)
}
}
return uuid.UUID{}, fmt.Errorf("customer name not found in customer list")
}
func getCustomersList(oauth2Details *oidc.IDToken) ([]Oauth2Customer, error) {
var claimsJSON Oauth2Claims
if err := oauth2Details.Claims(&claimsJSON); err != nil {
return nil, err
}
customersList := make([]Oauth2Customer, 0, len(claimsJSON.CAGroups))
for _, v := range claimsJSON.CAGroups {
var caCustomer Oauth2Customer
if err := json.Unmarshal([]byte(v), &caCustomer); err == nil {
customersList = append(customersList, caCustomer)
}
}
return customersList, nil
}
// func MakeAuthCookies(custGUID uuid.UUID, ouToken *oidc.IDToken) (*http.Cookie, error) {
// var ccc http.Cookie
// var responseData AuthenticationCookie
// expireDate := time.Now().UTC().Add(time.Duration(config.CookieExpirationHours) * time.Hour)
// if ouToken != nil {
// expireDate = ouToken.Expiry
// }
// ccc.Expires = expireDate
// responseData.CustomerGUID = custGUID
// responseData.Expires = ccc.Expires
// responseData.Version = 0
// authorizationStr := ""
// if ouToken != nil {
// authorizationStr = ouToken.Nonce
// if err := ouToken.Claims(&responseData.Oauth2Claims); err != nil {
// errStr := fmt.Sprintf("failed to get claims from JWT")
// return nil, fmt.Errorf("%v", errStr)
// }
// }
// jsonBytes, err := json.Marshal(responseData)
// if err != nil {
// errStr := fmt.Sprintf("failed to get claims from JWT")
// return nil, fmt.Errorf("%v", errStr)
// }
// ccc.Name = "auth"
// ccc.Value = hex.EncodeToString(jsonBytes) + "." + cacheaccess.CalcHmac256(jsonBytes)
// // TODO: HttpOnly for security...
// ccc.HttpOnly = false
// ccc.Path = "/"
// ccc.Secure = true
// ccc.SameSite = http.SameSiteNoneMode
// http.SetCookie(w, &ccc)
// responseData.Authorization = authorizationStr
// jsonBytes, err = json.Marshal(responseData)
// if err != nil {
// w.WriteHeader(http.StatusInternalServerError)
// fmt.Fprintf(w, "error while marshaling response(2) %s", err)
// return
// }
// w.Write(jsonBytes)
// }
func Login(loginDetails CustomerLoginDetails) (*LoginObject, error) {
return nil, nil
}
func GetBEInfo(cfgFile string) string {
return "https://dashbe.eudev3.cyberarmorsoft.com"
}
func BELogin(loginDetails *CustomerLoginDetails, login string, cfg string) (*BELoginResponse, error) {
client := &http.Client{}
basebeURL := GetBEInfo(cfg)
beURL := fmt.Sprintf("%v/%v", basebeURL, login)
loginInfoBytes, err := json.Marshal(loginDetails)
if err != nil {
return nil, err
}
req, err := http.NewRequest("POST", beURL, bytes.NewReader(loginInfoBytes))
if err != nil {
return nil, err
}
req.Header.Set("Referer", strings.Replace(beURL, "dashbe", "cpanel", 1))
resp, err := client.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
loginS := &BELoginResponse{}
json.Unmarshal(body, &loginS)
loginS.Cookies = resp.Cookies()
return loginS, nil
}
func (r *LoginObject) IsExpired() bool {
if r == nil {
return true
}
t, err := time.Parse(time.RFC3339, r.Expires)
if err != nil {
return true
}
return t.UTC().Before(time.Now().UTC())
}


@@ -1,41 +0,0 @@
package apis
// func TestLogin2BE(t *testing.T) {
// loginDetails := CustomerLoginDetails{Email: "lalafi@cyberarmor.io", Password: "***", CustomerName: "CyberArmorTests"}
// res, err := BELogin(loginDetails, "login")
// if err != nil {
// t.Errorf("failed to get raw audit is different ")
// }
// k := res.ToLoginObject()
// fmt.Printf("%v\n", k)
// }
// func TestGetMicroserviceOverview(t *testing.T) {
// // client := &http.Client{}
// loginDetails := CustomerLoginDetails{Email: "lalafi@cyberarmor.io", Password: "***", CustomerName: "CyberArmorTests"}
// loginobj, err := BELogin(loginDetails, "login")
// if err != nil {
// t.Errorf("failed to get raw audit is different ")
// }
// k := loginobj.ToLoginObject()
// beURL := GetBEInfo("")
// res, err := BEHttpRequest(k, beURL,
// "GET",
// "v1/microservicesOverview",
// nil,
// BasicBEQuery,
// k)
// if err != nil {
// t.Errorf("failed to get raw audit is different ")
// }
// s := string(res)
// fmt.Printf("%v\n", s)
// }


@@ -1,38 +0,0 @@
package apis
import (
"time"
"github.com/gofrs/uuid"
)
// AuthenticationCookie is what it is
type AuthenticationCookie struct {
Oauth2Claims `json:",inline"`
CustomerGUID uuid.UUID `json:"customerGuid"`
Expires time.Time `json:"expires"`
Version int `json:"version"`
Authorization string `json:"authorization,omitempty"`
}
type LoginAux struct {
Referer string
Url string
}
// CustomerLoginDetails is what it is
type CustomerLoginDetails struct {
Email string `json:"email"`
Password string `json:"password"`
CustomerName string `json:"customer,omitempty"`
CustomerGUID uuid.UUID `json:"customerGuid,omitempty"`
}
// Oauth2Claims returns in claims section of Oauth2 verification process
type Oauth2Claims struct {
Sub string `json:"sub"`
Name string `json:"name"`
PreferredUserName string `json:"preferred_username"`
CAGroups []string `json:"ca_groups"`
Email string `json:"email"`
}


@@ -1,132 +0,0 @@
package apis
import (
"encoding/json"
"fmt"
)
// Commands list of commands received from websocket
type Commands struct {
Commands []Command `json:"commands"`
}
// Command structure of command received from websocket
type Command struct {
CommandName string `json:"commandName"`
ResponseID string `json:"responseID"`
Wlid string `json:"wlid,omitempty"`
WildWlid string `json:"wildWlid,omitempty"`
Sid string `json:"sid,omitempty"`
WildSid string `json:"wildSid,omitempty"`
JobTracking JobTracking `json:"jobTracking"`
Args map[string]interface{} `json:"args,omitempty"`
}
type JobTracking struct {
JobID string `json:"jobID,omitempty"`
ParentID string `json:"parentAction,omitempty"`
LastActionNumber int `json:"numSeq,omitempty"`
}
func (c *Command) DeepCopy() *Command {
newCommand := &Command{}
newCommand.CommandName = c.CommandName
newCommand.ResponseID = c.ResponseID
newCommand.Wlid = c.Wlid
newCommand.WildWlid = c.WildWlid
if c.Args != nil {
newCommand.Args = make(map[string]interface{})
for i, j := range c.Args {
newCommand.Args[i] = j
}
}
return newCommand
}
func (c *Command) GetLabels() map[string]string {
if c.Args != nil {
if ilabels, ok := c.Args["labels"]; ok {
labels := map[string]string{}
if b, e := json.Marshal(ilabels); e == nil {
if e = json.Unmarshal(b, &labels); e == nil {
return labels
}
}
}
}
return map[string]string{}
}
func (c *Command) SetLabels(labels map[string]string) {
if c.Args == nil {
c.Args = make(map[string]interface{})
}
c.Args["labels"] = labels
}
func (c *Command) GetFieldSelector() map[string]string {
if c.Args != nil {
if ilabels, ok := c.Args["fieldSelector"]; ok {
labels := map[string]string{}
if b, e := json.Marshal(ilabels); e == nil {
if e = json.Unmarshal(b, &labels); e == nil {
return labels
}
}
}
}
return map[string]string{}
}
func (c *Command) SetFieldSelector(labels map[string]string) {
if c.Args == nil {
c.Args = make(map[string]interface{})
}
c.Args["fieldSelector"] = labels
}
func (c *Command) GetID() string {
if c.WildWlid != "" {
return c.WildWlid
}
if c.WildSid != "" {
return c.WildSid
}
if c.Wlid != "" {
return c.Wlid
}
if c.Sid != "" {
return c.Sid
}
return ""
}
func (c *Command) Json() string {
b, _ := json.Marshal(*c)
return fmt.Sprintf("%s", b)
}
func SIDFallback(c *Command) {
if c.GetID() == "" {
sid, err := getSIDFromArgs(c.Args)
if err != nil || sid == "" {
return
}
c.Sid = sid
}
}
func getSIDFromArgs(args map[string]interface{}) (string, error) {
sidInterface, ok := args["sid"]
if !ok {
return "", nil
}
sid, ok := sidInterface.(string)
if !ok || sid == "" {
return "", fmt.Errorf("sid found in args but empty")
}
// if _, err := secrethandling.SplitSecretID(sid); err != nil {
// return "", err
// }
return sid, nil
}
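A small illustrative sketch of the Args plumbing above: labels are stored under the "labels" key and round-trip through JSON on the way out. The import path is an assumption for the removed package.
```
package main

import (
	"fmt"

	"github.com/armosec/kubescape/cautils/apis" // assumed import path for the removed package
)

func main() {
	cmd := apis.Command{
		CommandName: "scan",
		Wlid:        "wlid://cluster-demo/namespace-default/deployment-web",
	}

	// SetLabels lazily allocates Args and stores the map under the "labels" key.
	cmd.SetLabels(map[string]string{"app": "web", "tier": "frontend"})

	// GetLabels marshals the stored value and unmarshals it back into map[string]string.
	fmt.Println(cmd.GetLabels()) // map[app:web tier:frontend]
	fmt.Println(cmd.GetID())     // prints the wlid, since no wild/sid identifiers are set
}
```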


@@ -1,16 +0,0 @@
package armotypes
type EnforcmentsRule struct {
MonitoredObject []string `json:"monitoredObject"`
MonitoredObjectExistence []string `json:"objectExistence"`
MonitoredObjectEvent []string `json:"event"`
Action []string `json:"action"`
}
type ExecutionPolicy struct {
PortalBase `json:",inline"`
Designators []PortalDesignator `json:"designators"`
PolicyType string `json:"policyType"`
CreationTime string `json:"creation_time"`
ExecutionEnforcmentsRules []EnforcmentsRule `json:"enforcementRules"`
}


@@ -1,57 +0,0 @@
package armotypes
const (
CostumerGuidQuery = "costumerGUID"
ClusterNameQuery = "cluster"
DatacenterNameQuery = "datacenter"
NamespaceQuery = "namespace"
ProjectQuery = "project"
WlidQuery = "wlid"
SidQuery = "sid"
)
// PortalBase holds basic items data from portal BE
type PortalBase struct {
GUID string `json:"guid"`
Name string `json:"name"`
Attributes map[string]interface{} `json:"attributes,omitempty"` // could be string
}
type DesignatorType string
// Supported designators
const (
DesignatorAttributes DesignatorType = "Attributes"
/*
WorkloadID format.
k8s format: wlid://cluster-<cluster>/namespace-<namespace>/<kind>-<name>
native format: wlid://datacenter-<datacenter>/project-<project>/native-<name>
*/
DesignatorWlid DesignatorType = "Wlid"
/*
Wild card - subset of wlid. e.g.
1. Include cluster:
wlid://cluster-<cluster>/
2. Include cluster and namespace (filter out all other namespaces):
wlid://cluster-<cluster>/namespace-<namespace>/
*/
DesignatorWildWlid DesignatorType = "WildWlid"
DesignatorWlidContainer DesignatorType = "WlidContainer"
DesignatorWlidProcess DesignatorType = "WlidProcess"
DesignatorSid DesignatorType = "Sid" // secret id
)
// attributes
const (
AttributeCluster = "cluster"
AttributeNamespace = "namespace"
)
// PortalDesignator represented single designation options
type PortalDesignator struct {
DesignatorType DesignatorType `json:"designatorType"`
WLID string `json:"wlid"`
WildWLID string `json:"wildwlid"`
SID string `json:"sid"`
Attributes map[string]string `json:"attributes"`
}


@@ -1,18 +0,0 @@
package armotypes
func MockPortalBase(customerGUID, name string, attributes map[string]interface{}) *PortalBase {
if customerGUID == "" {
customerGUID = "36b6f9e1-3b63-4628-994d-cbe16f81e9c7"
}
if name == "" {
name = "portalbase-a"
}
if attributes == nil {
attributes = make(map[string]interface{})
}
return &PortalBase{
GUID: customerGUID,
Name: name,
Attributes: attributes,
}
}


@@ -1,36 +0,0 @@
package armotypes
var IgnoreLabels = []string{AttributeCluster, AttributeNamespace}
// DigestPortalDesignator - get cluster namespace and labels from designator
func DigestPortalDesignator(designator *PortalDesignator) (string, string, map[string]string) {
switch designator.DesignatorType {
case DesignatorAttributes:
return DigestAttributesDesignator(designator.Attributes)
// case DesignatorWlid: TODO
// case DesignatorWildWlid: TODO
default:
}
return "", "", nil
}
func DigestAttributesDesignator(attributes map[string]string) (string, string, map[string]string) {
cluster := ""
namespace := ""
labels := map[string]string{}
if attributes == nil || len(attributes) == 0 {
return cluster, namespace, labels
}
for k, v := range attributes {
labels[k] = v
}
if v, ok := attributes[AttributeNamespace]; ok {
namespace = v
delete(labels, AttributeNamespace)
}
if v, ok := attributes[AttributeCluster]; ok {
cluster = v
delete(labels, AttributeCluster)
}
return cluster, namespace, labels
}
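An illustrative sketch of digesting an attributes designator with the helpers above: cluster and namespace are lifted out of the attributes map and everything else is returned as labels. The import path is an assumption for the removed package.
```
package main

import (
	"fmt"

	"github.com/armosec/kubescape/cautils/armotypes" // assumed import path for the removed package
)

func main() {
	d := &armotypes.PortalDesignator{
		DesignatorType: armotypes.DesignatorAttributes,
		Attributes: map[string]string{
			armotypes.AttributeCluster:   "demo-cluster",
			armotypes.AttributeNamespace: "prod",
			"app":                        "web",
		},
	}

	// Cluster and namespace are pulled out; the remaining attributes are treated as labels.
	cluster, namespace, labels := armotypes.DigestPortalDesignator(d)
	fmt.Println(cluster, namespace, labels) // demo-cluster prod map[app:web]
}
```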


@@ -1,197 +0,0 @@
package cautils
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"strings"
"github.com/golang/glog"
)
// labels added to the workload
const (
ArmoPrefix string = "armo"
ArmoAttach string = ArmoPrefix + ".attach"
ArmoInitialSecret string = ArmoPrefix + ".initial"
ArmoSecretStatus string = ArmoPrefix + ".secret"
ArmoCompatibleLabel string = ArmoPrefix + ".compatible"
ArmoSecretProtectStatus string = "protect"
ArmoSecretClearStatus string = "clear"
)
// annotations added to the workload
const (
ArmoUpdate string = ArmoPrefix + ".last-update"
ArmoWlid string = ArmoPrefix + ".wlid"
ArmoSid string = ArmoPrefix + ".sid"
ArmoJobID string = ArmoPrefix + ".job"
ArmoJobIDPath string = ArmoJobID + "/id"
ArmoJobParentPath string = ArmoJobID + "/parent"
ArmoJobActionPath string = ArmoJobID + "/action"
ArmoCompatibleAnnotation string = ArmoAttach + "/compatible"
ArmoReplaceheaders string = ArmoAttach + "/replaceheaders"
)
const ( // DEPRECATED
CAAttachLabel string = "cyberarmor"
Patched string = "Patched"
Done string = "Done"
Encrypted string = "Protected"
CAInjectOld = "injectCyberArmor"
CAPrefix string = "cyberarmor"
CAProtectedSecret string = CAPrefix + ".secret"
CAInitialSecret string = CAPrefix + ".initial"
CAInject string = CAPrefix + ".inject"
CAIgnore string = CAPrefix + ".ignore"
CAReplaceHeaders string = CAPrefix + ".removeSecurityHeaders"
)
const ( // DEPRECATED
CAUpdate string = CAPrefix + ".last-update"
CAStatus string = CAPrefix + ".status"
CAWlid string = CAPrefix + ".wlid"
)
type ClusterConfig struct {
EventReceiverREST string `json:"eventReceiverREST"`
EventReceiverWS string `json:"eventReceiverWS"`
MaserNotificationServer string `json:"maserNotificationServer"`
Postman string `json:"postman"`
Dashboard string `json:"dashboard"`
Portal string `json:"portal"`
CustomerGUID string `json:"customerGUID"`
ClusterGUID string `json:"clusterGUID"`
ClusterName string `json:"clusterName"`
OciImageURL string `json:"ociImageURL"`
NotificationWSURL string `json:"notificationWSURL"`
NotificationRestURL string `json:"notificationRestURL"`
VulnScanURL string `json:"vulnScanURL"`
OracleURL string `json:"oracleURL"`
ClairURL string `json:"clairURL"`
}
// represents workload basic info
type SpiffeBasicInfo struct {
//cluster/datacenter
Level0 string `json:"level0"`
Level0Type string `json:"level0Type"`
//namespace/project
Level1 string `json:"level1"`
Level1Type string `json:"level1Type"`
Kind string `json:"kind"`
Name string `json:"name"`
}
type ImageInfo struct {
Registry string `json:"registry"`
VersionImage string `json:"versionImage"`
}
func IsAttached(labels map[string]string) *bool {
attach := false
if labels == nil {
return nil
}
if attached, ok := labels[ArmoAttach]; ok {
if strings.ToLower(attached) == "true" {
attach = true
return &attach
} else {
return &attach
}
}
// deprecated
if _, ok := labels[CAAttachLabel]; ok {
attach = true
return &attach
}
// deprecated
if inject, ok := labels[CAInject]; ok {
if strings.ToLower(inject) == "true" {
attach = true
return &attach
}
}
// deprecated
if ignore, ok := labels[CAIgnore]; ok {
if strings.ToLower(ignore) == "true" {
return &attach
}
}
return nil
}
func IsSecretProtected(labels map[string]string) *bool {
protect := false
if labels == nil {
return nil
}
if protected, ok := labels[ArmoSecretStatus]; ok {
if strings.ToLower(protected) == ArmoSecretProtectStatus {
protect = true
return &protect
} else {
return &protect
}
}
return nil
}
func LoadConfig(configPath string, loadToEnv bool) (*ClusterConfig, error) {
if configPath == "" {
configPath = "/etc/config/clusterData.json"
}
dat, err := ioutil.ReadFile(configPath)
if err != nil || len(dat) == 0 {
return nil, fmt.Errorf("Config empty or not found. path: %s", configPath)
}
componentConfig := &ClusterConfig{}
if err := json.Unmarshal(dat, componentConfig); err != nil {
return componentConfig, fmt.Errorf("Failed to read component config, path: %s, reason: %s", configPath, err.Error())
}
if loadToEnv {
componentConfig.LoadConfigToEnv()
}
return componentConfig, nil
}
func (clusterConfig *ClusterConfig) LoadConfigToEnv() {
SetEnv("CA_CLUSTER_NAME", clusterConfig.ClusterName)
SetEnv("CA_CLUSTER_GUID", clusterConfig.ClusterGUID)
SetEnv("CA_ORACLE_SERVER", clusterConfig.OracleURL)
SetEnv("CA_CUSTOMER_GUID", clusterConfig.CustomerGUID)
SetEnv("CA_DASHBOARD_BACKEND", clusterConfig.Dashboard)
SetEnv("CA_NOTIFICATION_SERVER_REST", clusterConfig.NotificationWSURL)
SetEnv("CA_NOTIFICATION_SERVER_WS", clusterConfig.NotificationWSURL)
SetEnv("CA_NOTIFICATION_SERVER_REST", clusterConfig.NotificationRestURL)
SetEnv("CA_OCIMAGE_URL", clusterConfig.OciImageURL)
SetEnv("CA_K8S_REPORT_URL", clusterConfig.EventReceiverWS)
SetEnv("CA_EVENT_RECEIVER_HTTP", clusterConfig.EventReceiverREST)
SetEnv("CA_VULNSCAN", clusterConfig.VulnScanURL)
SetEnv("CA_POSTMAN", clusterConfig.Postman)
SetEnv("MASTER_NOTIFICATION_SERVER_HOST", clusterConfig.MaserNotificationServer)
SetEnv("CLAIR_URL", clusterConfig.ClairURL)
}
func SetEnv(key, value string) {
if e := os.Getenv(key); e == "" {
if err := os.Setenv(key, value); err != nil {
glog.Warningf("%s: %s", key, err.Error())
}
}
}
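A minimal sketch of loading the in-cluster configuration file removed above. The path is a placeholder; when an empty path is passed, LoadConfig falls back to /etc/config/clusterData.json, and loadToEnv=true exports the CA_* variables for other components. The import path is an assumption.
```
package main

import (
	"fmt"
	"os"

	"github.com/armosec/kubescape/cautils" // assumed import path for the removed package
)

func main() {
	cfg, err := cautils.LoadConfig("./clusterData.json", true)
	if err != nil {
		fmt.Println("no cluster config:", err)
		return
	}
	fmt.Println("cluster:", cfg.ClusterName)
	// LoadConfigToEnv only sets variables that are not already present in the environment.
	fmt.Println("CA_CLUSTER_NAME =", os.Getenv("CA_CLUSTER_NAME"))
}
```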


@@ -1,29 +0,0 @@
package cautils
import (
"testing"
)
// tests wlid parse
func TestSpiffeWLIDToInfoSuccess(t *testing.T) {
WLID := "wlid://cluster-HipsterShopCluster2/namespace-prod/deployment-cartservice"
ms, er := SpiffeToSpiffeInfo(WLID)
if er != nil || ms.Level0 != "HipsterShopCluster2" || ms.Level0Type != "cluster" || ms.Level1 != "prod" || ms.Level1Type != "namespace" ||
ms.Kind != "deployment" || ms.Name != "cartservice" {
t.Errorf("TestSpiffeWLIDToInfoSuccess failed to parse %v", WLID)
}
}
func TestSpiffeSIDInfoSuccess(t *testing.T) {
SID := "sid://cluster-HipsterShopCluster2/namespace-dev/secret-caregcred"
ms, er := SpiffeToSpiffeInfo(SID)
if er != nil || ms.Level0 != "HipsterShopCluster2" || ms.Level0Type != "cluster" || ms.Level1 != "dev" || ms.Level1Type != "namespace" ||
ms.Kind != "secret" || ms.Name != "caregcred" {
t.Errorf("TestSpiffeSIDInfoSuccess failed to parse %v", SID)
}
}


@@ -1,118 +0,0 @@
package cautils
import (
"crypto/sha256"
"fmt"
"strings"
)
// wlid/ sid utils
const (
SpiffePrefix = "://"
)
// wlid/ sid utils
const (
PackagePath = "vendor/github.com/armosec/capacketsgo"
)
//AsSHA256 takes anything turns it into string :) https://blog.8bitzen.com/posts/22-08-2019-how-to-hash-a-struct-in-go
func AsSHA256(v interface{}) string {
h := sha256.New()
h.Write([]byte(fmt.Sprintf("%v", v)))
return fmt.Sprintf("%x", h.Sum(nil))
}
func SpiffeToSpiffeInfo(spiffe string) (*SpiffeBasicInfo, error) {
basicInfo := &SpiffeBasicInfo{}
pos := strings.Index(spiffe, SpiffePrefix)
if pos < 0 {
return nil, fmt.Errorf("invalid spiffe %s", spiffe)
}
pos += len(SpiffePrefix)
spiffeNoPrefix := spiffe[pos:]
splits := strings.Split(spiffeNoPrefix, "/")
if len(splits) < 3 {
return nil, fmt.Errorf("invalid spiffe %s", spiffe)
}
p0 := strings.Index(splits[0], "-")
p1 := strings.Index(splits[1], "-")
p2 := strings.Index(splits[2], "-")
if p0 == -1 || p1 == -1 || p2 == -1 {
return nil, fmt.Errorf("invalid spiffe %s", spiffe)
}
basicInfo.Level0Type = splits[0][:p0]
basicInfo.Level0 = splits[0][p0+1:]
basicInfo.Level1Type = splits[1][:p1]
basicInfo.Level1 = splits[1][p1+1:]
basicInfo.Kind = splits[2][:p2]
basicInfo.Name = splits[2][p2+1:]
return basicInfo, nil
}
func ImageTagToImageInfo(imageTag string) (*ImageInfo, error) {
ImageInfo := &ImageInfo{}
spDelimiter := "/"
pos := strings.Index(imageTag, spDelimiter)
if pos < 0 {
ImageInfo.Registry = ""
ImageInfo.VersionImage = imageTag
return ImageInfo, nil
}
splits := strings.Split(imageTag, spDelimiter)
if len(splits) == 0 {
return nil, fmt.Errorf("Invalid image info %s", imageTag)
}
ImageInfo.Registry = splits[0]
if len(splits) > 1 {
ImageInfo.VersionImage = splits[len(splits)-1]
} else {
ImageInfo.VersionImage = ""
}
return ImageInfo, nil
}
func BoolPointer(b bool) *bool { return &b }
func BoolToString(b bool) string {
if b {
return "true"
}
return "false"
}
func BoolPointerToString(b *bool) string {
if b == nil {
return ""
}
if *b {
return "true"
}
return "false"
}
func StringToBool(s string) bool {
if strings.ToLower(s) == "true" || strings.ToLower(s) == "1" {
return true
}
return false
}
func StringToBoolPointer(s string) *bool {
if strings.ToLower(s) == "true" || strings.ToLower(s) == "1" {
return BoolPointer(true)
}
if strings.ToLower(s) == "false" || strings.ToLower(s) == "0" {
return BoolPointer(false)
}
return nil
}


@@ -1,52 +0,0 @@
package cautils
import (
"fmt"
"hash/fnv"
"strings"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
var NamespacesListToIgnore = make([]string, 0)
var KubeNamespaces = []string{metav1.NamespaceSystem, metav1.NamespacePublic}
// InitNamespacesListToIgnore initializes the list of namespaces to ignore when scanning pods
func InitNamespacesListToIgnore(caNamespace string) {
if len(NamespacesListToIgnore) > 0 {
return
}
NamespacesListToIgnore = append(NamespacesListToIgnore, KubeNamespaces...)
NamespacesListToIgnore = append(NamespacesListToIgnore, caNamespace)
}
func IfIgnoreNamespace(ns string) bool {
for i := range NamespacesListToIgnore {
if NamespacesListToIgnore[i] == ns {
return true
}
}
return false
}
func IfKubeNamespace(ns string) bool {
for i := range KubeNamespaces {
if KubeNamespaces[i] == ns {
return true
}
}
return false
}
func hash(s string) string {
h := fnv.New32a()
h.Write([]byte(s))
return fmt.Sprintf("%d", h.Sum32())
}
func GenarateConfigMapName(wlid string) string {
name := strings.ToLower(fmt.Sprintf("ca-%s-%s-%s", GetNamespaceFromWlid(wlid), GetKindFromWlid(wlid), GetNameFromWlid(wlid)))
if len(name) >= 63 {
name = hash(name)
}
return name
}


@@ -1,238 +0,0 @@
package cautils
import (
"fmt"
"strings"
)
// API fields
var (
WlidPrefix = "wlid://"
SidPrefix = "sid://"
ClusterWlidPrefix = "cluster-"
NamespaceWlidPrefix = "namespace-"
DataCenterWlidPrefix = "datacenter-"
ProjectWlidPrefix = "project-"
SecretSIDPrefix = "secret-"
SubSecretSIDPrefix = "subsecret-"
K8SKindsList = []string{"ComponentStatus", "ConfigMap", "ControllerRevision", "CronJob",
"CustomResourceDefinition", "DaemonSet", "Deployment", "Endpoints", "Event", "HorizontalPodAutoscaler",
"Ingress", "Job", "Lease", "LimitRange", "LocalSubjectAccessReview", "MutatingWebhookConfiguration",
"Namespace", "NetworkPolicy", "Node", "PersistentVolume", "PersistentVolumeClaim", "Pod",
"PodDisruptionBudget", "PodSecurityPolicy", "PodTemplate", "PriorityClass", "ReplicaSet",
"ReplicationController", "ResourceQuota", "Role", "RoleBinding", "Secret", "SelfSubjectAccessReview",
"SelfSubjectRulesReview", "Service", "ServiceAccount", "StatefulSet", "StorageClass",
"SubjectAccessReview", "TokenReview", "ValidatingWebhookConfiguration", "VolumeAttachment"}
NativeKindsList = []string{"Dockerized", "Native"}
KindReverseMap = map[string]string{}
dataImagesList = []string{}
)
func IsWlid(id string) bool {
return strings.HasPrefix(id, WlidPrefix)
}
func IsSid(id string) bool {
return strings.HasPrefix(id, SidPrefix)
}
// GetK8SKindFronList returns the canonical kind name from the list of known k8s kinds
func GetK8SKindFronList(kind string) string { // TODO GetK8SKindFromList
for i := range K8SKindsList {
if strings.ToLower(kind) == strings.ToLower(K8SKindsList[i]) {
return K8SKindsList[i]
}
}
return kind
}
// IsK8SKindInList Check if the kind is a known kind
func IsK8SKindInList(kind string) bool {
for i := range K8SKindsList {
if strings.ToLower(kind) == strings.ToLower(K8SKindsList[i]) {
return true
}
}
return false
}
// generateWLID
func generateWLID(pLevel0, level0, pLevel1, level1, k, name string) string {
kind := strings.ToLower(k)
kind = strings.Replace(kind, "-", "", -1)
wlid := WlidPrefix
wlid += fmt.Sprintf("%s%s", pLevel0, level0)
if level1 == "" {
return wlid
}
wlid += fmt.Sprintf("/%s%s", pLevel1, level1)
if kind == "" {
return wlid
}
wlid += fmt.Sprintf("/%s", kind)
if name == "" {
return wlid
}
wlid += fmt.Sprintf("-%s", name)
return wlid
}
// GetWLID get the calculated wlid
func GetWLID(level0, level1, k, name string) string {
return generateWLID(ClusterWlidPrefix, level0, NamespaceWlidPrefix, level1, k, name)
}
// GetK8sWLID get the k8s calculated wlid
func GetK8sWLID(level0, level1, k, name string) string {
return generateWLID(ClusterWlidPrefix, level0, NamespaceWlidPrefix, level1, k, name)
}
// GetNativeWLID get the native calculated wlid
func GetNativeWLID(level0, level1, k, name string) string {
return generateWLID(DataCenterWlidPrefix, level0, ProjectWlidPrefix, level1, k, name)
}
// WildWlidContainsWlid does WildWlid contains Wlid
func WildWlidContainsWlid(wildWlid, wlid string) bool { // TODO- test
if wildWlid == wlid {
return true
}
wildWlidR, _ := RestoreMicroserviceIDsFromSpiffe(wildWlid)
wlidR, _ := RestoreMicroserviceIDsFromSpiffe(wlid)
if len(wildWlidR) > len(wlidR) {
// invalid wlid
return false
}
for i := range wildWlidR {
if wildWlidR[i] != wlidR[i] {
return false
}
}
return true
}
func restoreInnerIdentifiersFromID(spiffeSlices []string) []string {
if len(spiffeSlices) >= 1 && strings.HasPrefix(spiffeSlices[0], ClusterWlidPrefix) {
spiffeSlices[0] = spiffeSlices[0][len(ClusterWlidPrefix):]
}
if len(spiffeSlices) >= 2 && strings.HasPrefix(spiffeSlices[1], NamespaceWlidPrefix) {
spiffeSlices[1] = spiffeSlices[1][len(NamespaceWlidPrefix):]
}
if len(spiffeSlices) >= 3 && strings.Contains(spiffeSlices[2], "-") {
dashIdx := strings.Index(spiffeSlices[2], "-")
spiffeSlices = append(spiffeSlices, spiffeSlices[2][dashIdx+1:])
spiffeSlices[2] = spiffeSlices[2][:dashIdx]
if val, ok := KindReverseMap[spiffeSlices[2]]; ok {
spiffeSlices[2] = val
}
}
return spiffeSlices
}
// RestoreMicroserviceIDsFromSpiffe -
func RestoreMicroserviceIDsFromSpiffe(spiffe string) ([]string, error) {
if spiffe == "" {
return nil, fmt.Errorf("in RestoreMicroserviceIDsFromSpiffe, expecting valid wlid recieved empty string")
}
if StringHasWhitespace(spiffe) {
return nil, fmt.Errorf("wlid %s invalid. whitespace found", spiffe)
}
if strings.HasPrefix(spiffe, WlidPrefix) {
spiffe = spiffe[len(WlidPrefix):]
} else if strings.HasPrefix(spiffe, SidPrefix) {
spiffe = spiffe[len(SidPrefix):]
}
spiffeSlices := strings.Split(spiffe, "/")
// The documented WLID format (https://cyberarmorio.sharepoint.com/sites/development2/Shared%20Documents/kubernetes_design1.docx?web=1)
if len(spiffeSlices) <= 3 {
spiffeSlices = restoreInnerIdentifiersFromID(spiffeSlices)
}
if len(spiffeSlices) != 4 { // first used WLID, deprecated since 24.10.2019
return spiffeSlices, fmt.Errorf("invalid WLID format. format received: %v", spiffeSlices)
}
for i := range spiffeSlices {
if spiffeSlices[i] == "" {
return spiffeSlices, fmt.Errorf("one or more entities are empty, spiffeSlices: %v", spiffeSlices)
}
}
return spiffeSlices, nil
}
// RestoreMicroserviceIDs -
func RestoreMicroserviceIDs(spiffe string) []string {
if spiffe == "" {
return []string{}
}
if StringHasWhitespace(spiffe) {
return []string{}
}
if strings.HasPrefix(spiffe, WlidPrefix) {
spiffe = spiffe[len(WlidPrefix):]
} else if strings.HasPrefix(spiffe, SidPrefix) {
spiffe = spiffe[len(SidPrefix):]
}
spiffeSlices := strings.Split(spiffe, "/")
return restoreInnerIdentifiersFromID(spiffeSlices)
}
// GetClusterFromWlid parse wlid and get cluster
func GetClusterFromWlid(wlid string) string {
r := RestoreMicroserviceIDs(wlid)
if len(r) >= 1 {
return r[0]
}
return ""
}
// GetNamespaceFromWlid parse wlid and get Namespace
func GetNamespaceFromWlid(wlid string) string {
r := RestoreMicroserviceIDs(wlid)
if len(r) >= 2 {
return r[1]
}
return ""
}
// GetKindFromWlid parse wlid and get kind
func GetKindFromWlid(wlid string) string {
r := RestoreMicroserviceIDs(wlid)
if len(r) >= 3 {
return GetK8SKindFronList(r[2])
}
return ""
}
// GetNameFromWlid parse wlid and get name
func GetNameFromWlid(wlid string) string {
r := RestoreMicroserviceIDs(wlid)
if len(r) >= 4 {
return GetK8SKindFronList(r[3])
}
return ""
}
// IsWlidValid test if wlid is a valid wlid
func IsWlidValid(wlid string) error {
_, err := RestoreMicroserviceIDsFromSpiffe(wlid)
return err
}
// StringHasWhitespace check if a string has whitespace
func StringHasWhitespace(str string) bool {
if whitespace := strings.Index(str, " "); whitespace != -1 {
return true
}
return false
}
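A short round-trip sketch of the WLID helpers above: build a k8s WLID and take it apart again. The import path is an assumption.
```
package main

import (
	"fmt"

	"github.com/armosec/kubescape/cautils" // assumed import path for the removed package
)

func main() {
	wlid := cautils.GetK8sWLID("demo-cluster", "prod", "Deployment", "web")
	fmt.Println(wlid) // wlid://cluster-demo-cluster/namespace-prod/deployment-web

	fmt.Println(cautils.GetClusterFromWlid(wlid))   // demo-cluster
	fmt.Println(cautils.GetNamespaceFromWlid(wlid)) // prod
	fmt.Println(cautils.GetKindFromWlid(wlid))      // Deployment
	fmt.Println(cautils.IsWlidValid(wlid))          // <nil>
}
```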

cautils/customerloader.go Normal file (519 lines)

@@ -0,0 +1,519 @@
package cautils
import (
"context"
"encoding/json"
"fmt"
"os"
"strings"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/armosec/k8s-interface/k8sinterface"
"github.com/armosec/kubescape/cautils/getter"
"github.com/armosec/kubescape/cautils/logger"
corev1 "k8s.io/api/core/v1"
)
const configFileName = "config"
func ConfigFileFullPath() string { return getter.GetDefaultPath(configFileName + ".json") }
// ======================================================================================
// =============================== Config structure =====================================
// ======================================================================================
type ConfigObj struct {
AccountID string `json:"accountID,omitempty"`
ClientID string `json:"clientID,omitempty"`
SecretKey string `json:"secretKey,omitempty"`
CustomerGUID string `json:"customerGUID,omitempty"` // Deprecated
Token string `json:"invitationParam,omitempty"`
CustomerAdminEMail string `json:"adminMail,omitempty"`
ClusterName string `json:"clusterName,omitempty"`
}
// Config - convert ConfigObj to config file
func (co *ConfigObj) Config() []byte {
// remove cluster name before saving to file
clusterName := co.ClusterName
customerAdminEMail := co.CustomerAdminEMail
token := co.Token
co.ClusterName = ""
co.Token = ""
co.CustomerAdminEMail = ""
b, err := json.MarshalIndent(co, "", " ")
co.ClusterName = clusterName
co.CustomerAdminEMail = customerAdminEMail
co.Token = token
if err == nil {
return b
}
return []byte{}
}
// ======================================================================================
// =============================== interface ============================================
// ======================================================================================
type ITenantConfig interface {
// set
SetTenant() error
UpdateCachedConfig() error
DeleteCachedConfig() error
// getters
GetClusterName() string
GetAccountID() string
GetConfigObj() *ConfigObj
// GetBackendAPI() getter.IBackend
// GenerateURL()
IsConfigFound() bool
}
// ======================================================================================
// ============================ Local Config ============================================
// ======================================================================================
// Config when scanning YAML files or URL but not a Kubernetes cluster
type LocalConfig struct {
backendAPI getter.IBackend
configObj *ConfigObj
}
func NewLocalConfig(backendAPI getter.IBackend, customerGUID, clusterName string) *LocalConfig {
var configObj *ConfigObj
lc := &LocalConfig{
backendAPI: backendAPI,
configObj: &ConfigObj{},
}
// get from configMap
if existsConfigFile() { // get from file
configObj, _ = loadConfigFromFile()
} else {
configObj = &ConfigObj{}
}
if configObj != nil {
lc.configObj = configObj
}
if customerGUID != "" {
lc.configObj.AccountID = customerGUID // override config customerGUID
}
if clusterName != "" {
lc.configObj.ClusterName = AdoptClusterName(clusterName) // override config clusterName
}
getAccountFromEnv(lc.configObj)
lc.backendAPI.SetAccountID(lc.configObj.AccountID)
lc.backendAPI.SetClientID(lc.configObj.ClientID)
lc.backendAPI.SetSecretKey(lc.configObj.SecretKey)
if lc.configObj.AccountID != "" {
if err := lc.SetTenant(); err != nil {
logger.L().Error(err.Error())
}
}
return lc
}
func (lc *LocalConfig) GetConfigObj() *ConfigObj { return lc.configObj }
func (lc *LocalConfig) GetAccountID() string { return lc.configObj.AccountID }
func (lc *LocalConfig) GetClusterName() string { return lc.configObj.ClusterName }
func (lc *LocalConfig) IsConfigFound() bool { return existsConfigFile() }
func (lc *LocalConfig) SetTenant() error {
// ARMO tenant GUID
if err := getTenantConfigFromBE(lc.backendAPI, lc.configObj); err != nil {
return err
}
lc.UpdateCachedConfig()
return nil
}
func (lc *LocalConfig) UpdateCachedConfig() error {
return updateConfigFile(lc.configObj)
}
func (lc *LocalConfig) DeleteCachedConfig() error {
return DeleteConfigFile()
}
func getTenantConfigFromBE(backendAPI getter.IBackend, configObj *ConfigObj) error {
// get from armoBE
tenantResponse, err := backendAPI.GetTenant()
if err == nil && tenantResponse != nil {
if tenantResponse.AdminMail != "" { // registered tenant
configObj.CustomerAdminEMail = tenantResponse.AdminMail
} else { // new tenant
configObj.Token = tenantResponse.Token
configObj.AccountID = tenantResponse.TenantID
}
} else {
if err != nil && !strings.Contains(err.Error(), "already exists") {
return err
}
}
return nil
}
// ======================================================================================
// ========================== Cluster Config ============================================
// ======================================================================================
// ClusterConfig configuration of specific cluster
/*
Supported environments variables:
KS_DEFAULT_CONFIGMAP_NAME // name of configmap, if not set default is 'kubescape'
KS_DEFAULT_CONFIGMAP_NAMESPACE // configmap namespace, if not set default is 'default'
KS_ACCOUNT_ID
KS_CLIENT_ID
KS_SECRET_KEY
TODO - support:
KS_CACHE // path to cached files
*/
type ClusterConfig struct {
k8s *k8sinterface.KubernetesApi
configMapName string
configMapNamespace string
backendAPI getter.IBackend
configObj *ConfigObj
}
func NewClusterConfig(k8s *k8sinterface.KubernetesApi, backendAPI getter.IBackend, customerGUID, clusterName string) *ClusterConfig {
var configObj *ConfigObj
c := &ClusterConfig{
k8s: k8s,
backendAPI: backendAPI,
configObj: &ConfigObj{},
configMapName: getConfigMapName(),
configMapNamespace: getConfigMapNamespace(),
}
// get from configMap
if c.existsConfigMap() {
configObj, _ = c.loadConfigFromConfigMap()
}
if configObj == nil && existsConfigFile() { // get from file
configObj, _ = loadConfigFromFile()
}
if configObj != nil {
c.configObj = configObj
}
if customerGUID != "" {
c.configObj.AccountID = customerGUID // override config customerGUID
}
if clusterName != "" {
c.configObj.ClusterName = AdoptClusterName(clusterName) // override config clusterName
}
getAccountFromEnv(c.configObj)
if c.configObj.ClusterName == "" {
c.configObj.ClusterName = AdoptClusterName(k8sinterface.GetClusterName())
} else { // override the cluster name if it has unwanted characters
c.configObj.ClusterName = AdoptClusterName(c.configObj.ClusterName)
}
c.backendAPI.SetAccountID(c.configObj.AccountID)
c.backendAPI.SetClientID(c.configObj.ClientID)
c.backendAPI.SetSecretKey(c.configObj.SecretKey)
if c.configObj.AccountID != "" {
if err := c.SetTenant(); err != nil {
logger.L().Error(err.Error())
}
}
return c
}
func (c *ClusterConfig) GetConfigObj() *ConfigObj { return c.configObj }
func (c *ClusterConfig) GetDefaultNS() string { return c.configMapNamespace }
func (c *ClusterConfig) GetAccountID() string { return c.configObj.AccountID }
func (c *ClusterConfig) IsConfigFound() bool { return existsConfigFile() || c.existsConfigMap() }
func (c *ClusterConfig) SetTenant() error {
// ARMO tenant GUID
if err := getTenantConfigFromBE(c.backendAPI, c.configObj); err != nil {
return err
}
c.UpdateCachedConfig()
return nil
}
func (c *ClusterConfig) UpdateCachedConfig() error {
// update/create config
if c.existsConfigMap() {
if err := c.updateConfigMap(); err != nil {
return err
}
} else {
if err := c.createConfigMap(); err != nil {
return err
}
}
return updateConfigFile(c.configObj)
}
func (c *ClusterConfig) DeleteCachedConfig() error {
if err := c.deleteConfigMap(); err != nil {
return err
}
if err := DeleteConfigFile(); err != nil {
return err
}
return nil
}
func (c *ClusterConfig) GetClusterName() string {
return c.configObj.ClusterName
}
func (c *ClusterConfig) ToMapString() map[string]interface{} {
m := map[string]interface{}{}
if bc, err := json.Marshal(c.configObj); err == nil {
json.Unmarshal(bc, &m)
}
return m
}
func (c *ClusterConfig) loadConfigFromConfigMap() (*ConfigObj, error) {
configMap, err := c.k8s.KubernetesClient.CoreV1().ConfigMaps(c.configMapNamespace).Get(context.Background(), c.configMapName, metav1.GetOptions{})
if err != nil {
return nil, err
}
if bData, err := json.Marshal(configMap.Data); err == nil {
return readConfig(bData)
}
return nil, nil
}
func (c *ClusterConfig) existsConfigMap() bool {
_, err := c.k8s.KubernetesClient.CoreV1().ConfigMaps(c.configMapNamespace).Get(context.Background(), c.configMapName, metav1.GetOptions{})
// TODO - check if has customerGUID
return err == nil
}
func (c *ClusterConfig) GetValueByKeyFromConfigMap(key string) (string, error) {
configMap, err := c.k8s.KubernetesClient.CoreV1().ConfigMaps(c.configMapNamespace).Get(context.Background(), c.configMapName, metav1.GetOptions{})
if err != nil {
return "", err
}
if val, ok := configMap.Data[key]; ok {
return val, nil
} else {
return "", fmt.Errorf("value does not exist")
}
}
func GetValueFromConfigJson(key string) (string, error) {
data, err := os.ReadFile(ConfigFileFullPath())
if err != nil {
return "", err
}
var obj map[string]interface{}
if err := json.Unmarshal(data, &obj); err != nil {
return "", err
}
if val, ok := obj[key]; ok {
return fmt.Sprint(val), nil
} else {
return "", fmt.Errorf("value does not exist")
}
}
func SetKeyValueInConfigJson(key string, value string) error {
data, err := os.ReadFile(ConfigFileFullPath())
if err != nil {
return err
}
var obj map[string]interface{}
err = json.Unmarshal(data, &obj)
if err != nil {
return err
}
obj[key] = value
newData, err := json.Marshal(obj)
if err != nil {
return err
}
return os.WriteFile(ConfigFileFullPath(), newData, 0664)
}
func (c *ClusterConfig) SetKeyValueInConfigmap(key string, value string) error {
configMap, err := c.k8s.KubernetesClient.CoreV1().ConfigMaps(c.configMapNamespace).Get(context.Background(), c.configMapName, metav1.GetOptions{})
if err != nil {
configMap = &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: c.configMapName,
},
}
}
if len(configMap.Data) == 0 {
configMap.Data = make(map[string]string)
}
configMap.Data[key] = value
if err != nil {
_, err = c.k8s.KubernetesClient.CoreV1().ConfigMaps(c.configMapNamespace).Create(context.Background(), configMap, metav1.CreateOptions{})
} else {
_, err = c.k8s.KubernetesClient.CoreV1().ConfigMaps(c.configMapNamespace).Update(context.Background(), configMap, metav1.UpdateOptions{})
}
return err
}
func existsConfigFile() bool {
_, err := os.ReadFile(ConfigFileFullPath())
return err == nil
}
func (c *ClusterConfig) createConfigMap() error {
if c.k8s == nil {
return nil
}
configMap := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: c.configMapName,
},
}
c.updateConfigData(configMap)
_, err := c.k8s.KubernetesClient.CoreV1().ConfigMaps(c.configMapNamespace).Create(context.Background(), configMap, metav1.CreateOptions{})
return err
}
func (c *ClusterConfig) updateConfigMap() error {
if c.k8s == nil {
return nil
}
configMap, err := c.k8s.KubernetesClient.CoreV1().ConfigMaps(c.configMapNamespace).Get(context.Background(), c.configMapName, metav1.GetOptions{})
if err != nil {
return err
}
c.updateConfigData(configMap)
_, err = c.k8s.KubernetesClient.CoreV1().ConfigMaps(c.configMapNamespace).Update(context.Background(), configMap, metav1.UpdateOptions{})
return err
}
func updateConfigFile(configObj *ConfigObj) error {
if err := os.WriteFile(ConfigFileFullPath(), configObj.Config(), 0664); err != nil {
return err
}
return nil
}
func (c *ClusterConfig) updateConfigData(configMap *corev1.ConfigMap) {
if len(configMap.Data) == 0 {
configMap.Data = make(map[string]string)
}
m := c.ToMapString()
for k, v := range m {
if s, ok := v.(string); ok {
configMap.Data[k] = s
}
}
}
func loadConfigFromFile() (*ConfigObj, error) {
dat, err := os.ReadFile(ConfigFileFullPath())
if err != nil {
return nil, err
}
return readConfig(dat)
}
func readConfig(dat []byte) (*ConfigObj, error) {
if len(dat) == 0 {
return nil, nil
}
configObj := &ConfigObj{}
if err := json.Unmarshal(dat, configObj); err != nil {
return nil, err
}
if configObj.AccountID == "" {
configObj.AccountID = configObj.CustomerGUID
}
configObj.CustomerGUID = ""
return configObj, nil
}
// Check if the customer is submitted
func (clusterConfig *ClusterConfig) IsSubmitted() bool {
return clusterConfig.existsConfigMap() || existsConfigFile()
}
// Check if the customer is registered
func (clusterConfig *ClusterConfig) IsRegistered() bool {
// get from armoBE
tenantResponse, err := clusterConfig.backendAPI.GetTenant()
if err == nil && tenantResponse != nil {
if tenantResponse.AdminMail != "" { // this customer already belongs to some user
return true
}
}
return false
}
func (clusterConfig *ClusterConfig) deleteConfigMap() error {
return clusterConfig.k8s.KubernetesClient.CoreV1().ConfigMaps(clusterConfig.configMapNamespace).Delete(context.Background(), clusterConfig.configMapName, metav1.DeleteOptions{})
}
func DeleteConfigFile() error {
return os.Remove(ConfigFileFullPath())
}
func AdoptClusterName(clusterName string) string {
return strings.ReplaceAll(clusterName, "/", "-")
}
func getConfigMapName() string {
if n := os.Getenv("KS_DEFAULT_CONFIGMAP_NAME"); n != "" {
return n
}
return "kubescape"
}
func getConfigMapNamespace() string {
if n := os.Getenv("KS_DEFAULT_CONFIGMAP_NAMESPACE"); n != "" {
return n
}
return "default"
}
func getAccountFromEnv(configObj *ConfigObj) {
// load from env
if accountID := os.Getenv("KS_ACCOUNT_ID"); accountID != "" {
configObj.AccountID = accountID
}
if clientID := os.Getenv("KS_CLIENT_ID"); clientID != "" {
configObj.ClientID = clientID
}
if secretKey := os.Getenv("KS_SECRET_KEY"); secretKey != "" {
configObj.SecretKey = secretKey
}
}
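A minimal sketch of how these helpers can be combined so the KS_* environment variables override the persisted configuration (the fallback to an empty ConfigObj is illustrative, not necessarily the package's actual flow):
    configObj, err := loadConfigFromFile()
    if err != nil || configObj == nil {
        configObj = &ConfigObj{} // no config file yet - start from an empty config
    }
    getAccountFromEnv(configObj) // KS_ACCOUNT_ID / KS_CLIENT_ID / KS_SECRET_KEY take precedence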


@@ -1,23 +1,35 @@
package cautils
import (
"github.com/armosec/kubescape/cautils/opapolicy"
"github.com/armosec/armoapi-go/armotypes"
"github.com/armosec/k8s-interface/workloadinterface"
"github.com/armosec/opa-utils/reporthandling"
"github.com/armosec/opa-utils/reporthandling/results/v1/resourcesresults"
reporthandlingv2 "github.com/armosec/opa-utils/reporthandling/v2"
)
// K8SResources map[<api group>/<api version>/<resource>]<resource object>
type K8SResources map[string]interface{}
// K8SResources map[<api group>/<api version>/<resource>][]<resourceID>
type K8SResources map[string][]string
type OPASessionObj struct {
Frameworks []opapolicy.Framework
K8SResources *K8SResources
PostureReport *opapolicy.PostureReport
K8SResources *K8SResources // input k8s objects
Frameworks []reporthandling.Framework // list of frameworks to scan
AllResources map[string]workloadinterface.IMetadata // all scanned resources, map[<resource ID>]<resource>
ResourcesResult map[string]resourcesresults.Result // resources scan results, map[<resource ID>]<resource result>
PostureReport *reporthandling.PostureReport // scan results v1
Report *reporthandlingv2.PostureReport // scan results v2
Exceptions []armotypes.PostureExceptionPolicy // list of exceptions to apply on scan results
RegoInputData RegoInputData // input passed to rego for scanning. map[<control name>][<input arguments>]
}
func NewOPASessionObj(frameworks []opapolicy.Framework, k8sResources *K8SResources) *OPASessionObj {
func NewOPASessionObj(frameworks []reporthandling.Framework, k8sResources *K8SResources) *OPASessionObj {
return &OPASessionObj{
Frameworks: frameworks,
K8SResources: k8sResources,
PostureReport: &opapolicy.PostureReport{
Report: &reporthandlingv2.PostureReport{},
Frameworks: frameworks,
K8SResources: k8sResources,
AllResources: make(map[string]workloadinterface.IMetadata),
ResourcesResult: make(map[string]resourcesresults.Result),
PostureReport: &reporthandling.PostureReport{
ClusterName: ClusterName,
CustomerGUID: CustomerGUID,
},
@@ -26,9 +38,12 @@ func NewOPASessionObj(frameworks []opapolicy.Framework, k8sResources *K8SResourc
func NewOPASessionObjMock() *OPASessionObj {
return &OPASessionObj{
Frameworks: nil,
K8SResources: nil,
PostureReport: &opapolicy.PostureReport{
Frameworks: nil,
K8SResources: nil,
AllResources: make(map[string]workloadinterface.IMetadata),
ResourcesResult: make(map[string]resourcesresults.Result),
Report: &reporthandlingv2.PostureReport{},
PostureReport: &reporthandling.PostureReport{
ClusterName: "",
CustomerGUID: "",
ReportID: "",
@@ -42,8 +57,19 @@ type ComponentConfig struct {
}
type Exception struct {
Ignore *bool `json:"ignore"` // ignore test results
MultipleScore *opapolicy.AlertScore `json:"multipleScore"` // MultipleScore number - float32
Namespaces []string `json:"namespaces"`
Regex string `json:"regex"` // not supported
Ignore *bool `json:"ignore"` // ignore test results
MultipleScore *reporthandling.AlertScore `json:"multipleScore"` // MultipleScore number - float32
Namespaces []string `json:"namespaces"`
Regex string `json:"regex"` // not supported
}
type RegoInputData struct {
PostureControlInputs map[string][]string `json:"postureControlInputs"`
// ClusterName string `json:"clusterName"`
// K8sConfig RegoK8sConfig `json:"k8sconfig"`
}
type Policies struct {
Frameworks []string
Controls map[string]reporthandling.Control // map[<control ID>]<control>
}

View File

@@ -0,0 +1,67 @@
package cautils
import (
pkgcautils "github.com/armosec/utils-go/utils"
"github.com/armosec/opa-utils/reporthandling"
)
func NewPolicies() *Policies {
return &Policies{
Frameworks: make([]string, 0),
Controls: make(map[string]reporthandling.Control),
}
}
func (policies *Policies) Set(frameworks []reporthandling.Framework, version string) {
for i := range frameworks {
if frameworks[i].Name != "" {
policies.Frameworks = append(policies.Frameworks, frameworks[i].Name)
}
for j := range frameworks[i].Controls {
compatibleRules := []reporthandling.PolicyRule{}
for r := range frameworks[i].Controls[j].Rules {
if !ruleWithArmoOpaDependency(frameworks[i].Controls[j].Rules[r].Attributes) && isRuleKubescapeVersionCompatible(frameworks[i].Controls[j].Rules[r].Attributes, version) {
compatibleRules = append(compatibleRules, frameworks[i].Controls[j].Rules[r])
}
}
if len(compatibleRules) > 0 {
frameworks[i].Controls[j].Rules = compatibleRules
policies.Controls[frameworks[i].Controls[j].ControlID] = frameworks[i].Controls[j]
}
}
}
}
func ruleWithArmoOpaDependency(attributes map[string]interface{}) bool {
if attributes == nil {
return false
}
if s, ok := attributes["armoOpa"]; ok { // TODO - make global
return pkgcautils.StringToBool(s.(string))
}
return false
}
// Checks that the kubescape version is within the range of use for this rule.
// In a local build (version == ""):
// returns true only if the rule does not carry the "useUntilKubescapeVersion" attribute
func isRuleKubescapeVersionCompatible(attributes map[string]interface{}, version string) bool {
if from, ok := attributes["useFromKubescapeVersion"]; ok && from != nil {
if version != "" {
if from.(string) > version {
return false
}
}
}
if until, ok := attributes["useUntilKubescapeVersion"]; ok && until != nil {
if version != "" {
if until.(string) <= version {
return false
}
} else {
return false
}
}
return true
}
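A short usage sketch for the filtering above. The frameworks slice is assumed to come from one of the getters introduced later in this change, and BuildNumber is assumed to be the package-level build string mentioned in the comment:
    policies := NewPolicies()
    // Drop rules that depend on the in-cluster ARMO OPA components or that fall
    // outside the supported kubescape version range.
    policies.Set(frameworks, BuildNumber)
    for controlID, control := range policies.Controls {
        _, _ = controlID, control // e.g. hand the filtered controls to the scanner
    }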


@@ -1,7 +1,6 @@
package cautils
import (
"fmt"
"os"
"time"
@@ -21,48 +20,16 @@ func IsSilent() bool {
}
var FailureDisplay = color.New(color.Bold, color.FgHiRed).FprintfFunc()
var WarningDisplay = color.New(color.Bold, color.FgHiYellow).FprintfFunc()
var FailureTextDisplay = color.New(color.Faint, color.FgHiRed).FprintfFunc()
var InfoDisplay = color.New(color.Bold, color.FgHiYellow).FprintfFunc()
var InfoTextDisplay = color.New(color.Faint, color.FgHiYellow).FprintfFunc()
var SimpleDisplay = color.New(color.Bold, color.FgHiWhite).FprintfFunc()
var InfoDisplay = color.New(color.Bold, color.FgCyan).FprintfFunc()
var InfoTextDisplay = color.New(color.Bold, color.FgHiYellow).FprintfFunc()
var SimpleDisplay = color.New().FprintfFunc()
var SuccessDisplay = color.New(color.Bold, color.FgHiGreen).FprintfFunc()
var DescriptionDisplay = color.New(color.Faint, color.FgWhite).FprintfFunc()
var Spinner *spinner.Spinner
func ScanStartDisplay() {
if IsSilent() {
return
}
InfoDisplay(os.Stdout, "ARMO security scanner starting\n")
}
func SuccessTextDisplay(str string) {
if IsSilent() {
return
}
SuccessDisplay(os.Stdout, "[success] ")
SimpleDisplay(os.Stdout, fmt.Sprintf("%s\n", str))
}
func ErrorDisplay(str string) {
if IsSilent() {
return
}
SuccessDisplay(os.Stdout, "[Error] ")
SimpleDisplay(os.Stdout, fmt.Sprintf("%s\n", str))
}
func ProgressTextDisplay(str string) {
if IsSilent() {
return
}
InfoDisplay(os.Stdout, "[progress] ")
SimpleDisplay(os.Stdout, fmt.Sprintf("%s\n", str))
}
func StartSpinner() {
if !IsSilent() && isatty.IsTerminal(os.Stdout.Fd()) {
Spinner = spinner.New(spinner.CharSets[7], 100*time.Millisecond) // Build our new spinner


@@ -1,6 +1,9 @@
package cautils
type DownloadInfo struct {
Path string
FrameworkName string
Path string // directory to save artifact. Default is "~/.kubescape/"
FileName string // can be empty
Target string // type of artifact to download
Name string // name of artifact to download
Account string // customerGUID
}
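For illustration, a hypothetical way this struct might be filled when downloading a framework; the literal values for Target and Name are assumptions, not taken from this change:
    info := DownloadInfo{
        Path:   "",          // empty -> defaults to "~/.kubescape/"
        Target: "framework", // assumed artifact type
        Name:   "nsa",       // assumed artifact name
    }
    _ = info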

cautils/getter/armoapi.go (new file)

@@ -0,0 +1,330 @@
package getter
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"strings"
"time"
"github.com/armosec/armoapi-go/armotypes"
"github.com/armosec/kubescape/cautils/logger"
"github.com/armosec/opa-utils/reporthandling"
)
// =======================================================================================================================
// =============================================== ArmoAPI ===============================================================
// =======================================================================================================================
var (
// ATTENTION!!!
// Changes to these URL variable names, or to how they are used, affect the build process! BE CAREFUL
armoERURL = "report.armo.cloud"
armoBEURL = "api.armo.cloud"
armoFEURL = "portal.armo.cloud"
armoAUTHURL = "auth.armo.cloud"
armoDevERURL = "report.eudev3.cyberarmorsoft.com"
armoDevBEURL = "api-dev.armo.cloud"
armoDevFEURL = "armoui-dev.eudev3.cyberarmorsoft.com"
armoDevAUTHURL = "eggauth.eudev3.cyberarmorsoft.com"
)
// Armo API for downloading policies
type ArmoAPI struct {
httpClient *http.Client
apiURL string
authURL string
erURL string
feURL string
accountID string
clientID string
secretKey string
feToken FeLoginResponse
authCookie string
loggedIn bool
}
var globalArmoAPIConnector *ArmoAPI
func SetARMOAPIConnector(armoAPI *ArmoAPI) {
globalArmoAPIConnector = armoAPI
}
func GetArmoAPIConnector() *ArmoAPI {
if globalArmoAPIConnector == nil {
logger.L().Error("returning nil API connector")
}
return globalArmoAPIConnector
}
func NewARMOAPIDev() *ArmoAPI {
apiObj := newArmoAPI()
apiObj.apiURL = armoDevBEURL
apiObj.authURL = armoDevAUTHURL
apiObj.erURL = armoDevERURL
apiObj.feURL = armoDevFEURL
return apiObj
}
func NewARMOAPIProd() *ArmoAPI {
apiObj := newArmoAPI()
apiObj.apiURL = armoBEURL
apiObj.erURL = armoERURL
apiObj.feURL = armoFEURL
apiObj.authURL = armoAUTHURL
return apiObj
}
func NewARMOAPICustomized(armoERURL, armoBEURL, armoFEURL, armoAUTHURL string) *ArmoAPI {
apiObj := newArmoAPI()
apiObj.erURL = armoERURL
apiObj.apiURL = armoBEURL
apiObj.feURL = armoFEURL
apiObj.authURL = armoAUTHURL
return apiObj
}
func newArmoAPI() *ArmoAPI {
return &ArmoAPI{
httpClient: &http.Client{Timeout: time.Duration(61) * time.Second},
loggedIn: false,
}
}
func (armoAPI *ArmoAPI) Post(fullURL string, headers map[string]string, body []byte) (string, error) {
if headers == nil {
headers = make(map[string]string)
}
armoAPI.appendAuthHeaders(headers)
return HttpPost(armoAPI.httpClient, fullURL, headers, body)
}
func (armoAPI *ArmoAPI) Get(fullURL string, headers map[string]string) (string, error) {
if headers == nil {
headers = make(map[string]string)
}
armoAPI.appendAuthHeaders(headers)
return HttpGetter(armoAPI.httpClient, fullURL, headers)
}
func (armoAPI *ArmoAPI) GetAccountID() string { return armoAPI.accountID }
func (armoAPI *ArmoAPI) IsLoggedIn() bool { return armoAPI.loggedIn }
func (armoAPI *ArmoAPI) GetClientID() string { return armoAPI.clientID }
func (armoAPI *ArmoAPI) GetSecretKey() string { return armoAPI.secretKey }
func (armoAPI *ArmoAPI) GetFrontendURL() string { return armoAPI.feURL }
func (armoAPI *ArmoAPI) GetAPIURL() string { return armoAPI.apiURL }
func (armoAPI *ArmoAPI) GetReportReceiverURL() string { return armoAPI.erURL }
func (armoAPI *ArmoAPI) SetAccountID(accountID string) { armoAPI.accountID = accountID }
func (armoAPI *ArmoAPI) SetClientID(clientID string) { armoAPI.clientID = clientID }
func (armoAPI *ArmoAPI) SetSecretKey(secretKey string) { armoAPI.secretKey = secretKey }
func (armoAPI *ArmoAPI) GetFramework(name string) (*reporthandling.Framework, error) {
respStr, err := armoAPI.Get(armoAPI.getFrameworkURL(name), nil)
if err != nil {
return nil, err
}
framework := &reporthandling.Framework{}
if err = JSONDecoder(respStr).Decode(framework); err != nil {
return nil, err
}
SaveInFile(framework, GetDefaultPath(name+".json"))
return framework, err
}
func (armoAPI *ArmoAPI) GetFrameworks() ([]reporthandling.Framework, error) {
respStr, err := armoAPI.Get(armoAPI.getListFrameworkURL(), nil)
if err != nil {
return nil, err
}
frameworks := []reporthandling.Framework{}
if err = JSONDecoder(respStr).Decode(&frameworks); err != nil {
return nil, err
}
// SaveInFile(framework, GetDefaultPath(name+".json"))
return frameworks, err
}
func (armoAPI *ArmoAPI) GetControl(policyName string) (*reporthandling.Control, error) {
return nil, fmt.Errorf("control api is not public")
}
func (armoAPI *ArmoAPI) GetExceptions(clusterName string) ([]armotypes.PostureExceptionPolicy, error) {
exceptions := []armotypes.PostureExceptionPolicy{}
respStr, err := armoAPI.Get(armoAPI.getExceptionsURL(clusterName), nil)
if err != nil {
return nil, err
}
if err = JSONDecoder(respStr).Decode(&exceptions); err != nil {
return nil, err
}
return exceptions, nil
}
func (armoAPI *ArmoAPI) GetTenant() (*TenantResponse, error) {
url := armoAPI.getAccountURL()
if armoAPI.accountID != "" {
url = fmt.Sprintf("%s?customerGUID=%s", url, armoAPI.accountID)
}
respStr, err := armoAPI.Get(url, nil)
if err != nil {
return nil, err
}
tenant := &TenantResponse{}
if err = JSONDecoder(respStr).Decode(tenant); err != nil {
return nil, err
}
if tenant.TenantID != "" {
armoAPI.accountID = tenant.TenantID
}
return tenant, nil
}
// GetAccountConfig returns the customer configuration (armotypes.CustomerConfig) stored for this account/cluster
func (armoAPI *ArmoAPI) GetAccountConfig(clusterName string) (*armotypes.CustomerConfig, error) {
accountConfig := &armotypes.CustomerConfig{}
if armoAPI.accountID == "" {
return accountConfig, nil
}
respStr, err := armoAPI.Get(armoAPI.getAccountConfig(clusterName), nil)
if err != nil {
return nil, err
}
if err = JSONDecoder(respStr).Decode(&accountConfig); err != nil {
return nil, err
}
return accountConfig, nil
}
// ControlsInputs // map[<control name>][<input arguments>]
func (armoAPI *ArmoAPI) GetControlsInputs(clusterName string) (map[string][]string, error) {
accountConfig, err := armoAPI.GetAccountConfig(clusterName)
if err == nil {
return accountConfig.Settings.PostureControlInputs, nil
}
return nil, err
}
func (armoAPI *ArmoAPI) ListCustomFrameworks() ([]string, error) {
respStr, err := armoAPI.Get(armoAPI.getListFrameworkURL(), nil)
if err != nil {
return nil, err
}
frs := []reporthandling.Framework{}
if err = json.Unmarshal([]byte(respStr), &frs); err != nil {
return nil, err
}
frameworkList := []string{}
for _, fr := range frs {
if !isNativeFramework(fr.Name) {
frameworkList = append(frameworkList, fr.Name)
}
}
return frameworkList, nil
}
func (armoAPI *ArmoAPI) ListFrameworks() ([]string, error) {
respStr, err := armoAPI.Get(armoAPI.getListFrameworkURL(), nil)
if err != nil {
return nil, err
}
frs := []reporthandling.Framework{}
if err = json.Unmarshal([]byte(respStr), &frs); err != nil {
return nil, err
}
frameworkList := []string{}
for _, fr := range frs {
if isNativeFramework(fr.Name) {
frameworkList = append(frameworkList, strings.ToLower(fr.Name))
} else {
frameworkList = append(frameworkList, fr.Name)
}
}
return frameworkList, nil
}
func (armoAPI *ArmoAPI) ListControls(l ListType) ([]string, error) {
return nil, fmt.Errorf("control api is not public")
}
func (armoAPI *ArmoAPI) PostExceptions(exceptions []armotypes.PostureExceptionPolicy) error {
for i := range exceptions {
ex, err := json.Marshal(exceptions[i])
if err != nil {
return err
}
_, err = armoAPI.Post(armoAPI.postExceptionsURL(), map[string]string{"Content-Type": "application/json"}, ex)
if err != nil {
return err
}
}
return nil
}
func (armoAPI *ArmoAPI) Login() error {
if armoAPI.accountID == "" {
return fmt.Errorf("failed to login, missing accountID")
}
if armoAPI.clientID == "" {
return fmt.Errorf("failed to login, missing clientID")
}
if armoAPI.secretKey == "" {
return fmt.Errorf("failed to login, missing secretKey")
}
// init URLs
feLoginData := FeLoginData{ClientId: armoAPI.clientID, Secret: armoAPI.secretKey}
body, _ := json.Marshal(feLoginData)
resp, err := http.Post(armoAPI.getApiToken(), "application/json", bytes.NewBuffer(body))
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("error authenticating: %d", resp.StatusCode)
}
responseBody, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}
var feLoginResponse FeLoginResponse
if err = json.Unmarshal(responseBody, &feLoginResponse); err != nil {
return err
}
armoAPI.feToken = feLoginResponse
/* Now we have JWT */
armoAPI.authCookie, err = armoAPI.getAuthCookie()
if err != nil {
return err
}
armoAPI.loggedIn = true
return nil
}
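A minimal end-to-end sketch of the client defined in this file; the credentials are placeholders:
    armoAPI := NewARMOAPIProd()
    armoAPI.SetAccountID("00000000-0000-0000-0000-000000000000") // placeholder GUID
    armoAPI.SetClientID("<client-id>")
    armoAPI.SetSecretKey("<secret-key>")
    if err := armoAPI.Login(); err != nil {
        // authentication failed - handle the error
    }
    framework, err := armoAPI.GetFramework("nsa")
    _, _ = framework, err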


@@ -0,0 +1,158 @@
package getter
import (
"bytes"
"encoding/json"
"fmt"
"net/http"
"net/url"
"strings"
)
var NativeFrameworks = []string{"nsa", "mitre", "armobest", "devopsbest"}
func (armoAPI *ArmoAPI) getFrameworkURL(frameworkName string) string {
u := url.URL{}
u.Scheme = "https"
u.Host = armoAPI.apiURL
u.Path = "api/v1/armoFrameworks"
q := u.Query()
q.Add("customerGUID", armoAPI.accountID)
if isNativeFramework(frameworkName) {
q.Add("frameworkName", strings.ToUpper(frameworkName))
} else {
// For a customer (custom) framework, the name must match exactly the way it was added
q.Add("frameworkName", frameworkName)
}
u.RawQuery = q.Encode()
return u.String()
}
func (armoAPI *ArmoAPI) getListFrameworkURL() string {
u := url.URL{}
u.Scheme = "https"
u.Host = armoAPI.apiURL
u.Path = "api/v1/armoFrameworks"
q := u.Query()
q.Add("customerGUID", armoAPI.accountID)
u.RawQuery = q.Encode()
return u.String()
}
func (armoAPI *ArmoAPI) getExceptionsURL(clusterName string) string {
u := url.URL{}
u.Scheme = "https"
u.Host = armoAPI.apiURL
u.Path = "api/v1/armoPostureExceptions"
q := u.Query()
q.Add("customerGUID", armoAPI.accountID)
// if clusterName != "" { // TODO - fix customer name support in Armo BE
// q.Add("clusterName", clusterName)
// }
u.RawQuery = q.Encode()
return u.String()
}
func (armoAPI *ArmoAPI) postExceptionsURL() string {
u := url.URL{}
u.Scheme = "https"
u.Host = armoAPI.apiURL
u.Path = "api/v1/postureExceptionPolicy"
q := u.Query()
q.Add("customerGUID", armoAPI.accountID)
u.RawQuery = q.Encode()
return u.String()
}
func (armoAPI *ArmoAPI) getAccountConfig(clusterName string) string {
u := url.URL{}
u.Scheme = "https"
u.Host = armoAPI.apiURL
u.Path = "api/v1/armoCustomerConfiguration"
q := u.Query()
q.Add("customerGUID", armoAPI.accountID)
if clusterName != "" { // TODO - fix customer name support in Armo BE
q.Add("clusterName", clusterName)
}
u.RawQuery = q.Encode()
return u.String()
}
func (armoAPI *ArmoAPI) getAccountURL() string {
u := url.URL{}
u.Scheme = "https"
u.Host = armoAPI.apiURL
u.Path = "api/v1/createTenant"
return u.String()
}
func (armoAPI *ArmoAPI) getApiToken() string {
u := url.URL{}
u.Scheme = "https"
u.Host = armoAPI.authURL
u.Path = "frontegg/identity/resources/auth/v1/api-token"
return u.String()
}
func (armoAPI *ArmoAPI) getOpenidCustomers() string {
u := url.URL{}
u.Scheme = "https"
u.Host = armoAPI.apiURL
u.Path = "api/v1/openid_customers"
return u.String()
}
func (armoAPI *ArmoAPI) getAuthCookie() (string, error) {
selectCustomer := ArmoSelectCustomer{SelectedCustomerGuid: armoAPI.accountID}
requestBody, _ := json.Marshal(selectCustomer)
client := &http.Client{}
httpRequest, err := http.NewRequest(http.MethodPost, armoAPI.getOpenidCustomers(), bytes.NewBuffer(requestBody))
if err != nil {
return "", err
}
httpRequest.Header.Set("Content-Type", "application/json")
httpRequest.Header.Set("Authorization", fmt.Sprintf("Bearer %s", armoAPI.feToken.Token))
httpResponse, err := client.Do(httpRequest)
if err != nil {
return "", err
}
defer httpResponse.Body.Close()
if httpResponse.StatusCode != http.StatusOK {
return "", fmt.Errorf("failed to get cookie from %s: status %d", armoAPI.getOpenidCustomers(), httpResponse.StatusCode)
}
cookies := httpResponse.Header.Get("set-cookie")
if len(cookies) == 0 {
return "", fmt.Errorf("no cookie field in response from %s", armoAPI.getOpenidCustomers())
}
authCookie := ""
for _, cookie := range strings.Split(cookies, ";") {
kv := strings.Split(cookie, "=")
if kv[0] == "auth" {
authCookie = kv[1]
}
}
if len(authCookie) == 0 {
return "", fmt.Errorf("no auth cookie field in response from %s", armoAPI.getOpenidCustomers())
}
return authCookie, nil
}
func (armoAPI *ArmoAPI) appendAuthHeaders(headers map[string]string) {
if armoAPI.feToken.Token != "" {
headers["Authorization"] = fmt.Sprintf("Bearer %s", armoAPI.feToken.Token)
}
if armoAPI.authCookie != "" {
headers["Cookie"] = fmt.Sprintf("auth=%s", armoAPI.authCookie)
}
}
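As a concrete illustration of what these builders produce, a production connector with a placeholder account ID yields roughly the following framework URL (derived from getFrameworkURL above):
    api := NewARMOAPIProd()
    api.SetAccountID("1234") // placeholder account ID
    // api.getFrameworkURL("nsa") now returns:
    // https://api.armo.cloud/api/v1/armoFrameworks?customerGUID=1234&frameworkName=NSA
    _ = api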


@@ -0,0 +1,24 @@
package getter
type FeLoginData struct {
Secret string `json:"secret"`
ClientId string `json:"clientId"`
}
type FeLoginResponse struct {
Token string `json:"accessToken"`
RefreshToken string `json:"refreshToken"`
ExpiresIn int32 `json:"expiresIn"`
Expires string `json:"expires"`
}
type ArmoSelectCustomer struct {
SelectedCustomerGuid string `json:"selectedCustomer"`
}
type TenantResponse struct {
TenantID string `json:"tenantId"`
Token string `json:"token"`
Expires string `json:"expires"`
AdminMail string `json:"adminMail,omitempty"`
}
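For reference, a minimal sketch of decoding a login response into FeLoginResponse; the payload is an invented example matching the struct tags above, and encoding/json is assumed to be imported:
    payload := []byte(`{"accessToken":"<jwt>","refreshToken":"<token>","expiresIn":3600,"expires":"2022-02-10T00:00:00Z"}`)
    var loginResp FeLoginResponse
    if err := json.Unmarshal(payload, &loginResp); err != nil {
        // handle a malformed response
    }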


@@ -0,0 +1,92 @@
package getter
import (
"strings"
"github.com/armosec/opa-utils/gitregostore"
"github.com/armosec/opa-utils/reporthandling"
)
// =======================================================================================================================
// ======================================== DownloadReleasedPolicy =======================================================
// =======================================================================================================================
// Use gitregostore to get policies from github release
type DownloadReleasedPolicy struct {
gs *gitregostore.GitRegoStore
}
func NewDownloadReleasedPolicy() *DownloadReleasedPolicy {
return &DownloadReleasedPolicy{
gs: gitregostore.NewDefaultGitRegoStore(-1),
}
}
func (drp *DownloadReleasedPolicy) GetControl(policyName string) (*reporthandling.Control, error) {
var control *reporthandling.Control
var err error
control, err = drp.gs.GetOPAControl(policyName)
if err != nil {
return nil, err
}
return control, nil
}
func (drp *DownloadReleasedPolicy) GetFramework(name string) (*reporthandling.Framework, error) {
framework, err := drp.gs.GetOPAFrameworkByName(name)
if err != nil {
return nil, err
}
return framework, err
}
func (drp *DownloadReleasedPolicy) GetFrameworks() ([]reporthandling.Framework, error) {
frameworks, err := drp.gs.GetOPAFrameworks()
if err != nil {
return nil, err
}
return frameworks, err
}
func (drp *DownloadReleasedPolicy) ListFrameworks() ([]string, error) {
return drp.gs.GetOPAFrameworksNamesList()
}
func (drp *DownloadReleasedPolicy) ListControls(listType ListType) ([]string, error) {
switch listType {
case ListID:
return drp.gs.GetOPAControlsIDsList()
default:
return drp.gs.GetOPAControlsNamesList()
}
}
func (drp *DownloadReleasedPolicy) GetControlsInputs(clusterName string) (map[string][]string, error) {
defaultConfigInputs, err := drp.gs.GetDefaultConfigInputs()
if err != nil {
return nil, err
}
return defaultConfigInputs.Settings.PostureControlInputs, err
}
func (drp *DownloadReleasedPolicy) SetRegoObjects() error {
fwNames, err := drp.gs.GetOPAFrameworksNamesList()
if len(fwNames) != 0 && err == nil {
return nil
}
return drp.gs.SetRegoObjects()
}
func isNativeFramework(framework string) bool {
return contains(NativeFrameworks, framework)
}
func contains(s []string, str string) bool {
for _, v := range s {
if strings.EqualFold(v, str) {
return true
}
}
return false
}
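A short usage sketch for the release-based getter (error handling abbreviated):
    drp := NewDownloadReleasedPolicy()
    if err := drp.SetRegoObjects(); err != nil {
        // could not pull the released policies from the GitHub release
    }
    framework, err := drp.GetFramework("nsa")
    _, _ = framework, err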


@@ -1,160 +1,40 @@
package getter
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"strings"
"github.com/armosec/kubescape/cautils/opapolicy"
"github.com/armosec/armoapi-go/armotypes"
"github.com/armosec/opa-utils/reporthandling"
)
const DefaultLocalStore = ".kubescape"
// supported listing
type ListType string
const ListID ListType = "id"
const ListName ListType = "name"
type IPolicyGetter interface {
GetFramework(name string) (*opapolicy.Framework, error)
GetFramework(name string) (*reporthandling.Framework, error)
GetFrameworks() ([]reporthandling.Framework, error)
GetControl(name string) (*reporthandling.Control, error)
ListFrameworks() ([]string, error)
ListControls(ListType) ([]string, error)
}
// =======================================================================================================================
// ======================================== DownloadReleasedPolicy =======================================================
// =======================================================================================================================
type IExceptionsGetter interface {
GetExceptions(clusterName string) ([]armotypes.PostureExceptionPolicy, error)
}
type IBackend interface {
GetAccountID() string
GetClientID() string
GetSecretKey() string
// Download released version
type DownloadReleasedPolicy struct {
hostURL string
httpClient *http.Client
SetAccountID(accountID string)
SetClientID(clientID string)
SetSecretKey(secretKey string)
GetTenant() (*TenantResponse, error)
}
func NewDownloadReleasedPolicy() *DownloadReleasedPolicy {
return &DownloadReleasedPolicy{
hostURL: "",
httpClient: &http.Client{},
}
}
func (drp *DownloadReleasedPolicy) GetFramework(name string) (*opapolicy.Framework, error) {
drp.setURL(name)
respStr, err := HttpGetter(drp.httpClient, drp.hostURL)
if err != nil {
return nil, err
}
framework := &opapolicy.Framework{}
if err = JSONDecoder(respStr).Decode(framework); err != nil {
return framework, err
}
SaveFrameworkInFile(framework, GetDefaultPath(name))
return framework, err
}
func (drp *DownloadReleasedPolicy) setURL(frameworkName string) error {
latestReleases := "https://api.github.com/repos/armosec/regolibrary/releases/latest"
resp, err := http.Get(latestReleases)
if err != nil {
return fmt.Errorf("failed to get latest releases from '%s', reason: %s", latestReleases, err.Error())
}
defer resp.Body.Close()
if resp.StatusCode < 200 || 301 < resp.StatusCode {
return fmt.Errorf("failed to download file, status code: %s", resp.Status)
}
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return fmt.Errorf("failed to read response body from '%s', reason: %s", latestReleases, err.Error())
}
var data map[string]interface{}
err = json.Unmarshal(body, &data)
if err != nil {
return fmt.Errorf("failed to unmarshal response body from '%s', reason: %s", latestReleases, err.Error())
}
if assets, ok := data["assets"].([]interface{}); ok {
for i := range assets {
if asset, ok := assets[i].(map[string]interface{}); ok {
if name, ok := asset["name"].(string); ok {
if name == frameworkName {
if url, ok := asset["browser_download_url"].(string); ok {
drp.hostURL = url
}
}
}
}
}
}
return nil
}
// =======================================================================================================================
// ============================================== LoadPolicy =============================================================
// =======================================================================================================================
// Load policies from a local repository
type LoadPolicy struct {
filePath string
}
func NewLoadPolicy(filePath string) *LoadPolicy {
return &LoadPolicy{
filePath: filePath,
}
}
func (lp *LoadPolicy) GetFramework(frameworkName string) (*opapolicy.Framework, error) {
framework := &opapolicy.Framework{}
f, err := ioutil.ReadFile(lp.filePath)
if err != nil {
return nil, err
}
err = json.Unmarshal(f, framework)
if frameworkName != "" && !strings.EqualFold(frameworkName, framework.Name) {
return nil, fmt.Errorf("framework from file not matching")
}
return framework, err
}
// =======================================================================================================================
// =============================================== ArmoAPI ===============================================================
// =======================================================================================================================
// Armo API for downloading policies
type ArmoAPI struct {
httpClient *http.Client
hostURL string
}
func NewArmoAPI() *ArmoAPI {
return &ArmoAPI{
httpClient: &http.Client{},
hostURL: "https://dashbe.eustage2.cyberarmorsoft.com",
}
}
func (armoAPI *ArmoAPI) GetFramework(name string) (*opapolicy.Framework, error) {
armoAPI.setURL(name)
respStr, err := HttpGetter(armoAPI.httpClient, armoAPI.hostURL)
if err != nil {
return nil, err
}
framework := &opapolicy.Framework{}
if err = JSONDecoder(respStr).Decode(framework); err != nil {
return nil, err
}
SaveFrameworkInFile(framework, GetDefaultPath(name))
return framework, err
}
func (armoAPI *ArmoAPI) setURL(frameworkName string) {
requestURI := "v1/armoFrameworks"
requestURI += fmt.Sprintf("?customerGUID=%s", "11111111-1111-1111-1111-111111111111")
requestURI += fmt.Sprintf("&frameworkName=%s", strings.ToUpper(frameworkName))
requestURI += "&getRules=true"
armoAPI.hostURL = urlEncoder(fmt.Sprintf("%s/%s", armoAPI.hostURL, requestURI))
type IControlsInputsGetter interface {
GetControlsInputs(clusterName string) (map[string][]string, error)
}


@@ -1,34 +1,45 @@
package getter
import (
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"os"
"path"
"path/filepath"
"strings"
"github.com/armosec/kubescape/cautils/opapolicy"
)
func GetDefaultPath(frameworkName string) string {
defaultfilePath := filepath.Join(DefaultLocalStore, frameworkName+".json")
func GetDefaultPath(name string) string {
defaultfilePath := filepath.Join(DefaultLocalStore, name)
if homeDir, err := os.UserHomeDir(); err == nil {
defaultfilePath = filepath.Join(homeDir, defaultfilePath)
}
return defaultfilePath
}
func SaveFrameworkInFile(framework *opapolicy.Framework, path string) error {
encodedData, err := json.Marshal(framework)
func SaveInFile(policy interface{}, pathStr string) error {
encodedData, err := json.MarshalIndent(policy, "", " ")
if err != nil {
return err
}
err = os.WriteFile(path, []byte(fmt.Sprintf("%v", string(encodedData))), 0644)
err = os.WriteFile(pathStr, []byte(fmt.Sprintf("%v", string(encodedData))), 0644)
if err != nil {
return err
if os.IsNotExist(err) {
pathDir := path.Dir(pathStr)
if err := os.Mkdir(pathDir, 0744); err != nil {
return err
}
} else {
return err
}
err = os.WriteFile(pathStr, []byte(fmt.Sprintf("%v", string(encodedData))), 0644)
if err != nil {
return err
}
}
return nil
}
@@ -40,12 +51,14 @@ func JSONDecoder(origin string) *json.Decoder {
return dec
}
func HttpGetter(httpClient *http.Client, fullURL string) (string, error) {
func HttpGetter(httpClient *http.Client, fullURL string, headers map[string]string) (string, error) {
req, err := http.NewRequest("GET", fullURL, nil)
if err != nil {
return "", err
}
setHeaders(req, headers)
resp, err := httpClient.Do(req)
if err != nil {
return "", err
@@ -57,6 +70,32 @@ func HttpGetter(httpClient *http.Client, fullURL string) (string, error) {
return respStr, nil
}
func HttpPost(httpClient *http.Client, fullURL string, headers map[string]string, body []byte) (string, error) {
req, err := http.NewRequest("POST", fullURL, bytes.NewReader(body))
if err != nil {
return "", err
}
setHeaders(req, headers)
resp, err := httpClient.Do(req)
if err != nil {
return "", err
}
respStr, err := httpRespToString(resp)
if err != nil {
return "", err
}
return respStr, nil
}
func setHeaders(req *http.Request, headers map[string]string) {
for k, v := range headers { // ranging over a nil map is a safe no-op
req.Header.Set(k, v)
}
}
// HTTPRespToString parses the body as string and checks the HTTP status code, it closes the body reader at the end
func httpRespToString(resp *http.Response) (string, error) {
if resp == nil || resp.Body == nil {
@@ -67,48 +106,23 @@ func httpRespToString(resp *http.Response) (string, error) {
if resp.ContentLength > 0 {
strBuilder.Grow(int(resp.ContentLength))
}
bytesNum, err := io.Copy(&strBuilder, resp.Body)
_, err := io.Copy(&strBuilder, resp.Body)
respStr := strBuilder.String()
if err != nil {
respStrNewLen := len(respStr)
if respStrNewLen > 1024 {
respStrNewLen = 1024
}
return "", fmt.Errorf("HTTP request failed. URL: '%s', Read-ERROR: '%s', HTTP-CODE: '%s', BODY(top): '%s', HTTP-HEADERS: %v, HTTP-BODY-BUFFER-LENGTH: %v", resp.Request.URL.RequestURI(), err, resp.Status, respStr[:respStrNewLen], resp.Header, bytesNum)
return "", fmt.Errorf("http-error: '%s', reason: '%s'", resp.Status, respStr[:respStrNewLen])
// return "", fmt.Errorf("HTTP request failed. URL: '%s', Read-ERROR: '%s', HTTP-CODE: '%s', BODY(top): '%s', HTTP-HEADERS: %v, HTTP-BODY-BUFFER-LENGTH: %v", resp.Request.URL.RequestURI(), err, resp.Status, respStr[:respStrNewLen], resp.Header, bytesNum)
}
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
respStrNewLen := len(respStr)
if respStrNewLen > 1024 {
respStrNewLen = 1024
}
err = fmt.Errorf("HTTP request failed. URL: '%s', HTTP-ERROR: '%s', BODY: '%s', HTTP-HEADERS: %v, HTTP-BODY-BUFFER-LENGTH: %v", resp.Request.URL.RequestURI(), resp.Status, respStr[:respStrNewLen], resp.Header, bytesNum)
err = fmt.Errorf("http-error: '%s', reason: '%s'", resp.Status, respStr[:respStrNewLen])
}
return respStr, err
}
// URLEncoder encode url
func urlEncoder(oldURL string) string {
fullURL := strings.Split(oldURL, "?")
baseURL, err := url.Parse(fullURL[0])
if err != nil {
return ""
}
// Prepare Query Parameters
if len(fullURL) > 1 {
params := url.Values{}
queryParams := strings.Split(fullURL[1], "&")
for _, i := range queryParams {
queryParam := strings.Split(i, "=")
val := ""
if len(queryParam) > 1 {
val = queryParam[1]
}
params.Add(queryParam[0], val)
}
baseURL.RawQuery = params.Encode()
}
return baseURL.String()
}


@@ -0,0 +1,140 @@
package getter
import (
"encoding/json"
"fmt"
"os"
"strings"
"github.com/armosec/armoapi-go/armotypes"
"github.com/armosec/opa-utils/reporthandling"
)
// =======================================================================================================================
// ============================================== LoadPolicy =============================================================
// =======================================================================================================================
const DefaultLocalStore = ".kubescape"
// Load policies from a local repository
type LoadPolicy struct {
filePaths []string
}
func NewLoadPolicy(filePaths []string) *LoadPolicy {
return &LoadPolicy{
filePaths: filePaths,
}
}
// Return control from file
func (lp *LoadPolicy) GetControl(controlName string) (*reporthandling.Control, error) {
control := &reporthandling.Control{}
filePath := lp.filePath()
f, err := os.ReadFile(filePath)
if err != nil {
return nil, err
}
if err = json.Unmarshal(f, control); err != nil {
return control, err
}
if controlName != "" && !strings.EqualFold(controlName, control.Name) && !strings.EqualFold(controlName, control.ControlID) {
framework, err := lp.GetFramework(control.Name)
if err != nil {
return nil, fmt.Errorf("control from file not matching")
} else {
for _, ctrl := range framework.Controls {
if strings.EqualFold(ctrl.Name, controlName) || strings.EqualFold(ctrl.ControlID, controlName) {
control = &ctrl
break
}
}
}
}
return control, err
}
func (lp *LoadPolicy) GetFramework(frameworkName string) (*reporthandling.Framework, error) {
framework := &reporthandling.Framework{}
var err error
for _, filePath := range lp.filePaths {
f, err := os.ReadFile(filePath)
if err != nil {
return nil, err
}
if err = json.Unmarshal(f, framework); err != nil {
return framework, err
}
if strings.EqualFold(frameworkName, framework.Name) {
break
}
}
if frameworkName != "" && !strings.EqualFold(frameworkName, framework.Name) {
return nil, fmt.Errorf("framework from file not matching")
}
return framework, err
}
func (lp *LoadPolicy) GetFrameworks() ([]reporthandling.Framework, error) {
frameworks := []reporthandling.Framework{}
var err error
return frameworks, err
}
func (lp *LoadPolicy) ListFrameworks() ([]string, error) {
fwNames := []string{}
framework := &reporthandling.Framework{}
for _, f := range lp.filePaths {
file, err := os.ReadFile(f)
if err == nil {
if err := json.Unmarshal(file, framework); err == nil {
if !contains(fwNames, framework.Name) {
fwNames = append(fwNames, framework.Name)
}
}
}
}
return fwNames, nil
}
func (lp *LoadPolicy) ListControls(listType ListType) ([]string, error) {
// TODO - Support
return []string{}, fmt.Errorf("loading controls list from file is not supported")
}
func (lp *LoadPolicy) GetExceptions(clusterName string) ([]armotypes.PostureExceptionPolicy, error) {
filePath := lp.filePath()
exception := []armotypes.PostureExceptionPolicy{}
f, err := os.ReadFile(filePath)
if err != nil {
return nil, err
}
err = json.Unmarshal(f, &exception)
return exception, err
}
func (lp *LoadPolicy) GetControlsInputs(clusterName string) (map[string][]string, error) {
filePath := lp.filePath()
accountConfig := &armotypes.CustomerConfig{}
f, err := os.ReadFile(filePath)
if err != nil {
return nil, err
}
if err = json.Unmarshal(f, &accountConfig.Settings.PostureControlInputs); err == nil {
return accountConfig.Settings.PostureControlInputs, nil
}
return nil, err
}
// temporary support for a list of files
func (lp *LoadPolicy) filePath() string {
if len(lp.filePaths) > 0 {
return lp.filePaths[0]
}
return ""
}
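And a matching sketch for the file-based getter; the path is a placeholder:
    lp := NewLoadPolicy([]string{"/path/to/nsa.json"}) // placeholder path
    framework, err := lp.GetFramework("nsa")
    _, _ = framework, err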


@@ -0,0 +1,13 @@
package getter
import (
"path/filepath"
)
var mockFrameworkBasePath = filepath.Join("examples", "mocks", "frameworks")
func MockNewLoadPolicy() *LoadPolicy {
return &LoadPolicy{
filePaths: []string{""},
}
}

cautils/jsonutils.go (new file)

@@ -0,0 +1,23 @@
package cautils
import (
"bytes"
"encoding/json"
)
const (
empty = ""
tab = " "
)
func PrettyJson(data interface{}) ([]byte, error) {
buffer := new(bytes.Buffer)
encoder := json.NewEncoder(buffer)
encoder.SetIndent(empty, tab)
err := encoder.Encode(data)
if err != nil {
return nil, err
}
return buffer.Bytes(), nil
}
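A quick usage sketch ("fmt" is assumed to be imported by the caller):
    data := map[string]string{"cluster": "minikube"}
    if pretty, err := PrettyJson(data); err == nil {
        fmt.Println(string(pretty))
    }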


@@ -1,265 +0,0 @@
package k8sinterface
import (
"bytes"
"encoding/base64"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ecr"
"github.com/docker/docker/api/types"
)
// For GCR there are some permissions one need to assign in order to allow ARMO to pull images:
// https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity
// gcloud iam service-accounts create armo-controller-sa
// gcloud projects add-iam-policy-binding <PROJECT_NAME> --role roles/storage.objectViewer --member "serviceAccount:armo-controller-sa@<PROJECT_NAME>.iam.gserviceaccount.com"
// gcloud iam service-accounts add-iam-policy-binding --role roles/iam.workloadIdentityUser --member "serviceAccount:<PROJECT_NAME>.svc.id.goog[cyberarmor-system/ca-controller-service-account]" armo-controller-sa@<PROJECT_NAME>.iam.gserviceaccount.com
// kubectl annotate serviceaccount --overwrite --namespace cyberarmor-system ca-controller-service-account iam.gke.io/gcp-service-account=armo-controller-sa@<PROJECT_NAME>.iam.gserviceaccount.com
const (
gcrDefaultServiceAccountName = "default"
// armoServiceAccountName = "ca-controller-service-account"
)
var (
httpClient = http.Client{Timeout: 5 * time.Second}
)
// CheckIsECRImage check if this image is suspected as ECR hosted image
func CheckIsECRImage(imageTag string) bool {
return strings.Contains(imageTag, "dkr.ecr")
}
// GetLoginDetailsForECR return user name + password using the default iam-role OR ~/.aws/config of the machine
func GetLoginDetailsForECR(imageTag string) (string, string, error) {
// imageTag := "015253967648.dkr.ecr.eu-central-1.amazonaws.com/armo:1"
imageTagSlices := strings.Split(imageTag, ".")
repo := imageTagSlices[0]
region := imageTagSlices[3]
mySession := session.Must(session.NewSession())
ecrClient := ecr.New(mySession, aws.NewConfig().WithRegion(region))
input := &ecr.GetAuthorizationTokenInput{
RegistryIds: []*string{&repo},
}
res, err := ecrClient.GetAuthorizationToken(input)
if err != nil {
return "", "", fmt.Errorf("in PullFromECR, failed to GetAuthorizationToken: %v", err)
}
res64 := (*res.AuthorizationData[0].AuthorizationToken)
resB, err := base64.StdEncoding.DecodeString(res64)
if err != nil {
return "", "", fmt.Errorf("in PullFromECR, failed to DecodeString: %v", err)
}
delimiterIdx := bytes.IndexByte(resB, ':')
// userName := resB[:delimiterIdx]
// resB = resB[delimiterIdx+1:]
// resB, err = base64.StdEncoding.DecodeString(string(resB))
// if err != nil {
// t.Errorf("failed to DecodeString #2: %v\n\n", err)
// }
return string(resB[:delimiterIdx]), string(resB[delimiterIdx+1:]), nil
}
func CheckIsACRImage(imageTag string) bool {
// atest1.azurecr.io/go-inf:1
return strings.Contains(imageTag, ".azurecr.io/")
}
type azureADDResponseJson struct {
AccessToken string `json:"access_token"`
RefreshToken string `json:"refresh_token"`
ExpiresIn string `json:"expires_in"`
ExpiresOn string `json:"expires_on"`
NotBefore string `json:"not_before"`
Resource string `json:"resource"`
TokenType string `json:"token_type"`
}
func getAzureAADAccessToken() (string, error) {
msi_endpoint, err := url.Parse("http://169.254.169.254/metadata/identity/oauth2/token?api-version=2018-02-01")
if err != nil {
return "", fmt.Errorf("creating URL : %v", err)
}
msi_parameters := url.Values{}
msi_parameters.Add("resource", "https://management.azure.com/")
msi_parameters.Add("api-version", "2018-02-01")
msi_endpoint.RawQuery = msi_parameters.Encode()
req, err := http.NewRequest("GET", msi_endpoint.String(), nil)
if err != nil {
return "", fmt.Errorf("creating HTTP request : %v", err)
}
req.Header.Add("Metadata", "true")
// Call managed services for Azure resources token endpoint
resp, err := httpClient.Do(req)
if err != nil {
return "", fmt.Errorf("calling token endpoint : %v", err)
}
// Pull out response body
responseBytes, err := ioutil.ReadAll(resp.Body)
defer resp.Body.Close()
if err != nil {
return "", fmt.Errorf("reading response body : %v", err)
}
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
return "", fmt.Errorf("azure ActiveDirectory AT resp: %v, %v", resp.Status, string(responseBytes))
}
// Unmarshall response body into struct
var r azureADDResponseJson
err = json.Unmarshal(responseBytes, &r)
if err != nil {
return "", fmt.Errorf("unmarshalling the response: %v", err)
}
return r.AccessToken, nil
}
// GetLoginDetailsForAzurCR return user name + password to use
func GetLoginDetailsForAzurCR(imageTag string) (string, string, error) {
// imageTag := "atest1.azurecr.io/go-inf:1"
imageTagSlices := strings.Split(imageTag, "/")
azureIdensAT, err := getAzureAADAccessToken()
if err != nil {
return "", "", err
}
atMap := make(map[string]interface{})
azureIdensATSlices := strings.Split(azureIdensAT, ".")
if len(azureIdensATSlices) < 2 {
return "", "", fmt.Errorf("len(azureIdensATSlices) < 2")
}
resB, err := base64.RawStdEncoding.DecodeString(azureIdensATSlices[1])
if err != nil {
return "", "", fmt.Errorf("in GetLoginDetailsForAzurCR, failed to DecodeString: %v, %s", err, azureIdensATSlices[1])
}
if err := json.Unmarshal(resB, &atMap); err != nil {
return "", "", fmt.Errorf("failed to unmarshal azureIdensAT: %v, %s", err, string(resB))
}
// excahnging AAD for ACR refresh token
refreshToken, err := excahngeAzureAADAccessTokenForACRRefreshToken(imageTagSlices[0], fmt.Sprintf("%v", atMap["tid"]), azureIdensAT)
if err != nil {
return "", "", fmt.Errorf("failed to excahngeAzureAADAccessTokenForACRRefreshToken: %v, registry: %s, tenantID: %s, azureAADAT: %s", err, imageTagSlices[0], fmt.Sprintf("%v", atMap["tid"]), azureIdensAT)
}
return "00000000-0000-0000-0000-000000000000", refreshToken, nil
}
func excahngeAzureAADAccessTokenForACRRefreshToken(registry, tenantID, azureAADAT string) (string, error) {
msi_parameters := url.Values{}
msi_parameters.Add("service", registry)
msi_parameters.Add("grant_type", "access_token")
msi_parameters.Add("tenant", tenantID)
msi_parameters.Add("access_token", azureAADAT)
postBodyStr := msi_parameters.Encode()
req, err := http.NewRequest("POST", fmt.Sprintf("https://%v/oauth2/exchange", registry), strings.NewReader(postBodyStr))
if err != nil {
return "", fmt.Errorf("creating HTTP request : %v", err)
}
req.Header.Add("Metadata", "true")
req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
// Call managed services for Azure resources token endpoint
resp, err := httpClient.Do(req)
if err != nil {
return "", fmt.Errorf("calling token endpoint : %v", err)
}
// Pull out response body
responseBytes, err := ioutil.ReadAll(resp.Body)
defer resp.Body.Close()
if err != nil {
return "", fmt.Errorf("reading response body : %v", err)
}
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
return "", fmt.Errorf("azure exchange AT resp: %v, %v", resp.Status, string(responseBytes))
}
resultMap := make(map[string]string)
err = json.Unmarshal(responseBytes, &resultMap)
if err != nil {
return "", fmt.Errorf("unmarshalling the response: %v", err)
}
return resultMap["refresh_token"], nil
}
func CheckIsGCRImage(imageTag string) bool {
// gcr.io/elated-pottery-310110/golang-inf:2
return strings.Contains(imageTag, "gcr.io/")
}
// GetLoginDetailsForGCR return user name + password to use
func GetLoginDetailsForGCR(imageTag string) (string, string, error) {
msi_endpoint, err := url.Parse(fmt.Sprintf("http://169.254.169.254/computeMetadata/v1/instance/service-accounts/%s/token", gcrDefaultServiceAccountName))
if err != nil {
return "", "", fmt.Errorf("creating URL : %v", err)
}
req, err := http.NewRequest("GET", msi_endpoint.String(), nil)
if err != nil {
return "", "", fmt.Errorf("creating HTTP request : %v", err)
}
req.Header.Add("Metadata-Flavor", "Google")
// Call managed services for Azure resources token endpoint
resp, err := httpClient.Do(req)
if err != nil {
return "", "", fmt.Errorf("calling token endpoint : %v", err)
}
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
return "", "", fmt.Errorf("HTTP Status : %v, make sure the '%s' service account is configured for ARMO pod", resp.Status, gcrDefaultServiceAccountName)
}
defer resp.Body.Close()
respMap := make(map[string]interface{})
if err := json.NewDecoder(resp.Body).Decode(&respMap); err != nil {
return "", "", fmt.Errorf("json Decode : %v", err)
}
return "oauth2accesstoken", fmt.Sprintf("%v", respMap["access_token"]), nil
}
func GetCloudVendorRegistryCredentials(imageTag string) (map[string]types.AuthConfig, error) {
secrets := map[string]types.AuthConfig{}
var errRes error
if CheckIsACRImage(imageTag) {
userName, password, err := GetLoginDetailsForAzurCR(imageTag)
if err != nil {
errRes = fmt.Errorf("failed to GetLoginDetailsForACR(%s): %v", imageTag, err)
} else {
secrets[imageTag] = types.AuthConfig{
Username: userName,
Password: password,
}
}
}
if CheckIsECRImage(imageTag) {
userName, password, err := GetLoginDetailsForECR(imageTag)
if err != nil {
errRes = fmt.Errorf("failed to GetLoginDetailsForECR(%s): %v", imageTag, err)
} else {
secrets[imageTag] = types.AuthConfig{
Username: userName,
Password: password,
}
}
}
if CheckIsGCRImage(imageTag) {
userName, password, err := GetLoginDetailsForGCR(imageTag)
if err != nil {
errRes = fmt.Errorf("failed to GetLoginDetailsForGCR(%s): %v", imageTag, err)
} else {
secrets[imageTag] = types.AuthConfig{
Username: userName,
Password: password,
}
}
}
return secrets, errRes
}


@@ -1,75 +0,0 @@
package k8sinterface
import (
"context"
"fmt"
"os"
"strings"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
// DO NOT REMOVE - load cloud providers auth
_ "k8s.io/client-go/plugin/pkg/client/auth"
"sigs.k8s.io/controller-runtime/pkg/client/config"
)
// K8SConfig pointer to k8s config
var K8SConfig *restclient.Config
// KubernetesApi -
type KubernetesApi struct {
KubernetesClient kubernetes.Interface
DynamicClient dynamic.Interface
Context context.Context
}
// NewKubernetesApi -
func NewKubernetesApi() *KubernetesApi {
kubernetesClient, err := kubernetes.NewForConfig(GetK8sConfig())
if err != nil {
fmt.Printf("Failed to load config file, reason: %s", err.Error())
os.Exit(1)
}
dynamicClient, err := dynamic.NewForConfig(K8SConfig)
if err != nil {
fmt.Printf("Failed to load config file, reason: %s", err.Error())
os.Exit(1)
}
return &KubernetesApi{
KubernetesClient: kubernetesClient,
DynamicClient: dynamicClient,
Context: context.Background(),
}
}
// RunningIncluster whether running in cluster
var RunningIncluster bool
// LoadK8sConfig load config from local file or from cluster
func LoadK8sConfig() error {
kubeconfig, err := config.GetConfig()
if err != nil {
return fmt.Errorf("failed to load kubernetes config: %s\n", strings.ReplaceAll(err.Error(), "KUBERNETES_MASTER", "KUBECONFIG"))
}
if _, err := restclient.InClusterConfig(); err == nil {
RunningIncluster = true
}
K8SConfig = kubeconfig
return nil
}
// GetK8sConfig get config. load if not loaded yet
func GetK8sConfig() *restclient.Config {
if K8SConfig == nil {
if err := LoadK8sConfig(); err != nil {
// print error
fmt.Printf("%s", err.Error())
os.Exit(1)
}
}
return K8SConfig
}


@@ -1,34 +0,0 @@
package k8sinterface
import (
"testing"
"github.com/armosec/kubescape/cautils/cautils"
)
func TestGetGroupVersionResource(t *testing.T) {
wlid := "wlid://cluster-david-v1/namespace-default/deployment-nginx-deployment"
r, err := GetGroupVersionResource(cautils.GetKindFromWlid(wlid))
if err != nil {
t.Error(err)
return
}
if r.Group != "apps" {
t.Errorf("wrong group")
}
if r.Version != "v1" {
t.Errorf("wrong Version")
}
if r.Resource != "deployments" {
t.Errorf("wrong Resource")
}
r2, err := GetGroupVersionResource("NetworkPolicy")
if err != nil {
t.Error(err)
return
}
if r2.Resource != "networkpolicies" {
t.Errorf("wrong Resource")
}
}


@@ -1,145 +0,0 @@
package k8sinterface
import (
"fmt"
"strings"
"github.com/armosec/kubescape/cautils/cautils"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic"
//
// Uncomment to load all auth plugins
// _ "k8s.io/client-go/plugin/pkg/client/auth
//
// Or uncomment to load specific auth plugins
// _ "k8s.io/client-go/plugin/pkg/client/auth/azure"
// _ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
// _ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
// _ "k8s.io/client-go/plugin/pkg/client/auth/openstack"
)
func (k8sAPI *KubernetesApi) GetWorkloadByWlid(wlid string) (*Workload, error) {
return k8sAPI.GetWorkload(cautils.GetNamespaceFromWlid(wlid), cautils.GetKindFromWlid(wlid), cautils.GetNameFromWlid(wlid))
}
func (k8sAPI *KubernetesApi) GetWorkload(namespace, kind, name string) (*Workload, error) {
groupVersionResource, err := GetGroupVersionResource(kind)
if err != nil {
return nil, err
}
w, err := k8sAPI.ResourceInterface(&groupVersionResource, namespace).Get(k8sAPI.Context, name, metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("failed to GET resource, kind: '%s', namespace: '%s', name: '%s', reason: %s", kind, namespace, name, err.Error())
}
return NewWorkloadObj(w.Object), nil
}
func (k8sAPI *KubernetesApi) ListWorkloads(groupVersionResource *schema.GroupVersionResource, namespace string, podLabels, fieldSelector map[string]string) ([]Workload, error) {
listOptions := metav1.ListOptions{}
if podLabels != nil && len(podLabels) > 0 {
set := labels.Set(podLabels)
listOptions.LabelSelector = SelectorToString(set)
}
if fieldSelector != nil && len(fieldSelector) > 0 {
set := labels.Set(fieldSelector)
listOptions.FieldSelector = SelectorToString(set)
}
uList, err := k8sAPI.ResourceInterface(groupVersionResource, namespace).List(k8sAPI.Context, listOptions)
if err != nil {
return nil, fmt.Errorf("failed to LIST resources, reason: %s", err.Error())
}
workloads := make([]Workload, len(uList.Items))
for i := range uList.Items {
workloads[i] = *NewWorkloadObj(uList.Items[i].Object)
}
return workloads, nil
}
func (k8sAPI *KubernetesApi) DeleteWorkloadByWlid(wlid string) error {
groupVersionResource, err := GetGroupVersionResource(cautils.GetKindFromWlid(wlid))
if err != nil {
return err
}
err = k8sAPI.ResourceInterface(&groupVersionResource, cautils.GetNamespaceFromWlid(wlid)).Delete(k8sAPI.Context, cautils.GetNameFromWlid(wlid), metav1.DeleteOptions{})
if err != nil {
return fmt.Errorf("failed to DELETE resource, workloadID: '%s', reason: %s", wlid, err.Error())
}
return nil
}
func (k8sAPI *KubernetesApi) CreateWorkload(workload *Workload) (*Workload, error) {
groupVersionResource, err := GetGroupVersionResource(workload.GetKind())
if err != nil {
return nil, err
}
obj, err := workload.ToUnstructured()
if err != nil {
return nil, err
}
w, err := k8sAPI.ResourceInterface(&groupVersionResource, workload.GetNamespace()).Create(k8sAPI.Context, obj, metav1.CreateOptions{})
if err != nil {
return nil, fmt.Errorf("failed to CREATE resource, workload: '%s', reason: %s", workload.Json(), err.Error())
}
return NewWorkloadObj(w.Object), nil
}
func (k8sAPI *KubernetesApi) UpdateWorkload(workload *Workload) (*Workload, error) {
groupVersionResource, err := GetGroupVersionResource(workload.GetKind())
if err != nil {
return nil, err
}
obj, err := workload.ToUnstructured()
if err != nil {
return nil, err
}
w, err := k8sAPI.ResourceInterface(&groupVersionResource, workload.GetNamespace()).Update(k8sAPI.Context, obj, metav1.UpdateOptions{})
if err != nil {
return nil, fmt.Errorf("failed to UPDATE resource, workload: '%s', reason: %s", workload.Json(), err.Error())
}
return NewWorkloadObj(w.Object), nil
}
func (k8sAPI *KubernetesApi) GetNamespace(ns string) (*Workload, error) {
groupVersionResource, err := GetGroupVersionResource("namespace")
if err != nil {
return nil, err
}
w, err := k8sAPI.DynamicClient.Resource(groupVersionResource).Get(k8sAPI.Context, ns, metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("failed to get namespace: '%s', reason: %s", ns, err.Error())
}
return NewWorkloadObj(w.Object), nil
}
func (k8sAPI *KubernetesApi) ResourceInterface(resource *schema.GroupVersionResource, namespace string) dynamic.ResourceInterface {
if IsNamespaceScope(resource.Group, resource.Resource) {
return k8sAPI.DynamicClient.Resource(*resource).Namespace(namespace)
}
return k8sAPI.DynamicClient.Resource(*resource)
}
func (k8sAPI *KubernetesApi) CalculateWorkloadParentRecursive(workload *Workload) (string, string, error) {
ownerReferences, err := workload.GetOwnerReferences() // OwnerReferences in workload
if err != nil {
return workload.GetKind(), workload.GetName(), err
}
if len(ownerReferences) == 0 {
return workload.GetKind(), workload.GetName(), nil // parent found
}
ownerReference := ownerReferences[0]
parentWorkload, err := k8sAPI.GetWorkload(workload.GetNamespace(), ownerReference.Kind, ownerReference.Name)
if err != nil {
if strings.Contains(err.Error(), "not found in resourceMap") { // if parent is RCD
return workload.GetKind(), workload.GetName(), nil // parent found
}
return workload.GetKind(), workload.GetName(), err
}
return k8sAPI.CalculateWorkloadParentRecursive(parentWorkload)
}


@@ -1,43 +0,0 @@
package k8sinterface
import (
"context"
"k8s.io/apimachinery/pkg/runtime"
dynamicfake "k8s.io/client-go/dynamic/fake"
kubernetesfake "k8s.io/client-go/kubernetes/fake"
//
// metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
// Uncomment to load all auth plugins
// _ "k8s.io/client-go/plugin/pkg/client/auth
//
// Or uncomment to load specific auth plugins
// _ "k8s.io/client-go/plugin/pkg/client/auth/azure"
// _ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
// _ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
// _ "k8s.io/client-go/plugin/pkg/client/auth/openstack"
)
// NewKubernetesApi -
func NewKubernetesApiMock() *KubernetesApi {
return &KubernetesApi{
KubernetesClient: kubernetesfake.NewSimpleClientset(),
DynamicClient: dynamicfake.NewSimpleDynamicClient(&runtime.Scheme{}),
Context: context.Background(),
}
}
// func TestListDynamic(t *testing.T) {
// k8s := NewKubernetesApi()
// resource := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"}
// clientResource, err := k8s.DynamicClient.Resource(resource).Namespace("default").List(k8s.Context, metav1.ListOptions{})
// if err != nil {
// t.Errorf("err: %v", err)
// } else {
// bla, _ := json.Marshal(clientResource)
// // t.Errorf("BearerToken: %v", *K8SConfig)
// // ioutil.WriteFile("bla.json", bla, 777)
// t.Errorf("clientResource: %s", string(bla))
// }
// }


@@ -1,66 +0,0 @@
package k8sinterface
import (
"sort"
"strings"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels"
)
//
// Uncomment to load all auth plugins
// _ "k8s.io/client-go/plugin/pkg/client/auth
//
// Or uncomment to load specific auth plugins
// _ "k8s.io/client-go/plugin/pkg/client/auth/azure"
// _ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
// _ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
// _ "k8s.io/client-go/plugin/pkg/client/auth/openstack"
func ConvertUnstructuredSliceToMap(unstructuredSlice []unstructured.Unstructured) []map[string]interface{} {
converted := make([]map[string]interface{}, len(unstructuredSlice))
for i := range unstructuredSlice {
converted[i] = unstructuredSlice[i].Object
}
return converted
}
func FilterOutOwneredResources(result []unstructured.Unstructured) []unstructured.Unstructured {
response := []unstructured.Unstructured{}
recognizedOwners := []string{"Deployment", "ReplicaSet", "DaemonSet", "StatefulSet", "Job", "CronJob"}
for i := range result {
ownerReferences := result[i].GetOwnerReferences()
if len(ownerReferences) == 0 {
response = append(response, result[i])
} else if !IsStringInSlice(recognizedOwners, ownerReferences[0].Kind) {
response = append(response, result[i])
}
}
return response
}
func IsStringInSlice(slice []string, val string) bool {
for _, item := range slice {
if item == val {
return true
}
}
return false
}
// SelectorToString returns all labels listed as a human readable string.
// Conveniently, exactly the format that ParseSelector takes.
func SelectorToString(ls labels.Set) string {
selector := make([]string, 0, len(ls))
for key, value := range ls {
if value != "" {
selector = append(selector, key+"="+value)
} else {
selector = append(selector, key)
}
}
// Sort for determinism.
sort.StringSlice(selector).Sort()
return strings.Join(selector, ",")
}
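A small sketch of SelectorToString: keys with empty values are emitted bare and the result is sorted, so it can be passed directly as a ListOptions.LabelSelector. The label values are illustrative.
// exampleSelector shows the deterministic selector string produced above.
func exampleSelector() string {
	ls := labels.Set{"app": "demoservice-server", "tier": ""}
	return SelectorToString(ls) // "app=demoservice-server,tier"
}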

View File

@@ -1,10 +0,0 @@
package k8sinterface
import "testing"
func TestConvertUnstructuredSliceToMap(t *testing.T) {
converted := ConvertUnstructuredSliceToMap(V1KubeSystemNamespaceMock().Items)
if len(converted) == 0 { // != 7
t.Errorf("len(converted) == 0")
}
}

View File

@@ -1,71 +0,0 @@
package k8sinterface
import (
"context"
"github.com/armosec/kubescape/cautils/cautils"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
)
func IsAttached(labels map[string]string) *bool {
return IsLabel(labels, cautils.ArmoAttach)
}
func IsAgentCompatibleLabel(labels map[string]string) *bool {
return IsLabel(labels, cautils.ArmoCompatibleLabel)
}
func IsAgentCompatibleAnnotation(annotations map[string]string) *bool {
return IsLabel(annotations, cautils.ArmoCompatibleAnnotation)
}
func SetAgentCompatibleLabel(labels map[string]string, val bool) {
SetLabel(labels, cautils.ArmoCompatibleLabel, val)
}
func SetAgentCompatibleAnnotation(annotations map[string]string, val bool) {
SetLabel(annotations, cautils.ArmoCompatibleAnnotation, val)
}
func IsLabel(labels map[string]string, key string) *bool {
if len(labels) == 0 {
return nil
}
var k bool
if l, ok := labels[key]; ok {
if l == "true" {
k = true
} else if l == "false" {
k = false
}
return &k
}
return nil
}
func SetLabel(labels map[string]string, key string, val bool) {
if labels == nil {
return
}
v := ""
if val {
v = "true"
} else {
v = "false"
}
labels[key] = v
}
func (k8sAPI *KubernetesApi) ListAttachedPods(namespace string) ([]corev1.Pod, error) {
return k8sAPI.ListPods(namespace, map[string]string{cautils.ArmoAttach: cautils.BoolToString(true)})
}
func (k8sAPI *KubernetesApi) ListPods(namespace string, podLabels map[string]string) ([]corev1.Pod, error) {
listOptions := metav1.ListOptions{}
if len(podLabels) > 0 {
set := labels.Set(podLabels)
listOptions.LabelSelector = set.AsSelector().String()
}
pods, err := k8sAPI.KubernetesClient.CoreV1().Pods(namespace).List(context.Background(), listOptions)
if err != nil {
return []corev1.Pod{}, err
}
return pods.Items, nil
}
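A sketch of the tri-state label helpers above: IsAttached returns nil when the attach label is absent, so callers must distinguish "unset" from "false". The input map is illustrative.
// exampleIsAttached interprets the *bool returned by IsAttached.
func exampleIsAttached(podLabels map[string]string) bool {
	attached := IsAttached(podLabels) // nil means the attach label is not set at all
	return attached != nil && *attached
}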

File diff suppressed because one or more lines are too long

View File

@@ -1,142 +0,0 @@
package k8sinterface
import (
"fmt"
"strings"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/runtime/schema"
)
const ValueNotFound = -1
// https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#-strong-api-groups-strong-
var ResourceGroupMapping = map[string]string{
"services": "/v1",
"pods": "/v1",
"replicationcontrollers": "/v1",
"podtemplates": "/v1",
"namespaces": "/v1",
"nodes": "/v1",
"configmaps": "/v1",
"secrets": "/v1",
"serviceaccounts": "/v1",
"persistentvolumeclaims": "/v1",
"limitranges": "/v1",
"resourcequotas": "/v1",
"daemonsets": "apps/v1",
"deployments": "apps/v1",
"replicasets": "apps/v1",
"statefulsets": "apps/v1",
"controllerrevisions": "apps/v1",
"jobs": "batch/v1",
"cronjobs": "batch/v1beta1",
"horizontalpodautoscalers": "autoscaling/v1",
"ingresses": "extensions/v1beta1",
"networkpolicies": "networking.k8s.io/v1",
"clusterroles": "rbac.authorization.k8s.io/v1",
"clusterrolebindings": "rbac.authorization.k8s.io/v1",
"roles": "rbac.authorization.k8s.io/v1",
"rolebindings": "rbac.authorization.k8s.io/v1",
"mutatingwebhookconfigurations": "admissionregistration.k8s.io/v1",
"validatingwebhookconfigurations": "admissionregistration.k8s.io/v1",
}
var GroupsClusterScope = []string{}
var ResourceClusterScope = []string{"nodes", "namespaces", "clusterroles", "clusterrolebindings"}
func GetGroupVersionResource(resource string) (schema.GroupVersionResource, error) {
resource = updateResourceKind(resource)
if r, ok := ResourceGroupMapping[resource]; ok {
gv := strings.Split(r, "/")
return schema.GroupVersionResource{Group: gv[0], Version: gv[1], Resource: resource}, nil
}
return schema.GroupVersionResource{}, fmt.Errorf("resource '%s' not found in resourceMap", resource)
}
func IsNamespaceScope(apiGroup, resource string) bool {
return StringInSlice(GroupsClusterScope, apiGroup) == ValueNotFound &&
StringInSlice(ResourceClusterScope, resource) == ValueNotFound
}
func StringInSlice(strSlice []string, str string) int {
for i := range strSlice {
if strSlice[i] == str {
return i
}
}
return ValueNotFound
}
func JoinResourceTriplets(group, version, resource string) string {
return fmt.Sprintf("%s/%s/%s", group, version, resource)
}
func GetResourceTriplets(group, version, resource string) []string {
resourceTriplets := []string{}
if resource == "" {
// load full map
for k, v := range ResourceGroupMapping {
g := strings.Split(v, "/")
resourceTriplets = append(resourceTriplets, JoinResourceTriplets(g[0], g[1], k))
}
} else if version == "" {
// load by resource
if v, ok := ResourceGroupMapping[resource]; ok {
g := strings.Split(v, "/")
if group == "" {
group = g[0]
}
resourceTriplets = append(resourceTriplets, JoinResourceTriplets(group, g[1], resource))
} else {
glog.Errorf("Resource '%s' unknown", resource)
}
} else if group == "" {
// load by resource and version
if v, ok := ResourceGroupMapping[resource]; ok {
g := strings.Split(v, "/")
resourceTriplets = append(resourceTriplets, JoinResourceTriplets(g[0], version, resource))
} else {
glog.Errorf("Resource '%s' unknown", resource)
}
} else {
resourceTriplets = append(resourceTriplets, JoinResourceTriplets(group, version, resource))
}
return resourceTriplets
}
func ResourceGroupToString(group, version, resource string) []string {
if group == "*" {
group = ""
}
if version == "*" {
version = ""
}
if resource == "*" {
resource = ""
}
resource = updateResourceKind(resource)
return GetResourceTriplets(group, version, resource)
}
func StringToResourceGroup(str string) (string, string, string) {
splitted := strings.Split(str, "/")
for i := range splitted {
if splitted[i] == "*" {
splitted[i] = ""
}
}
return splitted[0], splitted[1], splitted[2]
}
func updateResourceKind(resource string) string {
resource = strings.ToLower(resource)
if resource != "" && !strings.HasSuffix(resource, "s") {
if strings.HasSuffix(resource, "y") {
return fmt.Sprintf("%sies", strings.TrimSuffix(resource, "y")) // e.g. NetworkPolicy -> networkpolicies
} else {
return fmt.Sprintf("%ss", resource) // add 's' at the end of a resource
}
}
return resource
}
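A sketch of the resource-mapping helpers above: GetGroupVersionResource normalizes a kind to its plural, lower-case resource name and looks up its group/version in ResourceGroupMapping.
// exampleDeploymentGVR resolves the GroupVersionResource for Deployments.
func exampleDeploymentGVR() (schema.GroupVersionResource, error) {
	// "Deployment" is lower-cased and pluralized to "deployments" by updateResourceKind,
	// then mapped to the apps/v1 group/version.
	return GetGroupVersionResource("Deployment")
}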

View File

@@ -1,22 +0,0 @@
package k8sinterface
import "testing"
func TestResourceGroupToString(t *testing.T) {
allResources := ResourceGroupToString("*", "*", "*")
if len(allResources) != len(ResourceGroupMapping) {
t.Errorf("Expected len: %d, received: %d", len(ResourceGroupMapping), len(allResources))
}
pod := ResourceGroupToString("*", "*", "Pod")
if len(pod) == 0 || pod[0] != "/v1/pods" {
t.Errorf("pod: %v", pod)
}
deployments := ResourceGroupToString("*", "*", "Deployment")
if len(deployments) == 0 || deployments[0] != "apps/v1/deployments" {
t.Errorf("deployments: %v", deployments)
}
cronjobs := ResourceGroupToString("*", "*", "cronjobs")
if len(cronjobs) == 0 || cronjobs[0] != "batch/v1beta1/cronjobs" {
t.Errorf("cronjobs: %v", cronjobs)
}
}

View File

@@ -1,161 +0,0 @@
package k8sinterface
import (
"encoding/json"
"github.com/armosec/kubescape/cautils/apis"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
type IWorkload interface {
IBasicWorkload
// Convert
ToUnstructured() (*unstructured.Unstructured, error)
ToString() string
Json() string // DEPRECATED
// GET
GetWlid() string
GetJobID() *apis.JobTracking
GetVersion() string
GetGroup() string
// SET
SetWlid(string)
SetInject()
SetIgnore()
SetUpdateTime()
SetJobID(apis.JobTracking)
SetCompatible()
SetIncompatible()
SetReplaceheaders()
// EXIST
IsIgnore() bool
IsInject() bool
IsAttached() bool
IsCompatible() bool
IsIncompatible() bool
// REMOVE
RemoveWlid()
RemoveSecretData()
RemoveInject()
RemoveIgnore()
RemoveUpdateTime()
RemoveJobID()
RemoveCompatible()
RemoveArmoMetadata()
RemoveArmoLabels()
RemoveArmoAnnotations()
}
type IBasicWorkload interface {
// Set
SetKind(string)
SetWorkload(map[string]interface{})
SetLabel(key, value string)
SetAnnotation(key, value string)
SetNamespace(string)
SetName(string)
// Get
GetNamespace() string
GetName() string
GetGenerateName() string
GetApiVersion() string
GetKind() string
GetInnerAnnotation(string) (string, bool)
GetPodAnnotation(string) (string, bool)
GetAnnotation(string) (string, bool)
GetLabel(string) (string, bool)
GetAnnotations() map[string]string
GetInnerAnnotations() map[string]string
GetPodAnnotations() map[string]string
GetLabels() map[string]string
GetInnerLabels() map[string]string
GetPodLabels() map[string]string
GetVolumes() ([]corev1.Volume, error)
GetReplicas() int
GetContainers() ([]corev1.Container, error)
GetInitContainers() ([]corev1.Container, error)
GetOwnerReferences() ([]metav1.OwnerReference, error)
GetImagePullSecret() ([]corev1.LocalObjectReference, error)
GetServiceAccountName() string
GetSelector() (*metav1.LabelSelector, error)
GetResourceVersion() string
GetUID() string
GetPodSpec() (*corev1.PodSpec, error)
GetWorkload() map[string]interface{}
// REMOVE
RemoveLabel(string)
RemoveAnnotation(string)
RemovePodStatus()
RemoveResourceVersion()
}
type Workload struct {
workload map[string]interface{}
}
func NewWorkload(bWorkload []byte) (*Workload, error) {
workload := make(map[string]interface{})
if bWorkload != nil {
if err := json.Unmarshal(bWorkload, &workload); err != nil {
return nil, err
}
}
return &Workload{
workload: workload,
}, nil
}
func NewWorkloadObj(workload map[string]interface{}) *Workload {
return &Workload{
workload: workload,
}
}
func (w *Workload) Json() string {
return w.ToString()
}
func (w *Workload) ToString() string {
if w.GetWorkload() == nil {
return ""
}
bWorkload, err := json.Marshal(w.GetWorkload())
if err != nil {
return err.Error()
}
return string(bWorkload)
}
func (workload *Workload) DeepCopy(w map[string]interface{}) {
workload.workload = make(map[string]interface{})
byt, _ := json.Marshal(w)
json.Unmarshal(byt, &workload.workload)
}
func (w *Workload) ToUnstructured() (*unstructured.Unstructured, error) {
obj := &unstructured.Unstructured{}
if w.workload == nil {
return obj, nil
}
bWorkload, err := json.Marshal(w.workload)
if err != nil {
return obj, err
}
if err := json.Unmarshal(bWorkload, obj); err != nil {
return obj, err
}
return obj, nil
}
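A minimal sketch of constructing a Workload from raw JSON and reading basic metadata; the manifest is illustrative.
// exampleNewWorkload builds a Workload from a raw manifest and reads its name.
func exampleNewWorkload() (string, error) {
	raw := []byte(`{"apiVersion":"v1","kind":"Pod","metadata":{"name":"demo","namespace":"default"}}`)
	w, err := NewWorkload(raw)
	if err != nil {
		return "", err
	}
	return w.GetName(), nil // "demo"
}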

View File

@@ -1,642 +0,0 @@
package k8sinterface
import (
"encoding/json"
"fmt"
"strconv"
"strings"
"time"
"github.com/armosec/kubescape/cautils/apis"
"github.com/armosec/kubescape/cautils/cautils"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// ======================================= DELETE ========================================
func (w *Workload) RemoveInject() {
w.RemovePodLabel(cautils.CAInject) // DEPRECATED
w.RemovePodLabel(cautils.CAAttachLabel) // DEPRECATED
w.RemovePodLabel(cautils.ArmoAttach)
w.RemoveLabel(cautils.CAInject) // DEPRECATED
w.RemoveLabel(cautils.CAAttachLabel) // DEPRECATED
w.RemoveLabel(cautils.ArmoAttach)
}
func (w *Workload) RemoveIgnore() {
w.RemovePodLabel(cautils.CAIgnore) // DEPRECATED
w.RemovePodLabel(cautils.ArmoAttach)
w.RemoveLabel(cautils.CAIgnore) // DEPRECATED
w.RemoveLabel(cautils.ArmoAttach)
}
func (w *Workload) RemoveWlid() {
w.RemovePodAnnotation(cautils.CAWlid) // DEPRECATED
w.RemovePodAnnotation(cautils.ArmoWlid)
w.RemoveAnnotation(cautils.CAWlid) // DEPRECATED
w.RemoveAnnotation(cautils.ArmoWlid)
}
func (w *Workload) RemoveCompatible() {
w.RemovePodAnnotation(cautils.ArmoCompatibleAnnotation)
}
func (w *Workload) RemoveJobID() {
w.RemovePodAnnotation(cautils.ArmoJobIDPath)
w.RemovePodAnnotation(cautils.ArmoJobParentPath)
w.RemovePodAnnotation(cautils.ArmoJobActionPath)
w.RemoveAnnotation(cautils.ArmoJobIDPath)
w.RemoveAnnotation(cautils.ArmoJobParentPath)
w.RemoveAnnotation(cautils.ArmoJobActionPath)
}
func (w *Workload) RemoveArmoMetadata() {
w.RemoveArmoLabels()
w.RemoveArmoAnnotations()
}
func (w *Workload) RemoveArmoAnnotations() {
l := w.GetAnnotations()
if l != nil {
for k := range l {
if strings.HasPrefix(k, cautils.ArmoPrefix) {
w.RemoveAnnotation(k)
}
if strings.HasPrefix(k, cautils.CAPrefix) { // DEPRECATED
w.RemoveAnnotation(k)
}
}
}
lp := w.GetPodAnnotations()
if lp != nil {
for k := range lp {
if strings.HasPrefix(k, cautils.ArmoPrefix) {
w.RemovePodAnnotation(k)
}
if strings.HasPrefix(k, cautils.CAPrefix) { // DEPRECATED
w.RemovePodAnnotation(k)
}
}
}
}
func (w *Workload) RemoveArmoLabels() {
l := w.GetLabels()
if l != nil {
for k := range l {
if strings.HasPrefix(k, cautils.ArmoPrefix) {
w.RemoveLabel(k)
}
if strings.HasPrefix(k, cautils.CAPrefix) { // DEPRECATED
w.RemoveLabel(k)
}
}
}
lp := w.GetPodLabels()
if lp != nil {
for k := range lp {
if strings.HasPrefix(k, cautils.ArmoPrefix) {
w.RemovePodLabel(k)
}
if strings.HasPrefix(k, cautils.CAPrefix) { // DEPRECATED
w.RemovePodLabel(k)
}
}
}
}
func (w *Workload) RemoveUpdateTime() {
// remove from pod
w.RemovePodAnnotation(cautils.CAUpdate) // DEPRECATED
w.RemovePodAnnotation(cautils.ArmoUpdate)
// remove from workload
w.RemoveAnnotation(cautils.CAUpdate) // DEPRECATED
w.RemoveAnnotation(cautils.ArmoUpdate)
}
func (w *Workload) RemoveSecretData() {
w.RemoveAnnotation("kubectl.kubernetes.io/last-applied-configuration")
delete(w.workload, "data")
}
func (w *Workload) RemovePodStatus() {
delete(w.workload, "status")
}
func (w *Workload) RemoveResourceVersion() {
if _, ok := w.workload["metadata"]; !ok {
return
}
meta, _ := w.workload["metadata"].(map[string]interface{})
delete(meta, "resourceVersion")
}
func (w *Workload) RemoveLabel(key string) {
w.RemoveMetadata([]string{"metadata"}, "labels", key)
}
func (w *Workload) RemoveAnnotation(key string) {
w.RemoveMetadata([]string{"metadata"}, "annotations", key)
}
func (w *Workload) RemovePodAnnotation(key string) {
w.RemoveMetadata(PodMetadata(w.GetKind()), "annotations", key)
}
func (w *Workload) RemovePodLabel(key string) {
w.RemoveMetadata(PodMetadata(w.GetKind()), "labels", key)
}
func (w *Workload) RemoveMetadata(scope []string, metadata, key string) {
workload := w.workload
for i := range scope {
if _, ok := workload[scope[i]]; !ok {
return
}
workload, _ = workload[scope[i]].(map[string]interface{})
}
if _, ok := workload[metadata]; !ok {
return
}
labels, _ := workload[metadata].(map[string]interface{})
delete(labels, key)
}
// ========================================= SET =========================================
func (w *Workload) SetWorkload(workload map[string]interface{}) {
w.workload = workload
}
func (w *Workload) SetKind(kind string) {
w.workload["kind"] = kind
}
func (w *Workload) SetInject() {
w.SetPodLabel(cautils.ArmoAttach, cautils.BoolToString(true))
}
func (w *Workload) SetJobID(jobTracking apis.JobTracking) {
w.SetPodAnnotation(cautils.ArmoJobIDPath, jobTracking.JobID)
w.SetPodAnnotation(cautils.ArmoJobParentPath, jobTracking.ParentID)
w.SetPodAnnotation(cautils.ArmoJobActionPath, fmt.Sprintf("%d", jobTracking.LastActionNumber))
}
func (w *Workload) SetIgnore() {
w.SetPodLabel(cautils.ArmoAttach, cautils.BoolToString(false))
}
func (w *Workload) SetCompatible() {
w.SetPodAnnotation(cautils.ArmoCompatibleAnnotation, cautils.BoolToString(true))
}
func (w *Workload) SetIncompatible() {
w.SetPodAnnotation(cautils.ArmoCompatibleAnnotation, cautils.BoolToString(false))
}
func (w *Workload) SetReplaceheaders() {
w.SetPodAnnotation(cautils.ArmoReplaceheaders, cautils.BoolToString(true))
}
func (w *Workload) SetWlid(wlid string) {
w.SetPodAnnotation(cautils.ArmoWlid, wlid)
}
func (w *Workload) SetUpdateTime() {
w.SetPodAnnotation(cautils.ArmoUpdate, string(time.Now().UTC().Format("02-01-2006 15:04:05")))
}
func (w *Workload) SetNamespace(namespace string) {
w.SetMetadata([]string{"metadata"}, "namespace", namespace)
}
func (w *Workload) SetName(name string) {
w.SetMetadata([]string{"metadata"}, "name", name)
}
func (w *Workload) SetLabel(key, value string) {
w.SetMetadata([]string{"metadata", "labels"}, key, value)
}
func (w *Workload) SetPodLabel(key, value string) {
w.SetMetadata(append(PodMetadata(w.GetKind()), "labels"), key, value)
}
func (w *Workload) SetAnnotation(key, value string) {
w.SetMetadata([]string{"metadata", "annotations"}, key, value)
}
func (w *Workload) SetPodAnnotation(key, value string) {
w.SetMetadata(append(PodMetadata(w.GetKind()), "annotations"), key, value)
}
func (w *Workload) SetMetadata(scope []string, key string, val interface{}) {
workload := w.workload
for i := range scope {
if _, ok := workload[scope[i]]; !ok {
workload[scope[i]] = make(map[string]interface{})
}
workload, _ = workload[scope[i]].(map[string]interface{})
}
workload[key] = val
}
// ========================================= GET =========================================
func (w *Workload) GetWorkload() map[string]interface{} {
return w.workload
}
func (w *Workload) GetNamespace() string {
if v, ok := InspectWorkload(w.workload, "metadata", "namespace"); ok {
return v.(string)
}
return ""
}
func (w *Workload) GetName() string {
if v, ok := InspectWorkload(w.workload, "metadata", "name"); ok {
return v.(string)
}
return ""
}
func (w *Workload) GetApiVersion() string {
if v, ok := InspectWorkload(w.workload, "apiVersion"); ok {
return v.(string)
}
return ""
}
func (w *Workload) GetVersion() string {
apiVersion := w.GetApiVersion()
splitted := strings.Split(apiVersion, "/")
if len(splitted) == 1 {
return splitted[0]
} else if len(splitted) == 2 {
return splitted[1]
}
return ""
}
func (w *Workload) GetGroup() string {
apiVersion := w.GetApiVersion()
splitted := strings.Split(apiVersion, "/")
if len(splitted) == 2 {
return splitted[0]
}
return ""
}
func (w *Workload) GetGenerateName() string {
if v, ok := InspectWorkload(w.workload, "metadata", "generateName"); ok {
return v.(string)
}
return ""
}
func (w *Workload) GetReplicas() int {
if v, ok := InspectWorkload(w.workload, "spec", "replicas"); ok {
replicas, isok := v.(float64)
if isok {
return int(replicas)
}
}
return 1
}
func (w *Workload) GetKind() string {
if v, ok := InspectWorkload(w.workload, "kind"); ok {
return v.(string)
}
return ""
}
func (w *Workload) GetSelector() (*metav1.LabelSelector, error) {
selector := &metav1.LabelSelector{}
if v, ok := InspectWorkload(w.workload, "spec", "selector", "matchLabels"); ok && v != nil {
b, err := json.Marshal(v)
if err != nil {
return selector, err
}
if err := json.Unmarshal(b, selector); err != nil {
return selector, err
}
return selector, nil
}
return selector, nil
}
func (w *Workload) GetAnnotation(annotation string) (string, bool) {
if v, ok := InspectWorkload(w.workload, "metadata", "annotations", annotation); ok {
return v.(string), ok
}
return "", false
}
func (w *Workload) GetLabel(label string) (string, bool) {
if v, ok := InspectWorkload(w.workload, "metadata", "labels", label); ok {
return v.(string), ok
}
return "", false
}
func (w *Workload) GetPodLabel(label string) (string, bool) {
if v, ok := InspectWorkload(w.workload, append(PodMetadata(w.GetKind()), "labels", label)...); ok && v != nil {
return v.(string), ok
}
return "", false
}
func (w *Workload) GetLabels() map[string]string {
if v, ok := InspectWorkload(w.workload, "metadata", "labels"); ok && v != nil {
labels := make(map[string]string)
for k, i := range v.(map[string]interface{}) {
labels[k] = i.(string)
}
return labels
}
return nil
}
// GetInnerLabels - DEPRECATED
func (w *Workload) GetInnerLabels() map[string]string {
return w.GetPodLabels()
}
func (w *Workload) GetPodLabels() map[string]string {
if v, ok := InspectWorkload(w.workload, append(PodMetadata(w.GetKind()), "labels")...); ok && v != nil {
labels := make(map[string]string)
for k, i := range v.(map[string]interface{}) {
labels[k] = i.(string)
}
return labels
}
return nil
}
// GetInnerAnnotations - DEPRECATED
func (w *Workload) GetInnerAnnotations() map[string]string {
return w.GetPodAnnotations()
}
// GetPodAnnotations
func (w *Workload) GetPodAnnotations() map[string]string {
if v, ok := InspectWorkload(w.workload, append(PodMetadata(w.GetKind()), "annotations")...); ok && v != nil {
annotations := make(map[string]string)
for k, i := range v.(map[string]interface{}) {
annotations[k] = fmt.Sprintf("%v", i)
}
return annotations
}
return nil
}
// GetInnerAnnotation DEPRECATED
func (w *Workload) GetInnerAnnotation(annotation string) (string, bool) {
return w.GetPodAnnotation(annotation)
}
func (w *Workload) GetPodAnnotation(annotation string) (string, bool) {
if v, ok := InspectWorkload(w.workload, append(PodMetadata(w.GetKind()), "annotations", annotation)...); ok && v != nil {
return v.(string), ok
}
return "", false
}
func (w *Workload) GetAnnotations() map[string]string {
if v, ok := InspectWorkload(w.workload, "metadata", "annotations"); ok && v != nil {
annotations := make(map[string]string)
for k, i := range v.(map[string]interface{}) {
annotations[k] = fmt.Sprintf("%v", i)
}
return annotations
}
return nil
}
// GetVolumes -
func (w *Workload) GetVolumes() ([]corev1.Volume, error) {
volumes := []corev1.Volume{}
interVolumes, _ := InspectWorkload(w.workload, append(PodSpec(w.GetKind()), "volumes")...)
if interVolumes == nil {
return volumes, nil
}
volumesBytes, err := json.Marshal(interVolumes)
if err != nil {
return volumes, err
}
err = json.Unmarshal(volumesBytes, &volumes)
return volumes, err
}
func (w *Workload) GetServiceAccountName() string {
if v, ok := InspectWorkload(w.workload, append(PodSpec(w.GetKind()), "serviceAccountName")...); ok && v != nil {
return v.(string)
}
return ""
}
func (w *Workload) GetPodSpec() (*corev1.PodSpec, error) {
podSpec := &corev1.PodSpec{}
podSpecRaw, _ := InspectWorkload(w.workload, PodSpec(w.GetKind())...)
if podSpecRaw == nil {
return podSpec, fmt.Errorf("no PodSpec for workload: %v", w)
}
b, err := json.Marshal(podSpecRaw)
if err != nil {
return podSpec, err
}
err = json.Unmarshal(b, podSpec)
return podSpec, err
}
func (w *Workload) GetImagePullSecret() ([]corev1.LocalObjectReference, error) {
imgPullSecrets := []corev1.LocalObjectReference{}
iImgPullSecrets, _ := InspectWorkload(w.workload, append(PodSpec(w.GetKind()), "imagePullSecrets")...)
b, err := json.Marshal(iImgPullSecrets)
if err != nil {
return imgPullSecrets, err
}
err = json.Unmarshal(b, &imgPullSecrets)
return imgPullSecrets, err
}
// GetContainers -
func (w *Workload) GetContainers() ([]corev1.Container, error) {
containers := []corev1.Container{}
interContainers, _ := InspectWorkload(w.workload, append(PodSpec(w.GetKind()), "containers")...)
if interContainers == nil {
return containers, nil
}
containersBytes, err := json.Marshal(interContainers)
if err != nil {
return containers, err
}
err = json.Unmarshal(containersBytes, &containers)
return containers, err
}
// GetInitContainers -
func (w *Workload) GetInitContainers() ([]corev1.Container, error) {
containers := []corev1.Container{}
interContainers, _ := InspectWorkload(w.workload, append(PodSpec(w.GetKind()), "initContainers")...)
if interContainers == nil {
return containers, nil
}
containersBytes, err := json.Marshal(interContainers)
if err != nil {
return containers, err
}
err = json.Unmarshal(containersBytes, &containers)
return containers, err
}
// GetOwnerReferences -
func (w *Workload) GetOwnerReferences() ([]metav1.OwnerReference, error) {
ownerReferences := []metav1.OwnerReference{}
interOwnerReferences, ok := InspectWorkload(w.workload, "metadata", "ownerReferences")
if !ok {
return ownerReferences, nil
}
ownerReferencesBytes, err := json.Marshal(interOwnerReferences)
if err != nil {
return ownerReferences, err
}
err = json.Unmarshal(ownerReferencesBytes, &ownerReferences)
if err != nil {
return ownerReferences, err
}
return ownerReferences, nil
}
func (w *Workload) GetResourceVersion() string {
if v, ok := InspectWorkload(w.workload, "metadata", "resourceVersion"); ok {
return v.(string)
}
return ""
}
func (w *Workload) GetUID() string {
if v, ok := InspectWorkload(w.workload, "metadata", "uid"); ok {
return v.(string)
}
return ""
}
func (w *Workload) GetWlid() string {
if wlid, ok := w.GetAnnotation(cautils.ArmoWlid); ok {
return wlid
}
return ""
}
func (w *Workload) GetJobID() *apis.JobTracking {
jobTracking := apis.JobTracking{}
if job, ok := w.GetPodAnnotation(cautils.ArmoJobIDPath); ok {
jobTracking.JobID = job
}
if parent, ok := w.GetPodAnnotation(cautils.ArmoJobParentPath); ok {
jobTracking.ParentID = parent
}
if action, ok := w.GetPodAnnotation(cautils.ArmoJobActionPath); ok {
if i, err := strconv.Atoi(action); err == nil {
jobTracking.LastActionNumber = i
}
}
if jobTracking.LastActionNumber == 0 { // start the counter at 1
jobTracking.LastActionNumber = 1
}
return &jobTracking
}
// func (w *Workload) GetJobID() string {
// if status, ok := w.GetAnnotation(cautils.ArmoJobID); ok {
// return status
// }
// return ""
// }
// ========================================= IS =========================================
func (w *Workload) IsInject() bool {
return w.IsAttached()
}
func (w *Workload) IsIgnore() bool {
if attach := cautils.IsAttached(w.GetPodLabels()); attach != nil {
return !(*attach)
}
if attach := cautils.IsAttached(w.GetLabels()); attach != nil {
return !(*attach)
}
return false
}
func (w *Workload) IsCompatible() bool {
if c, ok := w.GetPodAnnotation(cautils.ArmoCompatibleAnnotation); ok {
return cautils.StringToBool(c)
}
if c, ok := w.GetAnnotation(cautils.ArmoCompatibleAnnotation); ok {
return cautils.StringToBool(c)
}
return false
}
func (w *Workload) IsIncompatible() bool {
if c, ok := w.GetPodAnnotation(cautils.ArmoCompatibleAnnotation); ok {
return !cautils.StringToBool(c)
}
if c, ok := w.GetAnnotation(cautils.ArmoCompatibleAnnotation); ok {
return !cautils.StringToBool(c)
}
return false
}
func (w *Workload) IsAttached() bool {
if attach := cautils.IsAttached(w.GetPodLabels()); attach != nil {
return *attach
}
if attach := cautils.IsAttached(w.GetLabels()); attach != nil {
return *attach
}
return false
}
func (w *Workload) IsReplaceheaders() bool {
if c, ok := w.GetPodAnnotation(cautils.ArmoReplaceheaders); ok {
return cautils.StringToBool(c)
}
return false
}
// ======================================= UTILS =========================================
// InspectWorkload -
func InspectWorkload(workload interface{}, scopes ...string) (val interface{}, k bool) {
val, k = nil, false
if len(scopes) == 0 {
if workload != nil {
return workload, true
}
return nil, false
}
if data, ok := workload.(map[string]interface{}); ok {
val, k = InspectWorkload(data[scopes[0]], scopes[1:]...)
}
return val, k
}
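A sketch of InspectWorkload, the generic map walker the getters above are built on: each scope argument descends one level into the unstructured object.
// exampleInspect reads metadata.name via the generic walker.
func exampleInspect(w *Workload) (string, bool) {
	if v, ok := InspectWorkload(w.GetWorkload(), "metadata", "name"); ok {
		name, isString := v.(string)
		return name, isString
	}
	return "", false
}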

View File

@@ -1,155 +0,0 @@
package k8sinterface
import (
"testing"
)
// ========================================= IS =========================================
func TestLabels(t *testing.T) {
w := `{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{"deployment.kubernetes.io/revision":"1"},"creationTimestamp":"2021-05-03T13:10:32Z","generation":1,"labels":{"app":"demoservice-server","cyberarmor.inject":"true"},"managedFields":[{"apiVersion":"apps/v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:labels":{".":{},"f:app":{},"f:cyberarmor.inject":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:app":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"demoservice\"}":{".":{},"f:env":{".":{},"k:{\"name\":\"ARMO_TEST_NAME\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"CAA_ENABLE_CRASH_REPORTER\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"DEMO_FOLDERS\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"SERVER_PORT\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"SLEEP_DURATION\"}":{".":{},"f:name":{},"f:value":{}}},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:ports":{".":{},"k:{\"containerPort\":8089,\"protocol\":\"TCP\"}":{".":{},"f:containerPort":{},"f:protocol":{}}},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}},"manager":"OpenAPI-Generator","operation":"Update","time":"2021-05-03T13:10:32Z"},{"apiVersion":"apps/v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:availableReplicas":{},"f:conditions":{".":{},"k:{\"type\":\"Available\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{},"f:updatedReplicas":{}}},"manager":"kube-controller-manager","operation":"Update","time":"2021-05-03T13:52:58Z"}],"name":"demoservice-server","namespace":"default","resourceVersion":"1016043","uid":"e9e8a3e9-6cb4-4301-ace1-2c0cef3bd61e"},"spec":{"progressDeadlineSeconds":600,"replicas":1,"revisionHistoryLimit":10,"selector":{"matchLabels":{"app":"demoservice-server"}},"strategy":{"rollingUpdate":{"maxSurge":"25%","maxUnavailable":"25%"},"type":"RollingUpdate"},"template":{"metadata":{"creationTimestamp":null,"labels":{"app":"demoservice-server"}},"spec":{"containers":[{"env":[{"name":"SERVER_PORT","value":"8089"},{"name":"SLEEP_DURATION","value":"1"},{"name":"DEMO_FOLDERS","value":"/app"},{"name":"ARMO_TEST_NAME","value":"auto_attach_deployment"},{"name":"CAA_ENABLE_CRASH_REPORTER","value":"1"}],"image":"quay.io/armosec/demoservice:v25","imagePullPolicy":"IfNotPresent","name":"demoservice","ports":[{"containerPort":8089,"protocol":"TCP"}],"resources":{},"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File"}],"dnsPolicy":"ClusterFirst","restartPolicy":"Always","schedulerName":"default-scheduler","securityContext":{},"terminationGracePeriodSeconds":30}}},"status":{"availableReplicas":1,"conditions":[{"lastTransitionTime":"2021-05-03T13:10:32Z","lastUpdateTime":"2021-05-03T13:10:37Z","message":"ReplicaSet \"demoservice-server-7d478b6998\" has successfully 
progressed.","reason":"NewReplicaSetAvailable","status":"True","type":"Progressing"},{"lastTransitionTime":"2021-05-03T13:52:58Z","lastUpdateTime":"2021-05-03T13:52:58Z","message":"Deployment has minimum availability.","reason":"MinimumReplicasAvailable","status":"True","type":"Available"}],"observedGeneration":1,"readyReplicas":1,"replicas":1,"updatedReplicas":1}}`
workload, err := NewWorkload([]byte(w))
if err != nil {
t.Errorf(err.Error())
}
if workload.GetKind() != "Deployment" {
t.Errorf("wrong kind")
}
if workload.GetNamespace() != "default" {
t.Errorf("wrong namespace")
}
if workload.GetName() != "demoservice-server" {
t.Errorf("wrong name")
}
if !workload.IsInject() {
t.Errorf("expect to find inject label")
}
if workload.IsIgnore() {
t.Errorf("expect to find ignore label")
}
}
func TestSetNamespace(t *testing.T) {
w := `{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"name":"demoservice-server"},"spec":{"replicas":1,"selector":{"matchLabels":{"app":"demoservice-server"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app":"demoservice-server"}},"spec":{"containers":[{"env":[{"name":"SERVER_PORT","value":"8089"},{"name":"SLEEP_DURATION","value":"1"},{"name":"DEMO_FOLDERS","value":"/app"},{"name":"ARMO_TEST_NAME","value":"auto_attach_deployment"},{"name":"CAA_ENABLE_CRASH_REPORTER","value":"1"}],"image":"quay.io/armosec/demoservice:v25","imagePullPolicy":"IfNotPresent","name":"demoservice","ports":[{"containerPort":8089,"protocol":"TCP"}],"resources":{},"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File"}],"dnsPolicy":"ClusterFirst","restartPolicy":"Always","schedulerName":"default-scheduler","securityContext":{},"terminationGracePeriodSeconds":30}}}}`
workload, err := NewWorkload([]byte(w))
if err != nil {
t.Errorf(err.Error())
}
workload.SetNamespace("default")
if workload.GetNamespace() != "default" {
t.Errorf("wrong namespace")
}
}
func TestSetLabels(t *testing.T) {
w := `{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{"deployment.kubernetes.io/revision":"1"},"creationTimestamp":"2021-05-03T13:10:32Z","generation":1,"managedFields":[{"apiVersion":"apps/v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:labels":{".":{},"f:app":{},"f:cyberarmor.inject":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:app":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"demoservice\"}":{".":{},"f:env":{".":{},"k:{\"name\":\"ARMO_TEST_NAME\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"CAA_ENABLE_CRASH_REPORTER\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"DEMO_FOLDERS\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"SERVER_PORT\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"SLEEP_DURATION\"}":{".":{},"f:name":{},"f:value":{}}},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:ports":{".":{},"k:{\"containerPort\":8089,\"protocol\":\"TCP\"}":{".":{},"f:containerPort":{},"f:protocol":{}}},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}},"manager":"OpenAPI-Generator","operation":"Update","time":"2021-05-03T13:10:32Z"},{"apiVersion":"apps/v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:availableReplicas":{},"f:conditions":{".":{},"k:{\"type\":\"Available\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{},"f:updatedReplicas":{}}},"manager":"kube-controller-manager","operation":"Update","time":"2021-05-03T13:52:58Z"}],"name":"demoservice-server","namespace":"default","resourceVersion":"1016043","uid":"e9e8a3e9-6cb4-4301-ace1-2c0cef3bd61e"},"spec":{"progressDeadlineSeconds":600,"replicas":1,"revisionHistoryLimit":10,"selector":{"matchLabels":{"app":"demoservice-server"}},"strategy":{"rollingUpdate":{"maxSurge":"25%","maxUnavailable":"25%"},"type":"RollingUpdate"},"template":{"metadata":{"creationTimestamp":null,"labels":{"app":"demoservice-server"}},"spec":{"containers":[{"env":[{"name":"SERVER_PORT","value":"8089"},{"name":"SLEEP_DURATION","value":"1"},{"name":"DEMO_FOLDERS","value":"/app"},{"name":"ARMO_TEST_NAME","value":"auto_attach_deployment"},{"name":"CAA_ENABLE_CRASH_REPORTER","value":"1"}],"image":"quay.io/armosec/demoservice:v25","imagePullPolicy":"IfNotPresent","name":"demoservice","ports":[{"containerPort":8089,"protocol":"TCP"}],"resources":{},"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File"}],"dnsPolicy":"ClusterFirst","restartPolicy":"Always","schedulerName":"default-scheduler","securityContext":{},"terminationGracePeriodSeconds":30}}},"status":{"availableReplicas":1,"conditions":[{"lastTransitionTime":"2021-05-03T13:10:32Z","lastUpdateTime":"2021-05-03T13:10:37Z","message":"ReplicaSet \"demoservice-server-7d478b6998\" has successfully 
progressed.","reason":"NewReplicaSetAvailable","status":"True","type":"Progressing"},{"lastTransitionTime":"2021-05-03T13:52:58Z","lastUpdateTime":"2021-05-03T13:52:58Z","message":"Deployment has minimum availability.","reason":"MinimumReplicasAvailable","status":"True","type":"Available"}],"observedGeneration":1,"readyReplicas":1,"replicas":1,"updatedReplicas":1}}`
workload, err := NewWorkload([]byte(w))
if err != nil {
t.Errorf(err.Error())
}
workload.SetLabel("bla", "daa")
v, ok := workload.GetLabel("bla")
if !ok || v != "daa" {
t.Errorf("expect to find label")
}
workload.RemoveLabel("bla")
v2, ok2 := workload.GetLabel("bla")
if ok2 || v2 == "daa" {
t.Errorf("label not deleted")
}
}
func TestSetAnnotations(t *testing.T) {
w := `{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{"deployment.kubernetes.io/revision":"1"},"creationTimestamp":"2021-05-03T13:10:32Z","generation":1,"managedFields":[{"apiVersion":"apps/v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:labels":{".":{},"f:app":{},"f:cyberarmor.inject":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:app":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"demoservice\"}":{".":{},"f:env":{".":{},"k:{\"name\":\"ARMO_TEST_NAME\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"CAA_ENABLE_CRASH_REPORTER\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"DEMO_FOLDERS\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"SERVER_PORT\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"SLEEP_DURATION\"}":{".":{},"f:name":{},"f:value":{}}},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:ports":{".":{},"k:{\"containerPort\":8089,\"protocol\":\"TCP\"}":{".":{},"f:containerPort":{},"f:protocol":{}}},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}},"manager":"OpenAPI-Generator","operation":"Update","time":"2021-05-03T13:10:32Z"},{"apiVersion":"apps/v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:availableReplicas":{},"f:conditions":{".":{},"k:{\"type\":\"Available\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{},"f:updatedReplicas":{}}},"manager":"kube-controller-manager","operation":"Update","time":"2021-05-03T13:52:58Z"}],"name":"demoservice-server","namespace":"default","resourceVersion":"1016043","uid":"e9e8a3e9-6cb4-4301-ace1-2c0cef3bd61e"},"spec":{"progressDeadlineSeconds":600,"replicas":1,"revisionHistoryLimit":10,"selector":{"matchLabels":{"app":"demoservice-server"}},"strategy":{"rollingUpdate":{"maxSurge":"25%","maxUnavailable":"25%"},"type":"RollingUpdate"},"template":{"metadata":{"creationTimestamp":null,"labels":{"app":"demoservice-server"}},"spec":{"containers":[{"env":[{"name":"SERVER_PORT","value":"8089"},{"name":"SLEEP_DURATION","value":"1"},{"name":"DEMO_FOLDERS","value":"/app"},{"name":"ARMO_TEST_NAME","value":"auto_attach_deployment"},{"name":"CAA_ENABLE_CRASH_REPORTER","value":"1"}],"image":"quay.io/armosec/demoservice:v25","imagePullPolicy":"IfNotPresent","name":"demoservice","ports":[{"containerPort":8089,"protocol":"TCP"}],"resources":{},"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File"}],"dnsPolicy":"ClusterFirst","restartPolicy":"Always","schedulerName":"default-scheduler","securityContext":{},"terminationGracePeriodSeconds":30}}},"status":{"availableReplicas":1,"conditions":[{"lastTransitionTime":"2021-05-03T13:10:32Z","lastUpdateTime":"2021-05-03T13:10:37Z","message":"ReplicaSet \"demoservice-server-7d478b6998\" has successfully 
progressed.","reason":"NewReplicaSetAvailable","status":"True","type":"Progressing"},{"lastTransitionTime":"2021-05-03T13:52:58Z","lastUpdateTime":"2021-05-03T13:52:58Z","message":"Deployment has minimum availability.","reason":"MinimumReplicasAvailable","status":"True","type":"Available"}],"observedGeneration":1,"readyReplicas":1,"replicas":1,"updatedReplicas":1}}`
workload, err := NewWorkload([]byte(w))
if err != nil {
t.Errorf(err.Error())
}
workload.SetAnnotation("bla", "daa")
v, ok := workload.GetAnnotation("bla")
if !ok || v != "daa" {
t.Errorf("expect to find annotation")
}
workload.RemoveAnnotation("bla")
v2, ok2 := workload.GetAnnotation("bla")
if ok2 || v2 == "daa" {
t.Errorf("annotation not deleted")
}
}
func TestSetPodLabels(t *testing.T) {
w := `{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{"deployment.kubernetes.io/revision":"1"},"creationTimestamp":"2021-05-03T13:10:32Z","generation":1,"managedFields":[{"apiVersion":"apps/v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:labels":{".":{},"f:app":{},"f:cyberarmor.inject":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:app":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"demoservice\"}":{".":{},"f:env":{".":{},"k:{\"name\":\"ARMO_TEST_NAME\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"CAA_ENABLE_CRASH_REPORTER\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"DEMO_FOLDERS\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"SERVER_PORT\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"SLEEP_DURATION\"}":{".":{},"f:name":{},"f:value":{}}},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:ports":{".":{},"k:{\"containerPort\":8089,\"protocol\":\"TCP\"}":{".":{},"f:containerPort":{},"f:protocol":{}}},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}},"manager":"OpenAPI-Generator","operation":"Update","time":"2021-05-03T13:10:32Z"},{"apiVersion":"apps/v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:availableReplicas":{},"f:conditions":{".":{},"k:{\"type\":\"Available\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{},"f:updatedReplicas":{}}},"manager":"kube-controller-manager","operation":"Update","time":"2021-05-03T13:52:58Z"}],"name":"demoservice-server","namespace":"default","resourceVersion":"1016043","uid":"e9e8a3e9-6cb4-4301-ace1-2c0cef3bd61e"},"spec":{"progressDeadlineSeconds":600,"replicas":1,"revisionHistoryLimit":10,"selector":{"matchLabels":{"app":"demoservice-server"}},"strategy":{"rollingUpdate":{"maxSurge":"25%","maxUnavailable":"25%"},"type":"RollingUpdate"},"template":{"metadata":{"creationTimestamp":null,"labels":{"app":"demoservice-server"}},"spec":{"containers":[{"env":[{"name":"SERVER_PORT","value":"8089"},{"name":"SLEEP_DURATION","value":"1"},{"name":"DEMO_FOLDERS","value":"/app"},{"name":"ARMO_TEST_NAME","value":"auto_attach_deployment"},{"name":"CAA_ENABLE_CRASH_REPORTER","value":"1"}],"image":"quay.io/armosec/demoservice:v25","imagePullPolicy":"IfNotPresent","name":"demoservice","ports":[{"containerPort":8089,"protocol":"TCP"}],"resources":{},"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File"}],"dnsPolicy":"ClusterFirst","restartPolicy":"Always","schedulerName":"default-scheduler","securityContext":{},"terminationGracePeriodSeconds":30}}},"status":{"availableReplicas":1,"conditions":[{"lastTransitionTime":"2021-05-03T13:10:32Z","lastUpdateTime":"2021-05-03T13:10:37Z","message":"ReplicaSet \"demoservice-server-7d478b6998\" has successfully 
progressed.","reason":"NewReplicaSetAvailable","status":"True","type":"Progressing"},{"lastTransitionTime":"2021-05-03T13:52:58Z","lastUpdateTime":"2021-05-03T13:52:58Z","message":"Deployment has minimum availability.","reason":"MinimumReplicasAvailable","status":"True","type":"Available"}],"observedGeneration":1,"readyReplicas":1,"replicas":1,"updatedReplicas":1}}`
workload, err := NewWorkload([]byte(w))
if err != nil {
t.Errorf(err.Error())
}
workload.SetPodLabel("bla", "daa")
v, ok := workload.GetPodLabel("bla")
if !ok || v != "daa" {
t.Errorf("expect to find label")
}
workload.RemovePodLabel("bla")
v2, ok2 := workload.GetPodLabel("bla")
if ok2 || v2 == "daa" {
t.Errorf("label not deleted")
}
}
func TestRemoveArmo(t *testing.T) {
w := `{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{"deployment.kubernetes.io/revision":"1"},"creationTimestamp":"2021-05-03T13:10:32Z","generation":1,"managedFields":[{"apiVersion":"apps/v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:labels":{".":{},"f:app":{},"f:cyberarmor.inject":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:app":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"demoservice\"}":{".":{},"f:env":{".":{},"k:{\"name\":\"ARMO_TEST_NAME\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"CAA_ENABLE_CRASH_REPORTER\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"DEMO_FOLDERS\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"SERVER_PORT\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"SLEEP_DURATION\"}":{".":{},"f:name":{},"f:value":{}}},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:ports":{".":{},"k:{\"containerPort\":8089,\"protocol\":\"TCP\"}":{".":{},"f:containerPort":{},"f:protocol":{}}},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}},"manager":"OpenAPI-Generator","operation":"Update","time":"2021-05-03T13:10:32Z"},{"apiVersion":"apps/v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:availableReplicas":{},"f:conditions":{".":{},"k:{\"type\":\"Available\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{},"f:updatedReplicas":{}}},"manager":"kube-controller-manager","operation":"Update","time":"2021-05-03T13:52:58Z"}],"name":"demoservice-server","namespace":"default","resourceVersion":"1016043","uid":"e9e8a3e9-6cb4-4301-ace1-2c0cef3bd61e"},"spec":{"progressDeadlineSeconds":600,"replicas":1,"revisionHistoryLimit":10,"selector":{"matchLabels":{"app":"demoservice-server"}},"strategy":{"rollingUpdate":{"maxSurge":"25%","maxUnavailable":"25%"},"type":"RollingUpdate"},"template":{"metadata":{"creationTimestamp":null,"labels":{"app":"demoservice-server", "armo.attach": "true"}},"spec":{"containers":[{"env":[{"name":"SERVER_PORT","value":"8089"},{"name":"SLEEP_DURATION","value":"1"},{"name":"DEMO_FOLDERS","value":"/app"},{"name":"ARMO_TEST_NAME","value":"auto_attach_deployment"},{"name":"CAA_ENABLE_CRASH_REPORTER","value":"1"}],"image":"quay.io/armosec/demoservice:v25","imagePullPolicy":"IfNotPresent","name":"demoservice","ports":[{"containerPort":8089,"protocol":"TCP"}],"resources":{},"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File"}],"dnsPolicy":"ClusterFirst","restartPolicy":"Always","schedulerName":"default-scheduler","securityContext":{},"terminationGracePeriodSeconds":30}}},"status":{"availableReplicas":1,"conditions":[{"lastTransitionTime":"2021-05-03T13:10:32Z","lastUpdateTime":"2021-05-03T13:10:37Z","message":"ReplicaSet \"demoservice-server-7d478b6998\" has successfully 
progressed.","reason":"NewReplicaSetAvailable","status":"True","type":"Progressing"},{"lastTransitionTime":"2021-05-03T13:52:58Z","lastUpdateTime":"2021-05-03T13:52:58Z","message":"Deployment has minimum availability.","reason":"MinimumReplicasAvailable","status":"True","type":"Available"}],"observedGeneration":1,"readyReplicas":1,"replicas":1,"updatedReplicas":1}}`
workload, err := NewWorkload([]byte(w))
if err != nil {
t.Errorf(err.Error())
}
if !workload.IsAttached() {
t.Errorf("expect to be attached")
}
workload.RemoveArmoMetadata()
if workload.IsAttached() {
t.Errorf("expect to be clear")
}
}
func TestSetWlid(t *testing.T) {
w := `{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{"deployment.kubernetes.io/revision":"1"},"creationTimestamp":"2021-05-03T13:10:32Z","generation":1,"managedFields":[{"apiVersion":"apps/v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:labels":{".":{},"f:app":{},"f:cyberarmor.inject":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:app":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"demoservice\"}":{".":{},"f:env":{".":{},"k:{\"name\":\"ARMO_TEST_NAME\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"CAA_ENABLE_CRASH_REPORTER\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"DEMO_FOLDERS\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"SERVER_PORT\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"SLEEP_DURATION\"}":{".":{},"f:name":{},"f:value":{}}},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:ports":{".":{},"k:{\"containerPort\":8089,\"protocol\":\"TCP\"}":{".":{},"f:containerPort":{},"f:protocol":{}}},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}},"manager":"OpenAPI-Generator","operation":"Update","time":"2021-05-03T13:10:32Z"},{"apiVersion":"apps/v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:availableReplicas":{},"f:conditions":{".":{},"k:{\"type\":\"Available\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{},"f:updatedReplicas":{}}},"manager":"kube-controller-manager","operation":"Update","time":"2021-05-03T13:52:58Z"}],"name":"demoservice-server","namespace":"default","resourceVersion":"1016043","uid":"e9e8a3e9-6cb4-4301-ace1-2c0cef3bd61e"},"spec":{"progressDeadlineSeconds":600,"replicas":1,"revisionHistoryLimit":10,"selector":{"matchLabels":{"app":"demoservice-server"}},"strategy":{"rollingUpdate":{"maxSurge":"25%","maxUnavailable":"25%"},"type":"RollingUpdate"},"template":{"metadata":{"creationTimestamp":null,"labels":{"app":"demoservice-server"}},"spec":{"containers":[{"env":[{"name":"SERVER_PORT","value":"8089"},{"name":"SLEEP_DURATION","value":"1"},{"name":"DEMO_FOLDERS","value":"/app"},{"name":"ARMO_TEST_NAME","value":"auto_attach_deployment"},{"name":"CAA_ENABLE_CRASH_REPORTER","value":"1"}],"image":"quay.io/armosec/demoservice:v25","imagePullPolicy":"IfNotPresent","name":"demoservice","ports":[{"containerPort":8089,"protocol":"TCP"}],"resources":{},"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File"}],"dnsPolicy":"ClusterFirst","restartPolicy":"Always","schedulerName":"default-scheduler","securityContext":{},"terminationGracePeriodSeconds":30}}},"status":{"availableReplicas":1,"conditions":[{"lastTransitionTime":"2021-05-03T13:10:32Z","lastUpdateTime":"2021-05-03T13:10:37Z","message":"ReplicaSet \"demoservice-server-7d478b6998\" has successfully 
progressed.","reason":"NewReplicaSetAvailable","status":"True","type":"Progressing"},{"lastTransitionTime":"2021-05-03T13:52:58Z","lastUpdateTime":"2021-05-03T13:52:58Z","message":"Deployment has minimum availability.","reason":"MinimumReplicasAvailable","status":"True","type":"Available"}],"observedGeneration":1,"readyReplicas":1,"replicas":1,"updatedReplicas":1}}`
workload, err := NewWorkload([]byte(w))
if err != nil {
t.Errorf(err.Error())
}
workload.SetWlid("wlid://bla")
// t.Errorf(workload.Json())
}
func TestGetResourceVersion(t *testing.T) {
w := `{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{"deployment.kubernetes.io/revision":"1"},"creationTimestamp":"2021-05-03T13:10:32Z","generation":1,"managedFields":[{"apiVersion":"apps/v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:labels":{".":{},"f:app":{},"f:cyberarmor.inject":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:app":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"demoservice\"}":{".":{},"f:env":{".":{},"k:{\"name\":\"ARMO_TEST_NAME\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"CAA_ENABLE_CRASH_REPORTER\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"DEMO_FOLDERS\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"SERVER_PORT\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"SLEEP_DURATION\"}":{".":{},"f:name":{},"f:value":{}}},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:ports":{".":{},"k:{\"containerPort\":8089,\"protocol\":\"TCP\"}":{".":{},"f:containerPort":{},"f:protocol":{}}},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}},"manager":"OpenAPI-Generator","operation":"Update","time":"2021-05-03T13:10:32Z"},{"apiVersion":"apps/v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:availableReplicas":{},"f:conditions":{".":{},"k:{\"type\":\"Available\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{},"f:updatedReplicas":{}}},"manager":"kube-controller-manager","operation":"Update","time":"2021-05-03T13:52:58Z"}],"name":"demoservice-server","namespace":"default","resourceVersion":"1016043","uid":"e9e8a3e9-6cb4-4301-ace1-2c0cef3bd61e"},"spec":{"progressDeadlineSeconds":600,"replicas":1,"revisionHistoryLimit":10,"selector":{"matchLabels":{"app":"demoservice-server"}},"strategy":{"rollingUpdate":{"maxSurge":"25%","maxUnavailable":"25%"},"type":"RollingUpdate"},"template":{"metadata":{"creationTimestamp":null,"labels":{"app":"demoservice-server"}},"spec":{"containers":[{"env":[{"name":"SERVER_PORT","value":"8089"},{"name":"SLEEP_DURATION","value":"1"},{"name":"DEMO_FOLDERS","value":"/app"},{"name":"ARMO_TEST_NAME","value":"auto_attach_deployment"},{"name":"CAA_ENABLE_CRASH_REPORTER","value":"1"}],"image":"quay.io/armosec/demoservice:v25","imagePullPolicy":"IfNotPresent","name":"demoservice","ports":[{"containerPort":8089,"protocol":"TCP"}],"resources":{},"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File"}],"dnsPolicy":"ClusterFirst","restartPolicy":"Always","schedulerName":"default-scheduler","securityContext":{},"terminationGracePeriodSeconds":30}}},"status":{"availableReplicas":1,"conditions":[{"lastTransitionTime":"2021-05-03T13:10:32Z","lastUpdateTime":"2021-05-03T13:10:37Z","message":"ReplicaSet \"demoservice-server-7d478b6998\" has successfully progressed.","reason":"NewReplicaSetAvailable","status":"True","type":"Progressing"},{"lastTransitionTime":"2021-05-03T13:52:58Z","lastUpdateTime":"2021-05-03T13:52:58Z","message":"Deployment has minimum availability.","reason":"MinimumReplicasAvailable","status":"True","type":"Available"}],"observedGeneration":1,"readyReplicas":1,"replicas":1,"updatedReplicas":1}}`
workload, err := NewWorkload([]byte(w))
if err != nil {
t.Fatal(err)
}
if workload.GetResourceVersion() != "1016043" {
t.Errorf("wrong resourceVersion")
}
}
func TestGetUID(t *testing.T) {
w := `{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{"deployment.kubernetes.io/revision":"1"},"creationTimestamp":"2021-05-03T13:10:32Z","generation":1,"managedFields":[{"apiVersion":"apps/v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:labels":{".":{},"f:app":{},"f:cyberarmor.inject":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:app":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"demoservice\"}":{".":{},"f:env":{".":{},"k:{\"name\":\"ARMO_TEST_NAME\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"CAA_ENABLE_CRASH_REPORTER\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"DEMO_FOLDERS\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"SERVER_PORT\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"SLEEP_DURATION\"}":{".":{},"f:name":{},"f:value":{}}},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:ports":{".":{},"k:{\"containerPort\":8089,\"protocol\":\"TCP\"}":{".":{},"f:containerPort":{},"f:protocol":{}}},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}},"manager":"OpenAPI-Generator","operation":"Update","time":"2021-05-03T13:10:32Z"},{"apiVersion":"apps/v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:availableReplicas":{},"f:conditions":{".":{},"k:{\"type\":\"Available\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{},"f:updatedReplicas":{}}},"manager":"kube-controller-manager","operation":"Update","time":"2021-05-03T13:52:58Z"}],"name":"demoservice-server","namespace":"default","resourceVersion":"1016043","uid":"e9e8a3e9-6cb4-4301-ace1-2c0cef3bd61e"},"spec":{"progressDeadlineSeconds":600,"replicas":1,"revisionHistoryLimit":10,"selector":{"matchLabels":{"app":"demoservice-server"}},"strategy":{"rollingUpdate":{"maxSurge":"25%","maxUnavailable":"25%"},"type":"RollingUpdate"},"template":{"metadata":{"creationTimestamp":null,"labels":{"app":"demoservice-server"}},"spec":{"containers":[{"env":[{"name":"SERVER_PORT","value":"8089"},{"name":"SLEEP_DURATION","value":"1"},{"name":"DEMO_FOLDERS","value":"/app"},{"name":"ARMO_TEST_NAME","value":"auto_attach_deployment"},{"name":"CAA_ENABLE_CRASH_REPORTER","value":"1"}],"image":"quay.io/armosec/demoservice:v25","imagePullPolicy":"IfNotPresent","name":"demoservice","ports":[{"containerPort":8089,"protocol":"TCP"}],"resources":{},"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File"}],"dnsPolicy":"ClusterFirst","restartPolicy":"Always","schedulerName":"default-scheduler","securityContext":{},"terminationGracePeriodSeconds":30}}},"status":{"availableReplicas":1,"conditions":[{"lastTransitionTime":"2021-05-03T13:10:32Z","lastUpdateTime":"2021-05-03T13:10:37Z","message":"ReplicaSet \"demoservice-server-7d478b6998\" has successfully progressed.","reason":"NewReplicaSetAvailable","status":"True","type":"Progressing"},{"lastTransitionTime":"2021-05-03T13:52:58Z","lastUpdateTime":"2021-05-03T13:52:58Z","message":"Deployment has minimum availability.","reason":"MinimumReplicasAvailable","status":"True","type":"Available"}],"observedGeneration":1,"readyReplicas":1,"replicas":1,"updatedReplicas":1}}`
workload, err := NewWorkload([]byte(w))
if err != nil {
t.Fatal(err)
}
if workload.GetUID() != "e9e8a3e9-6cb4-4301-ace1-2c0cef3bd61e" {
t.Errorf("wrong UID")
}
}
func TestIsAttached(t *testing.T) {
w := `{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{"deployment.kubernetes.io/revision":"3"},"creationTimestamp":"2021-06-21T04:52:05Z","generation":3,"name":"emailservice","namespace":"default"},"spec":{"progressDeadlineSeconds":600,"replicas":1,"revisionHistoryLimit":10,"selector":{"matchLabels":{"app":"emailservice"}},"strategy":{"rollingUpdate":{"maxSurge":"25%","maxUnavailable":"25%"},"type":"RollingUpdate"},"template":{"metadata":{"annotations":{"armo.last-update":"21-06-2021 06:40:42","armo.wlid":"wlid://cluster-david-demo/namespace-default/deployment-emailservice"},"creationTimestamp":null,"labels":{"app":"emailservice","armo.attach":"true"}},"spec":{"containers":[{"env":[{"name":"PORT","value":"8080"},{"name":"DISABLE_PROFILER","value":"1"}],"image":"gcr.io/google-samples/microservices-demo/emailservice:v0.2.3","imagePullPolicy":"IfNotPresent","livenessProbe":{"exec":{"command":["/bin/grpc_health_probe","-addr=:8080"]},"failureThreshold":3,"periodSeconds":5,"successThreshold":1,"timeoutSeconds":1},"name":"server","ports":[{"containerPort":8080,"protocol":"TCP"}],"readinessProbe":{"exec":{"command":["/bin/grpc_health_probe","-addr=:8080"]},"failureThreshold":3,"periodSeconds":5,"successThreshold":1,"timeoutSeconds":1},"resources":{"limits":{"cpu":"200m","memory":"128Mi"},"requests":{"cpu":"100m","memory":"64Mi"}},"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File"}],"dnsPolicy":"ClusterFirst","restartPolicy":"Always","schedulerName":"default-scheduler","securityContext":{},"serviceAccount":"default","serviceAccountName":"default","terminationGracePeriodSeconds":5}}}}`
workload, err := NewWorkload([]byte(w))
if err != nil {
t.Fatal(err)
}
if !workload.IsAttached() {
t.Errorf("expected attached")
}
}

View File

@@ -1,23 +0,0 @@
package k8sinterface
// PodSpec returns the JSON path to the pod spec for the given workload kind.
func PodSpec(kind string) []string {
switch kind {
case "Pod", "Namespace":
return []string{"spec"}
case "CronJob":
return []string{"spec", "jobTemplate", "spec", "template", "spec"}
default:
return []string{"spec", "template", "spec"}
}
}
// PodMetadata returns the JSON path to the pod metadata for the given workload kind.
func PodMetadata(kind string) []string {
switch kind {
case "Pod", "Namespace", "Secret":
return []string{"metadata"}
case "CronJob":
return []string{"spec", "jobTemplate", "spec", "template", "metadata"}
default:
return []string{"spec", "template", "metadata"}
}
}
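For orientation, here is a minimal, self-contained sketch (not part of this changeset) of how path slices like the ones returned by PodSpec and PodMetadata might be consumed; nestedMap and the sample manifest are hypothetical and for illustration only.

package main

import (
	"encoding/json"
	"fmt"
)

// nestedMap is a hypothetical helper that walks a decoded workload object
// along a path such as the one returned by PodSpec or PodMetadata.
func nestedMap(obj map[string]interface{}, path []string) (map[string]interface{}, bool) {
	cur := obj
	for _, key := range path {
		next, ok := cur[key].(map[string]interface{})
		if !ok {
			return nil, false
		}
		cur = next
	}
	return cur, true
}

func main() {
	raw := []byte(`{"kind":"Deployment","spec":{"template":{"spec":{"containers":[]}}}}`)
	obj := map[string]interface{}{}
	if err := json.Unmarshal(raw, &obj); err != nil {
		panic(err)
	}
	// For a Deployment the default case of PodSpec applies: ["spec","template","spec"].
	if podSpec, ok := nestedMap(obj, []string{"spec", "template", "spec"}); ok {
		fmt.Println(podSpec["containers"]) // []
	}
}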

View File

@@ -0,0 +1,26 @@
package helpers
type StringObj struct {
key string
value string
}
type ErrorObj struct {
key string
value error
}
type IntObj struct {
key string
value int
}
type InterfaceObj struct {
key string
value interface{}
}
func Error(e error) *ErrorObj { return &ErrorObj{key: "error", value: e} }
func Int(k string, v int) *IntObj { return &IntObj{key: k, value: v} }
func String(k, v string) *StringObj { return &StringObj{key: k, value: v} }
func Interface(k string, v interface{}) *InterfaceObj { return &InterfaceObj{key: k, value: v} }

View File

@@ -0,0 +1,69 @@
package helpers
import (
"strings"
)
type Level int8
const (
UnknownLevel Level = iota - 1 // -1, sentinel for unrecognized levels
DebugLevel
InfoLevel //default
SuccessLevel
WarningLevel
ErrorLevel
FatalLevel
_defaultLevel = InfoLevel
_minLevel = DebugLevel
_maxLevel = FatalLevel
)
func ToLevel(level string) Level {
switch strings.ToLower(level) {
case "debug":
return DebugLevel
case "info":
return InfoLevel
case "success":
return SuccessLevel
case "warning", "warn":
return WarningLevel
case "error":
return ErrorLevel
case "fatal":
return FatalLevel
default:
return UnknownLevel
}
}
func (l Level) String() string {
switch l {
case DebugLevel:
return "debug"
case InfoLevel:
return "info"
case SuccessLevel:
return "success"
case WarningLevel:
return "warning"
case ErrorLevel:
return "error"
case FatalLevel:
return "fatal"
}
return ""
}
func (l Level) Skip(l2 Level) bool {
return l < l2
}
func SupportedLevels() []string {
levels := []string{}
for i := _minLevel; i <= _maxLevel; i++ {
levels = append(levels, i.String())
}
return levels
}
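A small standalone sketch (not in the diff) of how the Level helpers above behave; it assumes the import path used elsewhere in this changeset.

package main

import (
	"fmt"

	"github.com/armosec/kubescape/cautils/logger/helpers"
)

func main() {
	configured := helpers.ToLevel("warning") // WarningLevel
	// Skip reports whether a message at the receiver's level should be
	// dropped when the logger is configured at the argument's level.
	fmt.Println(helpers.DebugLevel.Skip(configured)) // true: debug is below warning
	fmt.Println(helpers.ErrorLevel.Skip(configured)) // false: error is at or above warning
	fmt.Println(helpers.SupportedLevels())           // [debug info success warning error fatal]
}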

View File

@@ -0,0 +1,62 @@
package helpers
type IDetails interface {
Key() string
Value() interface{}
}
// ======================================================================================
// ============================== String ================================================
// ======================================================================================
// Key
func (s *StringObj) Key() string {
return s.key
}
// Value
func (s *StringObj) Value() interface{} {
return s.value
}
// ======================================================================================
// =============================== Error ================================================
// ======================================================================================
// Key
func (s *ErrorObj) Key() string {
return s.key
}
// Value
func (s *ErrorObj) Value() interface{} {
return s.value
}
// ======================================================================================
// ================================= Int ================================================
// ======================================================================================
// Key
func (s *IntObj) Key() string {
return s.key
}
// Value
func (s *IntObj) Value() interface{} {
return s.value
}
// ======================================================================================
// =========================== Interface ================================================
// ======================================================================================
// Key
func (s *InterfaceObj) Key() string {
return s.key
}
// Value
func (s *InterfaceObj) Value() interface{} {
return s.value
}
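As a quick illustration (again, not part of the diff), the constructors from the previous file and the IDetails accessors above combine like this; the key/value pairs are made up.

package main

import (
	"errors"
	"fmt"

	"github.com/armosec/kubescape/cautils/logger/helpers"
)

func main() {
	details := []helpers.IDetails{
		helpers.String("cluster", "minikube"),
		helpers.Int("resources", 42),
		helpers.Error(errors.New("connection refused")),
	}
	for _, d := range details {
		fmt.Printf("%s: %v\n", d.Key(), d.Value())
	}
}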

cautils/logger/methods.go Normal file
View File

@@ -0,0 +1,41 @@
package logger
import (
"os"
"github.com/armosec/kubescape/cautils/logger/helpers"
"github.com/armosec/kubescape/cautils/logger/prettylogger"
)
type ILogger interface {
Fatal(msg string, details ...helpers.IDetails) // print log and exit 1
Error(msg string, details ...helpers.IDetails)
Success(msg string, details ...helpers.IDetails)
Warning(msg string, details ...helpers.IDetails)
Info(msg string, details ...helpers.IDetails)
Debug(msg string, details ...helpers.IDetails)
SetLevel(level string) error
GetLevel() string
SetWriter(w *os.File)
GetWriter() *os.File
}
var l ILogger
func L() ILogger {
if l == nil {
InitializeLogger()
}
return l
}
func InitializeLogger() {
initializeLogger()
}
func initializeLogger() {
// TODO - support zap logger
l = prettylogger.NewPrettyLogger()
}
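A minimal usage sketch (not part of this changeset) of the lazy singleton accessor above; the logger import path is assumed from the package layout shown in this diff.

package main

import (
	"github.com/armosec/kubescape/cautils/logger"
	"github.com/armosec/kubescape/cautils/logger/helpers"
)

func main() {
	// L() lazily initializes the pretty logger on first use.
	if err := logger.L().SetLevel("debug"); err != nil {
		logger.L().Fatal("failed setting log level", helpers.Error(err))
	}
	logger.L().Info("scanning cluster", helpers.String("cluster", "minikube"))
	logger.L().Debug("progress", helpers.Int("resources", 42))
}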

View File

@@ -0,0 +1,31 @@
package prettylogger
import (
"io"
"github.com/armosec/kubescape/cautils/logger/helpers"
"github.com/fatih/color"
)
var prefixError = color.New(color.Bold, color.FgHiRed).FprintfFunc()
var prefixWarning = color.New(color.Bold, color.FgHiYellow).FprintfFunc()
var prefixInfo = color.New(color.Bold, color.FgCyan).FprintfFunc()
var prefixSuccess = color.New(color.Bold, color.FgHiGreen).FprintfFunc()
var prefixDebug = color.New(color.Bold, color.FgWhite).FprintfFunc()
var message = color.New().FprintfFunc()
func prefix(l helpers.Level) func(w io.Writer, format string, a ...interface{}) {
switch l {
case helpers.DebugLevel:
return prefixDebug
case helpers.InfoLevel:
return prefixInfo
case helpers.SuccessLevel:
return prefixSuccess
case helpers.WarningLevel:
return prefixWarning
case helpers.ErrorLevel, helpers.FatalLevel:
return prefixError
}
return message
}

View File

@@ -0,0 +1,78 @@
package prettylogger
import (
"fmt"
"os"
"sync"
"github.com/armosec/kubescape/cautils/logger/helpers"
)
type PrettyLogger struct {
writer *os.File
level helpers.Level
mutex sync.Mutex
}
func NewPrettyLogger() *PrettyLogger {
return &PrettyLogger{
writer: os.Stderr, // default to stderr
level: helpers.InfoLevel,
mutex: sync.Mutex{},
}
}
func (pl *PrettyLogger) GetLevel() string { return pl.level.String() }
func (pl *PrettyLogger) SetWriter(w *os.File) { pl.writer = w }
func (pl *PrettyLogger) GetWriter() *os.File { return pl.writer }
func (pl *PrettyLogger) SetLevel(level string) error {
l := helpers.ToLevel(level)
if l == helpers.UnknownLevel {
return fmt.Errorf("level '%s' unknown", level)
}
pl.level = l
return nil
}
func (pl *PrettyLogger) Fatal(msg string, details ...helpers.IDetails) {
pl.print(helpers.FatalLevel, msg, details...)
os.Exit(1)
}
func (pl *PrettyLogger) Error(msg string, details ...helpers.IDetails) {
pl.print(helpers.ErrorLevel, msg, details...)
}
func (pl *PrettyLogger) Warning(msg string, details ...helpers.IDetails) {
pl.print(helpers.WarningLevel, msg, details...)
}
func (pl *PrettyLogger) Info(msg string, details ...helpers.IDetails) {
pl.print(helpers.InfoLevel, msg, details...)
}
func (pl *PrettyLogger) Debug(msg string, details ...helpers.IDetails) {
pl.print(helpers.DebugLevel, msg, details...)
}
func (pl *PrettyLogger) Success(msg string, details ...helpers.IDetails) {
pl.print(helpers.SuccessLevel, msg, details...)
}
func (pl *PrettyLogger) print(level helpers.Level, msg string, details ...helpers.IDetails) {
if !level.Skip(pl.level) {
pl.mutex.Lock()
prefix(level)(pl.writer, "[%s] ", level.String())
if d := detailsToString(details); d != "" {
msg = fmt.Sprintf("%s. %s", msg, d)
}
message(pl.writer, "%s\n", msg)
pl.mutex.Unlock()
}
}
func detailsToString(details []helpers.IDetails) string {
s := ""
for i := range details {
s += fmt.Sprintf("%s: %v", details[i].Key(), details[i].Value())
if i < len(details)-1 {
s += "; "
}
}
return s
}
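The following standalone sketch (not in the diff) exercises PrettyLogger directly, showing the level filter in print and output redirection via SetWriter; writing to os.Stdout is just an example choice.

package main

import (
	"os"

	"github.com/armosec/kubescape/cautils/logger/helpers"
	"github.com/armosec/kubescape/cautils/logger/prettylogger"
)

func main() {
	pl := prettylogger.NewPrettyLogger()
	pl.SetWriter(os.Stdout) // default writer is os.Stderr
	if err := pl.SetLevel("warning"); err != nil {
		panic(err)
	}

	pl.Debug("dropped: below the configured level")
	pl.Warning("kept", helpers.String("component", "scanner"))
	pl.Error("kept as well", helpers.Int("failures", 3))
}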

View File

@@ -0,0 +1,58 @@
package zaplogger
import (
"os"
"github.com/armosec/kubescape/cautils/logger/helpers"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
type ZapLogger struct {
zapL *zap.Logger
}
func NewZapLogger() *ZapLogger {
return &ZapLogger{
zapL: zap.L(),
}
}
func (zl *ZapLogger) GetLevel() string { return "" }
func (zl *ZapLogger) SetWriter(w *os.File) {}
func (zl *ZapLogger) GetWriter() *os.File { return nil }
func (zl *ZapLogger) SetLevel(level string) error {
return nil
}
func (zl *ZapLogger) Fatal(msg string, details ...helpers.IDetails) {
zl.zapL.Fatal(msg, detailsToZapFields(details)...)
}
func (zl *ZapLogger) Error(msg string, details ...helpers.IDetails) {
zl.zapL.Error(msg, detailsToZapFields(details)...)
}
func (zl *ZapLogger) Warning(msg string, details ...helpers.IDetails) {
zl.zapL.Warn(msg, detailsToZapFields(details)...)
}
func (zl *ZapLogger) Success(msg string, details ...helpers.IDetails) {
zl.zapL.Info(msg, detailsToZapFields(details)...)
}
func (zl *ZapLogger) Info(msg string, details ...helpers.IDetails) {
zl.zapL.Info(msg, detailsToZapFields(details)...)
}
func (zl *ZapLogger) Debug(msg string, details ...helpers.IDetails) {
zl.zapL.Debug(msg, detailsToZapFields(details)...)
}
func detailsToZapFields(details []helpers.IDetails) []zapcore.Field {
zapFields := []zapcore.Field{}
for i := range details {
zapFields = append(zapFields, zap.Any(details[i].Key(), details[i].Value()))
}
return zapFields
}
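A brief sketch (not part of the changeset) of the zap-backed logger; NewZapLogger wraps zap.L(), which is a no-op until a global zap logger is installed, so the example installs zap's example logger first. The zaplogger import path is an assumption based on the package name shown above.

package main

import (
	"go.uber.org/zap"

	"github.com/armosec/kubescape/cautils/logger/helpers"
	"github.com/armosec/kubescape/cautils/logger/zaplogger"
)

func main() {
	// zap.L() is a no-op by default; install a real logger before wrapping it.
	undo := zap.ReplaceGlobals(zap.NewExample())
	defer undo()

	zl := zaplogger.NewZapLogger()
	zl.Info("scan finished", helpers.String("cluster", "minikube"), helpers.Int("failed", 2))
	zl.Error("submit skipped", helpers.String("reason", "offline"))
}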

View File

@@ -1,7 +0,0 @@
package opapolicy
const (
PostureRestAPIPathV1 = "/v1/posture"
PostureRedisPrefix = "_postureReportv1"
K8sPostureNotification = "/k8srestapi/v1/newPostureReport"
)

View File

@@ -1,151 +0,0 @@
package opapolicy
import (
"time"
armotypes "github.com/armosec/kubescape/cautils/armotypes"
)
type AlertScore float32
type RuleLanguages string
const (
RegoLanguage RuleLanguages = "Rego"
RegoLanguage2 RuleLanguages = "rego"
)
// RuleResponse is the expected response of a single run of a rego policy
type RuleResponse struct {
AlertMessage string `json:"alertMessage"`
PackageName string `json:"packagename"`
AlertScore AlertScore `json:"alertScore"`
// AlertObject AlertObject `json:"alertObject"`
AlertObject AlertObject `json:"alertObject"` // TODO - replace interface to AlertObject
Context []string `json:"context"` // TODO - Remove
Rulename string `json:"rulename"` // TODO - Remove
ExceptionName string `json:"exceptionName"`
}
type AlertObject struct {
K8SApiObjects []map[string]interface{} `json:"k8sApiObjects,omitempty"`
ExternalObjects map[string]interface{} `json:"externalObjects,omitempty"`
}
type FrameworkReport struct {
Name string `json:"name"`
ControlReports []ControlReport `json:"controlReports"`
}
type ControlReport struct {
Name string `json:"name"`
RuleReports []RuleReport `json:"ruleReports"`
Remediation string `json:"remediation"`
Description string `json:"description"`
}
type RuleReport struct {
Name string `json:"name"`
Remediation string `json:"remediation"`
RuleStatus RuleStatus `json:"ruleStatus"`
RuleResponses []RuleResponse `json:"ruleResponses"`
ListInputResources []map[string]interface{} `json:"-"`
ListInputKinds []string `json:"-"`
}
type RuleStatus struct {
Status string `json:"status"`
Message string `json:"message"`
}
// PostureReport
type PostureReport struct {
CustomerGUID string `json:"customerGUID"`
ClusterName string `json:"clusterName"`
ReportID string `json:"reportID"`
JobID string `json:"jobID"`
ReportGenerationTime time.Time `json:"generationTime"`
FrameworkReports []FrameworkReport `json:"frameworks"`
}
// RuleMatchObjects defines which objects this rule applies to
type RuleMatchObjects struct {
APIGroups []string `json:"apiGroups"` // apps
APIVersions []string `json:"apiVersions"` // v1/ v1beta1 / *
Resources []string `json:"resources"` // dep.., pods,
}
// RuleDependency defines a package the rule depends on
type RuleDependency struct {
PackageName string `json:"packageName"` // package name
}
// PolicyRule represents single rule, the fundamental executable block of policy
type PolicyRule struct {
armotypes.PortalBase `json:",inline"`
CreationTime string `json:"creationTime"`
Rule string `json:"rule"` // multiline string!
RuleLanguage RuleLanguages `json:"ruleLanguage"`
Match []RuleMatchObjects `json:"match"`
RuleDependencies []RuleDependency `json:"ruleDependencies"`
Description string `json:"description"`
Remediation string `json:"remediation"`
RuleQuery string `json:"ruleQuery"` // default "armo_builtins" - DEPRECATED
}
// Control represents a collection of rules which are combined together to single purpose
type Control struct {
armotypes.PortalBase `json:",inline"`
CreationTime string `json:"creationTime"`
Description string `json:"description"`
Remediation string `json:"remediation"`
Rules []PolicyRule `json:"rules"`
// for new list of rules in POST/UPDATE requests
RulesIDs *[]string `json:"rulesIDs,omitempty"`
}
type UpdatedControl struct {
Control `json:",inline"`
Rules []interface{} `json:"rules"`
}
// Framework represents a collection of controls which are combined together to expose comprehensive behavior
type Framework struct {
armotypes.PortalBase `json:",inline"`
CreationTime string `json:"creationTime"`
Description string `json:"description"`
Controls []Control `json:"controls"`
// for new list of controls in POST/UPDATE requests
ControlsIDs *[]string `json:"controlsIDs,omitempty"`
}
type UpdatedFramework struct {
Framework `json:",inline"`
Controls []interface{} `json:"controls"`
}
type NotificationPolicyType string
type NotificationPolicyKind string
// Supported NotificationTypes
const (
TypeValidateRules NotificationPolicyType = "validateRules"
TypeExecPostureScan NotificationPolicyType = "execPostureScan"
TypeUpdateRules NotificationPolicyType = "updateRules"
)
// Supported NotificationKinds
const (
KindFramework NotificationPolicyKind = "Framework"
KindControl NotificationPolicyKind = "Control"
KindRule NotificationPolicyKind = "Rule"
)
type PolicyNotification struct {
NotificationType NotificationPolicyType `json:"notificationType"`
Rules []PolicyIdentifier `json:"rules"`
ReportID string `json:"reportID"`
JobID string `json:"jobID"`
Designators armotypes.PortalDesignator `json:"designators"`
}
type PolicyIdentifier struct {
Kind NotificationPolicyKind `json:"kind"`
Name string `json:"name"`
}

View File

@@ -1,300 +0,0 @@
package opapolicy
import (
"time"
armotypes "github.com/armosec/kubescape/cautils/armotypes"
)
// Mock A
var (
AMockCustomerGUID = "5d817063-096f-4d91-b39b-8665240080af"
AMockJobID = "36b6f9e1-3b63-4628-994d-cbe16f81e9c7"
AMockReportID = "2c31e4da-c6fe-440d-9b8a-785b80c8576a"
AMockClusterName = "clusterA"
AMockFrameworkName = "testFrameworkA"
AMockControlName = "testControlA"
AMockRuleName = "testRuleA"
AMockPortalBase = *armotypes.MockPortalBase(AMockCustomerGUID, "", nil)
)
func MockRuleResponseA() *RuleResponse {
return &RuleResponse{
AlertMessage: "test alert message A",
AlertScore: 0,
Rulename: AMockRuleName,
PackageName: "test.package.name.A",
Context: []string{},
}
}
func MockFrameworkReportA() *FrameworkReport {
return &FrameworkReport{
Name: AMockFrameworkName,
ControlReports: []ControlReport{
{
Name: AMockControlName,
RuleReports: []RuleReport{
{
Name: AMockRuleName,
Remediation: "remove privilegedContainer: True flag from your pod spec",
RuleResponses: []RuleResponse{
*MockRuleResponseA(),
},
},
},
},
},
}
}
func MockPostureReportA() *PostureReport {
return &PostureReport{
CustomerGUID: AMockCustomerGUID,
ClusterName: AMockClusterName,
ReportID: AMockReportID,
JobID: AMockJobID,
ReportGenerationTime: time.Now().UTC(),
FrameworkReports: []FrameworkReport{*MockFrameworkReportA()},
}
}
func MockFrameworkA() *Framework {
return &Framework{
PortalBase: *armotypes.MockPortalBase("aaaaaaaa-096f-4d91-b39b-8665240080af", AMockFrameworkName, nil),
CreationTime: "",
Description: "mock framework descryption",
Controls: []Control{
{
PortalBase: *armotypes.MockPortalBase("aaaaaaaa-aaaa-4d91-b39b-8665240080af", AMockControlName, nil),
Rules: []PolicyRule{
*MockRuleA(),
},
},
},
}
}
func MockRuleUntrustedRegistries() *PolicyRule {
return &PolicyRule{
PortalBase: *armotypes.MockPortalBase("aaaaaaaa-aaaa-aaaa-b39b-8665240080af", AMockControlName, nil),
Rule: `
package armo_builtins
# Check for images from blacklisted repos
untrusted_registries(z) = x {
x := ["015253967648.dkr.ecr.eu-central-1.amazonaws.com/"]
}
public_registries(z) = y{
y := ["quay.io/kiali/","quay.io/datawire/","quay.io/keycloak/","quay.io/bitnami/"]
}
untrustedImageRepo[msga] {
pod := input[_]
k := pod.kind
k == "Pod"
container := pod.spec.containers[_]
image := container.image
repo_prefix := untrusted_registries(image)[_]
startswith(image, repo_prefix)
selfLink := pod.metadata.selfLink
containerName := container.name
msga := {
"alertMessage": sprintf("image '%v' in container '%s' in [%s] comes from untrusted registry", [image, containerName, selfLink]),
"alert": true,
"prevent": false,
"alertScore": 2,
"alertObject": [{"pod":pod}]
}
}
untrustedImageRepo[msga] {
pod := input[_]
k := pod.kind
k == "Pod"
container := pod.spec.containers[_]
image := container.image
repo_prefix := public_registries(image)[_]
startswith(image, repo_prefix)
selfLink := pod.metadata.selfLink
containerName := container.name
msga := {
"alertMessage": sprintf("image '%v' in container '%s' in [%s] comes from public registry", [image, containerName, selfLink]),
"alert": true,
"prevent": false,
"alertScore": 1,
"alertObject": [{"pod":pod}]
}
}
`,
RuleLanguage: RegoLanguage,
Match: []RuleMatchObjects{
{
APIVersions: []string{"v1"},
APIGroups: []string{"*"},
Resources: []string{"pods"},
},
},
RuleDependencies: []RuleDependency{
{
PackageName: "kubernetes.api.client",
},
},
}
}
func MockRuleA() *PolicyRule {
return &PolicyRule{
PortalBase: *armotypes.MockPortalBase("aaaaaaaa-aaaa-aaaa-b39b-8665240080af", AMockControlName, nil),
Rule: MockRegoPrivilegedPods(), //
RuleLanguage: RegoLanguage,
Match: []RuleMatchObjects{
{
APIVersions: []string{"v1"},
APIGroups: []string{"*"},
Resources: []string{"pods"},
},
},
RuleDependencies: []RuleDependency{
{
PackageName: "kubernetes.api.client",
},
},
}
}
func MockRuleB() *PolicyRule {
return &PolicyRule{
PortalBase: *armotypes.MockPortalBase("bbbbbbbb-aaaa-aaaa-b39b-8665240080af", AMockControlName, nil),
Rule: MockExternalFacingService(), //
RuleLanguage: RegoLanguage,
Match: []RuleMatchObjects{
{
APIVersions: []string{"v1"},
APIGroups: []string{""},
Resources: []string{"pods"},
},
},
RuleDependencies: []RuleDependency{
{
PackageName: "kubernetes.api.client",
},
},
}
}
func MockPolicyNotificationA() *PolicyNotification {
return &PolicyNotification{
NotificationType: TypeExecPostureScan,
ReportID: AMockReportID,
JobID: AMockJobID,
Designators: armotypes.PortalDesignator{},
Rules: []PolicyIdentifier{
{
Kind: KindFramework,
Name: AMockFrameworkName,
}},
}
}
func MockTemp() string {
return `
package armo_builtins
import data.kubernetes.api.client as client
deny[msga] {
#object := input[_]
object := client.query_all("pods")
obj := object.body.items[_]
msga := {
"packagename": "armo_builtins",
"alertMessage": "found object",
"alertScore": 3,
"alertObject": {"object": obj},
}
}
`
}
func MockRegoPrivilegedPods() string {
return `package armo_builtins
import data.kubernetes.api.client as client
# Deny mutating action unless user is in group owning the resource
#privileged pods
deny[msga] {
pod := input[_]
containers := pod.spec.containers[_]
containers.securityContext.privileged == true
msga := {
"packagename": "armo_builtins",
"alertMessage": sprintf("the following pods are defined as privileged: %v", [pod]),
"alertScore": 3,
"alertObject": pod,
}
}
#handles majority of workload resources
deny[msga] {
wl := input[_]
spec_template_spec_patterns := {"Deployment","ReplicaSet","DaemonSet","StatefulSet","Job"}
spec_template_spec_patterns[wl.kind]
containers := wl.spec.template.spec.containers[_]
containers.securityContext.privileged == true
msga := {
"packagename": "armo_builtins",
"alertMessage": sprintf("the following workloads are defined as privileged: %v", [wl]),
"alertScore": 3,
"alertObject": wl,
}
}
#handles cronjob
deny[msga] {
wl := input[_]
wl.kind == "CronJob"
containers := wl.spec.jobTemplate.spec.template.spec.containers[_]
containers.securityContext.privileged == true
msga := {
"packagename": "armo_builtins",
"alertMessage": sprintf("the following cronjobs are defined as privileged: %v", [wl]),
"alertScore": 3,
"alertObject": wl,
}
}
`
}
func MockExternalFacingService() string {
return "\n\tpackage armo_builtins\n\n\timport data.kubernetes.api.client as client\n\timport data.cautils as cautils\n\ndeny[msga] {\n\n\twl := input[_]\n\tcluster_resource := client.query_all(\n\t\t\"services\"\n\t)\n\n\tlabels := wl.metadata.labels\n\tfiltered_labels := json.remove(labels, [\"pod-template-hash\"])\n \n#service := cluster_resource.body.items[i]\nservices := [svc | cluster_resource.body.items[i].metadata.namespace == wl.metadata.namespace; svc := cluster_resource.body.items[i]]\nservice := services[_]\nnp_or_lb := {\"NodePort\", \"LoadBalancer\"}\nnp_or_lb[service.spec.type]\ncautils.is_subobject(service.spec.selector,filtered_labels)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v pod %v expose external facing service: %v\",[wl.metadata.namespace, wl.metadata.name, service.metadata.name]),\n\t\t\"alertScore\": 2,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"srvc\":service}\n\t}\n}\n\t"
}
func GetRuntimePods() string {
return `
package armo_builtins
import data.kubernetes.api.client as client
deny[msga] {
cluster_resource := client.query_all(
"pods"
)
pod := cluster_resource.body.items[i]
msga := {
"alertMessage": "got something",
"alertScore": 2,
"packagename": "armo_builtins",
"alertObject": {"pod": pod}
}
}
`
}

View File

@@ -1,42 +0,0 @@
package opapolicy
import (
"encoding/json"
"testing"
)
func TestMockPolicyNotificationA(t *testing.T) {
policy := MockPolicyNotificationA()
bp, err := json.Marshal(policy)
if err != nil {
t.Error(err)
} else {
t.Logf("%s\n", string(bp))
// t.Errorf("%s\n", string(bp))
}
}
func TestMockFrameworkA(t *testing.T) {
policy := MockFrameworkA()
bp, err := json.Marshal(policy)
if err != nil {
t.Error(err)
} else {
t.Logf("%s\n", string(bp))
// t.Errorf("%s\n", string(bp))
}
}
func TestMockPostureReportA(t *testing.T) {
policy := MockPostureReportA()
bp, err := json.Marshal(policy)
if err != nil {
t.Error(err)
} else {
// t.Errorf("%s\n", string(bp))
t.Logf("%s\n", string(bp))
}
}

View File

@@ -1,99 +0,0 @@
package opapolicy
import (
"bytes"
"encoding/json"
"fmt"
"github.com/golang/glog"
"github.com/open-policy-agent/opa/rego"
)
func (pn *PolicyNotification) ToJSONBytesBuffer() (*bytes.Buffer, error) {
res, err := json.Marshal(pn)
if err != nil {
return nil, err
}
return bytes.NewBuffer(res), err
}
func (ruleReport *RuleReport) GetRuleStatus() (string, []RuleResponse, []RuleResponse) {
if len(ruleReport.RuleResponses) == 0 {
return "success", nil, nil
}
exceptions := make([]RuleResponse, 0)
failed := make([]RuleResponse, 0)
for _, rule := range ruleReport.RuleResponses {
if rule.ExceptionName != "" {
exceptions = append(exceptions, rule)
} else {
failed = append(failed, rule)
}
}
status := "failed"
if len(failed) == 0 && len(exceptions) > 0 {
status = "warning"
}
return status, failed, exceptions
}
func ParseRegoResult(regoResult *rego.ResultSet) ([]RuleResponse, error) {
var errs error
ruleResponses := []RuleResponse{}
for _, result := range *regoResult {
for decisionIdx := range result.Expressions {
if resMap, ok := result.Expressions[decisionIdx].Value.(map[string]interface{}); ok {
for objName := range resMap {
jsonBytes, err := json.Marshal(resMap[objName])
if err != nil {
err = fmt.Errorf("in parseRegoResult, json.Marshal failed. name: %s, obj: %v, reason: %s", objName, resMap[objName], err)
glog.Error(err)
errs = fmt.Errorf("%s\n%s", errs, err)
continue
}
desObj := make([]RuleResponse, 0)
if err := json.Unmarshal(jsonBytes, &desObj); err != nil {
err = fmt.Errorf("in parseRegoResult, json.Unmarshal failed. name: %s, obj: %v, reason: %s", objName, resMap[objName], err)
glog.Error(err)
errs = fmt.Errorf("%s\n%s", errs, err)
continue
}
ruleResponses = append(ruleResponses, desObj...)
}
}
}
}
return ruleResponses, errs
}
func (controlReport *ControlReport) GetNumberOfResources() int {
sum := 0
for i := range controlReport.RuleReports {
if controlReport.RuleReports[i].ListInputResources == nil {
continue
}
sum += len(controlReport.RuleReports[i].ListInputResources)
}
return sum
}
func (controlReport *ControlReport) ListControlsInputKinds() []string {
listControlsInputKinds := []string{}
for i := range controlReport.RuleReports {
listControlsInputKinds = append(listControlsInputKinds, controlReport.RuleReports[i].ListInputKinds...)
}
return listControlsInputKinds
}
func (controlReport *ControlReport) Passed() bool {
for i := range controlReport.RuleReports {
if len(controlReport.RuleReports[i].RuleResponses) > 0 {
return false
}
}
return true
}
func (controlReport *ControlReport) Failed() bool {
return !controlReport.Passed()
}
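To make the classification above concrete, here is an illustrative standalone sketch (not in the diff) of GetRuleStatus; the opapolicy import path and the sample responses are assumptions.

package main

import (
	"fmt"

	"github.com/armosec/kubescape/cautils/opapolicy"
)

func main() {
	report := opapolicy.RuleReport{
		Name: "rule-privileged-pods",
		RuleResponses: []opapolicy.RuleResponse{
			{AlertMessage: "pod nginx is privileged"},                                     // a real failure
			{AlertMessage: "pod debugger is privileged", ExceptionName: "allow-debugger"}, // covered by an exception
		},
	}
	status, failed, excepted := report.GetRuleStatus()
	fmt.Println(status, len(failed), len(excepted)) // failed 1 1
}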

View File

@@ -1,47 +0,0 @@
package opapolicy
import (
"github.com/francoispqt/gojay"
"time"
)
/*
responsible for fast unmarshaling of various common posture report structures and substructures
*/
// UnmarshalJSONObject decodes a single PostureReport field by its JSON key
func (r *PostureReport) UnmarshalJSONObject(dec *gojay.Decoder, key string) (err error) {
switch key {
case "customerGUID":
err = dec.String(&(r.CustomerGUID))
case "clusterName":
err = dec.String(&(r.ClusterName))
case "reportID":
err = dec.String(&(r.ReportID))
case "jobID":
err = dec.String(&(r.JobID))
case "generationTime":
err = dec.Time(&(r.ReportGenerationTime), time.RFC3339)
r.ReportGenerationTime = r.ReportGenerationTime.Local()
}
return err
}
// func (files *PkgFiles) UnmarshalJSONArray(dec *gojay.Decoder) error {
// lae := PackageFile{}
// if err := dec.Object(&lae); err != nil {
// return err
// }
// *files = append(*files, lae)
// return nil
// }
func (file *PostureReport) NKeys() int {
return 0
}
//------------------------
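A short sketch (not part of the diff) of decoding a PostureReport with gojay via the UnmarshalJSONObject/NKeys implementation above; the opapolicy import path and the sample payload are assumptions.

package main

import (
	"fmt"

	"github.com/armosec/kubescape/cautils/opapolicy"
	"github.com/francoispqt/gojay"
)

func main() {
	data := []byte(`{"customerGUID":"1234","clusterName":"minikube","reportID":"r-1","jobID":"j-1","generationTime":"2022-02-08T12:00:00Z"}`)
	report := &opapolicy.PostureReport{}
	if err := gojay.UnmarshalJSONObject(data, report); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	// Only the keys handled in UnmarshalJSONObject above are populated.
	fmt.Println(report.ClusterName, report.ReportGenerationTime)
}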

View File

@@ -1,219 +0,0 @@
package resources
var RegoCAUtils = `
package cautils
list_contains(lista,element) {
some i
lista[i] == element
}
# getPodName(metadata) = name {
# name := metadata.generateName
#}
getPodName(metadata) = name {
name := metadata.name
}
#returns subobject ,sub1 is partial to parent, e.g parent = {a:a,b:b,c:c,d:d}
# sub1 = {b:b,c:c} - result is {b:b,c:c}, if sub1={b:b,e:f} returns {b:b}
object_intersection(parent,sub1) = r{
r := {k:p | p := sub1[k]
parent[k]== p
}
}
# returns true if parent contains sub (both are objects, not sets)
is_subobject(sub,parent) {
object_intersection(sub,parent) == sub
}
`
var RegoDesignators = `
package designators
import data.cautils
#functions that related to designators
#allowed_namespace
#@input@: receive as part of the input object "included_namespaces" list
#@input@: item's namespace as "namespace"
#returns true if namespace exists in that list
included_namespaces(namespace){
cautils.list_contains(["default"],namespace)
}
#forbidden_namespaces
#@input@: receive as part of the input object "forbidden_namespaces" list
#@input@: item's namespace as "namespace"
#returns true if namespace exists in that list
excluded_namespaces(namespace){
not cautils.list_contains(["excluded"],namespace)
}
forbidden_wlids(wlid){
input.forbidden_wlids[_] == wlid
}
filter_k8s_object(obj) = filtered {
#put
filtered := obj
#filtered := [ x | cautils.list_contains(["default"],obj[i].metadata.namespace) ; x := obj[i] ]
# filtered := [ x | not cautils.list_contains([],filter1Set[i].metadata.namespace); x := filter1Set[i]]
}
`
var RegoKubernetesApiClient = `
package kubernetes.api.client
# service account token
token := data.k8sconfig.token
# Cluster host
host := data.k8sconfig.host
# default certificate path
# crt_file := "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
crt_file := data.k8sconfig.crtfile
client_crt_file := data.k8sconfig.clientcrtfile
client_key_file := data.k8sconfig.clientkeyfile
# This information could be retrieved from the kubernetes API
# too, but would essentially require a request per API group,
# so for now use a lookup table for the most common resources.
resource_group_mapping := {
"services": "api/v1",
"pods": "api/v1",
"configmaps": "api/v1",
"secrets": "api/v1",
"persistentvolumeclaims": "api/v1",
"daemonsets": "apis/apps/v1",
"deployments": "apis/apps/v1",
"statefulsets": "apis/apps/v1",
"horizontalpodautoscalers": "api/autoscaling/v1",
"jobs": "apis/batch/v1",
"cronjobs": "apis/batch/v1beta1",
"ingresses": "api/extensions/v1beta1",
"replicasets": "apis/apps/v1",
"networkpolicies": "apis/networking.k8s.io/v1",
"clusterroles": "apis/rbac.authorization.k8s.io/v1",
"clusterrolebindings": "apis/rbac.authorization.k8s.io/v1",
"roles": "apis/rbac.authorization.k8s.io/v1",
"rolebindings": "apis/rbac.authorization.k8s.io/v1",
"serviceaccounts": "api/v1"
}
# Query for given resource/name in provided namespace
# Example: query_name_ns("deployments", "my-app", "default")
query_name_ns(resource, name, namespace) = http.send({
"url": sprintf("%v/%v/namespaces/%v/%v/%v", [
host,
resource_group_mapping[resource],
namespace,
resource,
name,
]),
"method": "get",
"headers": {"authorization": token},
"tls_client_cert_file": client_crt_file,
"tls_client_key_file": client_key_file,
"tls_ca_cert_file": crt_file,
"raise_error": true,
})
# Query for given resource type using label selectors
# https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#api
# Example: query_label_selector_ns("deployments", {"app": "opa-kubernetes-api-client"}, "default")
query_label_selector_ns(resource, selector, namespace) = http.send({
"url": sprintf("%v/%v/namespaces/%v/%v?labelSelector=%v", [
host,
resource_group_mapping[resource],
namespace,
resource,
label_map_to_query_string(selector),
]),
"method": "get",
"headers": {"authorization": token},
"tls_client_cert_file": client_crt_file,
"tls_client_key_file": client_key_file,
"tls_ca_cert_file": crt_file,
"raise_error": true,
})
# x := field_transform_to_qry_param("spec.selector",input)
# input = {"app": "acmefit", "service": "catalog-db"}
# result: "spec.selector.app%3Dacmefit,spec.selector.service%3Dcatalog-db"
query_field_selector_ns(resource, field, selector, namespace) = http.send({
"url": sprintf("%v/%v/namespaces/%v/%v?fieldSelector=%v", [
host,
resource_group_mapping[resource],
namespace,
resource,
field_transform_to_qry_param(field,selector),
]),
"method": "get",
"headers": {"authorization": token},
"tls_client_cert_file": client_crt_file,
"tls_client_key_file": client_key_file,
"tls_ca_cert_file": crt_file,
"raise_error": true,
})
# # Query for all resources of type resource in all namespaces
# # Example: query_all("deployments")
# query_all(resource) = http.send({
# "url": sprintf("https://%v:%v/%v/%v", [
# ip,
# port,
# resource_group_mapping[resource],
# resource,
# ]),
# "method": "get",
# "headers": {"authorization": sprintf("Bearer %v", [token])},
# "tls_client_cert_file": crt_file,
# "raise_error": true,
# })
# Query for all resources of type resource in all namespaces
# Example: query_all("deployments")
query_all(resource) = http.send({
"url": sprintf("%v/%v/%v", [
host,
resource_group_mapping[resource],
resource,
]),
"method": "get",
"headers": {"authorization": token},
"tls_client_cert_file": client_crt_file,
"tls_client_key_file": client_key_file,
"tls_ca_cert_file": crt_file,
"raise_error": true,
})
# Query for all resources of type resource in all namespaces - without authentication
# Example: query_all("deployments")
query_all_no_auth(resource) = http.send({
"url": sprintf("%v/%v/namespaces/default/%v", [
host,
resource_group_mapping[resource],
resource,
]),
"method": "get",
"raise_error": true,
"tls_insecure_skip_verify" : true,
})
field_transform_to_qry_param(field,map) = finala {
mid := {concat(".",[field,key]): val | val := map[key]}
finala := label_map_to_query_string(mid)
}
label_map_to_query_string(map) = concat(",", [str | val := map[key]; str := concat("%3D", [key, val])])
`

View File

@@ -1,20 +0,0 @@
package armo_builtins
# import data.kubernetes.api.client as client
import data.cautils as cautils
# alert cronjobs
#handles cronjob
deny[msga] {
wl := input[_]
wl.kind == "CronJob"
msga := {
"alertMessage": sprintf("the following cronjobs are defined: %v", [wl]),
"alertScore": 2,
"packagename": "armo_builtins",
"alertObject": wl
}
}

View File

@@ -1,44 +0,0 @@
package armo_builtins
import data.kubernetes.api.client as client
# input: pod
# apiversion: v1
# does:
# returns the external facing services of that pod
#
#
deny[msga] {
pod := input[_]
podns := pod.metadata.namespace
podname := getName(pod.metadata)
# pod := client.query_name_ns("pods","frontend-86c5ffb485-kfp9d", "default")
labels := pod.body.metadata.labels
filtered_labels := json.remove(labels, ["pod-template-hash"])
cluster_resource := client.query_all(
"services"
)
services := [svc | cluster_resource.body.items[i].metadata.namespace == podns; svc := cluster_resource.body.items[i]]
service := services[_]
np_or_lb := {"NodePort", "LoadBalancer"}
np_or_lb[service.spec.type]
service.spec.selector == filtered_labels
msga := {
"packagename": "armo_builtins",
"alertMessage": sprintf("pod %v/%v exposed services: %v\n", [podns,podname,service]),
"alertScore": 7,
"alertObject": {"service":service,"labels":filtered_labels, "podname":podname,"namespace":podns}
}
}
getName(metadata) = name {
name := metadata.generateName
}
getName(metadata) = name {
name := metadata.name
}

View File

@@ -1,57 +0,0 @@
package armo_builtins
#import data.kubernetes.api.client as client
import data.cautils as cautils
# input: pod
# apiversion: v1
# does:
# returns hostPath volumes
#
#
deny[msga] {
pod := input[_]
pod.kind == "Pod"
volumes := pod.spec.volumes
volume := volumes[_]
# crsrcs.body.spec.containers[_].volumeMounts[_].name = volume.name
volume.hostPath
podname := cautils.getPodName(pod.metadata)
obj := {"volume":volume,"podname": podname}
msga := {
"packagename": "armo_builtins",
"alertMessage": sprintf("pod: %v has {%v,%v} ashostPath volume \n\n\n", [podname, volume]),
"alertScore": 7,
"alertObject": [obj]
}
}
isRWMount(mount) {
not mount.readOnly
}
isRWMount(mount) {
mount.readOnly == false
}
#handles majority of workload resources
deny[msga] {
wl := input[_]
spec_template_spec_patterns := {"Deployment","ReplicaSet","DaemonSet","StatefulSet","Job"}
spec_template_spec_patterns[wl.kind]
volumes := wl.spec.template.spec.volumes
volume := volumes[_]
volume.hostPath
wlname := cautils.getPodName(wl.metadata)
obj := {"volume":volume,"podname": wlname}
msga := {
"packagename": "armo_builtins",
"alertMessage": sprintf("%v: %v has {%v,%v} as hostPath volume\n\n\n", [wl.kind,wlname, volume]),
"alertScore": 7,
"alertObject": [obj]
}
}

View File

@@ -1,56 +0,0 @@
package armo_builtins
#import data.kubernetes.api.client as client
# Deny mutating action unless user is in group owning the resource
#privileged pods
deny[msga] {
pod := input[_]
containers := pod.spec.containers[_]
containers.securityContext.privileged == true
msga := {
"packagename": "armo_builtins",
"alertMessage": sprintf("the following pods are defined as privileged: %v", [pod]),
"alertScore": 3,
"alertObject": pod,
}
}
#handles majority of workload resources
deny[msga] {
wl := input[_]
spec_template_spec_patterns := {"Deployment","ReplicaSet","DaemonSet","StatefulSet","Job"}
spec_template_spec_patterns[wl.kind]
containers := wl.spec.template.spec.containers[_]
containers.securityContext.privileged == true
msga := {
"packagename": "armo_builtins",
"alertMessage": sprintf("the following workloads are defined as privileged: %v", [wl]),
"alertScore": 3,
"alertObject": wl,
}
}
#handles cronjob
deny[msga] {
wl := input[_]
wl.kind == "CronJob"
containers := wl.spec.jobTemplate.spec.template.spec.containers[_]
containers.securityContext.privileged == true
msga := {
"packagename": "armo_builtins",
"alertMessage": sprintf("the following cronjobs are defined as privileged: %v", [wl]),
"alertScore": 3,
"alertObject": wl,
}
}

View File

@@ -1,98 +0,0 @@
package armo_builtins
import data.kubernetes.api.client as client
import data.cautils as cautils
# input: None
# apiversion: v1
# does:
# returns roles+ related subjects in rolebinding
deny[msga] {
# rsrc := client.query_all("roles")
# role := rsrc.body.items[_]
role := input[_]
role.kind == "Role"
rule := role.rules[_]
cautils.list_contains(rule.resources,"secrets")
canViewSecrets(rule)
rbsrc := client.query_all("rolebindings")
rolebinding := rbsrc.body.items[_]
rolebinding.roleRef.kind == "Role"
rolebinding.roleRef.name == role.metadata.name
msga := {
"alertMessage": sprintf("the following users: %v , got read secret access roles", [rolebinding.subjects]),
"alertScore": 9,
"packagename": "armo_builtins",
"alertObject": {"role":role,"users":rolebinding.subjects}
}
}
# input: None
# apiversion: v1
# does:
# returns clusterroles+ related subjects in rolebinding
deny[msga] {
# rsrc := client.query_all("clusterroles")
# role := rsrc.body.items[_]
role := input[_]
role.kind == "ClusterRole"
rule := role.rules[_]
cautils.list_contains(rule.resources,"secrets")
canViewSecrets(rule)
rbsrc := client.query_all("rolebindings")
rolebinding := rbsrc.body.items[_]
rolebinding.roleRef.kind == "ClusterRole"
rolebinding.roleRef.name == role.metadata.name
msga := {
"alertMessage": sprintf("the following users: %v , got read secret access roles", [rolebinding.subjects]),
"alertScore": 9,
"packagename": "armo_builtins",
"alertObject": {"clusterrole":role,"users":rolebinding.subjects}
}
}
# input: None
# apiversion: v1
# does:
# returns clusterroles+ related subjects in clusterrolebinding
#
#
deny[msga] {
# rsrc := client.query_all("clusterroles")
# role := rsrc.body.items[_]
role := input[_]
role.kind == "ClusterRole"
rule := role.rules[_]
cautils.list_contains(rule.resources,"secrets")
canViewSecrets(rule)
rbsrc := client.query_all("clusterrolebindings")
rolebinding := rbsrc.body.items[_]
rolebinding.roleRef.kind == "ClusterRole"
rolebinding.roleRef.name == role.metadata.name
msga := {
"alertMessage": sprintf("the following users: %v , got read secret access roles", [rolebinding.subjects]),
"alertScore": 9,
"packagename": "armo_builtins",
"alertObject": {"clusterrole":role,"users":rolebinding.subjects}
}
}
canViewSecrets(rule) {
cautils.list_contains(rule.verbs,"get")
}
canViewSecrets(rule) {
cautils.list_contains(rule.verbs,"watch")
}

View File

@@ -1,64 +0,0 @@
package armo_builtins
#import data.kubernetes.api.client as client
import data.cautils as cautils
# input: pod
# apiversion: v1
# does:
# returns rw hostpath volumes of that pod
#
#
deny[msga] {
pod := input[_]
pod.kind == "Pod"
volumes := pod.spec.volumes
volume := volumes[_]
# crsrcs.body.spec.containers[_].volumeMounts[_].name = volume.name
mount := pod.spec.containers[_].volumeMounts[_]
mount.name == volume.name
volume.hostPath
isRWMount(mount)
podname := cautils.getPodName(pod.metadata)
obj := {"volume":volume,"mount":mount,"podname": podname}
msga := {
"packagename": "armo_builtins",
"alertMessage": sprintf("pod: %v has {%v,%v} as rw hostPath volume and volumemount pair\n\n\n", [podname, volume,mount]),
"alertScore": 7,
"alertObject": [obj],
}
}
isRWMount(mount) {
not mount.readOnly
}
isRWMount(mount) {
mount.readOnly == false
}
#handles majority of workload resources
deny[msga] {
wl := input[_]
spec_template_spec_patterns := {"Deployment","ReplicaSet","DaemonSet","StatefulSet","Job"}
spec_template_spec_patterns[wl.kind]
volumes := wl.spec.template.spec.volumes
volume := volumes[_]
mount := wl.spec.template.spec.containers[_].volumeMounts[_]
mount.name == volume.name
volume.hostPath
isRWMount(mount)
wlname := cautils.getPodName(wl.metadata)
obj := {"volume":volume,"mount":mount,"podname": wlname}
msga := {
"packagename": "armo_builtins",
"alertMessage": sprintf("%v: %v has {%v,%v} as rw hostPath volume and volumemount pair\n\n\n", [wl.kind,wlname, volume,mount]),
"alertScore": 7,
"alertObject": [obj],
}
}

View File

@@ -1,57 +0,0 @@
package armo_builtins
import data.kubernetes.api.client as client
import data.cautils as cautils
# input: pod
# apiversion: v1
# does:
# returns the external facing services of that pod
#
#
deny[msga] {
pod := input[_]
podns := pod.metadata.namespace
podname := cautils.getPodName(pod.metadata)
# pod := client.query_name_ns("pods", "catalog-mongo-6f468d99b4-pn242", "default")
labels := pod.body.metadata.labels
filtered_labels := json.remove(labels, ["pod-template-hash"])
cluster_resource := client.query_all(
"services"
)
services := [svc | cluster_resource.body.items[i].metadata.namespace == podns; svc := cluster_resource.body.items[i]]
service := services[_]
service.spec.selector == filtered_labels
hasSSHPorts(service)
msga := {
"alertMessage": sprintf("pod %v/%v exposed by SSH services: %v\n", [podns,podname,service]),
"packagename": "armo_builtins",
"alertScore": 7,
"alertObject": [{"pod":pod,"service":{service}}]
}
}
hasSSHPorts(service) {
port := service.spec.ports[_]
port.port == 22
}
hasSSHPorts(service) {
port := service.spec.ports[_]
port.port == 2222
}
hasSSHPorts(service) {
port := service.spec.ports[_]
port.targetPort == 22
}
hasSSHPorts(service) {
port := service.spec.ports[_]
port.targetPort == 2222
}

View File

@@ -1,33 +0,0 @@
{
"guid": "3b0467c9-488d-c244-99d0-90fbf600aaff",
"name": "[Builtin] rule-deny-access-to-secrets",
"creationTime": "2019-09-04T12:04:58.461455",
"description": "determines which users can get/list/watch secrets",
"attributes": {
"m$K8sThreatMatrix": "Credential Access::List k8s Secrets"
},
"ruleDependencies": [
{
"packageName":"cautils"
},
{
"packageName":"kubernetes.api.client"
}
],
"remediation": "",
"match": [
{
"resources": [
"Role","ClusterRole"
],
"apiVersions": [
"v1"
],
"apiGroups": [
"rbac.authorization.k8s.io"
]
}
],
"ruleLanguage": "Rego",
"rule": "\npackage armo_builtins\nimport data.kubernetes.api.client as client\nimport data.cautils as cautils\n\n\n# input: None\n# apiversion: v1\n# does: \n#\treturns roles+ related subjects in rolebinding\n\n\ndeny[msga] {\n\t# rsrc := client.query_all(\"roles\")\n\t# role := rsrc.body.items[_]\n\trole := input[_]\n\trole.kind == \"Role\"\n\trule := role.rules[_]\n\tcautils.list_contains(rule.resources,\"secrets\")\n\tcanViewSecrets(rule)\n\trbsrc := client.query_all(\"rolebindings\")\n\trolebinding := rbsrc.body.items[_]\n\trolebinding.roleRef.kind == \"Role\"\n\trolebinding.roleRef.name == role.metadata.name\n\t\n \n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following users: %v , got read secret access roles\", [rolebinding.subjects]),\n\t\t\"alert\": true,\n\t\t\"prevent\": false,\n\t\t\"alertScore\": 9,\n\t\t\"alertObject\": {\"role\":role,\"users\":rolebinding.subjects}\n\t\n\t}\n}\n\n\n\n# input: None\n# apiversion: v1\n# does: \n#\treturns clusterroles+ related subjects in rolebinding\n\n\ndeny[msga] {\n\t# rsrc := client.query_all(\"clusterroles\")\n\t# role := rsrc.body.items[_]\n\trole := input[_]\n\trole.kind == \"ClusterRole\"\n\trule := role.rules[_]\n\tcautils.list_contains(rule.resources,\"secrets\")\n\tcanViewSecrets(rule)\n\trbsrc := client.query_all(\"rolebindings\")\n\trolebinding := rbsrc.body.items[_]\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\t\n \n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following users: %v , got read secret access roles\", [rolebinding.subjects]),\n\t\t\"alert\": true,\n\t\t\"prevent\": false,\n\t\t\"alertScore\": 9,\n\t\t\"alertObject\": {\"clusterrole\":role,\"users\":rolebinding.subjects}\n\t\n\t}\n}\n\n\n# input: None\n# apiversion: v1\n# does: \n#\treturns clusterroles+ related subjects in clusterrolebinding\n#\n#\ndeny[msga] {\n\t# rsrc := client.query_all(\"clusterroles\")\n\t# role := rsrc.body.items[_]\n\trole := input[_]\n\trole.kind == \"ClusterRole\"\n\trule := role.rules[_]\n\tcautils.list_contains(rule.resources,\"secrets\")\n\tcanViewSecrets(rule)\n\trbsrc := client.query_all(\"clusterrolebindings\")\n\trolebinding := rbsrc.body.items[_]\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\t\n \n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following users: %v , got read secret access roles\", [rolebinding.subjects]),\n\t\t\"alert\": true,\n\t\t\"prevent\": false,\n\t\t\"alertScore\": 9,\n\t\t\"alertObject\": {\"clusterrole\":role,\"users\":rolebinding.subjects}\n\t\n\t}\n}\n\ncanViewSecrets(rule) {\n\tcautils.list_contains(rule.verbs,\"get\")\n}\ncanViewSecrets(rule) {\n\tcautils.list_contains(rule.verbs,\"watch\")\n}\n"
}

View File

@@ -1,34 +0,0 @@
{
"guid": "3b0467c9-488d-c244-99d0-90fbf600aaff",
"name": "[Builtin] rule-can-ssh-to-pod",
"creationTime": "2019-09-04T12:04:58.461455",
"description": "denies pods with SSH ports opened(22/222)",
"attributes": {
"microsoftK8sThreatMatrix": "val1"
},
"ruleDependencies": [
{
"packageName":"cautils"
},
{
"packageName":"kubernetes.api.client"
}
],
"remediation": "create a network policy that protects SSH ports",
"match": [
{
"resources": [
"Pods"
],
"apiVersions": [
"v1"
],
"apiGroups": [
"*"
]
}
],
"ruleLanguage": "Rego",
"rule": "\npackage armo_builtins\nimport data.kubernetes.api.client as client\nimport data.cautils as cautils\n\n# input: pod\n# apiversion: v1\n# does: \n#\treturns the external facing services of that pod\n#\n#\ndeny[msga] {\n\tpod := input[_]\n\tpodns := pod.metadata.namespace\n\tpodname := cautils.getPodName(pod.metadata)\n\t# pod := client.query_name_ns(\"pods\", \"catalog-mongo-6f468d99b4-pn242\", \"default\")\n\tlabels := pod.body.metadata.labels\n\tfiltered_labels := json.remove(labels, [\"pod-template-hash\"])\n \n\t cluster_resource := client.query_all(\n\t \t\"services\"\n\t )\n\n\tservices := [svc | cluster_resource.body.items[i].metadata.namespace == podns; svc := cluster_resource.body.items[i]]\n\tservice := \tservices[_]\n\tservice.spec.selector == filtered_labels\n \n\thasSSHPorts(service)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod %v/%v exposed by SSH services: %v\n\", [podns,podname,service]),\n\t\t\"alert\": true,\n\t\t\"prevent\": false,\n\t\t\"alertScore\": 7,\n\t\t\"alertObject\": [{\"pod\":pod,\"service\":{service}}]\n\t\n\t}\n}\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.port == 22\n}\n\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.port == 2222\n}\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.targetPort == 22\n}\n\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.targetPort == 2222\n}\n"
}

View File

@@ -1,33 +0,0 @@
{
"guid": "",
"name": "[Builtin] rule-identify-blacklisted-image-registries",
"creationTime": "",
"description": "Identifying if pod container images are from unallowed registries",
"attributes": {
"m$K8sThreatMatrix": "Initial Access::Compromised images in registry"
},
"ruleDependencies": [
{
"packageName": "cautils"
},
{
"packageName": "kubernetes.api.client"
}
],
"remediation": "Use images from safe registry",
"match": [
{
"resources": [
"Pods"
],
"apiVersions": [
"v1"
],
"apiGroups": [
"*"
]
}
],
"ruleLanguage": "Rego",
"rule": "\npackage armo_builtins\n# Check for images from blacklisted repos\n\nuntrusted_registries(z) = x {\n\tx := [\"015253967648.dkr.ecr.eu-central-1.amazonaws.com/\"]\t\n}\n\npublic_registries(z) = y{\n\ty := [\"quay.io/kiali/\",\"quay.io/datawire/\",\"quay.io/keycloak/\",\"quay.io/bitnami/\"]\n}\n\nuntrustedImageRepo[msga] {\n\tpod := input[_]\n\tk := pod.kind\n\tk == \"Pod\"\n\tcontainer := pod.spec.containers[_]\n\timage := container.image\n repo_prefix := untrusted_registries(image)[_]\n\tstartswith(image, repo_prefix)\n\tselfLink := pod.metadata.selfLink\n\tcontainerName := container.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' in [%s] comes from untrusted registry\", [image, containerName, selfLink]),\n\t\t\"alert\": true,\n\t\t\"prevent\": false,\n\t\t\"alertScore\": 2,\n\t\t\"alertObject\": [{\"pod\":pod}]\n\t}\n}\n\nuntrustedImageRepo[msga] {\n pod := input[_]\n\tk := pod.kind\n\tk == \"Pod\"\n\tcontainer := pod.spec.containers[_]\n\timage := container.image\n repo_prefix := public_registries(image)[_]\n\tstartswith(pod, repo_prefix)\n\tselfLink := input.metadata.selfLink\n\tcontainerName := container.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' in [%s] comes from public registry\", [image, containerName, selfLink]),\n\t\t\"alert\": true,\n\t\t\"prevent\": false,\n\t\t\"alertScore\": 1,\n\t\t\"alertObject\": [{\"pod\":pod}]\n\t}\n}"
}

View File

@@ -1,31 +0,0 @@
{
"guid": "3b0467c9-488d-c244-99d0-90fbf600aaff",
"name": "[Builtin] rule-pod-external-facing",
"creationTime": "2019-09-04T12:04:58.461455",
"description": "denies pods with external facing services, grabs related services",
"attributes": {
"microsoftK8sThreatMatrix": "val1"
},
"ruleDependencies": [
{
"packageName":"kubernetes.api.client"
}
],
"remediation": "create a network policy that controls which protect your cluster from unwanted connections and the outside world",
"match": [
{
"resources": [
"Pods"
],
"apiVersions": [
"v1"
],
"apiGroups": [
"*"
]
}
],
"ruleLanguage": "Rego",
"rule": "\npackage armo_builtins\n\nimport data.kubernetes.api.client as client\n\n\n# input: pod\n# apiversion: v1\n# does: \n#\treturns the external facing services of that pod\n#\n#\ndeny[msga] {\n\tpod := input[_]\n\tpodns := pod.metadata.namespace\n\tpodname := getName(pod.metadata)\n\t# pod := client.query_name_ns(\"pods\",\"frontend-86c5ffb485-kfp9d\", \"default\")\n\tlabels := pod.body.metadata.labels\n\tfiltered_labels := json.remove(labels, [\"pod-template-hash\"])\n \n\t cluster_resource := client.query_all(\n\t \t\"services\"\n\t )\n\n\n\tservices := [svc | cluster_resource.body.items[i].metadata.namespace == podns; svc := cluster_resource.body.items[i]]\n\tservice := \tservices[_]\n\tnp_or_lb := {\"NodePort\", \"LoadBalancer\"}\n\tnp_or_lb[service.spec.type]\n\tservice.spec.selector == filtered_labels\n \n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod %v/%v exposed services: %v\n\", [podns,podname,service]),\n\t\t\"alert\": true,\n\t\t\"prevent\": false,\n\t\t\"alertScore\": 7,\n\t\t\"alertObject\": {\"service\":service,\"labels\":filtered_labels, \"podname\":podname,\"namespace\":podns}\n\t\n\t}\n}\n\ngetName(metadata) = name {\n\tname := metadata.generateName\n}\ngetName(metadata) = name {\n\tname := metadata.name\n}\n"
}

View File

@@ -1,31 +0,0 @@
{
"guid": "3b0467c9-488d-c244-99d0-90fbf600aaff",
"name": "[Builtin] alert-any-hostpath",
"creationTime": "2019-09-04T12:04:58.461455",
"description": "determines if any workload contains a hostPath volume",
"attributes": {
"m$K8sThreatMatrix": "Privilege Escalation::hostPath mount"
},
"ruleDependencies": [
{
"packageName":"cautils"
}
],
"remediation": "consider if hostPath is really necessary - reading sensitive data like hostPath credentials might endanger cluster, if so consider encrypting the data",
"match": [
{
"resources": [
"Deployment","ReplicaSet","DaemonSet","StatefulSet","Job","Pod"
],
"apiVersions": [
"v1"
],
"apiGroups": [
"*"
]
}
],
"ruleLanguage": "Rego",
"rule": "\npackage armo_builtins\nimport data.kubernetes.api.client as client\nimport data.cautils as cautils\n\n# input: pod\n# apiversion: v1\n# does: \n#\treturns hostPath volumes\n#\n#\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volumes := pod.spec.volumes\n volume := volumes[_]\n # crsrcs.body.spec.containers[_].volumeMounts[_].name = volume.name\n volume.hostPath\n podname := cautils.getPodName(pod.metadata)\n obj := {\"volume\":volume,\"podname\": podname}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod: %v has {%v,%v} ashostPath volume \n\n\n\", [podname, volume]),\n\t\t\"alert\": true,\n\t\t\"prevent\": false,\n\t\t\"alertScore\": 7,\n\t\t\"alertObject\": [obj],\n\t\n\t}\n}\n\nisRWMount(mount) {\n not mount.readOnly\n}\nisRWMount(mount) {\n mount.readOnly == false\n}\n\n\n#handles majority of workload resources\ndeny[msga] {\n\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volumes := wl.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n wlname := cautils.getPodName(wl.metadata)\n obj := {\"volume\":volume,\"podname\": wlname}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has {%v,%v} as hostPath volume\n\n\n\", [wl.kind,wlname, volume]),\n\t\t\"alert\": true,\n\t\t\"prevent\": false,\n\t\t\"alertScore\": 7,\n\t\t\"alertObject\": [obj],\n\t\n\t}\n}\n\n\n"
}

View File

@@ -1,27 +0,0 @@
{
"guid": "82f19070-2826-4fe4-a079-f5f7e7a1b04d",
"name": "[Builtin] instance-metadata-api-access",
"attributes": {
"m$K8sThreatMatrix": "Credential Access::Instance Metadata API"
},
"creationTime": "2021-04-25T10:48:48.861806",
"rule": "package armo_builtins\n# Check for images from blacklisted repos\n\nmetadata_azure(z) = http.send({\n\t\"url\": \"http://169.254.169.254/metadata/instance?api-version=2020-09-01\",\n\t\"method\": \"get\",\n\t\"headers\": {\"Metadata\": \"true\"},\n\t\"raise_error\": true,\t\n})\n\nmetadata_gcp(z) = http.send({\n\t\"url\": \"http://169.254.169.254/computeMetadata/v1/?alt=json&recursive=true\",\n\t\"method\": \"get\",\n\t\"headers\": {\"Metadata-Flavor\": \"Google\"},\n\t\"raise_error\": true,\t\n})\n\nmetadata_aws(z) = metadata_object { \n\thostname := http.send({\n\t\"url\": \"http://169.254.169.254/latest/meta-data/local-hostname\",\n\t\"method\": \"get\",\n\t\"raise_error\": true,\t\n })\n\tmetadata_object := {\n\t\t\"raw_body\": hostname.raw_body,\n\t\t\"hostname\" : hostname.raw_body,\n\t\t\"status_code\" : hostname.status_code\n\t}\n}\n\nazure_metadata[msga] {\t\n\tmetadata_object := metadata_azure(\"aaa\")\n\tmetadata_object.status_code == 200\n\tnode_name := metadata_object.body.compute.name\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Node '%s' has access to Instance Metadata Services of Azure.\", [node_name]),\n\t\t\"alert\": true,\n\t\t\"prevent\": false,\n\t\t\"alertScore\": 1,\n\t\t\"alertObject\": [{\"nodeMetadata\":metadata_object.body}]\n\t}\n}\n\ngcp_metadata[msga] {\t\n\tmetadata_object := metadata_gcp(\"aaa\")\n\tmetadata_object.status_code == 200\n\tnode_name := metadata_object.body.instance.hostname\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Node '%s' has access to Instance Metadata Services of GCP.\", [node_name]),\n\t\t\"alert\": true,\n\t\t\"prevent\": false,\n\t\t\"alertScore\": 1,\n\t\t\"alertObject\": [{\"nodeMetadata\": metadata_object.raw_body}]\n\t}\n}\n\naws_metadata[msga] {\t\n\tmetadata_object := metadata_aws(\"aaa\")\n\tmetadata_object.status_code == 200\n\tnode_name := metadata_object.hostname\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Node '%s' has access to Instance Metadata Services of AWS.\", [node_name]),\n\t\t\"alert\": true,\n\t\t\"prevent\": false,\n\t\t\"alertScore\": 1,\n\t\t\"alertObject\": [{\"nodeMetadata\": metadata_object.raw_body}]\n\t}\n}",
"ruleLanguage": "Rego",
"match": [
{
"apiGroups": [
"*"
],
"apiVersions": [
"*"
],
"resources": [
"nodes"
]
}
],
"ruleDependencies": [],
"description": "Checks if there is access from the nodes to cloud prividers instance metadata services",
"remediation": "From https://attack.mitre.org/techniques/T1552/005/ :Option A: Disable or Remove Feature or Program, Option B: Filter Network Traffic",
"ruleQuery": ""
}

View File

@@ -1,30 +0,0 @@
{
"guid": "[Builtin] 3b0467c9-488d-c244-99d0-90fbf600aaff",
"name": "rule-deny-cronjobs",
"creationTime": "2019-09-04T12:04:58.461455",
"description": "determines if it's cronjob",
"attributes": {
"m$K8sThreatMatrix": "Persistence::Cronjob"
},
"ruleDependencies": [
{
"packageName":"cautils"
}
],
"remediation": "",
"match": [
{
"resources": [
"CronJob"
],
"apiVersions": [
"v1beta1"
],
"apiGroups": [
"batch"
]
}
],
"ruleLanguage": "Rego",
"rule": "\npackage armo_builtins\n\n# import data.kubernetes.api.client as client\nimport data.cautils as cautils\n\n\n# alert cronjobs\n\n#handles cronjob\ndeny[msga] {\n\n\twl := input[_]\n\twl.kind == \"CronJob\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined: %v\", [wl]),\n\t\t\"alert\": true,\n\t\t\"prevent\": false,\n\t\t\"alertScore\": 2,\n\t\t\"alertObject\": wl\n\t\n\t}\n}\n"
}

View File

@@ -1,29 +0,0 @@
{
"guid": "",
"name": "[Builtin] rule-privilege-escalation",
"creationTime": "2019-09-04T12:04:58.461455",
"description": "determines if pods/deployments defined as privileged true",
"attributes": {
"mitre": "Privilege Escalation",
"mitreCode": "TA0004",
"m$K8sThreatMatrix": "Privilege Escalation::privileged container"
},
"ruleDependencies": [
],
"remediation": "avoid defining pods as privilleged",
"match": [
{
"resources": [
"Deployment","ReplicaSet","DaemonSet","StatefulSet","Job","Pod","CronJob"
],
"apiVersions": [
"v1"
],
"apiGroups": [
"*"
]
}
],
"ruleLanguage": "Rego",
"rule": "\npackage armo_builtins\n\nimport data.kubernetes.api.client as client\nimport data.designators as scope\nimport data.cautils as cautils\n\n\n# Deny mutating action unless user is in group owning the resource\n\n\n#privileged pods\ndeny[msga] {\n\n \n\tpod := input[_]\n\tcontainers := pod.spec.containers[_]\n\tcontainers.securityContext.privileged == true\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following pods are defined as privileged: %v\", [pod]),\n\t\t\"alert\": true,\n\t\t\"prevent\": false,\n\t\t\"alertScore\": 3,\n\t\t\"alertObject\": pod,\n\t\n\t}\n}\n\n\n#handles majority of workload resources\ndeny[msga] {\n\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainers := wl.spec.template.spec.containers[_]\n\tcontainers.securityContext.privileged == true\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following workloads are defined as privileged: %v\", [wl]),\n\t\t\"alert\": true,\n\t\t\"prevent\": false,\n\t\t\"alertScore\": 3,\n\t\t\"alertObject\": wl,\n\t\n\t}\n}\n\n\n\n#handles cronjob\ndeny[msga] {\n\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainers := wl.spec.jobTemplate.spec.template.spec.containers[_]\n\tcontainers.securityContext.privileged == true\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined as privileged: %v\", [wl]),\n\t\t\"alert\": true,\n\t\t\"prevent\": false,\n\t\t\"alertScore\": 3,\n\t\t\"alertObject\": wl,\n\t\n\t}\n}\n\n"
}

View File

@@ -1,31 +0,0 @@
{
"guid": "3b0467c9-488d-c244-99d0-90fbf600aaff",
"name": "[Builtin] alert-rw-hostpath",
"creationTime": "2019-09-04T12:04:58.461455",
"description": "determines if any workload contains a hostPath volume with rw permissions",
"attributes": {
"m$K8sThreatMatrix": "Persistance::Writable hostPath mount"
},
"ruleDependencies": [
{
"packageName":"cautils"
}
],
"remediation": "consider if hostPath is really necessary- sensitive data like hostPath credentials might endanger cluster, if so consider encrypting the data",
"match": [
{
"resources": [
"Deployment","ReplicaSet","DaemonSet","StatefulSet","Job","Pod"
],
"apiVersions": [
"v1"
],
"apiGroups": [
"*"
]
}
],
"ruleLanguage": "Rego",
"rule": "\"\\npackage armo_builtins\\nimport data.kubernetes.api.client as client\\nimport data.cautils as cautils\\n\\n# input: pod\\n# apiversion: v1\\n# does: \\n#\\treturns hostPath volumes\\n#\\n#\\ndeny[msga] {\\n pod := input[_]\\n pod.kind == \\\"Pod\\\"\\n volumes := pod.spec.volumes\\n volume := volumes[_]\\n # crsrcs.body.spec.containers[_].volumeMounts[_].name = volume.name\\n volume.hostPath\\n podname := cautils.getPodName(pod.metadata)\\n obj := {\\\"volume\\\":volume,\\\"podname\\\": podname}\\n\\n\\tmsga := {\\n\\t\\t\\\"alertMessage\\\": sprintf(\\\"pod: %v has {%v,%v} ashostPath volume \\n\\n\\n\\\", [podname, volume]),\\n\\t\\t\\\"alert\\\": true,\\n\\t\\t\\\"prevent\\\": false,\\n\\t\\t\\\"alertScore\\\": 7,\\n\\t\\t\\\"alertObject\\\": [obj],\\n\\t\\n\\t}\\n}\\n\\nisRWMount(mount) {\\n not mount.readOnly\\n}\\nisRWMount(mount) {\\n mount.readOnly == false\\n}\\n\\n\\n#handles majority of workload resources\\ndeny[msga] {\\n\\n\\twl := input[_]\\n\\tspec_template_spec_patterns := {\\\"Deployment\\\",\\\"ReplicaSet\\\",\\\"DaemonSet\\\",\\\"StatefulSet\\\",\\\"Job\\\"}\\n\\tspec_template_spec_patterns[wl.kind]\\n volumes := wl.spec.template.spec.volumes\\n volume := volumes[_]\\n volume.hostPath\\n wlname := cautils.getPodName(wl.metadata)\\n obj := {\\\"volume\\\":volume,\\\"podname\\\": wlname}\\n\\n\\tmsga := {\\n\\t\\t\\\"alertMessage\\\": sprintf(\\\"%v: %v has {%v,%v} as hostPath volume\\n\\n\\n\\\", [wl.kind,wlname, volume]),\\n\\t\\t\\\"alert\\\": true,\\n\\t\\t\\\"prevent\\\": false,\\n\\t\\t\\\"alertScore\\\": 7,\\n\\t\\t\\\"alertObject\\\": [obj],\\n\\t\\n\\t}\\n}\\n\\n\\n\""
}

View File

@@ -1,119 +0,0 @@
package resources
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"github.com/armosec/kubescape/cautils/k8sinterface"
"github.com/golang/glog"
"github.com/open-policy-agent/opa/storage"
"github.com/open-policy-agent/opa/storage/inmem"
"github.com/open-policy-agent/opa/util"
"k8s.io/client-go/rest"
)
var (
RegoDependenciesPath = "/resources/rego/dependencies"
)
type RegoDependenciesData struct {
K8sConfig RegoK8sConfig `json:"k8sconfig"`
}
type RegoK8sConfig struct {
Token string `json:"token"`
IP string `json:"ip"`
Host string `json:"host"`
Port string `json:"port"`
CrtFile string `json:"crtfile"`
ClientCrtFile string `json:"clientcrtfile"`
ClientKeyFile string `json:"clientkeyfile"`
// ClientKeyFile string `json:"crtfile"`
}
func NewRegoDependenciesDataMock() *RegoDependenciesData {
return NewRegoDependenciesData(k8sinterface.GetK8sConfig())
}
func NewRegoDependenciesData(k8sConfig *rest.Config) *RegoDependenciesData {
regoDependenciesData := RegoDependenciesData{}
if k8sConfig != nil {
regoDependenciesData.K8sConfig = *NewRegoK8sConfig(k8sConfig)
}
return &regoDependenciesData
}
func NewRegoK8sConfig(k8sConfig *rest.Config) *RegoK8sConfig {
host := k8sConfig.Host
if host == "" {
ip := os.Getenv("KUBERNETES_SERVICE_HOST")
port := os.Getenv("KUBERNETES_SERVICE_PORT")
host = fmt.Sprintf("https://%s:%s", ip, port)
}
token := ""
if k8sConfig.BearerToken != "" {
token = fmt.Sprintf("Bearer %s", k8sConfig.BearerToken)
}
regoK8sConfig := RegoK8sConfig{
Token: token,
Host: host,
CrtFile: k8sConfig.CAFile,
ClientCrtFile: k8sConfig.CertFile,
ClientKeyFile: k8sConfig.KeyFile,
}
return &regoK8sConfig
}
func (data *RegoDependenciesData) TOStorage() (storage.Store, error) {
var jsonObj map[string]interface{}
bytesData, err := json.Marshal(*data)
if err != nil {
return nil, err
}
// glog.Infof("RegoDependenciesData: %s", bytesData)
if err := util.UnmarshalJSON(bytesData, &jsonObj); err != nil {
return nil, err
}
return inmem.NewFromObject(jsonObj), nil
}
// LoadRegoFiles loads the policies list from the *.rego files in the given directory
func LoadRegoFiles(dir string) map[string]string {
modules := make(map[string]string)
// Compile the module. The keys are used as identifiers in error messages.
filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
if err == nil && strings.HasSuffix(path, ".rego") && !info.IsDir() {
content, err := ioutil.ReadFile(path)
if err != nil {
glog.Errorf("LoadRegoFiles, Failed to load: %s: %v", path, err)
} else {
modules[strings.TrimSuffix(filepath.Base(path), ".rego")] = string(content) // strip only the ".rego" suffix, not individual characters
}
}
return nil
})
return modules
}
// LoadRegoModules loads the policies from variables
func LoadRegoModules() map[string]string {
modules := make(map[string]string)
modules["cautils"] = RegoCAUtils
modules["designators"] = RegoDesignators
modules["kubernetes.api.client"] = RegoKubernetesApiClient
return modules
}
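A sketch of how these helpers were typically wired into an OPA query: the dependency data becomes the store and each module becomes a rego.Module option. The query string and function name below are illustrative; in practice the store would come from RegoDependenciesData.TOStorage() and the modules from LoadRegoModules() or LoadRegoFiles(dir) above.

package resources

import (
	"context"

	"github.com/open-policy-agent/opa/rego"
	"github.com/open-policy-agent/opa/storage"
)

// buildPreparedQuery compiles all Rego modules once and attaches the
// dependency store, so the query can be evaluated repeatedly with different inputs.
func buildPreparedQuery(ctx context.Context, store storage.Store, modules map[string]string) (rego.PreparedEvalQuery, error) {
	options := []func(*rego.Rego){
		rego.Query("data.armo_builtins"),
		rego.Store(store),
	}
	for name, src := range modules {
		options = append(options, rego.Module(name+".rego", src))
	}
	return rego.New(options...).PrepareForEval(ctx)
}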

View File

@@ -1,17 +0,0 @@
package resources
import (
"os"
"path/filepath"
"testing"
)
func TestLoadRegoDependenciesFromDir(t *testing.T) {
dir, _ := os.Getwd()
t.Errorf("%s", filepath.Join(dir, "rego/dependencies"))
return
// modules := LoadRegoDependenciesFromDir("")
// if len(modules) == 0 {
// t.Errorf("modules len == 0")
// }
}

125
cautils/rbac.go Normal file
View File

@@ -0,0 +1,125 @@
package cautils
import (
"encoding/json"
"time"
"github.com/armosec/k8s-interface/workloadinterface"
"github.com/armosec/opa-utils/reporthandling"
"github.com/armosec/rbac-utils/rbacscanner"
"github.com/armosec/rbac-utils/rbacutils"
uuid "github.com/satori/go.uuid"
)
type RBACObjects struct {
scanner *rbacscanner.RbacScannerFromK8sAPI
}
func NewRBACObjects(scanner *rbacscanner.RbacScannerFromK8sAPI) *RBACObjects {
return &RBACObjects{scanner: scanner}
}
func (rbacObjects *RBACObjects) SetResourcesReport() (*reporthandling.PostureReport, error) {
return &reporthandling.PostureReport{
ReportID: uuid.NewV4().String(),
ReportGenerationTime: time.Now().UTC(),
CustomerGUID: rbacObjects.scanner.CustomerGUID,
ClusterName: rbacObjects.scanner.ClusterName,
}, nil
}
func (rbacObjects *RBACObjects) ListAllResources() (map[string]workloadinterface.IMetadata, error) {
resources, err := rbacObjects.scanner.ListResources()
if err != nil {
return nil, err
}
allresources, err := rbacObjects.rbacObjectsToResources(resources)
if err != nil {
return nil, err
}
return allresources, nil
}
func (rbacObjects *RBACObjects) rbacObjectsToResources(resources *rbacutils.RbacObjects) (map[string]workloadinterface.IMetadata, error) {
allresources := map[string]workloadinterface.IMetadata{}
/*
************************************************************************************************************************
This code is adding a non valid ID ->
(github.com/armosec/rbac-utils v0.0.11): "//SA2WLIDmap/SA2WLIDmap"
(github.com/armosec/rbac-utils v0.0.12): "armo.rbac.com/v0beta1//SAID2WLIDmap/SAID2WLIDmap"
Should be investigated
************************************************************************************************************************
*/
// wrap rbac aggregated objects in IMetadata and add to allresources
// TODO - DEPRECATE SA2WLIDmap
SA2WLIDmapIMeta, err := rbacutils.SA2WLIDmapIMetadataWrapper(resources.SA2WLIDmap)
if err != nil {
return nil, err
}
allresources[SA2WLIDmapIMeta.GetID()] = SA2WLIDmapIMeta
SAID2WLIDmapIMeta, err := rbacutils.SAID2WLIDmapIMetadataWrapper(resources.SAID2WLIDmap)
if err != nil {
return nil, err
}
allresources[SAID2WLIDmapIMeta.GetID()] = SAID2WLIDmapIMeta
// convert rbac k8s resources to IMetadata and add to allresources
for _, cr := range resources.ClusterRoles.Items {
crmap, err := convertToMap(cr)
if err != nil {
return nil, err
}
crmap["apiVersion"] = "rbac.authorization.k8s.io/v1" // TODO - is the the correct apiVersion?
crIMeta := workloadinterface.NewWorkloadObj(crmap)
crIMeta.SetKind("ClusterRole")
allresources[crIMeta.GetID()] = crIMeta
}
for _, cr := range resources.Roles.Items {
crmap, err := convertToMap(cr)
if err != nil {
return nil, err
}
crmap["apiVersion"] = "rbac.authorization.k8s.io/v1" // TODO - is the the correct apiVersion?
crIMeta := workloadinterface.NewWorkloadObj(crmap)
crIMeta.SetKind("Role")
allresources[crIMeta.GetID()] = crIMeta
}
for _, cr := range resources.ClusterRoleBindings.Items {
crmap, err := convertToMap(cr)
if err != nil {
return nil, err
}
crmap["apiVersion"] = "rbac.authorization.k8s.io/v1" // TODO - is the the correct apiVersion?
crIMeta := workloadinterface.NewWorkloadObj(crmap)
crIMeta.SetKind("ClusterRoleBinding")
allresources[crIMeta.GetID()] = crIMeta
}
for _, cr := range resources.RoleBindings.Items {
crmap, err := convertToMap(cr)
if err != nil {
return nil, err
}
crmap["apiVersion"] = "rbac.authorization.k8s.io/v1" // TODO - is the the correct apiVersion?
crIMeta := workloadinterface.NewWorkloadObj(crmap)
crIMeta.SetKind("RoleBinding")
allresources[crIMeta.GetID()] = crIMeta
}
return allresources, nil
}
func convertToMap(obj interface{}) (map[string]interface{}, error) {
var inInterface map[string]interface{}
inrec, err := json.Marshal(obj)
if err != nil {
return nil, err
}
err = json.Unmarshal(inrec, &inInterface)
if err != nil {
return nil, err
}
return inInterface, nil
}
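An illustrative consumer of ListAllResources (the function name is made up; the IDs follow the apiVersion/namespace/kind/name layout seen in the comment above):

package cautils

import "fmt"

// printRBACResourceIDs lists the IDs of all RBAC objects collected by the
// scanner; rbacObjects is assumed to have been built with NewRBACObjects.
func printRBACResourceIDs(rbacObjects *RBACObjects) error {
	resources, err := rbacObjects.ListAllResources()
	if err != nil {
		return err
	}
	for id := range resources {
		fmt.Println(id)
	}
	return nil
}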

View File

@@ -1,5 +0,0 @@
package cautils
const (
ComponentIdentifier = "Posture"
)

159
cautils/reportv2tov1.go Normal file
View File

@@ -0,0 +1,159 @@
package cautils
import (
"github.com/armosec/k8s-interface/workloadinterface"
"github.com/armosec/opa-utils/reporthandling"
helpersv1 "github.com/armosec/opa-utils/reporthandling/helpers/v1"
"github.com/armosec/opa-utils/reporthandling/results/v1/reportsummary"
"github.com/armosec/opa-utils/score"
)
func ReportV2ToV1(opaSessionObj *OPASessionObj) {
if len(opaSessionObj.PostureReport.FrameworkReports) > 0 {
return // report already converted
}
opaSessionObj.PostureReport.ClusterCloudProvider = opaSessionObj.Report.ClusterCloudProvider
frameworks := []reporthandling.FrameworkReport{}
if len(opaSessionObj.Report.SummaryDetails.Frameworks) > 0 {
for _, fwv2 := range opaSessionObj.Report.SummaryDetails.Frameworks {
fwv1 := reporthandling.FrameworkReport{}
fwv1.Name = fwv2.GetName()
fwv1.Score = fwv2.GetScore()
fwv1.ControlReports = append(fwv1.ControlReports, controlReportV2ToV1(opaSessionObj, fwv2.GetName(), fwv2.Controls)...)
frameworks = append(frameworks, fwv1)
}
} else {
fwv1 := reporthandling.FrameworkReport{}
fwv1.Name = ""
fwv1.Score = 0
fwv1.ControlReports = append(fwv1.ControlReports, controlReportV2ToV1(opaSessionObj, "", opaSessionObj.Report.SummaryDetails.Controls)...)
frameworks = append(frameworks, fwv1)
}
// // remove unused data
// opaSessionObj.Report = nil
// opaSessionObj.ResourcesResult = nil
// setup counters and score
for f := range frameworks {
// // set exceptions
// exceptions.SetFrameworkExceptions(frameworks, opap.Exceptions, cautils.ClusterName)
// set counters
reporthandling.SetUniqueResourcesCounter(&frameworks[f])
// set default score
reporthandling.SetDefaultScore(&frameworks[f])
}
// update score
scoreutil := score.NewScore(opaSessionObj.AllResources)
scoreutil.Calculate(frameworks)
opaSessionObj.PostureReport.FrameworkReports = frameworks
// opaSessionObj.Report.SummaryDetails.Score = 0
// for i := range frameworks {
// for j := range frameworks[i].ControlReports {
// // frameworks[i].ControlReports[j].Score
// for w := range opaSessionObj.Report.SummaryDetails.Frameworks {
// if opaSessionObj.Report.SummaryDetails.Frameworks[w].Name == frameworks[i].Name {
// opaSessionObj.Report.SummaryDetails.Frameworks[w].Score = frameworks[i].Score
// }
// if c, ok := opaSessionObj.Report.SummaryDetails.Frameworks[w].Controls[frameworks[i].ControlReports[j].ControlID]; ok {
// c.Score = frameworks[i].ControlReports[j].Score
// opaSessionObj.Report.SummaryDetails.Frameworks[w].Controls[frameworks[i].ControlReports[j].ControlID] = c
// }
// }
// if c, ok := opaSessionObj.Report.SummaryDetails.Controls[frameworks[i].ControlReports[j].ControlID]; ok {
// c.Score = frameworks[i].ControlReports[j].Score
// opaSessionObj.Report.SummaryDetails.Controls[frameworks[i].ControlReports[j].ControlID] = c
// }
// }
// opaSessionObj.Report.SummaryDetails.Score += opaSessionObj.PostureReport.FrameworkReports[i].Score
// }
// opaSessionObj.Report.SummaryDetails.Score /= float32(len(opaSessionObj.Report.SummaryDetails.Frameworks))
}
func controlReportV2ToV1(opaSessionObj *OPASessionObj, frameworkName string, controls map[string]reportsummary.ControlSummary) []reporthandling.ControlReport {
controlReports := []reporthandling.ControlReport{}
for controlID, crv2 := range controls {
crv1 := reporthandling.ControlReport{}
crv1.ControlID = controlID
crv1.BaseScore = crv2.ScoreFactor
crv1.Name = crv2.GetName()
crv1.Control_ID = controlID
// crv1.Attributes = crv2.
crv1.Score = crv2.GetScore()
// TODO - add fields
crv1.Description = crv2.Description
crv1.Remediation = crv2.Remediation
rulesv1 := map[string]reporthandling.RuleReport{}
for _, resourceID := range crv2.ListResourcesIDs().All() {
if result, ok := opaSessionObj.ResourcesResult[resourceID]; ok {
for _, rulev2 := range result.ListRulesOfControl(crv2.GetID(), "") {
if _, ok := rulesv1[rulev2.GetName()]; !ok {
rulesv1[rulev2.GetName()] = reporthandling.RuleReport{
Name: rulev2.GetName(),
RuleStatus: reporthandling.RuleStatus{
Status: "success",
},
}
}
rulev1 := rulesv1[rulev2.GetName()]
status := rulev2.GetStatus(&helpersv1.Filters{FrameworkNames: []string{frameworkName}})
if status.IsFailed() || status.IsExcluded() {
// rule response
ruleResponse := reporthandling.RuleResponse{}
ruleResponse.Rulename = rulev2.GetName()
for i := range rulev2.Paths {
if rulev2.Paths[i].FailedPath != "" {
ruleResponse.FailedPaths = append(ruleResponse.FailedPaths, rulev2.Paths[i].FailedPath)
}
if rulev2.Paths[i].FixPath.Path != "" {
ruleResponse.FixPaths = append(ruleResponse.FixPaths, rulev2.Paths[i].FixPath)
}
}
ruleResponse.RuleStatus = string(status.Status())
if len(rulev2.Exception) > 0 {
ruleResponse.Exception = &rulev2.Exception[0]
}
if fullResource, ok := opaSessionObj.AllResources[resourceID]; ok {
tmp := fullResource.GetObject()
workloadinterface.RemoveFromMap(tmp, "spec")
ruleResponse.AlertObject.K8SApiObjects = append(ruleResponse.AlertObject.K8SApiObjects, tmp)
}
rulev1.RuleResponses = append(rulev1.RuleResponses, ruleResponse)
}
rulev1.ListInputKinds = append(rulev1.ListInputKinds, resourceID)
rulesv1[rulev2.GetName()] = rulev1
}
}
}
if len(rulesv1) > 0 {
for i := range rulesv1 {
crv1.RuleReports = append(crv1.RuleReports, rulesv1[i])
}
}
if len(crv1.RuleReports) == 0 {
crv1.RuleReports = []reporthandling.RuleReport{}
}
controlReports = append(controlReports, crv1)
}
return controlReports
}

View File

@@ -1,56 +1,141 @@
package cautils
import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
"strings"
"github.com/armosec/kubescape/cautils/getter"
"github.com/armosec/kubescape/cautils/opapolicy"
"github.com/armosec/opa-utils/reporthandling"
)
const (
ScanCluster string = "cluster"
ScanLocalFiles string = "yaml"
localControlInputsFilename string = "controls-inputs.json"
localExceptionsFilename string = "exceptions.json"
)
type BoolPtrFlag struct {
valPtr *bool
}
func (bpf *BoolPtrFlag) Type() string {
return "bool"
}
func (bpf *BoolPtrFlag) String() string {
if bpf.valPtr != nil {
return fmt.Sprintf("%v", *bpf.valPtr)
}
return ""
}
func (bpf *BoolPtrFlag) Get() *bool {
return bpf.valPtr
}
func (bpf *BoolPtrFlag) SetBool(val bool) {
bpf.valPtr = &val
}
func (bpf *BoolPtrFlag) Set(val string) error {
switch val {
case "true":
bpf.SetBool(true)
case "false":
bpf.SetBool(false)
}
return nil
}
type ScanInfo struct {
PolicyGetter getter.IPolicyGetter
PolicyIdentifier opapolicy.PolicyIdentifier
UseFrom string
UseDefault bool
Format string
Output string
ExcludedNamespaces string
InputPatterns []string
Silent bool
FailThreshold uint16
Getters
PolicyIdentifier []reporthandling.PolicyIdentifier
UseExceptions string // Load file with exceptions configuration
ControlsInputs string // Load file with inputs for controls
UseFrom []string // Load framework from local file (instead of download). Use when running offline
UseDefault bool // Load framework from cached file (instead of download). Use when running offline
UseArtifactsFrom string // Load artifacts from local path. Use when running offline
VerboseMode bool // Display all of the input resources and not only failed resources
Format string // Format results (table, json, junit ...)
Output string // Store results in an output file, Output file name
ExcludedNamespaces string // Namespaces to exclude from the scan
IncludeNamespaces string // DEPRECATED?
InputPatterns []string // Yaml files input patterns
Silent bool // Silent mode - Do not print progress logs
FailThreshold uint16 // Failure score threshold
Submit bool // Submit results to Armo BE
HostSensor BoolPtrFlag // Deploy ARMO K8s host sensor to collect data from certain controls
Local bool // Do not submit results
Account string // account ID
Logger string // logger level
KubeContext string // context name
FrameworkScan bool // false if scanning control
ScanAll bool // true if scan all frameworks
}
type Getters struct {
ExceptionsGetter getter.IExceptionsGetter
ControlsInputsGetter getter.IControlsInputsGetter
PolicyGetter getter.IPolicyGetter
}
func (scanInfo *ScanInfo) Init() {
// scanInfo.setSilentMode()
scanInfo.setUseFrom()
scanInfo.setOutputFile()
scanInfo.setGetter()
scanInfo.setUseArtifactsFrom()
}
func (scanInfo *ScanInfo) setUseFrom() {
if scanInfo.UseFrom != "" {
func (scanInfo *ScanInfo) setUseArtifactsFrom() {
if scanInfo.UseArtifactsFrom == "" {
return
}
if scanInfo.UseDefault {
scanInfo.UseFrom = getter.GetDefaultPath(scanInfo.PolicyIdentifier.Name)
// UseArtifactsFrom must be a path without a filename
dir, file := filepath.Split(scanInfo.UseArtifactsFrom)
if dir == "" {
scanInfo.UseArtifactsFrom = file
} else if strings.Contains(file, ".json") {
scanInfo.UseArtifactsFrom = dir
}
// set frameworks files
files, err := ioutil.ReadDir(scanInfo.UseArtifactsFrom)
if err != nil {
log.Fatal(err)
}
framework := &reporthandling.Framework{}
for _, f := range files {
filePath := filepath.Join(scanInfo.UseArtifactsFrom, f.Name())
file, err := os.ReadFile(filePath)
if err == nil {
if err := json.Unmarshal(file, framework); err == nil {
scanInfo.UseFrom = append(scanInfo.UseFrom, filepath.Join(scanInfo.UseArtifactsFrom, f.Name()))
}
}
}
// set config-inputs file
scanInfo.ControlsInputs = filepath.Join(scanInfo.UseArtifactsFrom, localControlInputsFilename)
// set exceptions
scanInfo.UseExceptions = filepath.Join(scanInfo.UseArtifactsFrom, localExceptionsFilename)
}
func (scanInfo *ScanInfo) setGetter() {
if scanInfo.UseFrom != "" {
// load from file
scanInfo.PolicyGetter = getter.NewLoadPolicy(scanInfo.UseFrom)
func (scanInfo *ScanInfo) setUseExceptions() {
if scanInfo.UseExceptions != "" {
// load exceptions from file
scanInfo.ExceptionsGetter = getter.NewLoadPolicy([]string{scanInfo.UseExceptions})
} else {
scanInfo.PolicyGetter = getter.NewArmoAPI()
scanInfo.ExceptionsGetter = getter.GetArmoAPIConnector()
}
}
func (scanInfo *ScanInfo) setSilentMode() {
if scanInfo.Format == "json" || scanInfo.Format == "junit" {
scanInfo.Silent = true
}
if scanInfo.Output != "" {
scanInfo.Silent = true
func (scanInfo *ScanInfo) setUseFrom() {
if scanInfo.UseDefault {
for _, policy := range scanInfo.PolicyIdentifier {
scanInfo.UseFrom = append(scanInfo.UseFrom, getter.GetDefaultPath(policy.Name+".json"))
}
}
}
@@ -59,17 +144,40 @@ func (scanInfo *ScanInfo) setOutputFile() {
return
}
if scanInfo.Format == "json" {
if filepath.Ext(scanInfo.Output) != "json" {
if filepath.Ext(scanInfo.Output) != ".json" {
scanInfo.Output += ".json"
}
}
if scanInfo.Format == "junit" {
if filepath.Ext(scanInfo.Output) != "xml" {
if filepath.Ext(scanInfo.Output) != ".xml" {
scanInfo.Output += ".xml"
}
}
}
func (scanInfo *ScanInfo) ScanRunningCluster() bool {
return len(scanInfo.InputPatterns) == 0
func (scanInfo *ScanInfo) GetScanningEnvironment() string {
if len(scanInfo.InputPatterns) != 0 {
return ScanLocalFiles
}
return ScanCluster
}
func (scanInfo *ScanInfo) SetPolicyIdentifiers(policies []string, kind reporthandling.NotificationPolicyKind) {
for _, policy := range policies {
if !scanInfo.contains(policy) {
newPolicy := reporthandling.PolicyIdentifier{}
newPolicy.Kind = kind // reporthandling.KindFramework
newPolicy.Name = policy
scanInfo.PolicyIdentifier = append(scanInfo.PolicyIdentifier, newPolicy)
}
}
}
func (scanInfo *ScanInfo) contains(policyName string) bool {
for _, policy := range scanInfo.PolicyIdentifier {
if policy.Name == policyName {
return true
}
}
return false
}
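A minimal sketch of how a caller might populate a ScanInfo for a framework scan; the policy names and field values here are only examples:

package cautils

import "github.com/armosec/opa-utils/reporthandling"

// exampleFrameworkScanInfo builds a ScanInfo that scans the running cluster
// against the "nsa" and "mitre" frameworks and writes JSON results.
func exampleFrameworkScanInfo() *ScanInfo {
	scanInfo := &ScanInfo{
		Format:        "json",
		Output:        "results", // setOutputFile() appends ".json" when missing
		FailThreshold: 50,
		FrameworkScan: true,
		ScanAll:       false,
	}
	scanInfo.SetPolicyIdentifiers([]string{"nsa", "mitre"}, reporthandling.KindFramework)
	scanInfo.HostSensor.SetBool(false) // an explicit "false" is different from "not set"

	// With no InputPatterns the scan targets the running cluster.
	_ = scanInfo.GetScanningEnvironment() // returns ScanCluster ("cluster")
	return scanInfo
}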

View File

@@ -13,7 +13,13 @@ func TestConvertLabelsToString(t *testing.T) {
spilltedA := strings.Split(rsrt, ";")
spilltedB := strings.Split(str, ";")
for i := range spilltedA {
if spilltedA[i] != spilltedB[i] {
exists := false
for j := range spilltedB {
if spilltedB[j] == spilltedA[i] {
exists = true
}
}
if !exists {
t.Errorf("%s != %s", spilltedA[i], spilltedB[i])
}
}

138
cautils/versioncheck.go Normal file
View File

@@ -0,0 +1,138 @@
package cautils
import (
"encoding/json"
"fmt"
"net/http"
"os"
"github.com/armosec/kubescape/cautils/getter"
"github.com/armosec/kubescape/cautils/logger"
"github.com/armosec/kubescape/cautils/logger/helpers"
pkgutils "github.com/armosec/utils-go/utils"
)
const SKIP_VERSION_CHECK = "KUBESCAPE_SKIP_UPDATE_CHECK"
var BuildNumber string
const UnknownBuildNumber = "unknown"
type IVersionCheckHandler interface {
CheckLatestVersion(*VersionCheckRequest) error
}
func NewIVersionCheckHandler() IVersionCheckHandler {
if BuildNumber == "" {
logger.L().Warning("unknown build number, this might affect your scan results. Please make sure you are updated to latest version")
}
if v, ok := os.LookupEnv(SKIP_VERSION_CHECK); ok && pkgutils.StringToBool(v) {
return NewVersionCheckHandlerMock()
}
return NewVersionCheckHandler()
}
type VersionCheckHandlerMock struct {
}
func NewVersionCheckHandlerMock() *VersionCheckHandlerMock {
return &VersionCheckHandlerMock{}
}
type VersionCheckHandler struct {
versionURL string
}
type VersionCheckRequest struct {
Client string `json:"client"` // kubescape
ClientVersion string `json:"clientVersion"` // kubescape version
Framework string `json:"framework"` // framework name
FrameworkVersion string `json:"frameworkVersion"` // framework version
ScanningTarget string `json:"target"` // scanning target- cluster/yaml
}
type VersionCheckResponse struct {
Client string `json:"client"` // kubescape
ClientUpdate string `json:"clientUpdate"` // kubescape latest version
Framework string `json:"framework"` // framework name
FrameworkUpdate string `json:"frameworkUpdate"` // framework latest version
Message string `json:"message"` // alert message
}
func NewVersionCheckHandler() *VersionCheckHandler {
return &VersionCheckHandler{
versionURL: "https://us-central1-elated-pottery-310110.cloudfunctions.net/ksgf1v1",
}
}
func NewVersionCheckRequest(buildNumber, frameworkName, frameworkVersion, scanningTarget string) *VersionCheckRequest {
if buildNumber == "" {
buildNumber = UnknownBuildNumber
}
if scanningTarget == "" {
scanningTarget = "unknown"
}
return &VersionCheckRequest{
Client: "kubescape",
ClientVersion: buildNumber,
Framework: frameworkName,
FrameworkVersion: frameworkVersion,
ScanningTarget: scanningTarget,
}
}
func (v *VersionCheckHandlerMock) CheckLatestVersion(versionData *VersionCheckRequest) error {
logger.L().Info("Skipping version check")
return nil
}
func (v *VersionCheckHandler) CheckLatestVersion(versionData *VersionCheckRequest) error {
defer func() {
if err := recover(); err != nil {
logger.L().Warning("failed to get latest version", helpers.Interface("error", err))
}
}()
latestVersion, err := v.getLatestVersion(versionData)
if err != nil || latestVersion == nil {
return fmt.Errorf("failed to get latest version")
}
if latestVersion.ClientUpdate != "" {
if BuildNumber != "" && BuildNumber < latestVersion.ClientUpdate {
logger.L().Warning(warningMessage(latestVersion.Client, latestVersion.ClientUpdate))
}
}
// TODO - Enable after supporting framework version
// if latestVersion.FrameworkUpdate != "" {
// fmt.Println(warningMessage(latestVersion.Framework, latestVersion.FrameworkUpdate))
// }
if latestVersion.Message != "" {
logger.L().Info(latestVersion.Message)
}
return nil
}
func (v *VersionCheckHandler) getLatestVersion(versionData *VersionCheckRequest) (*VersionCheckResponse, error) {
reqBody, err := json.Marshal(*versionData)
if err != nil {
return nil, fmt.Errorf("in 'CheckLatestVersion' failed to json.Marshal, reason: %s", err.Error())
}
resp, err := getter.HttpPost(http.DefaultClient, v.versionURL, map[string]string{"Content-Type": "application/json"}, reqBody)
if err != nil {
return nil, err
}
vResp := &VersionCheckResponse{}
if err = getter.JSONDecoder(resp).Decode(vResp); err != nil {
return nil, err
}
return vResp, nil
}
func warningMessage(kind, release string) string {
return fmt.Sprintf("'%s' is not updated to the latest release: '%s'", kind, release)
}
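A minimal usage sketch for the version check above; the framework name and scanning target are example values:

package cautils

// checkForUpdates asks the update service whether a newer kubescape or
// framework release exists and logs a warning if so. Setting
// KUBESCAPE_SKIP_UPDATE_CHECK makes NewIVersionCheckHandler return the mock,
// which skips the HTTP call.
func checkForUpdates() error {
	handler := NewIVersionCheckHandler()
	request := NewVersionCheckRequest(BuildNumber, "nsa", "", ScanCluster)
	return handler.CheckLatestVersion(request)
}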

7
clihandler/clidelete.go Normal file
View File

@@ -0,0 +1,7 @@
package clihandler
func CliDelete() error {
tenant := getTenantConfig("", "", getKubernetesApi()) // change k8sinterface
return tenant.DeleteCachedConfig()
}

178
clihandler/clidownload.go Normal file
View File

@@ -0,0 +1,178 @@
package clihandler
import (
"fmt"
"path/filepath"
"strings"
"github.com/armosec/armoapi-go/armotypes"
"github.com/armosec/kubescape/cautils"
"github.com/armosec/kubescape/cautils/getter"
"github.com/armosec/kubescape/cautils/logger"
"github.com/armosec/kubescape/cautils/logger/helpers"
)
var downloadFunc = map[string]func(*cautils.DownloadInfo) error{
"controls-inputs": downloadConfigInputs,
"exceptions": downloadExceptions,
"control": downloadControl,
"framework": downloadFramework,
"artifacts": downloadArtifacts,
}
func DownloadSupportCommands() []string {
commands := []string{}
for k := range downloadFunc {
commands = append(commands, k)
}
return commands
}
func CliDownload(downloadInfo *cautils.DownloadInfo) error {
setPathandFilename(downloadInfo)
if err := downloadArtifact(downloadInfo, downloadFunc); err != nil {
return err
}
return nil
}
func downloadArtifact(downloadInfo *cautils.DownloadInfo, downloadArtifactFunc map[string]func(*cautils.DownloadInfo) error) error {
if f, ok := downloadArtifactFunc[downloadInfo.Target]; ok {
if err := f(downloadInfo); err != nil {
return err
}
return nil
}
return fmt.Errorf("unknown command to download")
}
func setPathandFilename(downloadInfo *cautils.DownloadInfo) {
if downloadInfo.Path == "" {
downloadInfo.Path = getter.GetDefaultPath("")
} else {
dir, file := filepath.Split(downloadInfo.Path)
if dir == "" {
downloadInfo.Path = file
} else if strings.Contains(file, ".json") {
downloadInfo.Path = dir
downloadInfo.FileName = file
}
}
}
func downloadArtifacts(downloadInfo *cautils.DownloadInfo) error {
downloadInfo.FileName = ""
var artifacts = map[string]func(*cautils.DownloadInfo) error{
"controls-inputs": downloadConfigInputs,
"exceptions": downloadExceptions,
"framework": downloadFramework,
}
for artifact := range artifacts {
if err := downloadArtifact(&cautils.DownloadInfo{Target: artifact, Path: downloadInfo.Path, FileName: fmt.Sprintf("%s.json", artifact)}, artifacts); err != nil {
logger.L().Error("error downloading", helpers.String("artifact", artifact), helpers.Error(err))
}
}
return nil
}
func downloadConfigInputs(downloadInfo *cautils.DownloadInfo) error {
tenant := getTenantConfig(downloadInfo.Account, "", getKubernetesApi())
controlsInputsGetter := getConfigInputsGetter(downloadInfo.Name, tenant.GetAccountID(), nil)
controlInputs, err := controlsInputsGetter.GetControlsInputs(tenant.GetClusterName())
if err != nil {
return err
}
if downloadInfo.FileName == "" {
downloadInfo.FileName = fmt.Sprintf("%s.json", downloadInfo.Target)
}
// save in file
err = getter.SaveInFile(controlInputs, filepath.Join(downloadInfo.Path, downloadInfo.FileName))
if err != nil {
return err
}
logger.L().Success("Downloaded", helpers.String("artifact", downloadInfo.Target), helpers.String("path", filepath.Join(downloadInfo.Path, downloadInfo.FileName)))
return nil
}
func downloadExceptions(downloadInfo *cautils.DownloadInfo) error {
var err error
tenant := getTenantConfig(downloadInfo.Account, "", getKubernetesApi())
exceptionsGetter := getExceptionsGetter("")
exceptions := []armotypes.PostureExceptionPolicy{}
if tenant.GetAccountID() != "" {
exceptions, err = exceptionsGetter.GetExceptions(tenant.GetClusterName())
if err != nil {
return err
}
}
if downloadInfo.FileName == "" {
downloadInfo.FileName = fmt.Sprintf("%s.json", downloadInfo.Target)
}
// save in file
err = getter.SaveInFile(exceptions, filepath.Join(downloadInfo.Path, downloadInfo.FileName))
if err != nil {
return err
}
logger.L().Success("Downloaded", helpers.String("artifact", downloadInfo.Target), helpers.String("path", filepath.Join(downloadInfo.Path, downloadInfo.FileName)))
return nil
}
func downloadFramework(downloadInfo *cautils.DownloadInfo) error {
tenant := getTenantConfig(downloadInfo.Account, "", getKubernetesApi())
g := getPolicyGetter(nil, tenant.GetAccountID(), true, nil)
if downloadInfo.Name == "" {
// if framework name not specified - download all frameworks
frameworks, err := g.GetFrameworks()
if err != nil {
return err
}
for _, fw := range frameworks {
err = getter.SaveInFile(fw, filepath.Join(downloadInfo.Path, (strings.ToLower(fw.Name)+".json")))
if err != nil {
return err
}
logger.L().Success("Downloaded", helpers.String("artifact", downloadInfo.Target), helpers.String("name", fw.Name), helpers.String("path", filepath.Join(downloadInfo.Path, downloadInfo.FileName)))
}
// return fmt.Errorf("missing framework name")
} else {
if downloadInfo.FileName == "" {
downloadInfo.FileName = fmt.Sprintf("%s.json", downloadInfo.Name)
}
framework, err := g.GetFramework(downloadInfo.Name)
if err != nil {
return err
}
err = getter.SaveInFile(framework, filepath.Join(downloadInfo.Path, downloadInfo.FileName))
if err != nil {
return err
}
logger.L().Success("Downloaded", helpers.String("artifact", downloadInfo.Target), helpers.String("name", framework.Name), helpers.String("path", filepath.Join(downloadInfo.Path, downloadInfo.FileName)))
}
return nil
}
func downloadControl(downloadInfo *cautils.DownloadInfo) error {
tenant := getTenantConfig(downloadInfo.Account, "", getKubernetesApi())
g := getPolicyGetter(nil, tenant.GetAccountID(), false, nil)
if downloadInfo.Name == "" {
// TODO - support
return fmt.Errorf("missing control name")
}
if downloadInfo.FileName == "" {
downloadInfo.FileName = fmt.Sprintf("%s.json", downloadInfo.Name)
}
controls, err := g.GetControl(downloadInfo.Name)
if err != nil {
return err
}
err = getter.SaveInFile(controls, filepath.Join(downloadInfo.Path, downloadInfo.FileName))
if err != nil {
return err
}
logger.L().Success("Downloaded", helpers.String("artifact", downloadInfo.Target), helpers.String("name", downloadInfo.Name), helpers.String("path", filepath.Join(downloadInfo.Path, downloadInfo.FileName)))
return nil
}
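An illustrative call to CliDownload; the target path and framework name below are example values, and this is roughly what the CLI download command ends up calling:

package clihandler

import "github.com/armosec/kubescape/cautils"

// downloadSingleFramework fetches one framework definition to a local directory.
func downloadSingleFramework() error {
	return CliDownload(&cautils.DownloadInfo{
		Target: "framework",      // one of DownloadSupportCommands()
		Name:   "nsa",            // empty Name downloads all frameworks
		Path:   "/tmp/kubescape", // default is getter.GetDefaultPath("")
	})
}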

View File

@@ -0,0 +1,19 @@
package cliinterfaces
import (
"github.com/armosec/k8s-interface/workloadinterface"
"github.com/armosec/kubescape/cautils"
"github.com/armosec/kubescape/resultshandling/reporter"
"github.com/armosec/opa-utils/reporthandling"
)
type ISubmitObjects interface {
SetResourcesReport() (*reporthandling.PostureReport, error)
ListAllResources() (map[string]workloadinterface.IMetadata, error)
}
type SubmitInterfaces struct {
SubmitObjects ISubmitObjects
Reporter reporter.IReport
ClusterConfig cautils.ITenantConfig
}
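For reference, a stub that satisfies ISubmitObjects, handy when wiring a SubmitInterfaces value in tests; the type name is made up:

package cliinterfaces

import (
	"github.com/armosec/k8s-interface/workloadinterface"
	"github.com/armosec/opa-utils/reporthandling"
)

// emptySubmitObjects implements ISubmitObjects but reports nothing.
type emptySubmitObjects struct{}

func (e *emptySubmitObjects) SetResourcesReport() (*reporthandling.PostureReport, error) {
	return &reporthandling.PostureReport{}, nil
}

func (e *emptySubmitObjects) ListAllResources() (map[string]workloadinterface.IMetadata, error) {
	return map[string]workloadinterface.IMetadata{}, nil
}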

Some files were not shown because too many files have changed in this diff