Compare commits


225 Commits

Author SHA1 Message Date
David Wertenteil
e09bb2e310 revert dockerfile
Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-08-17 14:36:58 +03:00
Daniel Grunberger
f7b3cdcf35 Improve logs (#1349)
* use stop-success

Signed-off-by: Daniel Grunberger <danielgrunberger@armosec.io>

* improve logger

Signed-off-by: Daniel Grunberger <danielgrunberger@armosec.io>

* RBAC

Signed-off-by: Daniel Grunberger <danielgrunberger@armosec.io>

---------

Signed-off-by: Daniel Grunberger <danielgrunberger@armosec.io>
Co-authored-by: Daniel Grunberger <danielgrunberger@armosec.io>
2023-08-17 14:18:40 +03:00
Daniel Grunberger
d6a47a82d2 improve cli output (#1347)
Signed-off-by: Daniel Grunberger <danielgrunberger@armosec.io>
Co-authored-by: Daniel Grunberger <danielgrunberger@armosec.io>
2023-08-16 13:01:32 +03:00
Daniel Grunberger
936cb26c06 fix panic and improve logs (#1344)
Signed-off-by: Daniel Grunberger <danielgrunberger@armosec.io>
Co-authored-by: Daniel Grunberger <danielgrunberger@armosec.io>
2023-08-16 13:00:52 +03:00
DRAGON2002
9265a5d6d0 fix: icons formatting (#1343)
Signed-off-by: DRAGON <anantvijay3@gmail.com>
2023-08-16 12:58:55 +03:00
Daniel Grunberger
e6f5c7e0dd bump k8s-interface version (#1345)
* bump version

Signed-off-by: Daniel Grunberger <danielgrunberger@armosec.io>

* bump httphandler

Signed-off-by: Daniel Grunberger <danielgrunberger@armosec.io>

---------

Signed-off-by: Daniel Grunberger <danielgrunberger@armosec.io>
Co-authored-by: Daniel Grunberger <danielgrunberger@armosec.io>
2023-08-15 10:34:45 +03:00
rcohencyberarmor
4e48148d40 Support unified configuration (#1304)
* support scanning scope

Signed-off-by: rcohencyberarmor <rcohen@armosec.io>

* update go mod

Signed-off-by: rcohencyberarmor <rcohen@armosec.io>

* update white list

Signed-off-by: rcohencyberarmor <rcohen@armosec.io>

* update go mod

Signed-off-by: rcohencyberarmor <rcohen@armosec.io>

* scope empty return control should be tested

Signed-off-by: rcohencyberarmor <rcohen@armosec.io>

* update rego scope for system test

Signed-off-by: rcohencyberarmor <rcohen@armosec.io>

* update test + mock

Signed-off-by: rcohencyberarmor <rcohen@armosec.io>

* add comment

Signed-off-by: rcohencyberarmor <rcohen@armosec.io>

* update rego library

Signed-off-by: rcohencyberarmor <rcohen@armosec.io>

* update k8s-interface

Signed-off-by: rcohencyberarmor <rcohen@armosec.io>

* update opa-utils - lots of file changes in this commit since armoapi-go was bumped in opa-utils

Signed-off-by: rcohencyberarmor <rcohen@armosec.io>

* move to temp k8s-interface - until the PR in the k8s-interface repo is approved

Signed-off-by: rcohencyberarmor <rcohen@armosec.io>

* update k8s-interface with released tag

Signed-off-by: rcohencyberarmor <rcohen@armosec.io>

* update go mod in httphandler

Signed-off-by: rcohencyberarmor <rcohen@armosec.io>

* support unified configuration

Signed-off-by: rcohencyberarmor <rcohen@armosec.io>

* unit test adjustment

Signed-off-by: rcohencyberarmor <rcohen@armosec.io>

* config-unified

Signed-off-by: rcohencyberarmor <rcohen@armosec.io>

* CR corrections

Signed-off-by: rcohencyberarmor <rcohen@armosec.io>

* remove system test until it is merged

Signed-off-by: rcohencyberarmor <rcohen@armosec.io>

* add relevant system test

Signed-off-by: rcohencyberarmor <rcohen@armosec.io>

* remove delete test

Signed-off-by: rcohencyberarmor <rcohen@armosec.io>

* return config delete system test

Signed-off-by: rcohencyberarmor <rcohen@armosec.io>

---------

Signed-off-by: rcohencyberarmor <rcohen@armosec.io>
Co-authored-by: rcohencyberarmor <rcohen@armosec.io>
2023-08-15 10:34:23 +03:00
David Wertenteil
3648ef286d Merge pull request #1341 from XDRAGON2002/issue_1339
feat: migrate fatih/color to gchalk
2023-08-13 07:56:05 +03:00
DRAGON
d946662e57 feat: migrate fatih/color to gchalk
Signed-off-by: DRAGON <anantvijay3@gmail.com>
2023-08-11 04:31:39 +05:30
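
As context for this migration: fatih/color builds styled printers from attribute constants, while gchalk exposes direct color helpers and detects terminal color support on its own. A rough Go sketch of the before/after pattern (package and function names are illustrative, not Kubescape's actual call sites):

package pretty

import (
	"github.com/fatih/color"    // library being migrated away from
	"github.com/jwalton/gchalk" // replacement library
)

// beforeGreen is the fatih/color style the migration replaces.
func beforeGreen(s string) string { return color.New(color.FgGreen).Sprint(s) }

// afterGreen is the gchalk equivalent; gchalk picks the terminal's color
// support level automatically.
func afterGreen(s string) string { return gchalk.Green(s) }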
David Wertenteil
51b37d5cbf Update logs (#1340)
* update logger

Signed-off-by: David Wertenteil <dwertent@armosec.io>

* fixed logger

Signed-off-by: David Wertenteil <dwertent@armosec.io>

* bump go-logger version

Signed-off-by: David Wertenteil <dwertent@armosec.io>

---------

Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-08-10 19:15:14 +03:00
DRAGON2002
9afae713ba feat: add table heading colors (#1321)
Signed-off-by: DRAGON <anantvijay3@gmail.com>
2023-08-10 19:14:38 +03:00
Matthias Bertschy
1d64522607 use distroless base image (#1338)
* use distroless base image

Signed-off-by: Matthias Bertschy <matthias.bertschy@gmail.com>

* bump cosign to v2

Signed-off-by: Matthias Bertschy <matthias.bertschy@gmail.com>

---------

Signed-off-by: Matthias Bertschy <matthias.bertschy@gmail.com>
2023-08-10 15:46:07 +03:00
DRAGON2002
225a923006 feat: improve pretty logger (#1311)
* feat: improve pretty logger

Signed-off-by: DRAGON <anantvijay3@gmail.com>

* fixed logger

Signed-off-by: David Wertenteil <dwertent@armosec.io>

---------

Signed-off-by: DRAGON <anantvijay3@gmail.com>
Signed-off-by: Craig Box <craigb@armosec.io>
Signed-off-by: David Wertenteil <dwertent@armosec.io>
Co-authored-by: Craig Box <craigb@armosec.io>
Co-authored-by: David Wertenteil <dwertent@armosec.io>
2023-08-09 17:30:04 +03:00
DRAGON2002
6c1a3fb89b feat: add short table (#1292)
Signed-off-by: DRAGON <anantvijay3@gmail.com>
Signed-off-by: DRAGON2002 <81813720+XDRAGON2002@users.noreply.github.com>
2023-08-09 16:56:58 +03:00
DRAGON2002
df5f7db51d feat: change colors library (#1316)
Signed-off-by: DRAGON <anantvijay3@gmail.com>
2023-08-09 09:48:34 +03:00
DRAGON2002
35c593a624 chore: update docs build.ps1 (#1299)
* chore: update docs build.ps1

Signed-off-by: DRAGON <anantvijay3@gmail.com>

* Fix build.ps1 for CI

Signed-off-by: Songlin Jiang <songlin.jiang@csc.fi>

---------

Signed-off-by: DRAGON <anantvijay3@gmail.com>
Signed-off-by: Songlin Jiang <songlin.jiang@csc.fi>
Co-authored-by: Songlin Jiang <songlin.jiang@csc.fi>
2023-08-09 09:27:35 +03:00
DRAGON2002
869f0ea109 feat: add unicode table (#1285)
Signed-off-by: DRAGON <anantvijay3@gmail.com>
2023-08-09 09:26:37 +03:00
David Wertenteil
cf08daf7fb scan per namespace (#1337)
* scan per namespace

Signed-off-by: David Wertenteil <dwertent@armosec.io>

* disable unit test

Signed-off-by: David Wertenteil <dwertent@armosec.io>

* Adding build image wf

Signed-off-by: David Wertenteil <dwertent@armosec.io>

* removing unused channels

Signed-off-by: David Wertenteil <dwertent@armosec.io>

* adding scopes

Signed-off-by: David Wertenteil <dwertent@armosec.io>

* update

Signed-off-by: David Wertenteil <dwertent@armosec.io>

* fixed cluster size

Signed-off-by: David Wertenteil <dwertent@armosec.io>

* update rbac deps

Signed-off-by: David Wertenteil <dwertent@armosec.io>

* aggregate resources

Signed-off-by: David Wertenteil <dwertent@armosec.io>

* Delete build-image.yaml

Signed-off-by: David Wertenteil <dwertent@armosec.io>

* adding scan image logs

Signed-off-by: David Wertenteil <dwertent@armosec.io>

* update cmd message

Signed-off-by: David Wertenteil <dwertent@armosec.io>

* update logs

Signed-off-by: David Wertenteil <dwertent@armosec.io>

---------

Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-08-08 10:47:15 +03:00
Ben Hirschberg
266029eb23 Implementing container image name normalization built-in function for Rego (#1334)
* Implementing container image name normalization built-in function for Rego

Signed-off-by: Ben <ben@armosec.io>

* updating go.mod to include docker/distribution

Signed-off-by: Ben <ben@armosec.io>

* fix test

Signed-off-by: Ben <ben@armosec.io>

---------

Signed-off-by: Ben <ben@armosec.io>
2023-08-08 09:35:32 +03:00
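
For readers curious how a custom Rego built-in like this is wired up: OPA lets Go code register built-ins globally, and the bullets above mention docker/distribution for image name parsing. A minimal sketch, with the built-in name and exact semantics assumed rather than taken from the Kubescape source:

package main

import (
	"github.com/docker/distribution/reference"
	"github.com/open-policy-agent/opa/ast"
	"github.com/open-policy-agent/opa/rego"
	"github.com/open-policy-agent/opa/types"
)

func init() {
	rego.RegisterBuiltin1(
		&rego.Function{
			Name: "image.normalize", // hypothetical built-in name
			Decl: types.NewFunction(types.Args(types.S), types.S),
		},
		func(_ rego.BuiltinContext, a *ast.Term) (*ast.Term, error) {
			img, ok := a.Value.(ast.String)
			if !ok {
				return nil, nil // undefined for non-string input
			}
			// e.g. "nginx" -> "docker.io/library/nginx:latest"
			named, err := reference.ParseNormalizedNamed(string(img))
			if err != nil {
				return nil, err
			}
			return ast.StringTerm(reference.TagNameOnly(named).String()), nil
		},
	)
}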
rcohencyberarmor
4c9fec8ef4 Support scanning scope (#1293)
* support scanning scope

Signed-off-by: rcohencyberarmor <rcohen@armosec.io>

* update go mod

Signed-off-by: rcohencyberarmor <rcohen@armosec.io>

* update white list

Signed-off-by: rcohencyberarmor <rcohen@armosec.io>

* update go mod

Signed-off-by: rcohencyberarmor <rcohen@armosec.io>

* scope empty return control should be tested

Signed-off-by: rcohencyberarmor <rcohen@armosec.io>

* update rego scope for system test

Signed-off-by: rcohencyberarmor <rcohen@armosec.io>

* update test + mock

Signed-off-by: rcohencyberarmor <rcohen@armosec.io>

* add comment

Signed-off-by: rcohencyberarmor <rcohen@armosec.io>

* update rego library

Signed-off-by: rcohencyberarmor <rcohen@armosec.io>

* update k8s-interface

Signed-off-by: rcohencyberarmor <rcohen@armosec.io>

* update opa-utils - lots of file changes in this commit since armoapi-go was bumped in opa-utils

Signed-off-by: rcohencyberarmor <rcohen@armosec.io>

* move to temp k8s-interface - until the PR in the k8s-interface repo is approved

Signed-off-by: rcohencyberarmor <rcohen@armosec.io>

* update k8s-interface with released tag

Signed-off-by: rcohencyberarmor <rcohen@armosec.io>

* update go mod in httphandler

Signed-off-by: rcohencyberarmor <rcohen@armosec.io>

* PR review corrections

Signed-off-by: rcohencyberarmor <rcohen@armosec.io>

* change test name

Signed-off-by: rcohencyberarmor <rcohen@armosec.io>

* scanning scope support for framework

Signed-off-by: rcohencyberarmor <rcohen@armosec.io>

* test/mock adjustments after merge

Signed-off-by: rcohencyberarmor <rcohen@armosec.io>

* add more informative log to the user

Signed-off-by: rcohencyberarmor <rcohen@armosec.io>

* update go.mod and go.sum of the http handler

Signed-off-by: rcohencyberarmor <rcohen@armosec.io>

* remove framework if its scanning scope does not match the framework config scope

Signed-off-by: rcohencyberarmor <rcohen@armosec.io>

* add system tests to workflow

Signed-off-by: rcohencyberarmor <rcohen@armosec.io>

* add system test to github workflow

Signed-off-by: rcohencyberarmor <rcohen@armosec.io>

---------

Signed-off-by: rcohencyberarmor <rcohen@armosec.io>
Signed-off-by: David Wertenteil <dwertent@armosec.io>
Co-authored-by: rcohencyberarmor <rcohen@armosec.io>
Co-authored-by: David Wertenteil <dwertent@armosec.io>
2023-08-07 19:11:14 +03:00
David Wertenteil
6f07e63d3f Hotfix for version 2.3.8 (#1333)
* update wf

Signed-off-by: David Wertenteil <dwertent@armosec.io>

* fixed tag

Signed-off-by: David Wertenteil <dwertent@armosec.io>

* build arm64

Signed-off-by: David Wertenteil <dwertent@armosec.io>

* wip: revert release changes

Signed-off-by: David Wertenteil <dwertent@armosec.io>

* wip: adding build-image wf

Signed-off-by: David Wertenteil <dwertent@armosec.io>

* adding platforms to wf

Signed-off-by: David Wertenteil <dwertent@armosec.io>

---------

Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-08-06 12:23:49 +03:00
David Wertenteil
addd66bf72 Merge pull request #1327 from dwertent/hot-fix-submit-timestamp
Fix submit time
2023-08-04 19:24:27 +03:00
Amir Malka
e2f96200e0 Code refactor (follow up to PR #1300) (#1323)
* code refactor

Signed-off-by: Amir Malka <amirm@armosec.io>

* use scaninfo object in resource handler

Signed-off-by: Amir Malka <amirm@armosec.io>

---------

Signed-off-by: Amir Malka <amirm@armosec.io>
2023-08-03 17:50:33 +03:00
David Wertenteil
f799b63684 Merge pull request #1331 from kubescape/fix-httphandler-go-mod-anchore
fix(httphandler): pin breaking anchore dependency
2023-08-03 17:49:41 +03:00
Vlad Klokun
a088219954 fix(httphandler): pin breaking anchore dependency
Signed-off-by: Vlad Klokun <vklokun@protonmail.ch>
2023-08-03 17:36:27 +03:00
David Wertenteil
1a2e16b895 Update PR workflow (#1330)
* fixed wf call

Signed-off-by: David Wertenteil <dwertent@armosec.io>

* do not wait for pr checks

Signed-off-by: David Wertenteil <dwertent@armosec.io>

* fixed typo

Signed-off-by: David Wertenteil <dwertent@armosec.io>

---------

Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-08-03 17:25:22 +03:00
David Wertenteil
7444acae11 Merge pull request #1312 from XDRAGON2002/issue_1282
fix: negative compliance score
2023-08-03 14:32:47 +03:00
David Wertenteil
8294694e09 Merge pull request #1277 from XDRAGON2002/issue_1176
fix: kubescape list controls
2023-08-03 14:30:12 +03:00
David Wertenteil
12d7f18b79 Merge pull request #1329 from kubescape/codesee-wf
Update codesee-arch-diagram.yml
2023-08-03 14:05:34 +03:00
David Wertenteil
83279484bd Merge pull request #1328 from kubescape/remove-label-condition
Remove label condition in PR scanner workflow
2023-08-03 14:05:08 +03:00
David Wertenteil
ba134ebc32 Update codesee-arch-diagram.yml
Run codesee only on `.go` files

Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-08-03 13:52:27 +03:00
David Wertenteil
b44f0a76c9 Update 00-pr-scanner.yaml
Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-08-03 13:49:34 +03:00
David Wertenteil
226b4772a2 fix submit time
Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-08-03 13:26:49 +03:00
Daniel Grunberger
5379b9b0a6 New output (#1320)
* phase-1

Signed-off-by: Daniel Grunberger <danielgrunberger@armosec.io>

* factory

Signed-off-by: Daniel Grunberger <danielgrunberger@armosec.io>

* wip: feat(cli): add an image scanning command

Add a CLI command that launches an image scan. Does not scan images yet.

Signed-off-by: Vlad Klokun <vklokun@protonmail.ch>

* wip: feat: add image scanning service

Signed-off-by: Vlad Klokun <vklokun@protonmail.ch>

* chore: include dependencies

Signed-off-by: Vlad Klokun <vklokun@protonmail.ch>

* wip: adjust image scanning service

Signed-off-by: Vlad Klokun <vklokun@protonmail.ch>

* wip: feat: use scanning service in CLI

Signed-off-by: Vlad Klokun <vklokun@protonmail.ch>

* use iface

Signed-off-by: Daniel Grunberger <danielgrunberger@armosec.io>

* touches

Signed-off-by: Daniel Grunberger <danielgrunberger@armosec.io>

* continue

Signed-off-by: Daniel Grunberger <danielgrunberger@armosec.io>

* add cmd

Signed-off-by: Daniel Grunberger <danielgrunberger@armosec.io>

* support single workload scan

Signed-off-by: Amir Malka <amirm@armosec.io>

* fix conflict

Signed-off-by: Amir Malka <amirm@armosec.io>

* identifiers

* go mod

* feat(imagescan): add an image scanning command

This commit adds a CLI command and an associated package that scan
images for vulnerabilities.

Signed-off-by: Vlad Klokun <vklokun@protonmail.ch>

feat(imagescan): fail on exceeding the severity threshold

Signed-off-by: Vlad Klokun <vklokun@protonmail.ch>

* chore(imagescan): include dependencies

This commit adds the dependencies necessary for image scanning.

Signed-off-by: Vlad Klokun <vklokun@protonmail.ch>

* chore(imagescan): add dependencies to httphandler

Signed-off-by: Vlad Klokun <vklokun@protonmail.ch>

* added unit tests

Signed-off-by: Amir Malka <amirm@armosec.io>

* merge

* more

* integrate img scan

* added unit tests

Signed-off-by: Amir Malka <amirm@armosec.io>

* more refactoring

Signed-off-by: Amir Malka <amirm@armosec.io>

* add scanned workload reference to opasessionobj

Signed-off-by: Amir Malka <amirm@armosec.io>

* fix GetWorkloadParentKind

Signed-off-by: Amir Malka <amirm@armosec.io>

* remove namespace argument from pullSingleResource, using field selector instead

Signed-off-by: Amir Malka <amirm@armosec.io>

* removed designators (unused) field from PolicyIdentifier, and designators argument from GetResources function

Signed-off-by: Amir Malka <amirm@armosec.io>

* changes

* changes

* fixes

* changes

* feat(imagescan): add an image scanning command

This commit adds a CLI command and an associated package that scan
images for vulnerabilities.

Signed-off-by: Vlad Klokun <vklokun@protonmail.ch>

feat(imagescan): fail on exceeding the severity threshold

Signed-off-by: Vlad Klokun <vklokun@protonmail.ch>

* chore(imagescan): include dependencies

This commit adds the dependencies necessary for image scanning.

Signed-off-by: Vlad Klokun <vklokun@protonmail.ch>

* chore(imagescan): add dependencies to httphandler

Signed-off-by: Vlad Klokun <vklokun@protonmail.ch>

* chore(imagescan): create vuln db with dedicated function

Remove commented out code, too.

Signed-off-by: Vlad Klokun <vklokun@protonmail.ch>

* docs(imagescan): provide package-level docs

Signed-off-by: Vlad Klokun <vklokun@protonmail.ch>

* finish merge

* image scan tests

* continue

* fixes

* refactor

* rm duplicate

* start fixes

* update gh actions

Signed-off-by: David Wertenteil <dwertent@armosec.io>

* pr fixes

* fix test

* improvements

---------

Signed-off-by: Daniel Grunberger <danielgrunberger@armosec.io>
Signed-off-by: Vlad Klokun <vklokun@protonmail.ch>
Signed-off-by: Amir Malka <amirm@armosec.io>
Signed-off-by: David Wertenteil <dwertent@armosec.io>
Co-authored-by: Daniel Grunberger <danielgrunberger@armosec.io>
Co-authored-by: Vlad Klokun <vklokun@protonmail.ch>
Co-authored-by: Amir Malka <amirm@armosec.io>
Co-authored-by: David Wertenteil <dwertent@armosec.io>
2023-08-03 12:09:33 +03:00
David Wertenteil
98f68d8097 Merge pull request #1319 from kubescape/codesee-arch-diagram-workflow-1690964652908
Install the CodeSee workflow.
2023-08-03 10:14:47 +03:00
David Wertenteil
f8057b5c79 Merge pull request #1322 from kubescape/add-ai-workflow
Adding pr-agent
2023-08-02 16:29:25 +03:00
David Wertenteil
f36d8c31b0 Adding pr-agent
Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-08-02 16:27:16 +03:00
Vlad Klokun
3abf18acb7 Merge pull request #1288 from kubescape/feat-image-scan-svc
feat: add an image scanning service and CLI command
2023-08-02 14:03:50 +03:00
codesee-maps[bot]
28200b2744 Install the CodeSee workflow. Learn more at https://docs.codesee.io
2023-08-02 08:24:13 +00:00
David Wertenteil
678f21e33c Merge pull request #1317 from kubescape/add-prints-to-smoketest
add prints to smoketest
2023-08-02 09:55:43 +03:00
Amir Malka
467a84ddac add prints to smoketest
Signed-off-by: Amir Malka <amirm@armosec.io>
2023-08-02 09:52:01 +03:00
Vlad Klokun
925145724e docs(imagescan): provide package-level docs
Signed-off-by: Vlad Klokun <vklokun@protonmail.ch>
2023-08-02 09:50:18 +03:00
Vlad Klokun
e3677fc45c chore(imagescan): create vuln db with dedicated function
Remove commented out code, too.

Signed-off-by: Vlad Klokun <vklokun@protonmail.ch>
2023-08-02 09:50:17 +03:00
Vlad Klokun
704de5bfc1 chore(imagescan): add dependencies to httphandler
Signed-off-by: Vlad Klokun <vklokun@protonmail.ch>
2023-08-02 09:50:17 +03:00
Vlad Klokun
2494c1971c chore(imagescan): include dependencies
This commit adds the dependencies necessary for image scanning.

Signed-off-by: Vlad Klokun <vklokun@protonmail.ch>
2023-08-02 09:50:17 +03:00
Vlad Klokun
3b8bd7735e feat(imagescan): add an image scanning command
This commit adds a CLI command and an associated package that scan
images for vulnerabilities.

Signed-off-by: Vlad Klokun <vklokun@protonmail.ch>

feat(imagescan): fail on exceeding the severity threshold

Signed-off-by: Vlad Klokun <vklokun@protonmail.ch>
2023-08-02 09:50:17 +03:00
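
The "fail on exceeding the severity threshold" behavior can be sketched as a simple ordered comparison (a hedged illustration; the severity names, their ordering, and the function signature are assumptions, not the imagescan package's API):

package imagescan

import "fmt"

var severityRank = map[string]int{
	"Negligible": 0, "Low": 1, "Medium": 2, "High": 3, "Critical": 4,
}

// exceedsSeverityThreshold reports whether any finding is at least as severe
// as the configured threshold, which is what makes the scan exit non-zero.
func exceedsSeverityThreshold(findings []string, threshold string) (bool, error) {
	t, ok := severityRank[threshold]
	if !ok {
		return false, fmt.Errorf("unknown severity %q", threshold)
	}
	for _, f := range findings {
		if severityRank[f] >= t {
			return true, nil
		}
	}
	return false, nil
}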
David Wertenteil
602591e7f2 Merge pull request #1315 from kubescape/remove-workload-cmd
remove scan workload command
2023-08-02 08:44:15 +03:00
Amir Malka
e276e54d2b remove scan workload command
Signed-off-by: Amir Malka <amirm@armosec.io>
2023-08-01 14:11:16 +03:00
Amir Malka
0c019819ff Scanning a single resource (#1300)
* add cmd

Signed-off-by: Daniel Grunberger <danielgrunberger@armosec.io>

* support single workload scan

Signed-off-by: Amir Malka <amirm@armosec.io>

* fix conflict

Signed-off-by: Amir Malka <amirm@armosec.io>

* added unit tests

Signed-off-by: Amir Malka <amirm@armosec.io>

* added unit tests

Signed-off-by: Amir Malka <amirm@armosec.io>

* more refactoring

Signed-off-by: Amir Malka <amirm@armosec.io>

* add scanned workload reference to opasessionobj

Signed-off-by: Amir Malka <amirm@armosec.io>

* fix GetWorkloadParentKind

Signed-off-by: Amir Malka <amirm@armosec.io>

* remove namespace argument from pullSingleResource, using field selector instead

Signed-off-by: Amir Malka <amirm@armosec.io>

* removed designators (unused) field from PolicyIdentifier, and designators argument from GetResources function

Signed-off-by: Amir Malka <amirm@armosec.io>

* fix tests

Signed-off-by: Amir Malka <amirm@armosec.io>

* use ScanObject instead of workload identifier

Signed-off-by: Amir Malka <amirm@armosec.io>

* refactor logic after CR

Signed-off-by: Amir Malka <amirm@armosec.io>

---------

Signed-off-by: Daniel Grunberger <danielgrunberger@armosec.io>
Signed-off-by: Amir Malka <amirm@armosec.io>
Co-authored-by: Daniel Grunberger <danielgrunberger@armosec.io>
2023-08-01 14:07:31 +03:00
David Wertenteil
d9e946cf6d reset head (#1306)
Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-08-01 10:47:07 +03:00
David Wertenteil
e3a8ebfe05 Merge pull request #1297 from dwertent/update-armo-docs
docs(providers): Update ARMO docs
2023-07-31 19:37:01 +03:00
David Wertenteil
fd3703b21b Merge pull request #1296 from kubescape/error-handle-for-empty-resource-scan
Error handle for empty resource scan
2023-07-31 16:13:42 +03:00
David Wertenteil
6bcdda7d56 Merge pull request #1309 from amirmalka/bump-dependencies
bump opa-utils
2023-07-31 12:11:26 +03:00
Amir Malka
981430d65f bump opa-utils
Signed-off-by: Amir Malka <amirm@armosec.io>
2023-07-31 12:00:52 +03:00
David Wertenteil
e91ec69832 Merge pull request #1307 from amirmalka/bump-dependencies
Bump dependencies
2023-07-31 11:02:05 +03:00
Amir Malka
bbfa5d356a bump opa-utils, k8s-interface and armoapi-go
Signed-off-by: Amir Malka <amirm@armosec.io>
2023-07-31 10:39:03 +03:00
DRAGON
d2af7f47db fix: negative compliance score
Signed-off-by: DRAGON <anantvijay3@gmail.com>
2023-07-31 00:21:01 +05:30
rcohencyberarmor
d28afcb00c linter correction
Signed-off-by: rcohencyberarmor <rcohen@armosec.io>
2023-07-30 15:46:54 +03:00
rcohencyberarmor
ca6bdb0bef review corrections
Signed-off-by: rcohencyberarmor <rcohen@armosec.io>
2023-07-30 12:06:03 +03:00
Raziel Cohen
e424bfa81b Merge branch 'master' of github.com:kubescape/kubescape into error-handle-for-empty-resource-scan 2023-07-30 11:21:53 +03:00
David Wertenteil
9f1ff4c090 Merge pull request #1279 from XDRAGON2002/issue_760
feat: add build.ps1
2023-07-25 14:41:00 +03:00
David Wertenteil
1a2dda700b Merge pull request #1291 from XDRAGON2002/issue_1290
fix: yamlhandler error handling
2023-07-25 14:39:26 +03:00
rcohencyberarmor
c4e5611c7f add print in the CLI of which version kubescape was updated to (#1295)
* add print in the CLI of which version kubescape was updated to

Signed-off-by: rcohencyberarmor <rcohen@armosec.io>

* update will suggest that users update by following the Kubescape installation guide

Signed-off-by: rcohencyberarmor <rcohen@armosec.io>

---------

Signed-off-by: rcohencyberarmor <rcohen@armosec.io>
Co-authored-by: rcohencyberarmor <rcohen@armosec.io>
2023-07-25 14:37:44 +03:00
DRAGON
d8e913fb9f feat: add build.ps1
Signed-off-by: DRAGON <anantvijay3@gmail.com>
2023-07-25 14:36:57 +05:30
David Wertenteil
a37b1f7319 update armo docs
Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-07-25 11:01:48 +03:00
rcohencyberarmor
b730ef5154 add git recognition for an empty directory used in a test
Signed-off-by: rcohencyberarmor <rcohen@armosec.io>
2023-07-24 17:24:13 +03:00
rcohencyberarmor
3280173e95 add error handling when there is no scan to trigger because the directory does not contain any relevant scanning files
Signed-off-by: rcohencyberarmor <rcohen@armosec.io>
2023-07-24 17:17:06 +03:00
DRAGON
d0ae4f1c1a fix: yamlhandler error handling
Signed-off-by: DRAGON <anantvijay3@gmail.com>
2023-07-22 13:26:40 +05:30
Vlad Klokun
e4faad8284 Merge pull request #1287 from XDRAGON2002/issue_1255
fix: --- kubescape fix
2023-07-21 21:19:04 +03:00
Vlad Klokun
bc131efd91 tests(fixhandler): remove tests of an unexported sanitization method
Signed-off-by: Vlad Klokun <vklokun@protonmail.ch>
2023-07-21 20:29:04 +03:00
Vlad Klokun
4763f0d69d docs(fixhandler): follow Go Doc comments convention in sanitization func
Signed-off-by: Vlad Klokun <vklokun@protonmail.ch>
2023-07-21 20:28:18 +03:00
Vlad Klokun
22c412ce7f refactor(fixhandler): sanitize YAML inside ApplyFixToContent
External observers don't need to be aware that we sanitize leading
document separators in YAML files. This should be hidden inside our
public function - `ApplyFixToContent()`.

Signed-off-by: Vlad Klokun <vklokun@protonmail.ch>
2023-07-21 20:17:33 +03:00
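
A sketch of the encapsulation this refactor describes: the exported name ApplyFixToContent comes from the commit message, while the signature and function bodies below are assumptions.

package fixhandler

import "strings"

// ApplyFixToContent is the public entry point; sanitization of leading
// document separators happens here, so callers never see that detail.
func ApplyFixToContent(yamlContent, fixExpression string) (string, error) {
	yamlContent = sanitizeYaml(yamlContent)
	// ... locate and apply fixExpression to the sanitized content ...
	return yamlContent, nil
}

// sanitizeYaml strips a leading document separator so the fix logic does not
// have to special-case it.
func sanitizeYaml(s string) string {
	return strings.TrimPrefix(s, "---\n")
}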
Vlad Klokun
1503e984f8 tests(fixhandler): fail test if unable to open test data file
Previously when there was a typo in a test file name, we silently
failed. This commit makes the test explicitly fail if a test data file
was not found.

Signed-off-by: Vlad Klokun <vklokun@protonmail.ch>
2023-07-21 19:55:03 +03:00
Vlad Klokun
a4478ba899 style(fixhandler): newlines and spacing
Ran with `go fmt`.

Signed-off-by: Vlad Klokun <vklokun@protonmail.ch>
2023-07-21 19:45:43 +03:00
David Wertenteil
fcbcb53995 Merge pull request #1276 from amirmalka/time-based-cached-policies
Time-based cached policies
2023-07-20 16:56:39 +03:00
YiscahLevySilas1
17c43fd366 support related objects (#1272)
* support related objects

Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>

* update pkg versions

Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>

* update go mod

Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>

* fix test

Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>

* fix test

Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>

* only add ids of related resource

Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>

* fixes following review

Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>

* add test for processRule

Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>

---------

Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>
2023-07-20 16:23:58 +03:00
YiscahLevySilas1
d44746cb85 allow adding a fw name when running all (#1286)
* allow adding a fw name when running all

Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>

clean code

Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>

* fix following review

Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>

---------

Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>
2023-07-20 14:07:38 +03:00
DRAGON
912035662b fix: --- kubescape fix
Signed-off-by: DRAGON <anantvijay3@gmail.com>
2023-07-20 00:05:23 +05:30
Matthias Bertschy
61dac76369 Merge pull request #1283 from kubescape/remove-website
Remove website folder
2023-07-19 16:29:34 +02:00
Amir Malka
bacf15eeb8 cache control inputs
Signed-off-by: Amir Malka <amirm@armosec.io>
2023-07-18 15:56:16 +03:00
Craig Box
0a5af235e3 Remove website folder
Signed-off-by: Craig Box <craigb@armosec.io>
2023-07-17 20:09:34 +12:00
David Wertenteil
6fec02caff Merge pull request #1281 from XDRAGON2002/issue_1280
fix: stuck spinner
2023-07-17 09:27:26 +03:00
DRAGON
067655d003 fix: stuck spinner
Signed-off-by: DRAGON <anantvijay3@gmail.com>
2023-07-14 01:24:46 +05:30
DRAGON
d55a74c6b2 fix: kubescape list controls
Signed-off-by: DRAGON <anantvijay3@gmail.com>
2023-07-11 21:08:24 +05:30
Amir Malka
e470fce6ed initial implementation of OpenTelemetry metrics collection (#1269)
Signed-off-by: Amir Malka <amirm@armosec.io>
2023-07-10 14:22:26 +03:00
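
For readers new to OpenTelemetry metrics in Go, collection boils down to obtaining a meter and recording through instruments; a minimal sketch using the standard otel API (the meter and instrument names here are hypothetical):

package main

import (
	"context"

	"go.opentelemetry.io/otel"
)

// recordScan increments a counter for each scan; an SDK-configured exporter
// (not shown) decides where the measurements go.
func recordScan(ctx context.Context) error {
	meter := otel.Meter("kubescape")
	scans, err := meter.Int64Counter("scans.total") // hypothetical instrument
	if err != nil {
		return err
	}
	scans.Add(ctx, 1)
	return nil
}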
Amir Malka
ea3172eda6 time-based cached policies
Signed-off-by: Amir Malka <amirm@armosec.io>
2023-07-10 10:54:56 +03:00
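
A time-based policy cache, as the commit title describes, usually pairs each cached payload with its fetch time and a TTL check; a sketch with assumed names:

package policies

import "time"

// cachedPolicy pairs a downloaded policy with the time it was fetched.
type cachedPolicy struct {
	payload   []byte
	fetchedAt time.Time
}

// fresh reports whether the cached copy can still be served instead of
// re-downloading the policy.
func (c *cachedPolicy) fresh(ttl time.Duration) bool {
	return time.Since(c.fetchedAt) < ttl
}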
David Wertenteil
f060d02fbc Merge pull request #1267 from dwertent/submit-untracked-files
feat(file scanning): Submit untracked files
2023-07-06 09:40:21 +03:00
David Wertenteil
43975ddafe Merge pull request #1266 from batazor/patch-1
Update grafana-kubescape-dashboard.json
2023-07-06 09:40:06 +03:00
David Wertenteil
abe0477249 Merge pull request #1265 from dwertent/update-submit-message
Update submit message
2023-07-06 09:39:04 +03:00
David Wertenteil
5f197eb27c submit file scanning
Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-07-06 09:25:34 +03:00
Victor Login
84b43d2b03 Update grafana-kubescape-dashboard.json
Signed-off-by: Victor Login <batazor@evrone.com>
2023-07-05 19:03:27 +02:00
David Wertenteil
b149e00d1a Merge pull request #1264 from dwertent/deprecate-image-controls
core(adaptors): Ignore adaptors when credentials are not set
2023-07-05 17:48:12 +03:00
David Wertenteil
f98b394ec2 Merge pull request #1254 from kubescape/rbac-fix
initialize ns in case we don't have one in YAML
2023-07-05 17:47:42 +03:00
David Wertenteil
492b08c995 Merge pull request #1259 from kubescape/update_regolibrary_version
Update regolibrary version
2023-07-05 17:46:35 +03:00
David Wertenteil
8fa15688fb Merge pull request #1260 from dwertent/deprecate-host-scanner
Deprecated host-scanner from CLI
2023-07-05 17:46:12 +03:00
David Wertenteil
1a3e140e56 Merge pull request #1261 from Oshratn/master
English language fix on Kubescape output
2023-07-05 12:59:19 +03:00
David Wertenteil
72f6988bb4 update messaging based on Oshrat's comments
Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-07-05 10:40:22 +03:00
David Wertenteil
780be45392 update submit message
Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-07-05 10:38:59 +03:00
David Wertenteil
676771e8b3 deprecate the login flags
Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-07-05 10:16:09 +03:00
David Wertenteil
06f5c24b7d ignore adaptors if credentials are not set
Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-07-05 10:13:21 +03:00
David Wertenteil
c17415d6e9 deprecate host-scan-yaml flag
Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-07-05 09:02:21 +03:00
David Wertenteil
b5bed7bfbb remove unused file
Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-07-05 08:59:57 +03:00
Oshrat Nir
3c38021f7c Changed Assistance Remediation to Assisted Remediation
Signed-off-by: Oshrat Nir <oshratn@gmail.com>
2023-07-04 13:13:50 +03:00
David Wertenteil
8989cc1679 Deprecated host-scanner
Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-07-04 09:43:10 +03:00
kooomix
0ab9c32715 fix test JSONs
2023-07-02 13:29:28 +03:00
kooomix
868db91801 update regolibrary to v1.0.286-rc.0
2023-07-02 13:25:37 +03:00
Craig Box
aa0fe21a2e Merge pull request #1257 from dwertent/update-armo-landing-page
docs(Installation): update installation steps
2023-06-27 08:22:58 +01:00
David Wertenteil
1b181a47ef Update docs/providers/armo.md
Co-authored-by: Craig Box <craig.box@gmail.com>
2023-06-27 07:42:42 +03:00
David Wertenteil
30487dcd0e Update docs/providers/armo.md
Co-authored-by: Craig Box <craig.box@gmail.com>
2023-06-27 07:42:33 +03:00
David Wertenteil
46ad069fe5 Updating overview
Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-06-26 13:54:00 +03:00
David Wertenteil
05d5de17d5 fixed wording
Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-06-26 09:49:00 +03:00
David Wertenteil
6bc79458b0 Split the installation command from scanning
Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-06-26 09:46:13 +03:00
David Wertenteil
ab85ca2b28 update installation steps
Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-06-26 09:40:21 +03:00
Matthias Bertschy
99938ecbee initialize ns in case we don't have one in YAML
Signed-off-by: Matthias Bertschy <matthias.bertschy@gmail.com>
2023-06-19 07:47:29 +02:00
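
The fix above reduces to defaulting an empty namespace before evaluation; a sketch, with the fallback value "default" being an assumption:

package resourcehandler

// ensureNamespace returns a usable namespace for a YAML object that did not
// declare one.
func ensureNamespace(ns string) string {
	if ns == "" {
		return "default" // assumed fallback
	}
	return ns
}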
Matthias Bertschy
e2f8e273ad Merge pull request #1252 from testwill/ioutil
chore: remove refs to deprecated io/ioutil
2023-06-15 08:00:05 +02:00
guoguangwu
be63e1ef7c chore: remove refs to deprecated io/ioutil
Signed-off-by: guoguangwu <guoguangwu@magic-shield.com>
2023-06-14 16:33:24 +08:00
guangwu
5e5b9d564c fix: CVE-2023-28840 CVE-2023-28841 CVE-2023-28842 CVE-2022-41723 etc. (#1221)
* fix: CVE-2023-28840 CVE-2023-28841 CVE-2023-28842 CVE-2022-41723 GHSA-vvpx-j8f3-3w6h CVE-2022-23524 CVE-2022-23525 CVE-2022-23526 CVE-2022-36055 CVE-2023-25165

Signed-off-by: guoguangwu <guoguangwu@magic-shield.com>

* restore go.sum

Signed-off-by: David Wertenteil <dwertent@armosec.io>

---------

Signed-off-by: guoguangwu <guoguangwu@magic-shield.com>
Signed-off-by: David Wertenteil <dwertent@armosec.io>
Co-authored-by: David Wertenteil <dwertent@armosec.io>
2023-06-13 11:39:25 +03:00
YiscahLevySilas1
8ee72895b9 Fix statuses - Manual review and Requires configuration (#1251)
* fix statuses - req. review, configurations, manual

Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>

* update opa-utils version

Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>

* update opa-utils version

Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>

* update opa-utils version

Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>

* use const for inner info

Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>

---------

Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>
2023-06-12 10:38:35 +03:00
Nitish Chauhan
6cefada215 correcting the formatting of the table in pdf output (#1244)
* correcting the formatting of the table in pdf output

Signed-off-by: ntishchauhan0022 <nitishchauhan0022@gmail.com>

* adding some starting unit tests

Signed-off-by: ntishchauhan0022 <nitishchauhan0022@gmail.com>

* resolving the mod error

Signed-off-by: ntishchauhan0022 <nitishchauhan0022@gmail.com>

---------

Signed-off-by: ntishchauhan0022 <nitishchauhan0022@gmail.com>
2023-06-04 15:21:07 +03:00
David Wertenteil
211ee487b3 core(metrics api): Update API default behavior (#1250)
* scan default frameworks

Signed-off-by: David Wertenteil <dwertent@armosec.io>

* wip: update context

Signed-off-by: David Wertenteil <dwertent@armosec.io>

* do not trigger host scan

Signed-off-by: David Wertenteil <dwertent@armosec.io>

* adding unitests

Signed-off-by: David Wertenteil <dwertent@armosec.io>

---------

Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-06-04 15:12:31 +03:00
David Wertenteil
bbe46c9fab Merge pull request #1247 from kubescape/fix/remove-kubelet-command-line-endpoint
fix: remove deprecated endpoint
2023-05-31 17:17:09 +03:00
Alessio Greggi
ce7fde582c fix: update host-scanner version
Signed-off-by: Alessio Greggi <ale_grey_91@hotmail.it>
2023-05-31 14:14:29 +02:00
David Wertenteil
1c31e1f015 Merge pull request #1246 from dwertent/cli-updates
core(cmd): Minor CLI updates
2023-05-30 11:55:38 +03:00
Alessio Greggi
9e2fe607d8 fix: remove deprecated endpoint
Signed-off-by: Alessio Greggi <ale_grey_91@hotmail.it>
2023-05-30 10:50:31 +02:00
David Wertenteil
5a5ec9b641 wip: remove secretKey and clientID from list cmd
Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-05-28 22:37:48 +03:00
David Wertenteil
24c608e204 wip: add example for exclude-namespaces flag
Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-05-28 22:35:45 +03:00
David Wertenteil
ac43036b4a Merge pull request #1241 from kubescape/hostsensor-improvements
host-scanner improvements
2023-05-28 08:57:11 +03:00
David Wertenteil
03b89047f8 Merge pull request #1243 from anubhav06/update-go-git-url
update kubescape/go-git-url version
2023-05-28 08:46:22 +03:00
Anubhav Gupta
07a5c6488b update kubescape/go-git-url version
Signed-off-by: Anubhav Gupta <mail.anubhav06@gmail.com>
2023-05-26 18:13:39 +05:30
Alessio Greggi
c486b4fed7 feat: add log coupling for hostsensorutils
Signed-off-by: Alessio Greggi <ale_grey_91@hotmail.it>
2023-05-24 14:46:34 +02:00
Alessio Greggi
00c48d756d fix(hostsensorutils): add finalizers deletion
Signed-off-by: Alessio Greggi <ale_grey_91@hotmail.it>
2023-05-24 11:49:15 +02:00
Alessio Greggi
b49563ae8c fix(hostsensorutils): reduce periods of readiness probe
Signed-off-by: Alessio Greggi <ale_grey_91@hotmail.it>
2023-05-24 11:34:04 +02:00
Alessio Greggi
7840ecb5da fix: move host-scanner to kubescape namespace
Signed-off-by: Alessio Greggi <ale_grey_91@hotmail.it>
2023-05-24 09:45:12 +02:00
David Wertenteil
e151c5bf81 Merge pull request #1240 from amirmalka/memory-optimizations
bump opa-utils version for memory optimizations
2023-05-23 20:37:27 +03:00
Amir Malka
225545476c update opa-utils
Signed-off-by: Amir Malka <amirm@armosec.io>
2023-05-23 19:18:45 +03:00
Amir Malka
987f97102d bump opa-utils version for memory optimizations
Signed-off-by: Amir Malka <amirm@armosec.io>
2023-05-22 16:44:11 +03:00
Craig Box
7bffed2afe Merge pull request #1239 from yrs147/master
2023-05-18 16:39:25 +03:00
Hollow Man
3357713903 Add back new line at the end of the file
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-05-18 13:38:35 +03:00
Yash Raj Singh
efd48eab08 Removed instructions and added the wiki link
Co-authored-by: Hollow Man <hollowman@opensuse.org>

Signed-off-by: Yash Raj Singh <98258627+yrs147@users.noreply.github.com>
2023-05-18 13:38:27 +03:00
Yash Raj
231d9c231a Added instructions to setup kubescape locally
Signed-off-by: Yash Raj <yashraj14700728@gmail.com>
2023-05-18 13:34:13 +05:30
Craig Box
91e705a3eb Merge pull request #1238 from HollowMan6/macm1
2023-05-17 16:37:13 +03:00
Songlin Jiang
a92d573cb8 Fix downloading arm64 binary for kubescape
Signed-off-by: Songlin Jiang <songlin.jiang@csc.fi>
2023-05-17 15:34:32 +03:00
David Wertenteil
e8c72b9883 Merge pull request #1235 from kubescape/revert-1213-windows-latest
Revert "Deprecate kubescape-windows-latest"
2023-05-16 10:07:39 +03:00
David Wertenteil
d380b2cb00 Revert "Deprecate kubescape-windows-latest"
2023-05-16 10:03:41 +03:00
Yuval Leibovich
50b3d0f313 Merge pull request #1233 from kubescape/update-compliance-score-with-readme
updating readme file to support compliance
2023-05-15 17:31:40 +03:00
David Wertenteil
474b6d07ed Merge pull request #1234 from kubescape/timeout
start with a new context, extracting span from request
2023-05-15 16:47:25 +03:00
Matthias Bertschy
2cddc4b395 start with a new context, extracting span from request
Signed-off-by: Matthias Bertschy <matthias.bertschy@gmail.com>
2023-05-15 14:55:46 +02:00
Yuval Leibovich
b5fb355a22 updating readme file to support compliance
2023-05-15 15:31:14 +03:00
David Wertenteil
d1bc6d0190 Merge pull request #1213 from HollowMan6/windows-latest
Deprecate kubescape-windows-latest
2023-05-15 13:23:32 +03:00
Amir Malka
0a0ef10d50 Control parallelism of opa rule processing by env var (#1230)
* control parallelism of opa rule processing by env var

Signed-off-by: Amir Malka <amirm@armosec.io>

* go 1.20

Signed-off-by: Amir Malka <amirm@armosec.io>

* update go.mod go.sum

Signed-off-by: Amir Malka <amirm@armosec.io>

---------

Signed-off-by: Amir Malka <amirm@armosec.io>
2023-05-14 14:59:21 +03:00
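
Controlling parallelism through an environment variable, as in #1230, typically reduces to a helper like this sketch (the variable name RULE_PROCESSING_WORKERS is hypothetical):

package opaprocessor

import (
	"os"
	"runtime"
	"strconv"
)

// ruleWorkers returns how many goroutines should process rules: the value of
// the env var when it is a positive integer, otherwise the CPU count.
func ruleWorkers() int {
	if v, err := strconv.Atoi(os.Getenv("RULE_PROCESSING_WORKERS")); err == nil && v > 0 {
		return v
	}
	return runtime.NumCPU()
}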
David Wertenteil
4523dc8456 Merge pull request #1232 from amirmalka/update-golang-version
Update go version 1.19->1.20
2023-05-14 11:18:18 +03:00
Amir Malka
b26f83d0bd update go version 1.19->1.20
Signed-off-by: Amir Malka <amirm@armosec.io>
2023-05-14 10:04:30 +03:00
David Wertenteil
9cc3053d74 Merge pull request #1214 from HollowMan6/dispatch
Add ref to workflow dispatch
2023-05-11 15:13:24 +01:00
Yuval Leibovich
84842a6a91 Merge pull request #1219 from kubescape/add-system-test
add compliance score system test
2023-05-08 10:26:00 +03:00
YiscahLevySilas1
aff8cc480e add compliance score system test
Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>
2023-05-07 16:07:39 +03:00
David Wertenteil
7feea43421 Merge pull request #1216 from HollowMan6/install
Make powershell Windows installation user path available immediately
2023-05-03 10:58:47 +03:00
David Wertenteil
04ec32c9f4 Merge pull request #1218 from dwertent/hot-fix-2.3.0
fix(adaptors) Check response size
2023-05-03 10:05:17 +03:00
David Wertenteil
b805f22038 add test
Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-05-03 08:58:12 +03:00
David Wertenteil
092f37a636 if the response is empty, return an empty string
Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-05-03 08:39:59 +03:00
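
The guard this hotfix describes is essentially the following sketch (names are illustrative):

package adaptors

// responseToString returns an empty string, not an error, when the adaptor's
// HTTP response body is empty.
func responseToString(body []byte) (string, error) {
	if len(body) == 0 {
		return "", nil
	}
	return string(body), nil
}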
Hollow Man
9a2eb46f65 Make powershell Windows installation user path available immediately
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-05-02 00:23:01 +03:00
Hollow Man
c637c1a589 Add ref to workflow dispatch
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-05-01 22:07:50 +03:00
David Wertenteil
7609a4aa5d Merge pull request #1199 from HollowMan6/install
Update installation script
2023-05-01 21:53:16 +03:00
Hollow Man
75d31c22d9 Deprecate kubescape-windows-latest
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-05-01 17:38:46 +03:00
David Wertenteil
b93a97a8c8 Merge pull request #1186 from HollowMan6/dispatch
Invoke packaging workflow to update after release
2023-05-01 16:45:10 +03:00
David Wertenteil
88696ca233 Merge pull request #1169 from HollowMan6/exe
Add kubescape.exe to the release assets
2023-05-01 16:40:04 +03:00
David Wertenteil
87d94d16ff Merge pull request #1212 from kubescape/token
change basic auth username to x-token-auth
2023-05-01 14:56:36 +03:00
Hollow Man
1843bcdaf8 invoke only if the repository owner is kubescape
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-05-01 13:46:39 +03:00
Hollow Man
cdaff7ddbe Revert install.ps1 change, to update after release
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-05-01 13:24:40 +03:00
Hollow Man
ec7bc26f64 Add kubescape.exe to the release assets
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-05-01 13:24:35 +03:00
Matthias Bertschy
75b64d58f3 change basic auth username to x-token-auth
Signed-off-by: Matthias Bertschy <matthias.bertschy@gmail.com>
2023-05-01 10:55:07 +02:00
David Wertenteil
dce1d762c6 Merge pull request #1209 from kubescape/new-threshold-flag
Add compliance score to controls
2023-05-01 11:23:20 +03:00
YiscahLevySilas1
f3225855d0 rerun workflows
Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>
2023-05-01 09:19:09 +03:00
David Wertenteil
5ae421dbc2 Merge pull request #1210 from HollowMan6/master
ci: update before install packages
2023-04-30 15:09:55 +03:00
Hollow Man
d4b75dcb0c ci: update before install packages
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-04-27 15:50:21 +00:00
YiscahLevySilas1
b7935276e3 Merge branch 'master' of github.com:kubescape/kubescape into new-threshold-flag
Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>
2023-04-27 15:56:12 +03:00
YiscahLevySilas1
d6edd818b8 add compliance score to new field in controls for backward compatibility
Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>
2023-04-27 15:53:47 +03:00
David Wertenteil
a73081c816 Merge pull request #1203 from kubescape/fix/remove-outdated-endpoints
fix: remove outdated endpoints
2023-04-27 11:23:18 +03:00
David Wertenteil
dd961b9e55 Merge pull request #1208 from kubescape/fix/hostsensor-http-probe-attributes
fix(hostsensorutils): fix indentation of probe attributes
2023-04-27 11:22:50 +03:00
Alessio Greggi
76ced13a26 fix(hostsensorutils): fix indentation of probe attributes
Signed-off-by: Alessio Greggi <ale_grey_91@hotmail.it>
2023-04-26 16:16:29 +02:00
YiscahLevySilas1
95e88f8581 add compliance-threshold, deprecate fail-threshold (#1197)
* add compliance-threshold, deprecate fail-threshold

Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>

* update opa-utils version

Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>

* update opa-utils version for fix in compliance score

Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>

---------

Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>
2023-04-24 15:33:30 +03:00
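
Deprecating --fail-threshold in favor of --compliance-threshold follows the usual cobra/pflag pattern, sketched below; the command wiring and defaults are assumptions:

package cmd

import "github.com/spf13/cobra"

// newScanCmd registers both flags and marks the old one deprecated, so using
// it still works but prints a migration hint.
func newScanCmd() *cobra.Command {
	var complianceThreshold, failThreshold float32
	c := &cobra.Command{Use: "scan"}
	c.Flags().Float32Var(&complianceThreshold, "compliance-threshold", 0, "fail if the compliance score is below this value")
	c.Flags().Float32Var(&failThreshold, "fail-threshold", 100, "deprecated")
	_ = c.Flags().MarkDeprecated("fail-threshold", "use --compliance-threshold instead")
	return c
}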
David Wertenteil
5955247f01 Merge pull request #1207 from dwertent/fix-workflow
fix(workflow): Fix workflow
2023-04-23 13:26:48 +03:00
David Wertenteil
c0530b4f88 wip: fixed github actions
Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-04-23 11:47:54 +03:00
David Wertenteil
c23d6a17cc wip: update fix command example
Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-04-23 11:45:46 +03:00
David Wertenteil
d448de131f Merge pull request #1148 from HollowMan6/master
arm64 release binaries for CI and Krew
2023-04-23 09:48:05 +03:00
Alessio Greggi
b48c04da63 fix: remove outdated endpoints
Signed-off-by: Alessio Greggi <ale_grey_91@hotmail.it>
2023-04-21 19:47:24 +02:00
David Wertenteil
ecf770c756 Merge pull request #1189 from dwertent/hotfix-2.2.6-add_data
fix(submit): set default report time
2023-04-20 16:39:51 +03:00
Hollow Man
6e33f37aee Update installation script
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-04-20 13:05:51 +03:00
Hollow Man
03f792e968 Revert change to install.sh
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-04-20 11:21:34 +03:00
David Wertenteil
b017d77b86 Merge pull request #1184 from HollowMan6/sarif-fix
feat(sarif): add fix object in generated reports
2023-04-20 11:15:49 +03:00
Craig Box
2cde591180 Merge pull request #1196 from HollowMan6/doc
Move building instructions to wiki, add more installation instructions
2023-04-20 14:55:52 +12:00
YiscahLevySilas1
f25d573f32 update opa-utils version for fix in compliance score
Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>
2023-04-19 18:34:10 +03:00
Hollow Man
ebf3e49f53 Update snap installation
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-04-18 02:02:43 +03:00
YiscahLevySilas1
acaf6e78da update opa-utils version
Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>
2023-04-17 20:25:27 +03:00
YiscahLevySilas1
344e9188f6 add compliance-threshold, deprecate fail-threshold
Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>
2023-04-17 16:08:38 +03:00
Hollow Man
3f69f06df1 Move Building to wiki and installation back to docs
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-04-17 14:10:47 +03:00
Hollow Man
e0b296c124 Move installation instructions to wiki
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-04-16 19:35:11 +03:00
David Wertenteil
108bbd8bc4 Merge pull request #1193 from suhasgumma/master
Fix: Empty Frameworks Column when listing controls
2023-04-16 09:13:38 +03:00
Hollow Man
5c1a41e920 nit
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-04-13 10:54:25 +03:00
Hollow Man
0b8d207615 Add more error check
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-04-13 10:54:25 +03:00
Hollow Man
539b6c51b9 Add unit testcase
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-04-13 10:54:25 +03:00
Hollow Man
19ca590e2f S1023: redundant break statement (gosimple)
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-04-13 10:54:25 +03:00
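
For reference, this is the pattern gosimple's S1023 check flags; the example is illustrative. Go switch cases never fall through implicitly, so a trailing break adds nothing and the fix is simply to delete it:

package main

import "fmt"

func handle(kind string) {
	switch kind {
	case "Pod":
		fmt.Println("pod")
		break // S1023: redundant break statement - delete this line
	default:
		fmt.Println("other")
	}
}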
Hollow Man
4de50f82c0 feat(sarif): add fix object in generated reports
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-04-13 10:54:19 +03:00
Hollow Man
ab41d5dbf4 Invoke workflow to update github action
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-04-12 22:33:02 +03:00
Matthias Bertschy
fa6de6dc3f Merge pull request #1192 from Sindhuinti/Sindhuinti/broken-link
fix: broken link
2023-04-11 14:14:27 +02:00
Suhas Gumma
96e959c3b7 Fix: Empty Frameworks Column when listing controls
Signed-off-by: Suhas Gumma <suhasgumma2001@gmail.com>
2023-04-11 15:20:07 +05:30
Sindhu Inti
28fdee0dd2 fix: broken link
Signed-off-by: Sindhuinti <iamsindhuinti23@gmail.com>
2023-04-11 13:10:31 +05:30
Sindhu Inti
9ce25c45fe fix: broken link
2023-04-11 13:01:49 +05:30
MathoAvito
d44b9f7a31 Change wf (#1190)
* added coveralls coverage tests 

Signed-off-by: Matan Avital <matavital13@gmail.com>
2023-04-09 18:29:23 +03:00
David Wertenteil
c7af6266fd Merge pull request #1185 from HollowMan6/fix-changes
fix(fix): mixed up change summary list
2023-04-09 11:19:44 +02:00
David Wertenteil
91c13381b2 set default report time
Signed-off-by: David Wertenteil <dwertent@armosec.io>
2023-04-09 11:15:06 +02:00
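
The default-report-time fix reduces to a zero-value check; a sketch with assumed names:

package reporter

import "time"

// stampReport fills in the report time when no timestamp was set upstream.
func stampReport(ts *time.Time) {
	if ts.IsZero() {
		*ts = time.Now().UTC()
	}
}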
Hollow Man
30ad3adbb6 Invoke workflow to update after release
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-04-07 15:49:25 +03:00
Hollow Man
64e3b08641 fix(fix): mixed up change list
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-04-07 01:07:49 +03:00
Hollow Man
6d7a89bb74 Add ARM64 binary installation
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-04-07 00:47:40 +03:00
Hollow Man
e8d92ffd43 Resume test core pkg under ubuntu arm64
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-04-07 00:47:40 +03:00
Hollow Man
48a15e1a8d Disable multi-platform test with commits
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-04-07 00:47:40 +03:00
Hollow Man
d02f15ef6f merge pr scanner build into binary-build-and-e2e-tests
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-04-07 00:47:40 +03:00
Hollow Man
9327f70e1a Fix naming
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-04-07 00:47:40 +03:00
Hollow Man
c498026208 Disable core pkg test for ubuntu arm64
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-04-07 00:47:40 +03:00
Hollow Man
3238555df3 add cross compilation for ubuntu arm64
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-04-07 00:47:40 +03:00
Hollow Man
0c77d89bfc add cross compilation for mac m1
Signed-off-by: Hollow Man <hollowman@opensuse.org>
2023-04-07 00:47:40 +03:00
yuleib
875deb7ec3 adding compliance score updates (#1181)
Signed-off-by: Yuval Leibovich <yuvall@armosec.io>
2023-04-04 16:03:40 +03:00
David Wertenteil
eae234136b Merge pull request #1178 from YiscahLevySilas1/update-k8s-interface
update version k8s-interface for cloud resources
2023-04-03 13:54:02 +03:00
YiscahLevySilas1
93a35fffbd comment failing test because of many requests
Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>
2023-04-03 10:45:51 +03:00
YiscahLevySilas1
9a3767ef72 update version k8s-interface for cloud resources
Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>
2023-04-03 09:53:22 +03:00
YiscahLevySilas1
9420fd5e79 update version k8s-interface for cloud resources
Signed-off-by: YiscahLevySilas1 <yiscahls@armosec.io>
2023-04-03 09:34:22 +03:00
218 changed files with 57054 additions and 5864 deletions

.dockerignore (new file, +2 lines)

@@ -0,0 +1,2 @@
git2go
kubescape

(next changed file)

@@ -17,7 +17,14 @@ runs:
using: "composite"
steps:
- run: |
SUB='-rc'
if [[ -z "${{ inputs.ORIGINAL_TAG }}" ]]; then
echo "The value of ORIGINAL_TAG is ${{ inputs.ORIGINAL_TAG }}"
echo "Setting the value of ORIGINAL_TAG to ${{ github.ref_name }}"
echo ORIGINAL_TAG="${{ github.ref_name }}" >> $GITHUB_ENV
fi
shell: bash
- run: |
if [[ "${{ inputs.ORIGINAL_TAG }}" == *"${{ inputs.SUB_STRING }}"* ]]; then
echo "Release candidate tag found."
else

(next changed file: .github/workflows/00-pr-scanner.yaml)

@@ -2,12 +2,9 @@ name: 00-pr_scanner
on:
pull_request:
types: [opened, reopened, synchronize, ready_for_review]
branches:
- 'master'
- 'main'
- 'dev'
paths-ignore:
- '**.yaml'
- '**.yml'
- '**.md'
- '**.sh'
- 'website/*'
@@ -29,3 +26,16 @@ jobs:
RELEASE: ""
CLIENT: test
secrets: inherit
binary-build:
uses: ./.github/workflows/b-binary-build-and-e2e-tests.yaml
with:
COMPONENT_NAME: kubescape
CGO_ENABLED: 1
GO111MODULE: ""
GO_VERSION: "1.20"
RELEASE: ""
CLIENT: test
ARCH_MATRIX: '[ "" ]'
OS_MATRIX: '[ "ubuntu-20.04" ]'
secrets: inherit

(next changed file)

@@ -1,34 +0,0 @@
name: 01-pr-merged
on:
pull_request_target:
types: [closed]
branches:
- 'master'
- 'main'
paths-ignore:
- '**.yaml'
- '**.md'
- '**.sh'
- 'website/*'
- 'examples/*'
- 'docs/*'
- 'build/*'
- '.github/*'
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
binary-build:
if: ${{ github.event.pull_request.merged == true && contains( github.event.pull_request.labels.*.name, 'trigger-integration-test') && github.event.pull_request.base.ref == 'master' }} ## run only if labeled as "trigger-integration-test" and base branch is master
uses: ./.github/workflows/b-binary-build-and-e2e-tests.yaml
with:
COMPONENT_NAME: kubescape
CGO_ENABLED: 1
GO111MODULE: ""
GO_VERSION: "1.19"
RELEASE: ""
CLIENT: test
secrets: inherit

(next changed file)

@@ -21,7 +21,7 @@ jobs:
COMPONENT_NAME: kubescape
CGO_ENABLED: 1
GO111MODULE: ""
GO_VERSION: "1.19"
GO_VERSION: "1.20"
RELEASE: ${{ needs.retag.outputs.NEW_TAG }}
CLIENT: release
secrets: inherit

(next changed file)

@@ -1,4 +1,4 @@
name: 03-create_release_digests
name: 03-post_release
on:
release:
types: [published]
@@ -6,8 +6,8 @@ on:
- 'master'
- 'main'
jobs:
create_release_digests:
name: Creating digests
post_release:
name: Post release jobs
runs-on: ubuntu-latest
steps:
- name: Digest
@@ -15,3 +15,27 @@ jobs:
with:
hash-type: sha1
file-name: kubescape-release-digests
- name: Invoke workflow to update packaging
uses: benc-uk/workflow-dispatch@v1
if: github.repository_owner == 'kubescape'
with:
workflow: release.yml
repo: kubescape/packaging
ref: refs/heads/main
token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
- name: Invoke workflow to update homebrew tap
uses: benc-uk/workflow-dispatch@v1
if: github.repository_owner == 'kubescape'
with:
workflow: release.yml
repo: kubescape/homebrew-tap
ref: refs/heads/main
token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
- name: Invoke workflow to update github action
uses: benc-uk/workflow-dispatch@v1
if: github.repository_owner == 'kubescape'
with:
workflow: release.yaml
repo: kubescape/github-action
ref: refs/heads/main
token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}

(next changed file)

@@ -10,6 +10,10 @@ on:
description: 'Client name'
required: true
type: string
UNIT_TESTS_PATH:
required: false
type: string
default: "./..."
jobs:
scanners:
env:
@@ -25,7 +29,7 @@ jobs:
- uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # Install go because go-licenses use it ratchet:actions/setup-go@v3
name: Installing go
with:
go-version: '1.19'
go-version: '1.20'
cache: true
- name: Scanning - Forbidden Licenses (go-licenses)
id: licenses-scan
@@ -57,6 +61,21 @@ jobs:
command: test --all-projects
env:
SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }}
- name: Test coverage
id: unit-test
run: go test -v ${{ inputs.UNIT_TESTS_PATH }} -covermode=count -coverprofile=coverage.out
- name: Convert coverage count to lcov format
uses: jandelgado/gcov2lcov-action@v1
- name: Submit coverage tests to Coveralls
continue-on-error: true
uses: coverallsapp/github-action@v1
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
path-to-lcov: coverage.lcov
- name: Comment results to PR
continue-on-error: true # Warning: This might break opening PRs from forks
uses: peter-evans/create-or-update-comment@5adcb0bb0f9fb3f95ef05400558bdb3f329ee808 # ratchet:peter-evans/create-or-update-comment@v2.1.0
@@ -68,92 +87,3 @@ jobs:
- Credentials scan: ${{ steps.credentials-scan.outcome }}
- Vulnerabilities scan: ${{ steps.vulnerabilities-scan.outcome }}
reactions: 'eyes'
basic-tests:
needs: scanners
name: Create cross-platform build
runs-on: ${{ matrix.os }}
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
RELEASE: ${{ inputs.RELEASE }}
CLIENT: ${{ inputs.CLIENT }}
strategy:
matrix:
os: [ubuntu-20.04, macos-latest, windows-latest]
steps:
- uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # ratchet:actions/checkout@v3
with:
submodules: recursive
- name: Cache Go modules (Linux)
if: matrix.os == 'ubuntu-latest'
uses: actions/cache@69d9d449aced6a2ede0bc19182fadc3a0a42d2b0 # ratchet:actions/cache@v3
with:
path: |
~/.cache/go-build
~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: Cache Go modules (macOS)
if: matrix.os == 'macos-latest'
uses: actions/cache@69d9d449aced6a2ede0bc19182fadc3a0a42d2b0 # ratchet:actions/cache@v3
with:
path: |
~/Library/Caches/go-build
~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: Cache Go modules (Windows)
if: matrix.os == 'windows-latest'
uses: actions/cache@69d9d449aced6a2ede0bc19182fadc3a0a42d2b0 # ratchet:actions/cache@v3
with:
path: |
~\AppData\Local\go-build
~\go\pkg\mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: Set up Go
uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # ratchet:actions/setup-go@v3
with:
go-version: 1.19
- name: Install MSYS2 & libgit2 (Windows)
shell: cmd
run: .\build.bat all
if: matrix.os == 'windows-latest'
- name: Install pkg-config (macOS)
run: brew install pkg-config
if: matrix.os == 'macos-latest'
- name: Install libgit2 (Linux/macOS)
run: make libgit2
if: matrix.os != 'windows-latest'
- name: Test core pkg
run: go test "-tags=static,gitenabled" -v ./...
- name: Test httphandler pkg
run: cd httphandler && go test "-tags=static,gitenabled" -v ./...
- name: Build
env:
RELEASE: ${{ inputs.RELEASE }}
CLIENT: ${{ inputs.CLIENT }}
CGO_ENABLED: 1
run: python3 --version && python3 build.py
- name: Smoke Testing (Windows / MacOS)
env:
RELEASE: ${{ inputs.RELEASE }}
KUBESCAPE_SKIP_UPDATE_CHECK: "true"
run: python3 smoke_testing/init.py ${PWD}/build/kubescape-${{ matrix.os }}
if: matrix.os != 'ubuntu-20.04'
- name: Smoke Testing (Linux)
env:
RELEASE: ${{ inputs.RELEASE }}
KUBESCAPE_SKIP_UPDATE_CHECK: "true"
run: python3 smoke_testing/init.py ${PWD}/build/kubescape-ubuntu-latest
if: matrix.os == 'ubuntu-20.04'
- name: golangci-lint
if: matrix.os == 'ubuntu-20.04'
continue-on-error: true
uses: golangci/golangci-lint-action@08e2f20817b15149a52b5b3ebe7de50aff2ba8c5 # ratchet:golangci/golangci-lint-action@v3
with:
version: latest
args: --timeout 10m --build-tags=static
only-new-issues: true

(next changed file: .github/workflows/b-binary-build-and-e2e-tests.yaml)

@@ -1,5 +1,45 @@
name: b-binary-build-and-e2e-tests
on:
workflow_dispatch:
inputs:
COMPONENT_NAME:
required: false
type: string
default: "kubescape"
RELEASE:
required: false
type: string
default: ""
CLIENT:
required: false
type: string
default: "test"
GO_VERSION:
required: false
type: string
default: "1.20"
GO111MODULE:
required: false
type: string
default: ""
CGO_ENABLED:
type: number
default: 1
required: false
OS_MATRIX:
type: string
required: false
default: '[ "ubuntu-20.04", "macos-latest", "windows-latest"]'
ARCH_MATRIX:
type: string
required: false
default: '[ "", "arm64"]'
BINARY_TESTS:
type: string
required: false
default: '[ "scan_nsa", "scan_mitre", "scan_with_exceptions", "scan_repository", "scan_local_file", "scan_local_glob_files", "scan_local_list_of_files", "scan_nsa_and_submit_to_backend", "scan_mitre_and_submit_to_backend", "scan_local_repository_and_submit_to_backend", "scan_repository_from_url_and_submit_to_backend", "scan_with_exception_to_backend", "scan_with_custom_framework", "scan_customer_configuration", "host_scanner", "scan_compliance_score" ]'
workflow_call:
inputs:
COMPONENT_NAME:
@@ -13,7 +53,7 @@ on:
type: string
GO_VERSION:
type: string
default: "1.19"
default: "1.20"
GO111MODULE:
required: true
type: string
@@ -22,20 +62,26 @@ on:
default: 1
BINARY_TESTS:
type: string
default: '[ "scan_nsa", "scan_mitre", "scan_with_exceptions", "scan_repository", "scan_local_file", "scan_local_glob_files", "scan_local_list_of_files", "scan_nsa_and_submit_to_backend", "scan_mitre_and_submit_to_backend", "scan_local_repository_and_submit_to_backend", "scan_repository_from_url_and_submit_to_backend", "scan_with_exception_to_backend", "scan_with_custom_framework", "scan_customer_configuration", "host_scanner" ]'
CHECKOUT_REPO:
required: false
default: '[ "scan_nsa", "scan_mitre", "scan_with_exceptions", "scan_repository", "scan_local_file", "scan_local_glob_files", "scan_local_list_of_files", "scan_nsa_and_submit_to_backend", "scan_mitre_and_submit_to_backend", "scan_local_repository_and_submit_to_backend", "scan_repository_from_url_and_submit_to_backend", "scan_with_exception_to_backend", "scan_with_custom_framework", "scan_customer_configuration", "host_scanner", "scan_compliance_score", "scan_custom_framework_scanning_file_scope_testing", "scan_custom_framework_scanning_cluster_scope_testing", "scan_custom_framework_scanning_cluster_and_file_scope_testing", "unified_configuration_config_view", "unified_configuration_config_set", "unified_configuration_config_delete" ]'
OS_MATRIX:
type: string
required: false
default: '[ "ubuntu-20.04", "macos-latest", "windows-latest"]'
ARCH_MATRIX:
type: string
required: false
default: '[ "", "arm64"]'
jobs:
wf-preparation:
name: secret-validator
runs-on: ubuntu-latest
outputs:
TEST_NAMES: ${{ steps.export_tests_to_env.outputs.TEST_NAMES }}
OS_MATRIX: ${{ steps.export_os_to_env.outputs.OS_MATRIX }}
ARCH_MATRIX: ${{ steps.export_arch_to_env.outputs.ARCH_MATRIX }}
is-secret-set: ${{ steps.check-secret-set.outputs.is-secret-set }}
steps:
- name: check if the necessary secrets are set in github secrets
id: check-secret-set
@@ -49,27 +95,46 @@ jobs:
REGISTRY_PASSWORD: ${{ secrets.REGISTRY_PASSWORD }}
run: "echo \"is-secret-set=${{ env.CUSTOMER != '' && \n env.USERNAME != '' &&\n env.PASSWORD != '' &&\n env.CLIENT_ID != '' &&\n env.SECRET_KEY != '' &&\n env.REGISTRY_USERNAME != '' &&\n env.REGISTRY_PASSWORD != ''\n }}\" >> $GITHUB_OUTPUT\n"
- id: export_os_to_env
name: set test name
run: |
echo "OS_MATRIX=$input" >> $GITHUB_OUTPUT
env:
input: ${{ inputs.OS_MATRIX }}
- id: export_tests_to_env
name: set test name
run: |
echo "TEST_NAMES=$input" >> $GITHUB_OUTPUT
env:
input: ${{ inputs.BINARY_TESTS }}
- id: export_arch_to_env
name: set test name
run: |
echo "ARCH_MATRIX=$input" >> $GITHUB_OUTPUT
env:
input: ${{ inputs.ARCH_MATRIX }}
binary-build:
name: Create cross-platform build
needs: wf-preparation
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
GOARCH: ${{ matrix.arch }}
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ubuntu-20.04, macos-latest, windows-latest]
os: ${{ fromJson(needs.wf-preparation.outputs.OS_MATRIX) }}
arch: ${{ fromJson(needs.wf-preparation.outputs.ARCH_MATRIX) }}
exclude:
- os: windows-latest
arch: arm64
steps:
- uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # ratchet:actions/checkout@v3
with:
repository: ${{inputs.CHECKOUT_REPO}}
fetch-depth: 0
submodules: recursive
@@ -112,9 +177,26 @@ jobs:
go-version: ${{ inputs.GO_VERSION }}
cache: true
- name: start ${{ matrix.arch }} environment in container
run: |
sudo apt-get update
sudo apt-get install -y binfmt-support qemu-user-static
sudo docker run --platform linux/${{ matrix.arch }} -e RELEASE=${{ inputs.RELEASE }} \
-e CLIENT=${{ inputs.CLIENT }} -e CGO_ENABLED=${{ inputs.CGO_ENABLED }} \
-e KUBESCAPE_SKIP_UPDATE_CHECK=true -e GOARCH=${{ matrix.arch }} -v ${PWD}:/work \
-w /work -v ~/go/pkg/mod:/root/go/pkg/mod -v ~/.cache/go-build:/root/.cache/go-build \
-d --name build golang:${{ inputs.GO_VERSION }}-bullseye sleep 21600
sudo docker ps
DOCKER_CMD="sudo docker exec build"
${DOCKER_CMD} apt update
${DOCKER_CMD} apt install -y cmake python3
${DOCKER_CMD} git config --global --add safe.directory '*'
echo "DOCKER_CMD=${DOCKER_CMD}" >> $GITHUB_ENV;
if: matrix.os == 'ubuntu-20.04' && matrix.arch != ''
- name: Install MSYS2 & libgit2 (Windows)
shell: cmd
run: .\build.bat all
shell: pwsh
run: .\build.ps1 all
if: matrix.os == 'windows-latest'
- name: Install pkg-config (macOS)
@@ -122,35 +204,44 @@ jobs:
if: matrix.os == 'macos-latest'
- name: Install libgit2 (Linux/macOS)
run: make libgit2
run: ${{ env.DOCKER_CMD }} make libgit2${{ matrix.arch }}
if: matrix.os != 'windows-latest'
- name: Test core pkg
run: go test "-tags=static,gitenabled" -v ./...
run: ${{ env.DOCKER_CMD }} go test "-tags=static,gitenabled" -v ./...
if: "!startsWith(github.ref, 'refs/tags') && matrix.os == 'ubuntu-20.04' && matrix.arch == '' || startsWith(github.ref, 'refs/tags') && (matrix.os != 'macos-latest' || matrix.arch != 'arm64')"
- name: Test httphandler pkg
run: cd httphandler && go test "-tags=static,gitenabled" -v ./...
run: ${{ env.DOCKER_CMD }} sh -c 'cd httphandler && go test "-tags=static,gitenabled" -v ./...'
if: "!startsWith(github.ref, 'refs/tags') && matrix.os == 'ubuntu-20.04' && matrix.arch == '' || startsWith(github.ref, 'refs/tags') && (matrix.os != 'macos-latest' || matrix.arch != 'arm64')"
- name: Build
env:
RELEASE: ${{ inputs.RELEASE }}
CLIENT: ${{ inputs.CLIENT }}
CGO_ENABLED: ${{ inputs.CGO_ENABLED }}
run: python3 --version && python3 build.py
run: ${{ env.DOCKER_CMD }} python3 --version && ${{ env.DOCKER_CMD }} python3 build.py
- name: Smoke Testing (Windows / MacOS)
env:
RELEASE: ${{ inputs.RELEASE }}
KUBESCAPE_SKIP_UPDATE_CHECK: "true"
run: python3 smoke_testing/init.py ${PWD}/build/kubescape-${{ matrix.os }}
if: matrix.os != 'ubuntu-20.04'
if: startsWith(github.ref, 'refs/tags') && matrix.os != 'ubuntu-20.04' && matrix.arch == ''
- name: Smoke Testing (Linux)
- name: Smoke Testing (Linux amd64)
env:
RELEASE: ${{ inputs.RELEASE }}
KUBESCAPE_SKIP_UPDATE_CHECK: "true"
run: python3 smoke_testing/init.py ${PWD}/build/kubescape-ubuntu-latest
if: matrix.os == 'ubuntu-20.04'
run: ${{ env.DOCKER_CMD }} python3 smoke_testing/init.py ${PWD}/build/kubescape-ubuntu-latest
if: matrix.os == 'ubuntu-20.04' && matrix.arch == ''
- name: Smoke Testing (Linux ${{ matrix.arch }})
env:
RELEASE: ${{ inputs.RELEASE }}
KUBESCAPE_SKIP_UPDATE_CHECK: "true"
run: ${{ env.DOCKER_CMD }} python3 smoke_testing/init.py ./build/kubescape-${{ matrix.arch }}-ubuntu-latest
if: startsWith(github.ref, 'refs/tags') && matrix.os == 'ubuntu-20.04' && matrix.arch != ''
- name: golangci-lint
if: matrix.os == 'ubuntu-20.04'
@@ -165,7 +256,7 @@ jobs:
name: Upload artifact (Linux)
if: matrix.os == 'ubuntu-20.04'
with:
name: kubescape-ubuntu-latest
name: kubescape${{ matrix.arch }}-ubuntu-latest
path: build/
if-no-files-found: error
@@ -173,7 +264,7 @@ jobs:
name: Upload artifact (MacOS, Win)
if: matrix.os != 'ubuntu-20.04'
with:
name: kubescape-${{ matrix.os }}
name: kubescape${{ matrix.arch }}-${{ matrix.os }}
path: build/
if-no-files-found: error

.github/workflows/build-image.yaml

@@ -0,0 +1,34 @@
name: build-image
on:
workflow_dispatch:
inputs:
CLIENT:
required: false
type: string
default: "test"
IMAGE_TAG:
required: true
type: string
CO_SIGN:
type: boolean
required: false
default: false
PLATFORMS:
type: boolean
required: false
default: false
jobs:
publish-image:
permissions:
id-token: write
packages: write
contents: read
uses: ./.github/workflows/d-publish-image.yaml
with:
client: ${{ inputs.CLIENT }}
image_name: "quay.io/${{ github.repository_owner }}/kubescape"
image_tag: ${{ inputs.IMAGE_TAG }}
support_platforms: ${{ inputs.PLATFORMS }}
cosign: ${{ inputs.CO_SIGN }}
secrets: inherit


@@ -19,6 +19,10 @@ jobs:
create-release:
name: create-release
runs-on: ubuntu-latest
env:
MAC_OS: macos-latest
UBUNTU_OS: ubuntu-latest
WINDOWS_OS: windows-latest
# permissions:
# contents: write
steps:
@@ -26,6 +30,11 @@ jobs:
id: download-artifact
with:
path: .
# TODO: kubescape-windows-latest is deprecated and should be removed
- name: Get kubescape.exe from kubescape-windows-latest
run: cp ./kubescape-${{ env.WINDOWS_OS }}/kubescape-${{ env.WINDOWS_OS }} ./kubescape-${{ env.WINDOWS_OS }}/kubescape.exe
- name: Set release token
run: |
if [ "${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}" != "" ]; then
@@ -35,10 +44,6 @@ jobs:
fi
- name: Release
uses: softprops/action-gh-release@de2c0eb89ae2a093876385947365aca7b0e5f844 # ratchet:softprops/action-gh-release@v1
env:
MAC_OS: macos-latest
UBUNTU_OS: ubuntu-latest
WINDOWS_OS: windows-latest
with:
token: ${{ env.TOKEN }}
name: ${{ inputs.RELEASE_NAME }}
@@ -47,13 +52,21 @@ jobs:
draft: ${{ inputs.DRAFT }}
fail_on_unmatched_files: true
prerelease: false
# TODO: kubescape-windows-latest is deprecated and should be removed
files: |
./kubescape-${{ env.WINDOWS_OS }}/kubescape-${{ env.WINDOWS_OS }}
./kubescape-${{ env.MAC_OS }}/kubescape-${{ env.MAC_OS }}
./kubescape-${{ env.MAC_OS }}/kubescape-${{ env.MAC_OS }}.sha256
./kubescape-${{ env.MAC_OS }}/kubescape-${{ env.MAC_OS }}.tar.gz
./kubescape-${{ env.UBUNTU_OS }}/kubescape-${{ env.UBUNTU_OS }}
./kubescape-${{ env.UBUNTU_OS }}/kubescape-${{ env.UBUNTU_OS }}.sha256
./kubescape-${{ env.UBUNTU_OS }}/kubescape-${{ env.UBUNTU_OS }}.tar.gz
./kubescape-${{ env.WINDOWS_OS }}/kubescape-${{ env.WINDOWS_OS }}
./kubescape-${{ env.WINDOWS_OS }}/kubescape.exe
./kubescape-${{ env.WINDOWS_OS }}/kubescape-${{ env.WINDOWS_OS }}.sha256
./kubescape-${{ env.WINDOWS_OS }}/kubescape-${{ env.WINDOWS_OS }}.tar.gz
./kubescapearm64-${{ env.MAC_OS }}/kubescape-arm64-${{ env.MAC_OS }}
./kubescapearm64-${{ env.MAC_OS }}/kubescape-arm64-${{ env.MAC_OS }}.sha256
./kubescapearm64-${{ env.MAC_OS }}/kubescape-arm64-${{ env.MAC_OS }}.tar.gz
./kubescapearm64-${{ env.UBUNTU_OS }}/kubescape-arm64-${{ env.UBUNTU_OS }}
./kubescapearm64-${{ env.UBUNTU_OS }}/kubescape-arm64-${{ env.UBUNTU_OS }}.sha256
./kubescapearm64-${{ env.UBUNTU_OS }}/kubescape-arm64-${{ env.UBUNTU_OS }}.tar.gz


@@ -0,0 +1,30 @@
# This workflow was added by CodeSee. Learn more at https://codesee.io/
# This is v2.0 of this workflow file
on:
pull_request_target:
types: [opened, synchronize, reopened]
paths-ignore:
- '**.yaml'
- '**.yml'
- '**.md'
- '**.sh'
- 'website/*'
- 'examples/*'
- 'docs/*'
- 'build/*'
- '.github/*'
name: CodeSee
permissions: read-all
jobs:
codesee:
runs-on: ubuntu-latest
continue-on-error: true
name: Analyze the repo with CodeSee
steps:
- uses: Codesee-io/codesee-action@v2
with:
codesee-token: ${{ secrets.CODESEE_ARCH_DIAG_API_TOKEN }}
codesee-url: https://app.codesee.io

.github/workflows/comments.yaml

@@ -0,0 +1,23 @@
name: pr-agent
on:
issue_comment:
permissions:
issues: write
pull-requests: write
jobs:
pr_agent:
runs-on: ubuntu-latest
name: Run pr agent on every pull request, respond to user comments
steps:
- name: PR Agent action step
continue-on-error: true
id: pragent
uses: Codium-ai/pr-agent@main
env:
OPENAI_KEY: ${{ secrets.OPENAI_KEY }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}


@@ -16,12 +16,24 @@ spec:
arch: amd64
{{ addURIAndSha "https://github.com/kubescape/kubescape/releases/download/{{ .TagName }}/kubescape-macos-latest.tar.gz" .TagName }}
bin: kubescape
- selector:
matchLabels:
os: darwin
arch: arm64
{{ addURIAndSha "https://github.com/kubescape/kubescape/releases/download/{{ .TagName }}/kubescape-arm64-macos-latest.tar.gz" .TagName }}
bin: kubescape
- selector:
matchLabels:
os: linux
arch: amd64
{{ addURIAndSha "https://github.com/kubescape/kubescape/releases/download/{{ .TagName }}/kubescape-ubuntu-latest.tar.gz" .TagName }}
bin: kubescape
- selector:
matchLabels:
os: linux
arch: arm64
{{ addURIAndSha "https://github.com/kubescape/kubescape/releases/download/{{ .TagName }}/kubescape-arm64-ubuntu-latest.tar.gz" .TagName }}
bin: kubescape
- selector:
matchLabels:
os: windows


@@ -15,6 +15,10 @@ so the maintainers are able to help guide you and let you know if you are going
Please follow our [code of conduct](CODE_OF_CONDUCT.md) in all of your interactions within the project.
## Build and test locally
Please follow the [instructions here](https://github.com/kubescape/kubescape/wiki/Building).
## Pull Request Process
1. Ensure any install or build dependencies are removed before the end of the layer when doing a


@@ -10,6 +10,14 @@ libgit2:
-git submodule update --init --recursive
cd git2go; make install-static
# build and install libgit2 for macOS m1
libgit2arm64:
git submodule update --init --recursive
if [ "$(shell uname -s)" = "Darwin" ]; then \
sed -i '' 's/cmake -D/cmake -DCMAKE_OSX_ARCHITECTURES="arm64" -D/' git2go/script/build-libgit2.sh; \
fi
cd git2go; make install-static
# go build tags
TAGS = "gitenabled,static"


@@ -37,11 +37,11 @@ curl -s https://raw.githubusercontent.com/kubescape/kubescape/master/install.sh
Learn more about:
* [Installing Kubescape](docs/getting-started.md#install-kubescape)
* [Installing Kubescape](docs/installation.md)
* [Running your first scan](docs/getting-started.md#run-your-first-scan)
* [Usage](docs/getting-started.md#examples)
* [Architecture](docs/architecture.md)
* [Building Kubescape from source](docs/building.md)
* [Building Kubescape from source](https://github.com/kubescape/kubescape/wiki/Building)
_Did you know you can use Kubescape in all these places?_


@@ -1,51 +0,0 @@
@ECHO OFF
IF "%1"=="install" goto Install
IF "%1"=="build" goto Build
IF "%1"=="all" goto All
IF "%1"=="" goto Error ELSE goto Error
:Install
if exist C:\MSYS64\ (
echo "MSYS2 already installed"
) else (
mkdir temp_install & cd temp_install
echo "Downloading MSYS2..."
curl -L https://github.com/msys2/msys2-installer/releases/download/2022-06-03/msys2-x86_64-20220603.exe > msys2-x86_64-20220603.exe
echo "Installing MSYS2..."
msys2-x86_64-20220603.exe install --root C:\MSYS64 --confirm-command
cd .. && rmdir /s /q temp_install
)
echo "Adding MSYS2 to path..."
SET "PATH=C:\MSYS64\mingw64\bin;C:\MSYS64\usr\bin;%PATH%"
echo %PATH%
echo "Installing MSYS2 packages..."
pacman -S --needed --noconfirm make
pacman -S --needed --noconfirm mingw-w64-x86_64-cmake
pacman -S --needed --noconfirm mingw-w64-x86_64-gcc
pacman -S --needed --noconfirm mingw-w64-x86_64-pkg-config
pacman -S --needed --noconfirm msys2-w32api-runtime
IF "%1"=="all" GOTO Build
GOTO End
:Build
SET "PATH=C:\MSYS2\mingw64\bin;C:\MSYS2\usr\bin;%PATH%"
make libgit2
GOTO End
:All
GOTO Install
:Error
echo "Error: Unknown option"
GOTO End
:End

build.ps1

@@ -0,0 +1,78 @@
# Defining input params
param (
[string]$mode = "error"
)
# Function to install MSYS
function Install {
Write-Host "Starting install..." -ForegroundColor Cyan
# Check to see if already installed
if (Test-Path "C:\MSYS64\") {
Write-Host "MSYS2 already installed" -ForegroundColor Green
} else {
# Create a temp directory
New-Item -Path "$PSScriptRoot\temp_install" -ItemType Directory > $null
# Download MSYS
Write-Host "Downloading MSYS2..." -ForegroundColor Cyan
$bitsJobObj = Start-BitsTransfer "https://github.com/msys2/msys2-installer/releases/download/2022-06-03/msys2-x86_64-20220603.exe" -Destination "$PSScriptRoot\temp_install\msys2-x86_64-20220603.exe"
switch ($bitsJobObj.JobState) {
"Transferred" {
Complete-BitsTransfer -BitsJob $bitsJobObj
break
}
"Error" {
throw "Error downloading"
}
}
Write-Host "MSYS2 download complete" -ForegroundColor Green
# Install MSYS
Write-Host "Installing MSYS2..." -ForegroundColor Cyan
Start-Process -Filepath "$PSScriptRoot\temp_install\msys2-x86_64-20220603.exe" -ArgumentList @("install", "--root", "C:\MSYS64", "--confirm-command") -Wait
Write-Host "MSYS2 install complete" -ForegroundColor Green
# Remove temp directory
Remove-Item "$PSScriptRoot\temp_install" -Recurse
}
# Set PATH
$env:Path = "C:\MSYS64\mingw64\bin;C:\MSYS64\usr\bin;" + $env:Path
# Install MSYS packages
Write-Host "Installing MSYS2 packages..." -ForegroundColor Cyan
Start-Process -Filepath "pacman" -ArgumentList @("-S", "--needed", "--noconfirm", "make") -Wait
Start-Process -Filepath "pacman" -ArgumentList @("-S", "--needed", "--noconfirm", "mingw-w64-x86_64-cmake") -Wait
Start-Process -Filepath "pacman" -ArgumentList @("-S", "--needed", "--noconfirm", "mingw-w64-x86_64-gcc") -Wait
Start-Process -Filepath "pacman" -ArgumentList @("-S", "--needed", "--noconfirm", "mingw-w64-x86_64-pkg-config") -Wait
Start-Process -Filepath "pacman" -ArgumentList @("-S", "--needed", "--noconfirm", "msys2-w32api-runtime") -Wait
Write-Host "MSYS2 packages install complete" -ForegroundColor Green
Write-Host "Install complete" -ForegroundColor Green
}
# Function to build libgit2
function Build {
Write-Host "Starting build..." -ForegroundColor Cyan
# Set PATH
$env:Path = "C:\MSYS64\mingw64\bin;C:\MSYS64\usr\bin;" + $env:Path
# Build
Start-Process -Filepath "make" -ArgumentList @("libgit2") -Wait -NoNewWindow
Write-Host "Build complete" -ForegroundColor Green
}
# Check user call mode
if ($mode -eq "all") {
Install
Build
} elseif ($mode -eq "install") {
Install
} elseif ($mode -eq "build") {
Build
} else {
Write-Host "Error: -mode should be one of (all|install|build)" -ForegroundColor Red
}


@@ -27,7 +27,13 @@ def get_build_dir():
def get_package_name():
if CURRENT_PLATFORM not in platformSuffixes: raise OSError("Platform %s is not supported!" % (CURRENT_PLATFORM))
return "kubescape-" + platformSuffixes[CURRENT_PLATFORM]
# # TODO: kubescape-windows-latest is deprecated and should be removed
# if CURRENT_PLATFORM == "Windows": return "kubescape.exe"
package_name = "kubescape-"
if os.getenv("GOARCH"):
package_name += os.getenv("GOARCH") + "-"
return package_name + platformSuffixes[CURRENT_PLATFORM]
def main():
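For reference, a minimal Go sketch of the artifact-naming rule this build.py change introduces: when GOARCH is set, the package becomes kubescape-<arch>-<platform suffix>, otherwise kubescape-<platform suffix>. The suffix values here are assumptions based on the artifact names used elsewhere in these workflows.

package main

import (
    "fmt"
    "os"
)

// packageName mirrors the naming logic added to build.py above: an optional
// GOARCH segment is spliced between the "kubescape-" prefix and the platform
// suffix (e.g. "ubuntu-latest", "macos-latest").
func packageName(platformSuffix string) string {
    name := "kubescape-"
    if arch := os.Getenv("GOARCH"); arch != "" {
        name += arch + "-"
    }
    return name + platformSuffix
}

func main() {
    os.Setenv("GOARCH", "arm64")
    fmt.Println(packageName("ubuntu-latest")) // kubescape-arm64-ubuntu-latest
}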


@@ -1,4 +1,4 @@
FROM golang:1.19-alpine as builder
FROM golang:1.20-alpine as builder
ARG image_version
ARG client


@@ -12,6 +12,7 @@ import (
"github.com/kubescape/kubescape/v2/core/meta"
v1 "github.com/kubescape/kubescape/v2/core/meta/datastructures/v1"
"github.com/spf13/cobra"
"golang.org/x/exp/slices"
)
var (
@@ -55,7 +56,7 @@ func GetDownloadCmd(ks meta.IKubescape) *cobra.Command {
if len(args) < 1 {
return fmt.Errorf("policy type required, supported: %v", supported)
}
if cautils.StringInSlice(core.DownloadSupportCommands(), args[0]) == cautils.ValueNotFound {
if !slices.Contains(core.DownloadSupportCommands(), args[0]) {
return fmt.Errorf("invalid parameter '%s'. Supported parameters: %s", args[0], supported)
}
return nil
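The same swap, from the custom cautils.StringInSlice helper to the generic slices.Contains, recurs in the list command below. A minimal sketch of the new membership test, assuming only the golang.org/x/exp module:

package main

import (
    "fmt"

    "golang.org/x/exp/slices"
)

func main() {
    supported := []string{"framework", "control", "artifacts"}
    // slices.Contains reads as a plain boolean, replacing the
    // sentinel-index comparison used by the old helper.
    if !slices.Contains(supported, "control") {
        fmt.Println("invalid parameter")
        return
    }
    fmt.Println("supported parameter")
}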


@@ -17,7 +17,7 @@ var fixCmdExamples = fmt.Sprintf(`
Use with caution, this command will change your files in-place.
# Fix kubernetes YAML manifest files based on a scan command output (output.json)
1) %[1]s scan --format json --format-version v2 --output output.json
1) %[1]s scan . --format json --output output.json
2) %[1]s fix output.json
`, cautils.ExecName())


@@ -11,6 +11,7 @@ import (
"github.com/kubescape/kubescape/v2/core/meta"
v1 "github.com/kubescape/kubescape/v2/core/meta/datastructures/v1"
"github.com/spf13/cobra"
"golang.org/x/exp/slices"
)
var (
@@ -43,7 +44,7 @@ func GetListCmd(ks meta.IKubescape) *cobra.Command {
if len(args) < 1 {
return fmt.Errorf("policy type requeued, supported: %s", supported)
}
if cautils.StringInSlice(core.ListSupportActions(), args[0]) == cautils.ValueNotFound {
if !slices.Contains(core.ListSupportActions(), args[0]) {
return fmt.Errorf("invalid parameter '%s'. Supported parameters: %s", args[0], supported)
}
return nil
@@ -63,8 +64,6 @@ func GetListCmd(ks meta.IKubescape) *cobra.Command {
},
}
listCmd.PersistentFlags().StringVarP(&listPolicies.Credentials.Account, "account", "", "", "Kubescape SaaS account ID. Default will load account ID from cache")
listCmd.PersistentFlags().StringVarP(&listPolicies.Credentials.ClientID, "client-id", "", "", "Kubescape SaaS client ID. Default will load client ID from cache, read more - https://hub.armosec.io/docs/authentication")
listCmd.PersistentFlags().StringVarP(&listPolicies.Credentials.SecretKey, "secret-key", "", "", "Kubescape SaaS secret key. Default will load secret key from cache, read more - https://hub.armosec.io/docs/authentication")
listCmd.PersistentFlags().StringVar(&listPolicies.Format, "format", "pretty-print", "output format. supported: 'pretty-print'/'json'")
listCmd.PersistentFlags().MarkDeprecated("id", "Control IDs are included in list outputs")


@@ -110,6 +110,9 @@ func getControlCmd(ks meta.IKubescape, scanInfo *cautils.ScanInfo) *cobra.Comman
if results.GetRiskScore() > float32(scanInfo.FailThreshold) {
logger.L().Fatal("scan risk-score is above permitted threshold", helpers.String("risk-score", fmt.Sprintf("%.2f", results.GetRiskScore())), helpers.String("fail-threshold", fmt.Sprintf("%.2f", scanInfo.FailThreshold)))
}
if results.GetComplianceScore() < float32(scanInfo.ComplianceThreshold) {
logger.L().Fatal("scan compliance-score is below permitted threshold", helpers.String("compliance score", fmt.Sprintf("%.2f", results.GetComplianceScore())), helpers.String("compliance-threshold", fmt.Sprintf("%.2f", scanInfo.ComplianceThreshold)))
}
enforceSeverityThresholds(results.GetResults().SummaryDetails.GetResourcesSeverityCounters(), scanInfo, terminateOnExceedingSeverity)
return nil


@@ -11,6 +11,7 @@ import (
apisv1 "github.com/kubescape/opa-utils/httpserver/apis/v1"
reporthandlingapis "github.com/kubescape/opa-utils/reporthandling/apis"
"github.com/kubescape/opa-utils/reporthandling/results/v1/reportsummary"
"golang.org/x/exp/slices"
logger "github.com/kubescape/go-logger"
"github.com/kubescape/go-logger/helpers"
@@ -78,14 +79,15 @@ func getFrameworkCmd(ks meta.IKubescape, scanInfo *cautils.ScanInfo) *cobra.Comm
var frameworks []string
if len(args) == 0 { // scan all frameworks
if len(args) == 0 {
scanInfo.ScanAll = true
} else {
// Read frameworks from input args
frameworks = strings.Split(args[0], ",")
if cautils.StringInSlice(frameworks, "all") != cautils.ValueNotFound {
if slices.Contains(frameworks, "all") {
scanInfo.ScanAll = true
frameworks = getter.NativeFrameworks
}
if len(args) > 1 {
if len(args[1:]) == 0 || args[1] != "-" {
@@ -105,6 +107,7 @@ func getFrameworkCmd(ks meta.IKubescape, scanInfo *cautils.ScanInfo) *cobra.Comm
}
}
}
scanInfo.SetScanType(cautils.ScanTypeFramework)
scanInfo.FrameworkScan = true
scanInfo.SetPolicyIdentifiers(frameworks, apisv1.KindFramework)
@@ -118,12 +121,16 @@ func getFrameworkCmd(ks meta.IKubescape, scanInfo *cautils.ScanInfo) *cobra.Comm
if err = results.HandleResults(ctx); err != nil {
logger.L().Fatal(err.Error())
}
if !scanInfo.VerboseMode {
if !scanInfo.VerboseMode && scanInfo.ScanType == cautils.ScanTypeFramework {
logger.L().Info("Run with '--verbose'/'-v' flag for detailed resources view\n")
}
if results.GetRiskScore() > float32(scanInfo.FailThreshold) {
logger.L().Fatal("scan risk-score is above permitted threshold", helpers.String("risk-score", fmt.Sprintf("%.2f", results.GetRiskScore())), helpers.String("fail-threshold", fmt.Sprintf("%.2f", scanInfo.FailThreshold)))
}
if results.GetComplianceScore() < float32(scanInfo.ComplianceThreshold) {
logger.L().Fatal("scan compliance-score is below permitted threshold", helpers.String("compliance-score", fmt.Sprintf("%.2f", results.GetComplianceScore())), helpers.String("compliance-threshold", fmt.Sprintf("%.2f", scanInfo.ComplianceThreshold)))
}
enforceSeverityThresholds(results.GetData().Report.SummaryDetails.GetResourcesSeverityCounters(), scanInfo, terminateOnExceedingSeverity)
return nil
@@ -204,6 +211,9 @@ func validateFrameworkScanInfo(scanInfo *cautils.ScanInfo) error {
if scanInfo.Submit && scanInfo.Local {
return fmt.Errorf("you can use `keep-local` or `submit`, but not both")
}
if 100 < scanInfo.ComplianceThreshold || 0 > scanInfo.ComplianceThreshold {
return fmt.Errorf("bad argument: out of range threshold")
}
if 100 < scanInfo.FailThreshold || 0 > scanInfo.FailThreshold {
return fmt.Errorf("bad argument: out of range threshold")
}
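Note that the two thresholds pull in opposite directions: the risk score fails the scan when it rises above --fail-threshold (default 100), while the compliance score fails it when it drops below --compliance-threshold (default 0), and both must stay within 0-100. A minimal sketch of the combined gate, with hypothetical names:

package main

import (
    "errors"
    "fmt"
)

// checkThresholds sketches the exit-code-1 conditions above. With the
// defaults (failThreshold=100, complianceThreshold=0) neither check can trip.
func checkThresholds(riskScore, complianceScore, failThreshold, complianceThreshold float32) error {
    if failThreshold < 0 || failThreshold > 100 || complianceThreshold < 0 || complianceThreshold > 100 {
        return errors.New("bad argument: out of range threshold")
    }
    if riskScore > failThreshold {
        return fmt.Errorf("risk-score %.2f is above permitted threshold %.2f", riskScore, failThreshold)
    }
    if complianceScore < complianceThreshold {
        return fmt.Errorf("compliance-score %.2f is below permitted threshold %.2f", complianceScore, complianceThreshold)
    }
    return nil
}

func main() {
    fmt.Println(checkThresholds(42, 81.5, 100, 85)) // compliance 81.5 < 85 -> error
}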

cmd/scan/image.go

@@ -0,0 +1,117 @@
package scan
import (
"context"
"fmt"
logger "github.com/kubescape/go-logger"
"github.com/kubescape/go-logger/iconlogger"
"github.com/kubescape/kubescape/v2/core/cautils"
"github.com/kubescape/kubescape/v2/core/core"
"github.com/kubescape/kubescape/v2/core/meta"
"github.com/kubescape/kubescape/v2/core/pkg/resultshandling"
"github.com/kubescape/kubescape/v2/pkg/imagescan"
"github.com/spf13/cobra"
)
type imageScanInfo struct {
Username string
Password string
}
// TODO(vladklokun): document image scanning on the Kubescape Docs Hub?
var (
imageExample = fmt.Sprintf(`
This command is still in BETA. Feel free to contact the kubescape maintainers for more information.
Scan an image for vulnerabilities.
# Scan the 'nginx' image
%[1]s scan image "nginx"
# Image scan documentation:
# https://hub.armosec.io/docs/images
`, cautils.ExecName())
)
// imageCmd represents the image command
func getImageCmd(ks meta.IKubescape, scanInfo *cautils.ScanInfo, imgScanInfo *imageScanInfo) *cobra.Command {
cmd := &cobra.Command{
Use: "image <IMAGE_NAME>",
Short: "Scan an image for vulnerabilities",
Example: imageExample,
Args: func(cmd *cobra.Command, args []string) error {
if len(args) != 1 {
return fmt.Errorf("the command takes exactly one image name as an argument")
}
return nil
},
RunE: func(cmd *cobra.Command, args []string) error {
if err := validateImageScanInfo(scanInfo); err != nil {
return err
}
failOnSeverity := imagescan.ParseSeverity(scanInfo.FailThresholdSeverity)
ctx := context.Background()
logger.InitLogger(iconlogger.LoggerName)
dbCfg, _ := imagescan.NewDefaultDBConfig()
svc := imagescan.NewScanService(dbCfg)
creds := imagescan.RegistryCredentials{
Username: imgScanInfo.Username,
Password: imgScanInfo.Password,
}
userInput := args[0]
logger.L().Start(fmt.Sprintf("Scanning image: %s", userInput))
scanResults, err := svc.Scan(ctx, userInput, creds)
if err != nil {
logger.L().StopError(fmt.Sprintf("Failed to scan image: %s", userInput))
return err
}
logger.L().StopSuccess(fmt.Sprintf("Successfully scanned image: %s", userInput))
scanInfo.SetScanType(cautils.ScanTypeImage)
outputPrinters := core.GetOutputPrinters(scanInfo, ctx)
uiPrinter := core.GetUIPrinter(ctx, scanInfo)
resultsHandler := resultshandling.NewResultsHandler(nil, outputPrinters, uiPrinter)
resultsHandler.ImageScanData = []cautils.ImageScanData{
{
PresenterConfig: scanResults,
Image: userInput,
},
}
resultsHandler.HandleResults(ctx)
if imagescan.ExceedsSeverityThreshold(scanResults, failOnSeverity) {
terminateOnExceedingSeverity(scanInfo, logger.L())
}
return err
},
}
cmd.PersistentFlags().StringVarP(&imgScanInfo.Username, "username", "u", "", "Username for registry login")
cmd.PersistentFlags().StringVarP(&imgScanInfo.Password, "password", "p", "", "Password for registry login")
return cmd
}
// validateImageScanInfo validates the ScanInfo struct for the `image` command
func validateImageScanInfo(scanInfo *cautils.ScanInfo) error {
severity := scanInfo.FailThresholdSeverity
if err := validateSeverity(severity); severity != "" && err != nil {
return err
}
return nil
}
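Condensed, the happy path of the new command boils down to the sketch below. It assumes the kubescape module; the imagescan calls are the ones visible in this diff, while the helper name and printed message are illustrative only.

package main

import (
    "context"
    "fmt"

    "github.com/kubescape/kubescape/v2/pkg/imagescan"
)

// scanOneImage trims the RunE body above to its essentials: build the default
// vulnerability DB config, create the scan service, and scan a single image.
func scanOneImage(image string) error {
    dbCfg, _ := imagescan.NewDefaultDBConfig()
    svc := imagescan.NewScanService(dbCfg)
    creds := imagescan.RegistryCredentials{} // set Username/Password for private registries
    if _, err := svc.Scan(context.Background(), image, creds); err != nil {
        return err
    }
    fmt.Printf("successfully scanned image: %s\n", image)
    return nil
}

func main() {
    if err := scanOneImage("nginx"); err != nil {
        fmt.Println(err)
    }
}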


@@ -1,6 +1,7 @@
package scan
import (
"context"
"flag"
"fmt"
"strings"
@@ -9,6 +10,7 @@ import (
"github.com/kubescape/kubescape/v2/core/cautils"
"github.com/kubescape/kubescape/v2/core/cautils/getter"
"github.com/kubescape/kubescape/v2/core/meta"
v1 "github.com/kubescape/opa-utils/httpserver/apis/v1"
"github.com/spf13/cobra"
)
@@ -16,7 +18,7 @@ var scanCmdExamples = fmt.Sprintf(`
Scan command is for scanning an existing cluster or kubernetes manifest files based on pre-defined frameworks
# Scan current cluster with all frameworks
%[1]s scan --enable-host-scan --verbose
%[1]s scan
# Scan kubernetes YAML manifest files
%[1]s scan .
@@ -41,7 +43,8 @@ func GetScanCommand(ks meta.IKubescape) *cobra.Command {
Long: `The action you want to perform`,
Example: scanCmdExamples,
Args: func(cmd *cobra.Command, args []string) error {
if len(args) > 0 {
// setting input patterns for a framework scan is only relevant for the non-security view
if len(args) > 0 && scanInfo.View != string(cautils.SecurityViewType) {
if args[0] != "framework" && args[0] != "control" {
return getFrameworkCmd(ks, &scanInfo).RunE(cmd, append([]string{strings.Join(getter.NativeFrameworks, ",")}, args...))
}
@@ -49,6 +52,11 @@ func GetScanCommand(ks meta.IKubescape) *cobra.Command {
return nil
},
RunE: func(cmd *cobra.Command, args []string) error {
if scanInfo.View == string(cautils.SecurityViewType) {
setSecurityViewScanInfo(args, &scanInfo)
return securityScan(scanInfo, ks)
}
if len(args) == 0 {
return getFrameworkCmd(ks, &scanInfo).RunE(cmd, []string{strings.Join(getter.NativeFrameworks, ",")})
@@ -66,15 +74,14 @@ func GetScanCommand(ks meta.IKubescape) *cobra.Command {
scanCmd.PersistentFlags().StringVarP(&scanInfo.Credentials.Account, "account", "", "", "Kubescape SaaS account ID. Default will load account ID from cache")
scanCmd.PersistentFlags().BoolVar(&scanInfo.CreateAccount, "create-account", false, "Create a Kubescape SaaS account ID account ID is not found in cache. After creating the account, the account ID will be saved in cache. In addition, the scanning results will be uploaded to the Kubescape SaaS")
scanCmd.PersistentFlags().StringVarP(&scanInfo.Credentials.ClientID, "client-id", "", "", "Kubescape SaaS client ID. Default will load client ID from cache, read more - https://hub.armosec.io/docs/authentication")
scanCmd.PersistentFlags().StringVarP(&scanInfo.Credentials.SecretKey, "secret-key", "", "", "Kubescape SaaS secret key. Default will load secret key from cache, read more - https://hub.armosec.io/docs/authentication")
scanCmd.PersistentFlags().StringVarP(&scanInfo.KubeContext, "kube-context", "", "", "Kube context. Default will use the current-context")
scanCmd.PersistentFlags().StringVar(&scanInfo.ControlsInputs, "controls-config", "", "Path to an controls-config obj. If not set will download controls-config from ARMO management portal")
scanCmd.PersistentFlags().StringVar(&scanInfo.UseExceptions, "exceptions", "", "Path to an exceptions obj. If not set will download exceptions from ARMO management portal")
scanCmd.PersistentFlags().StringVar(&scanInfo.UseArtifactsFrom, "use-artifacts-from", "", "Load artifacts from local directory. If not used will download them")
scanCmd.PersistentFlags().StringVarP(&scanInfo.ExcludedNamespaces, "exclude-namespaces", "e", "", "Namespaces to exclude from scanning. Notice, when running with `exclude-namespace` kubescape does not scan cluster-scoped objects.")
scanCmd.PersistentFlags().StringVarP(&scanInfo.ExcludedNamespaces, "exclude-namespaces", "e", "", "Namespaces to exclude from scanning. e.g: --exclude-namespaces ns-a,ns-b. Notice, when running with `exclude-namespace` kubescape does not scan cluster-scoped objects.")
scanCmd.PersistentFlags().Float32VarP(&scanInfo.FailThreshold, "fail-threshold", "t", 100, "Failure threshold is the percent above which the command fails and returns exit code 1")
scanCmd.PersistentFlags().Float32VarP(&scanInfo.ComplianceThreshold, "compliance-threshold", "", 0, "Compliance threshold is the percent below which the command fails and returns exit code 1")
scanCmd.PersistentFlags().StringVar(&scanInfo.FailThresholdSeverity, "severity-threshold", "", "Severity threshold is the severity of failed controls at which the command fails and returns exit code 1")
scanCmd.PersistentFlags().StringVarP(&scanInfo.Format, "format", "f", "", `Output file format. Supported formats: "pretty-printer", "json", "junit", "prometheus", "pdf", "html", "sarif"`)
@@ -82,7 +89,7 @@ func GetScanCommand(ks meta.IKubescape) *cobra.Command {
scanCmd.PersistentFlags().BoolVarP(&scanInfo.Local, "keep-local", "", false, "If you do not want your Kubescape results reported to configured backend.")
scanCmd.PersistentFlags().StringVarP(&scanInfo.Output, "output", "o", "", "Output file. Print output to file and not stdout")
scanCmd.PersistentFlags().BoolVarP(&scanInfo.VerboseMode, "verbose", "v", false, "Display all of the input resources and not only failed resources")
scanCmd.PersistentFlags().StringVar(&scanInfo.View, "view", string(cautils.ResourceViewType), fmt.Sprintf("View results based on the %s/%s. default is --view=%s", cautils.ResourceViewType, cautils.ControlViewType, cautils.ResourceViewType))
scanCmd.PersistentFlags().StringVar(&scanInfo.View, "view", string(cautils.ResourceViewType), fmt.Sprintf("View results based on the %s/%s/%s. default is --view=%s", cautils.ResourceViewType, cautils.ControlViewType, cautils.SecurityViewType, cautils.ResourceViewType))
scanCmd.PersistentFlags().BoolVar(&scanInfo.UseDefault, "use-default", false, "Load local policy object from default path. If not used will download latest")
scanCmd.PersistentFlags().StringSliceVar(&scanInfo.UseFrom, "use-from", nil, "Load local policy object from specified path. If not used will download latest")
scanCmd.PersistentFlags().StringVar(&scanInfo.HostSensorYamlPath, "host-scan-yaml", "", "Override default host scanner DaemonSet. Use this flag cautiously")
@@ -91,11 +98,17 @@ func GetScanCommand(ks meta.IKubescape) *cobra.Command {
scanCmd.PersistentFlags().BoolVarP(&scanInfo.Submit, "submit", "", false, "Submit the scan results to Kubescape SaaS where you can see the results in a user-friendly UI, choose your preferred compliance framework, check risk results history and trends, manage exceptions, get remediation recommendations and much more. By default the results are not submitted")
scanCmd.PersistentFlags().BoolVarP(&scanInfo.OmitRawResources, "omit-raw-resources", "", false, "Omit raw resources from the output. By default the raw resources are included in the output")
scanCmd.PersistentFlags().BoolVarP(&scanInfo.PrintAttackTree, "print-attack-tree", "", false, "Print attack tree")
scanCmd.PersistentFlags().BoolVarP(&scanInfo.ScanImages, "scan-images", "", false, "Scan resources images")
scanCmd.PersistentFlags().MarkDeprecated("silent", "use '--logger' flag instead. Flag will be removed at 1.May.2022")
scanCmd.PersistentFlags().MarkDeprecated("fail-threshold", "use '--compliance-threshold' flag instead. Flag will be removed at 1.Dec.2023")
scanCmd.PersistentFlags().StringVarP(&scanInfo.Credentials.ClientID, "client-id", "", "", "Kubescape SaaS client ID. Default will load client ID from cache, read more - https://hub.armosec.io/docs/authentication")
scanCmd.PersistentFlags().StringVarP(&scanInfo.Credentials.SecretKey, "secret-key", "", "", "Kubescape SaaS secret key. Default will load secret key from cache, read more - https://hub.armosec.io/docs/authentication")
scanCmd.PersistentFlags().MarkDeprecated("client-id", "login to Kubescape SaaS will be unsupported, please contact the Kubescape maintainers for more information")
scanCmd.PersistentFlags().MarkDeprecated("secret-key", "login to Kubescape SaaS will be unsupported, please contact the Kubescape maintainers for more information")
// hidden flags
scanCmd.PersistentFlags().MarkHidden("host-scan-yaml") // this flag should be used very cautiously. We prefer users will not use it at all unless the DaemonSet can not run pods on the nodes
scanCmd.PersistentFlags().MarkHidden("omit-raw-resources")
scanCmd.PersistentFlags().MarkHidden("print-attack-tree")
@@ -105,9 +118,46 @@ func GetScanCommand(ks meta.IKubescape) *cobra.Command {
hostF := scanCmd.PersistentFlags().VarPF(&scanInfo.HostSensorEnabled, "enable-host-scan", "", "Deploy Kubescape host-sensor daemonset in the scanned cluster. Deleting it right after we collecting the data. Required to collect valuable data from cluster nodes for certain controls. Yaml file: https://github.com/kubescape/kubescape/blob/master/core/pkg/hostsensorutils/hostsensor.yaml")
hostF.NoOptDefVal = "true"
hostF.DefValue = "false, for no TTY in stdin"
scanCmd.PersistentFlags().MarkHidden("enable-host-scan")
scanCmd.PersistentFlags().MarkDeprecated("enable-host-scan", "To activate the host scanner capability, proceed with the installation of the kubescape operator chart found here: https://github.com/kubescape/helm-charts/tree/main/charts/kubescape-cloud-operator. The flag will be removed at 1.Dec.2023")
scanCmd.PersistentFlags().MarkHidden("host-scan-yaml") // this flag should be used very cautiously. We prefer users will not use it at all unless the DaemonSet can not run pods on the nodes
scanCmd.PersistentFlags().MarkDeprecated("host-scan-yaml", "To activate the host scanner capability, proceed with the installation of the kubescape operator chart found here: https://github.com/kubescape/helm-charts/tree/main/charts/kubescape-cloud-operator. The flag will be removed at 1.Dec.2023")
scanCmd.AddCommand(getControlCmd(ks, &scanInfo))
scanCmd.AddCommand(getFrameworkCmd(ks, &scanInfo))
scanCmd.AddCommand(getWorkloadCmd(ks, &scanInfo))
isi := &imageScanInfo{}
scanCmd.AddCommand(getImageCmd(ks, &scanInfo, isi))
return scanCmd
}
func setSecurityViewScanInfo(args []string, scanInfo *cautils.ScanInfo) {
if len(args) > 0 {
scanInfo.SetScanType(cautils.ScanTypeRepo)
scanInfo.InputPatterns = args
} else {
scanInfo.SetScanType(cautils.ScanTypeCluster)
}
scanInfo.SetPolicyIdentifiers([]string{"clusterscan", "mitre", "nsa"}, v1.KindFramework)
}
func securityScan(scanInfo cautils.ScanInfo, ks meta.IKubescape) error {
ctx := context.TODO()
results, err := ks.Scan(ctx, &scanInfo)
if err != nil {
return err
}
if err = results.HandleResults(ctx); err != nil {
return err
}
enforceSeverityThresholds(results.GetData().Report.SummaryDetails.GetResourcesSeverityCounters(), &scanInfo, terminateOnExceedingSeverity)
return nil
}
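In other words, --view=security bypasses the usual framework dispatch: path arguments turn the run into a repo scan, no arguments mean a cluster scan, and the framework set is pinned to clusterscan, mitre, and nsa either way. A self-contained sketch of that decision, with hypothetical local names standing in for the cautils types:

package main

import "fmt"

type scanType string

const (
    scanTypeCluster scanType = "cluster"
    scanTypeRepo    scanType = "repo"
)

// resolveSecurityScan sketches setSecurityViewScanInfo above: path arguments
// flip the scan type from cluster to repo; the framework set is fixed.
func resolveSecurityScan(args []string) (scanType, []string) {
    st := scanTypeCluster
    if len(args) > 0 {
        st = scanTypeRepo
    }
    return st, []string{"clusterscan", "mitre", "nsa"}
}

func main() {
    st, fws := resolveSecurityScan([]string{"deploy.yaml"})
    fmt.Println(st, fws) // repo [clusterscan mitre nsa]
}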


@@ -6,6 +6,7 @@ import (
"github.com/kubescape/go-logger/helpers"
"github.com/kubescape/kubescape/v2/core/cautils"
v1 "github.com/kubescape/opa-utils/httpserver/apis/v1"
"github.com/kubescape/opa-utils/reporthandling/apis"
"github.com/kubescape/opa-utils/reporthandling/results/v1/reportsummary"
@@ -184,17 +185,20 @@ type spyLogger struct {
setItems []spyLogMessage
}
func (l *spyLogger) Error(msg string, details ...helpers.IDetails) {}
func (l *spyLogger) Success(msg string, details ...helpers.IDetails) {}
func (l *spyLogger) Warning(msg string, details ...helpers.IDetails) {}
func (l *spyLogger) Info(msg string, details ...helpers.IDetails) {}
func (l *spyLogger) Debug(msg string, details ...helpers.IDetails) {}
func (l *spyLogger) SetLevel(level string) error { return nil }
func (l *spyLogger) GetLevel() string { return "" }
func (l *spyLogger) SetWriter(w *os.File) {}
func (l *spyLogger) GetWriter() *os.File { return &os.File{} }
func (l *spyLogger) LoggerName() string { return "" }
func (l *spyLogger) Ctx(_ context.Context) helpers.ILogger { return l }
func (l *spyLogger) Error(msg string, details ...helpers.IDetails) {}
func (l *spyLogger) Success(msg string, details ...helpers.IDetails) {}
func (l *spyLogger) Warning(msg string, details ...helpers.IDetails) {}
func (l *spyLogger) Info(msg string, details ...helpers.IDetails) {}
func (l *spyLogger) Debug(msg string, details ...helpers.IDetails) {}
func (l *spyLogger) SetLevel(level string) error { return nil }
func (l *spyLogger) GetLevel() string { return "" }
func (l *spyLogger) SetWriter(w *os.File) {}
func (l *spyLogger) GetWriter() *os.File { return &os.File{} }
func (l *spyLogger) LoggerName() string { return "" }
func (l *spyLogger) Ctx(_ context.Context) helpers.ILogger { return l }
func (l *spyLogger) Start(msg string, details ...helpers.IDetails) {}
func (l *spyLogger) StopSuccess(msg string, details ...helpers.IDetails) {}
func (l *spyLogger) StopError(msg string, details ...helpers.IDetails) {}
func (l *spyLogger) Fatal(msg string, details ...helpers.IDetails) {
firstDetail := details[0]
@@ -254,3 +258,106 @@ func Test_terminateOnExceedingSeverity(t *testing.T) {
)
}
}
func TestSetSecurityViewScanInfo(t *testing.T) {
tests := []struct {
name string
args []string
want *cautils.ScanInfo
}{
{
name: "no args",
args: []string{},
want: &cautils.ScanInfo{
InputPatterns: []string{},
ScanType: cautils.ScanTypeCluster,
PolicyIdentifier: []cautils.PolicyIdentifier{
{
Kind: v1.KindFramework,
Identifier: "clusterscan",
},
{
Kind: v1.KindFramework,
Identifier: "mitre",
},
{
Kind: v1.KindFramework,
Identifier: "nsa",
},
},
},
},
{
name: "with args",
args: []string{
"file.yaml",
"file2.yaml",
},
want: &cautils.ScanInfo{
ScanType: cautils.ScanTypeRepo,
InputPatterns: []string{
"file.yaml",
"file2.yaml",
},
PolicyIdentifier: []cautils.PolicyIdentifier{
{
Kind: v1.KindFramework,
Identifier: "clusterscan",
},
{
Kind: v1.KindFramework,
Identifier: "mitre",
},
{
Kind: v1.KindFramework,
Identifier: "nsa",
},
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := &cautils.ScanInfo{
View: string(cautils.SecurityViewType),
}
setSecurityViewScanInfo(tt.args, got)
if len(tt.want.InputPatterns) != len(got.InputPatterns) {
t.Errorf("in test: %s, got: %v, want: %v", tt.name, got.InputPatterns, tt.want.InputPatterns)
}
if tt.want.ScanType != got.ScanType {
t.Errorf("in test: %s, got: %v, want: %v", tt.name, got.ScanType, tt.want.ScanType)
}
for i := range tt.want.InputPatterns {
found := false
for j := range tt.want.InputPatterns[i] {
if tt.want.InputPatterns[i][j] == got.InputPatterns[i][j] {
found = true
break
}
}
if !found {
t.Errorf("in test: %s, got: %v, want: %v", tt.name, got.InputPatterns, tt.want.InputPatterns)
}
}
for i := range tt.want.PolicyIdentifier {
found := false
for j := range got.PolicyIdentifier {
if tt.want.PolicyIdentifier[i].Kind == got.PolicyIdentifier[j].Kind && tt.want.PolicyIdentifier[i].Identifier == got.PolicyIdentifier[j].Identifier {
found = true
break
}
}
if !found {
t.Errorf("in test: %s, got: %v, want: %v", tt.name, got.PolicyIdentifier, tt.want.PolicyIdentifier)
}
}
})
}
}


@@ -114,3 +114,27 @@ func Test_validateSeverity(t *testing.T) {
})
}
}
func Test_validateWorkloadIdentifier(t *testing.T) {
testCases := []struct {
Description string
Input string
Want error
}{
{"valid workload identifier should be valid", "deployment/test", nil},
{"invalid workload identifier missing kind", "deployment", ErrInvalidWorkloadIdentifier},
{"invalid workload identifier with namespace", "ns/deployment/name", ErrInvalidWorkloadIdentifier},
}
for _, testCase := range testCases {
t.Run(testCase.Description, func(t *testing.T) {
input := testCase.Input
want := testCase.Want
got := validateWorkloadIdentifier(input)
if got != want {
t.Errorf("got: %v, want: %v", got, want)
}
})
}
}

cmd/scan/workload.go

@@ -0,0 +1,126 @@
package scan
import (
"context"
"errors"
"fmt"
"strings"
logger "github.com/kubescape/go-logger"
"github.com/kubescape/kubescape/v2/core/cautils"
"github.com/kubescape/kubescape/v2/core/meta"
v1 "github.com/kubescape/opa-utils/httpserver/apis/v1"
"github.com/kubescape/opa-utils/objectsenvelopes"
"github.com/spf13/cobra"
)
var (
workloadExample = fmt.Sprintf(`
This command is still in BETA. Feel free to contact the kubescape maintainers for more information.
Scan a workload for misconfigurations and image vulnerabilities.
# Scan a workload
%[1]s scan workload <kind>/<name>
# Scan a workload in a specific namespace
%[1]s scan workload <kind>/<name> --namespace <namespace>
# Scan a workload from a file path
%[1]s scan workload <kind>/<name> --file-path <file path>
# Scan a workload from a helm-chart template
%[1]s scan workload <kind>/<name> --chart-path <chart path> --file-path <file path>
`, cautils.ExecName())
ErrInvalidWorkloadIdentifier = errors.New("invalid workload identifier")
)
var namespace string
// controlCmd represents the control command
func getWorkloadCmd(ks meta.IKubescape, scanInfo *cautils.ScanInfo) *cobra.Command {
workloadCmd := &cobra.Command{
Use: "workload <kind>/<name> [`<glob pattern>`/`-`] [flags]",
Short: "Scan a workload for misconfigurations and image vulnerabilities",
Example: workloadExample,
Args: func(cmd *cobra.Command, args []string) error {
if len(args) != 1 {
return fmt.Errorf("usage: <kind>/<name> [`<glob pattern>`/`-`] [flags]")
}
if scanInfo.ChartPath != "" && scanInfo.FilePath == "" {
return fmt.Errorf("usage: --chart-path <chart path> --file-path <file path>")
}
return validateWorkloadIdentifier(args[0])
},
RunE: func(cmd *cobra.Command, args []string) error {
kind, name, err := parseWorkloadIdentifierString(args[0])
if err != nil {
return fmt.Errorf("invalid input: %s", err.Error())
}
setWorkloadScanInfo(scanInfo, kind, name)
// todo: add api version if provided
ctx := context.TODO()
results, err := ks.Scan(ctx, scanInfo)
if err != nil {
logger.L().Fatal(err.Error())
}
if err = results.HandleResults(ctx); err != nil {
logger.L().Fatal(err.Error())
}
return nil
},
}
workloadCmd.PersistentFlags().StringVarP(&namespace, "namespace", "n", "", "Namespace of the workload. Default will be empty.")
workloadCmd.PersistentFlags().StringVar(&scanInfo.FilePath, "file-path", "", "Path to the workload file.")
workloadCmd.PersistentFlags().StringVar(&scanInfo.ChartPath, "chart-path", "", "Path to the helm chart the workload is part of. Must be used with --file-path.")
return workloadCmd
}
func setWorkloadScanInfo(scanInfo *cautils.ScanInfo, kind string, name string) {
scanInfo.SetScanType(cautils.ScanTypeWorkload)
scanInfo.ScanImages = true
scanInfo.ScanObject = &objectsenvelopes.ScanObject{}
scanInfo.ScanObject.SetNamespace(namespace)
scanInfo.ScanObject.SetKind(kind)
scanInfo.ScanObject.SetName(name)
scanInfo.SetPolicyIdentifiers([]string{"workloadscan"}, v1.KindFramework)
if scanInfo.FilePath != "" {
scanInfo.InputPatterns = []string{scanInfo.FilePath}
}
}
func validateWorkloadIdentifier(workloadIdentifier string) error {
// workloadIdentifier is in the form of kind/name
x := strings.Split(workloadIdentifier, "/")
if len(x) != 2 || x[0] == "" || x[1] == "" {
return ErrInvalidWorkloadIdentifier
}
return nil
}
func parseWorkloadIdentifierString(workloadIdentifier string) (kind, name string, err error) {
// workloadIdentifier is in the form of kind/name
// example: Deployment/nginx-deployment
x := strings.Split(workloadIdentifier, "/")
if len(x) != 2 {
return "", "", ErrInvalidWorkloadIdentifier
}
return x[0], x[1], nil
}
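The accepted identifier is exactly <kind>/<name>; the namespace travels via the --namespace flag, so a three-part form like default/Deployment/nginx is rejected. A self-contained sketch of the parse-and-validate round trip, mirroring the helpers above:

package main

import (
    "errors"
    "fmt"
    "strings"
)

var errInvalid = errors.New("invalid workload identifier")

// parseWorkloadIdentifier accepts exactly two non-empty segments,
// kind and name, separated by a slash.
func parseWorkloadIdentifier(id string) (kind, name string, err error) {
    parts := strings.Split(id, "/")
    if len(parts) != 2 || parts[0] == "" || parts[1] == "" {
        return "", "", errInvalid
    }
    return parts[0], parts[1], nil
}

func main() {
    kind, name, err := parseWorkloadIdentifier("Deployment/nginx")
    fmt.Println(kind, name, err) // Deployment nginx <nil>
    _, _, err = parseWorkloadIdentifier("default/Deployment/nginx")
    fmt.Println(err) // invalid workload identifier
}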

cmd/scan/workload_test.go

@@ -0,0 +1,69 @@
package scan
import (
"testing"
"github.com/kubescape/kubescape/v2/core/cautils"
v1 "github.com/kubescape/opa-utils/httpserver/apis/v1"
"github.com/kubescape/opa-utils/objectsenvelopes"
)
func TestSetWorkloadScanInfo(t *testing.T) {
test := []struct {
Description string
kind string
name string
want *cautils.ScanInfo
}{
{
Description: "Set workload scan info",
kind: "Deployment",
name: "test",
want: &cautils.ScanInfo{
PolicyIdentifier: []cautils.PolicyIdentifier{
{
Identifier: "workloadscan",
Kind: v1.KindFramework,
},
},
ScanType: cautils.ScanTypeWorkload,
ScanObject: &objectsenvelopes.ScanObject{
Kind: "Deployment",
Metadata: objectsenvelopes.ScanObjectMetadata{
Name: "test",
},
},
},
},
}
for _, tc := range test {
t.Run(
tc.Description,
func(t *testing.T) {
scanInfo := &cautils.ScanInfo{}
setWorkloadScanInfo(scanInfo, tc.kind, tc.name)
if scanInfo.ScanType != tc.want.ScanType {
t.Errorf("got: %v, want: %v", scanInfo.ScanType, tc.want.ScanType)
}
if scanInfo.ScanObject.Kind != tc.want.ScanObject.Kind {
t.Errorf("got: %v, want: %v", scanInfo.ScanObject.Kind, tc.want.ScanObject.Kind)
}
if scanInfo.ScanObject.Metadata.Name != tc.want.ScanObject.Metadata.Name {
t.Errorf("got: %v, want: %v", scanInfo.ScanObject.Metadata.Name, tc.want.ScanObject.Metadata.Name)
}
if len(scanInfo.PolicyIdentifier) != 1 {
t.Errorf("got: %v, want: %v", len(scanInfo.PolicyIdentifier), 1)
}
if scanInfo.PolicyIdentifier[0].Identifier != tc.want.PolicyIdentifier[0].Identifier {
t.Errorf("got: %v, want: %v", scanInfo.PolicyIdentifier[0].Identifier, tc.want.PolicyIdentifier[0].Identifier)
}
},
)
}
}


@@ -6,14 +6,16 @@ package update
import (
"fmt"
"os/exec"
"runtime"
logger "github.com/kubescape/go-logger"
"github.com/kubescape/kubescape/v2/core/cautils"
"github.com/spf13/cobra"
)
const (
installationLink string = "https://github.com/kubescape/kubescape/blob/master/docs/installation.md"
)
var updateCmdExamples = fmt.Sprintf(`
# Update to the latest kubescape release
%[1]s update
@@ -31,33 +33,7 @@ func GetUpdateCmd() *cobra.Command {
//your version == latest version
logger.L().Info(("You are in the latest version"))
} else {
const OSTYPE string = runtime.GOOS
var ShellToUse string
switch OSTYPE {
case "windows":
cautils.StartSpinner()
//run the installation command for windows
ShellToUse = "powershell"
_, err := exec.Command(ShellToUse, "-c", "iwr -useb https://raw.githubusercontent.com/kubescape/kubescape/master/install.ps1 | iex").Output()
if err != nil {
logger.L().Fatal(err.Error())
}
cautils.StopSpinner()
default:
ShellToUse = "bash"
cautils.StartSpinner()
//run the installation command for linux and macOS
_, err := exec.Command(ShellToUse, "-c", "curl -s https://raw.githubusercontent.com/kubescape/kubescape/master/install.sh | /bin/bash").Output()
if err != nil {
logger.L().Fatal(err.Error())
}
cautils.StopSpinner()
}
fmt.Printf("please refer to our installation docs in the following link: %s", installationLink)
}
return nil
},


@@ -17,7 +17,11 @@ import (
corev1 "k8s.io/api/core/v1"
)
const configFileName = "config"
const (
configFileName string = "config"
kubescapeNamespace string = "kubescape"
kubescapeConfigMapName string = "kubescape-config"
)
func ConfigFileFullPath() string { return getter.GetDefaultPath(configFileName + ".json") }
@@ -29,7 +33,6 @@ type ConfigObj struct {
AccountID string `json:"accountID,omitempty"`
ClientID string `json:"clientID,omitempty"`
SecretKey string `json:"secretKey,omitempty"`
CustomerGUID string `json:"customerGUID,omitempty"` // Deprecated
Token string `json:"invitationParam,omitempty"`
CustomerAdminEMail string `json:"adminMail,omitempty"`
ClusterName string `json:"clusterName,omitempty"`
@@ -63,6 +66,35 @@ func (co *ConfigObj) Config() []byte {
return []byte{}
}
func (co *ConfigObj) updateEmptyFields(inCO *ConfigObj) error {
if co.AccountID == "" {
co.AccountID = inCO.AccountID
}
if co.CloudAPIURL == "" {
co.CloudAPIURL = inCO.CloudAPIURL
}
if co.CloudAuthURL == "" {
co.CloudAuthURL = inCO.CloudAuthURL
}
if co.CloudReportURL == "" {
co.CloudReportURL = inCO.CloudReportURL
}
if co.CloudUIURL == "" {
co.CloudUIURL = inCO.CloudUIURL
}
if co.ClusterName == "" {
co.ClusterName = inCO.ClusterName
}
if co.CustomerAdminEMail == "" {
co.CustomerAdminEMail = inCO.CustomerAdminEMail
}
if co.Token == "" {
co.Token = inCO.Token
}
return nil
}
// ======================================================================================
// =============================== interface ============================================
// ======================================================================================
@@ -242,18 +274,19 @@ func NewClusterConfig(k8s *k8sinterface.KubernetesApi, backendAPI getter.IBacken
backendAPI: backendAPI,
configObj: &ConfigObj{},
configMapName: getConfigMapName(),
configMapNamespace: getConfigMapNamespace(),
configMapNamespace: GetConfigMapNamespace(),
}
// first, load from configMap
if c.existsConfigMap() {
c.loadConfigFromConfigMap()
}
// second, load from file
// first, load from file
if existsConfigFile() { // get from file
loadConfigFromFile(c.configObj)
}
// second, load from configMap
if c.existsConfigMap() {
c.updateConfigEmptyFieldsFromConfigMap()
}
updateCredentials(c.configObj, credentials)
updateCloudURLs(c.configObj)
@@ -359,6 +392,22 @@ func (c *ClusterConfig) ToMapString() map[string]interface{} {
}
return m
}
func (c *ClusterConfig) updateConfigEmptyFieldsFromConfigMap() error {
configMap, err := c.k8s.KubernetesClient.CoreV1().ConfigMaps(c.configMapNamespace).Get(context.Background(), c.configMapName, metav1.GetOptions{})
if err != nil {
return err
}
tempCO := ConfigObj{}
if jsonConf, ok := configMap.Data["config.json"]; ok {
json.Unmarshal([]byte(jsonConf), &tempCO)
return c.configObj.updateEmptyFields(&tempCO)
}
return err
}
func (c *ClusterConfig) loadConfigFromConfigMap() error {
configMap, err := c.k8s.KubernetesClient.CoreV1().ConfigMaps(c.configMapNamespace).Get(context.Background(), c.configMapName, metav1.GetOptions{})
if err != nil {
@@ -509,10 +558,6 @@ func readConfig(dat []byte, configObj *ConfigObj) error {
if err := json.Unmarshal(dat, configObj); err != nil {
return err
}
if configObj.AccountID == "" {
configObj.AccountID = configObj.CustomerGUID
}
configObj.CustomerGUID = ""
return nil
}
@@ -554,14 +599,15 @@ func getConfigMapName() string {
if n := os.Getenv("KS_DEFAULT_CONFIGMAP_NAME"); n != "" {
return n
}
return "kubescape"
return kubescapeConfigMapName
}
func getConfigMapNamespace() string {
// GetConfigMapNamespace returns the namespace of the cluster config, which is the same for all in-cluster components
func GetConfigMapNamespace() string {
if n := os.Getenv("KS_DEFAULT_CONFIGMAP_NAMESPACE"); n != "" {
return n
}
return "default"
return kubescapeNamespace
}
func getAccountFromEnv(credentials *Credentials) {
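The load order is the substantive change in this file: the local config file is read first, and the ConfigMap (kubescape-config in the kubescape namespace by default, overridable via KS_DEFAULT_CONFIGMAP_NAME and KS_DEFAULT_CONFIGMAP_NAMESPACE) only fills fields the file left empty, so the file wins on conflicts. A minimal sketch of that fill-empty merge on a reduced, hypothetical struct:

package main

import "fmt"

type sketchConfig struct {
    AccountID   string
    ClusterName string
}

// fillEmpty sketches the updateEmptyFields semantics above: a ConfigMap
// value is applied only to fields the file-backed config left empty.
func fillEmpty(dst, src *sketchConfig) {
    if dst.AccountID == "" {
        dst.AccountID = src.AccountID
    }
    if dst.ClusterName == "" {
        dst.ClusterName = src.ClusterName
    }
}

func main() {
    fromFile := &sketchConfig{AccountID: "file-account"}
    fromConfigMap := &sketchConfig{AccountID: "cm-account", ClusterName: "cm-cluster"}
    fillEmpty(fromFile, fromConfigMap)
    fmt.Printf("%+v\n", *fromFile) // {AccountID:file-account ClusterName:cm-cluster}
}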


@@ -299,3 +299,159 @@ func Test_initializeCloudAPI(t *testing.T) {
})
}
}
func TestGetConfigMapNamespace(t *testing.T) {
tests := []struct {
name string
env string
want string
}{
{
name: "no env",
want: kubescapeNamespace,
},
{
name: "default ns",
env: kubescapeNamespace,
want: kubescapeNamespace,
},
{
name: "custom ns",
env: "my-ns",
want: "my-ns",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if tt.env != "" {
_ = os.Setenv("KS_DEFAULT_CONFIGMAP_NAMESPACE", tt.env)
}
assert.Equalf(t, tt.want, GetConfigMapNamespace(), "GetConfigMapNamespace()")
})
}
}
const (
anyString string = "anyString"
shouldNotUpdate string = "shouldNotUpdate"
shouldUpdate string = "shouldUpdate"
)
func checkIsUpdateCorrectly(t *testing.T, beforeField string, afterField string) {
switch beforeField {
case anyString:
assert.Equal(t, anyString, afterField)
case "":
assert.Equal(t, shouldUpdate, afterField)
}
}
func TestUpdateEmptyFields(t *testing.T) {
tests := []struct {
inCo *ConfigObj
outCo *ConfigObj
}{
{
outCo: &ConfigObj{
AccountID: "",
Token: "",
CustomerAdminEMail: "",
ClusterName: "",
CloudReportURL: "",
CloudAPIURL: "",
CloudUIURL: "",
CloudAuthURL: "",
},
inCo: &ConfigObj{
AccountID: shouldUpdate,
Token: shouldUpdate,
CustomerAdminEMail: shouldUpdate,
ClusterName: shouldUpdate,
CloudReportURL: shouldUpdate,
CloudAPIURL: shouldUpdate,
CloudUIURL: shouldUpdate,
CloudAuthURL: shouldUpdate,
},
},
{
outCo: &ConfigObj{
AccountID: anyString,
Token: anyString,
CustomerAdminEMail: "",
ClusterName: "",
CloudReportURL: "",
CloudAPIURL: "",
CloudUIURL: "",
CloudAuthURL: "",
},
inCo: &ConfigObj{
AccountID: shouldNotUpdate,
Token: shouldNotUpdate,
CustomerAdminEMail: shouldUpdate,
ClusterName: shouldUpdate,
CloudReportURL: shouldUpdate,
CloudAPIURL: shouldUpdate,
CloudUIURL: shouldUpdate,
CloudAuthURL: shouldUpdate,
},
},
{
outCo: &ConfigObj{
AccountID: "",
Token: "",
CustomerAdminEMail: anyString,
ClusterName: anyString,
CloudReportURL: anyString,
CloudAPIURL: anyString,
CloudUIURL: anyString,
CloudAuthURL: anyString,
},
inCo: &ConfigObj{
AccountID: shouldUpdate,
Token: shouldUpdate,
CustomerAdminEMail: shouldNotUpdate,
ClusterName: shouldNotUpdate,
CloudReportURL: shouldNotUpdate,
CloudAPIURL: shouldNotUpdate,
CloudUIURL: shouldNotUpdate,
CloudAuthURL: shouldNotUpdate,
},
},
{
outCo: &ConfigObj{
AccountID: anyString,
Token: anyString,
CustomerAdminEMail: "",
ClusterName: anyString,
CloudReportURL: "",
CloudAPIURL: anyString,
CloudUIURL: "",
CloudAuthURL: anyString,
},
inCo: &ConfigObj{
AccountID: shouldNotUpdate,
Token: shouldNotUpdate,
CustomerAdminEMail: shouldUpdate,
ClusterName: shouldNotUpdate,
CloudReportURL: shouldUpdate,
CloudAPIURL: shouldNotUpdate,
CloudUIURL: shouldUpdate,
CloudAuthURL: shouldNotUpdate,
},
},
}
for i := range tests {
beforeChangesOutCO := *tests[i].outCo // copy the values before they are mutated in place
tests[i].outCo.updateEmptyFields(tests[i].inCo)
checkIsUpdateCorrectly(t, beforeChangesOutCO.AccountID, tests[i].outCo.AccountID)
checkIsUpdateCorrectly(t, beforeChangesOutCO.CloudAPIURL, tests[i].outCo.CloudAPIURL)
checkIsUpdateCorrectly(t, beforeChangesOutCO.CloudAuthURL, tests[i].outCo.CloudAuthURL)
checkIsUpdateCorrectly(t, beforeChangesOutCO.CloudReportURL, tests[i].outCo.CloudReportURL)
checkIsUpdateCorrectly(t, beforeChangesOutCO.CloudUIURL, tests[i].outCo.CloudUIURL)
checkIsUpdateCorrectly(t, beforeChangesOutCO.ClusterName, tests[i].outCo.ClusterName)
checkIsUpdateCorrectly(t, beforeChangesOutCO.CustomerAdminEMail, tests[i].outCo.CustomerAdminEMail)
checkIsUpdateCorrectly(t, beforeChangesOutCO.Token, tests[i].outCo.Token)
}
}
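// updateEmptyFields itself is not shown in this diff; a minimal sketch of the
// semantics the cases above assume (copy a field from the argument only when
// the receiver's field is empty):
//
// func (c *ConfigObj) updateEmptyFields(other *ConfigObj) {
//     if c.AccountID == "" {
//         c.AccountID = other.AccountID
//     }
//     if c.Token == "" {
//         c.Token = other.Token
//     }
//     // ...likewise for CustomerAdminEMail, ClusterName and the Cloud*URL fields
// }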

View File

@@ -2,7 +2,9 @@ package cautils
import (
"context"
"sort"
"github.com/anchore/grype/grype/presenter/models"
"github.com/armosec/armoapi-go/armotypes"
"github.com/kubescape/k8s-interface/workloadinterface"
"github.com/kubescape/opa-utils/reporthandling"
@@ -15,12 +17,29 @@ import (
// K8SResources map[<api group>/<api version>/<resource>][]<resourceID>
type K8SResources map[string][]string
type KSResources map[string][]string
type ExternalResources map[string][]string
type ImageScanData struct {
PresenterConfig *models.PresenterConfig
Image string
}
type ScanTypes string
const (
TopWorkloadsNumber = 5
ScanTypeCluster ScanTypes = "cluster"
ScanTypeRepo ScanTypes = "repo"
ScanTypeImage ScanTypes = "image"
ScanTypeWorkload ScanTypes = "workload"
ScanTypeFramework ScanTypes = "framework"
)
type OPASessionObj struct {
K8SResources *K8SResources // input k8s objects
ArmoResource *KSResources // input ARMO objects
K8SResources K8SResources // input k8s objects
ExternalResources ExternalResources // input non-k8s objects (external resources)
AllPolicies *Policies // list of all frameworks
ExcludedRules map[string]bool // rules to exclude, map[<rule name>]bool
AllResources map[string]workloadinterface.IMetadata // all scanned resources, map[<resource ID>]<resource>
ResourcesResult map[string]resourcesresults.Result // resources scan results, map[<resource ID>]<resource result>
ResourceSource map[string]reporthandling.Source // resources sources, map[<resource ID>]<resource result>
@@ -36,9 +55,10 @@ type OPASessionObj struct {
Policies []reporthandling.Framework // list of frameworks to scan
Exceptions []armotypes.PostureExceptionPolicy // list of exceptions to apply on scan results
OmitRawResources bool // omit raw resources from output
SingleResourceScan workloadinterface.IWorkload // single resource scan
}
func NewOPASessionObj(ctx context.Context, frameworks []reporthandling.Framework, k8sResources *K8SResources, scanInfo *ScanInfo) *OPASessionObj {
func NewOPASessionObj(ctx context.Context, frameworks []reporthandling.Framework, k8sResources K8SResources, scanInfo *ScanInfo) *OPASessionObj {
return &OPASessionObj{
Report: &reporthandlingv2.PostureReport{},
Policies: frameworks,
@@ -55,6 +75,45 @@ func NewOPASessionObj(ctx context.Context, frameworks []reporthandling.Framework
}
}
// SetTopWorkloads sets the top workloads by score
func (sessionObj *OPASessionObj) SetTopWorkloads() {
topWorkloadsSorted := make([]prioritization.PrioritizedResource, 0, len(sessionObj.ResourcesPrioritized))
// collect the prioritized resources into a slice so they can be sorted
for _, wl := range sessionObj.ResourcesPrioritized {
topWorkloadsSorted = append(topWorkloadsSorted, wl)
}
// sort by score. If scores are equal, sort by resource ID
sort.Slice(topWorkloadsSorted, func(i, j int) bool {
if topWorkloadsSorted[i].Score == topWorkloadsSorted[j].Score {
return topWorkloadsSorted[i].ResourceID < topWorkloadsSorted[j].ResourceID
}
return topWorkloadsSorted[i].Score > topWorkloadsSorted[j].Score
})
if sessionObj.Report == nil {
sessionObj.Report = &reporthandlingv2.PostureReport{}
}
// keep at most TopWorkloadsNumber entries in the report
for i := 0; i < TopWorkloadsNumber; i++ {
if i >= len(topWorkloadsSorted) {
break
}
source := sessionObj.ResourceSource[topWorkloadsSorted[i].ResourceID]
wlObj := &reporthandling.Resource{
IMetadata: sessionObj.AllResources[topWorkloadsSorted[i].ResourceID],
Source: &source,
}
sessionObj.Report.SummaryDetails.TopWorkloadsByScore = append(sessionObj.Report.SummaryDetails.TopWorkloadsByScore, wlObj)
}
}
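// Example: given prioritized resources scoring {A: 7, B: 9, C: 7}, the
// resulting order is B, A, C: B has the highest score, and A precedes C
// because equal scores fall back to ascending resource ID.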
func (sessionObj *OPASessionObj) SetMapNamespaceToNumberOfResources(mapNamespaceToNumberOfResources map[string]int) {
if sessionObj.Metadata.ContextMetadata.ClusterContextMetadata == nil {
sessionObj.Metadata.ContextMetadata.ClusterContextMetadata = &reporthandlingv2.ClusterMetadata{}

View File

@@ -4,6 +4,8 @@ import (
"golang.org/x/mod/semver"
"github.com/armosec/utils-go/boolutils"
cloudsupport "github.com/kubescape/k8s-interface/cloudsupport/v1"
"github.com/kubescape/k8s-interface/k8sinterface"
"github.com/kubescape/opa-utils/reporthandling"
"github.com/kubescape/opa-utils/reporthandling/apis"
)
@@ -15,15 +17,25 @@ func NewPolicies() *Policies {
}
}
func (policies *Policies) Set(frameworks []reporthandling.Framework, version string) {
func (policies *Policies) Set(frameworks []reporthandling.Framework, version string, excludedRules map[string]bool, scanningScope reporthandling.ScanningScopeType) {
for i := range frameworks {
if !isFrameworkFitToScanScope(frameworks[i], scanningScope) {
continue
}
if frameworks[i].Name != "" && len(frameworks[i].Controls) > 0 {
policies.Frameworks = append(policies.Frameworks, frameworks[i].Name)
}
for j := range frameworks[i].Controls {
compatibleRules := []reporthandling.PolicyRule{}
for r := range frameworks[i].Controls[j].Rules {
if !ruleWithKSOpaDependency(frameworks[i].Controls[j].Rules[r].Attributes) && isRuleKubescapeVersionCompatible(frameworks[i].Controls[j].Rules[r].Attributes, version) {
if excludedRules != nil {
ruleName := frameworks[i].Controls[j].Rules[r].Name
if _, exclude := excludedRules[ruleName]; exclude {
continue
}
}
if !ruleWithKSOpaDependency(frameworks[i].Controls[j].Rules[r].Attributes) && isRuleKubescapeVersionCompatible(frameworks[i].Controls[j].Rules[r].Attributes, version) && isControlFitToScanScope(frameworks[i].Controls[j], scanningScope) {
compatibleRules = append(compatibleRules, frameworks[i].Controls[j].Rules[r])
}
}
@@ -76,3 +88,89 @@ func isRuleKubescapeVersionCompatible(attributes map[string]interface{}, version
}
return true
}
func getCloudType(scanInfo *ScanInfo) (bool, reporthandling.ScanningScopeType) {
if cloudsupport.IsAKS() {
return true, reporthandling.ScopeCloudAKS
}
if cloudsupport.IsEKS(k8sinterface.GetConfig()) {
return true, reporthandling.ScopeCloudEKS
}
if cloudsupport.IsGKE(k8sinterface.GetConfig()) {
return true, reporthandling.ScopeCloudGKE
}
return false, ""
}
func GetScanningScope(scanInfo *ScanInfo) reporthandling.ScanningScopeType {
var result reporthandling.ScanningScopeType
switch scanInfo.GetScanningContext() {
case ContextCluster:
isCloud, cloudType := getCloudType(scanInfo)
if isCloud {
result = cloudType
} else {
result = reporthandling.ScopeCluster
}
default:
result = reporthandling.ScopeFile
}
return result
}
func isScanningScopeMatchToControlScope(scanScope reporthandling.ScanningScopeType, controlScope reporthandling.ScanningScopeType) bool {
result := false
switch controlScope {
case reporthandling.ScopeFile:
result = (reporthandling.ScopeFile == scanScope)
case reporthandling.ScopeCluster:
result = (reporthandling.ScopeCluster == scanScope) || (reporthandling.ScopeCloud == scanScope) || (reporthandling.ScopeCloudAKS == scanScope) || (reporthandling.ScopeCloudEKS == scanScope) || (reporthandling.ScopeCloudGKE == scanScope)
case reporthandling.ScopeCloud:
result = (reporthandling.ScopeCloud == scanScope) || (reporthandling.ScopeCloudAKS == scanScope) || (reporthandling.ScopeCloudEKS == scanScope) || (reporthandling.ScopeCloudGKE == scanScope)
case reporthandling.ScopeCloudAKS:
result = (reporthandling.ScopeCloudAKS == scanScope)
case reporthandling.ScopeCloudEKS:
result = (reporthandling.ScopeCloudEKS == scanScope)
case reporthandling.ScopeCloudGKE:
result = (reporthandling.ScopeCloudGKE == scanScope)
default:
result = true
}
return result
}
func isControlFitToScanScope(control reporthandling.Control, scanScopeMatches reporthandling.ScanningScopeType) bool {
// for backward compatibility: kubescape with a scanning scope (new) running against a regolibrary without scopes (old)
if control.ScanningScope == nil {
return true
}
if len(control.ScanningScope.Matches) == 0 {
return true
}
for i := range control.ScanningScope.Matches {
if isScanningScopeMatchToControlScope(scanScopeMatches, control.ScanningScope.Matches[i]) {
return true
}
}
return false
}
func isFrameworkFitToScanScope(framework reporthandling.Framework, scanScopeMatches reporthandling.ScanningScopeType) bool {
// for backward compatibility: kubescape with a scanning scope (new) running against a regolibrary without scopes (old)
if framework.ScanningScope == nil {
return true
}
if len(framework.ScanningScope.Matches) == 0 {
return true
}
for i := range framework.ScanningScope.Matches {
if isScanningScopeMatchToControlScope(scanScopeMatches, framework.ScanningScope.Matches[i]) {
return true
}
}
return false
}
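// Scope matching is hierarchical; an illustration (a sketch, not part of the
// diff) of how isScanningScopeMatchToControlScope(scanScope, controlScope) behaves:
//
// isScanningScopeMatchToControlScope(reporthandling.ScopeCloudEKS, reporthandling.ScopeCluster)  // true: any cloud cluster satisfies a cluster-scoped control
// isScanningScopeMatchToControlScope(reporthandling.ScopeFile, reporthandling.ScopeCluster)      // false: file scans never match cluster-only controls
// isScanningScopeMatchToControlScope(reporthandling.ScopeCloudEKS, reporthandling.ScopeCloudGKE) // false: provider-specific scopes match only themselves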

View File

@@ -0,0 +1,104 @@
package cautils
import (
"fmt"
"testing"
"github.com/kubescape/opa-utils/reporthandling"
"github.com/stretchr/testify/assert"
)
func TestIsControlFitToScanScope(t *testing.T) {
tests := []struct {
scanInfo *ScanInfo
Control reporthandling.Control
expected_res bool
}{
{
scanInfo: &ScanInfo{
InputPatterns: []string{
"./testdata/any_file_for_test.json",
},
},
Control: reporthandling.Control{
ScanningScope: &reporthandling.ScanningScope{
Matches: []reporthandling.ScanningScopeType{
reporthandling.ScopeFile,
},
},
},
expected_res: true,
},
{
scanInfo: &ScanInfo{
InputPatterns: []string{
"./testdata/any_file_for_test.json",
},
},
Control: reporthandling.Control{
ScanningScope: &reporthandling.ScanningScope{
Matches: []reporthandling.ScanningScopeType{
reporthandling.ScopeCluster,
reporthandling.ScopeFile,
},
},
},
expected_res: true,
},
{
scanInfo: &ScanInfo{},
Control: reporthandling.Control{
ScanningScope: &reporthandling.ScanningScope{
Matches: []reporthandling.ScanningScopeType{
reporthandling.ScopeCluster,
},
},
},
expected_res: true,
},
{
scanInfo: &ScanInfo{
InputPatterns: []string{
"./testdata/any_file_for_test.json",
},
},
Control: reporthandling.Control{
ScanningScope: &reporthandling.ScanningScope{
Matches: []reporthandling.ScanningScopeType{
reporthandling.ScopeCloudGKE,
},
},
},
expected_res: false,
},
{
scanInfo: &ScanInfo{},
Control: reporthandling.Control{
ScanningScope: &reporthandling.ScanningScope{
Matches: []reporthandling.ScanningScopeType{
reporthandling.ScopeCloudEKS,
},
},
},
expected_res: false,
},
{
scanInfo: &ScanInfo{},
Control: reporthandling.Control{
ScanningScope: &reporthandling.ScanningScope{
Matches: []reporthandling.ScanningScopeType{
reporthandling.ScopeCloud,
},
},
},
expected_res: false,
}}
for i := range tests {
assert.Equal(t, tests[i].expected_res, isControlFitToScanScope(tests[i].Control, GetScanningScope(tests[i].scanInfo)), fmt.Sprintf("tests_true index %d", i))
}
}

View File

@@ -1,25 +1,54 @@
package cautils
import (
"fmt"
"io"
"os"
"time"
spinnerpkg "github.com/briandowns/spinner"
"github.com/fatih/color"
"github.com/jwalton/gchalk"
logger "github.com/kubescape/go-logger"
"github.com/kubescape/go-logger/helpers"
"github.com/mattn/go-isatty"
"github.com/schollz/progressbar/v3"
)
var FailureDisplay = color.New(color.Bold, color.FgHiRed).FprintfFunc()
var WarningDisplay = color.New(color.Bold, color.FgHiYellow).FprintfFunc()
var FailureTextDisplay = color.New(color.Faint, color.FgHiRed).FprintfFunc()
var InfoDisplay = color.New(color.Bold, color.FgCyan).FprintfFunc()
var InfoTextDisplay = color.New(color.Bold, color.FgHiYellow).FprintfFunc()
var SimpleDisplay = color.New().FprintfFunc()
var SuccessDisplay = color.New(color.Bold, color.FgHiGreen).FprintfFunc()
var DescriptionDisplay = color.New(color.Faint, color.FgWhite).FprintfFunc()
func FailureDisplay(w io.Writer, format string, a ...interface{}) {
fmt.Fprintf(w, gchalk.WithBrightRed().Bold(format), a...)
}
func WarningDisplay(w io.Writer, format string, a ...interface{}) {
fmt.Fprintf(w, gchalk.WithBrightYellow().Bold(format), a...)
}
func FailureTextDisplay(w io.Writer, format string, a ...interface{}) {
fmt.Fprintf(w, gchalk.WithBrightRed().Dim(format), a...)
}
func InfoDisplay(w io.Writer, format string, a ...interface{}) {
fmt.Fprintf(w, gchalk.WithCyan().Bold(format), a...)
}
func InfoTextDisplay(w io.Writer, format string, a ...interface{}) {
fmt.Fprintf(w, gchalk.WithBrightYellow().Bold(format), a...)
}
func SimpleDisplay(w io.Writer, format string, a ...interface{}) {
fmt.Fprintf(w, gchalk.White(format), a...)
}
func SuccessDisplay(w io.Writer, format string, a ...interface{}) {
fmt.Fprintf(w, gchalk.WithBlue().Bold(format), a...)
}
func DescriptionDisplay(w io.Writer, format string, a ...interface{}) {
fmt.Fprintf(w, gchalk.WithWhite().Dim(format), a...)
}
func BoldDisplay(w io.Writer, format string, a ...interface{}) {
fmt.Fprintf(w, gchalk.Bold(format), a...)
}
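// A usage sketch for the gchalk-based helpers (writers and messages are
// illustrative). The style wraps the format string, so the interpolated
// arguments are rendered inside the styled span:
//
// FailureDisplay(os.Stderr, "scan failed: %d controls failed\n", 3)
// SuccessDisplay(os.Stdout, "scan completed in %s\n", "4.2s")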
var spinner *spinnerpkg.Spinner

View File

@@ -11,6 +11,7 @@ import (
"github.com/kubescape/go-logger/helpers"
"github.com/kubescape/k8s-interface/workloadinterface"
"golang.org/x/exp/slices"
logger "github.com/kubescape/go-logger"
"github.com/kubescape/opa-utils/objectsenvelopes"
@@ -31,8 +32,13 @@ const (
JSON_FILE_FORMAT FileFormat = "json"
)
type Chart struct {
Name string
Path string
}
// LoadResourcesFromHelmCharts scans a given path (recursively) for helm charts, renders the templates and returns a map of workloads and a map of charts
func LoadResourcesFromHelmCharts(ctx context.Context, basePath string) (map[string][]workloadinterface.IMetadata, map[string]string) {
func LoadResourcesFromHelmCharts(ctx context.Context, basePath string) (map[string][]workloadinterface.IMetadata, map[string]Chart) {
directories, _ := listDirs(basePath)
helmDirectories := make([]string, 0)
for _, dir := range directories {
@@ -42,7 +48,7 @@ func LoadResourcesFromHelmCharts(ctx context.Context, basePath string) (map[stri
}
sourceToWorkloads := map[string][]workloadinterface.IMetadata{}
sourceToChartName := map[string]string{}
sourceToChart := make(map[string]Chart, 0)
for _, helmDir := range helmDirectories {
chart, err := NewHelmChart(helmDir)
if err == nil {
@@ -55,11 +61,14 @@ func LoadResourcesFromHelmCharts(ctx context.Context, basePath string) (map[stri
chartName := chart.GetName()
for k, v := range wls {
sourceToWorkloads[k] = v
sourceToChartName[k] = chartName
sourceToChart[k] = Chart{
Name: chartName,
Path: helmDir,
}
}
}
}
return sourceToWorkloads, sourceToChartName
return sourceToWorkloads, sourceToChart
}
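// A usage sketch for the new return type (the base path is hypothetical):
// each rendered source file now maps to its chart's name and on-disk path
// rather than just the name.
//
// workloads, charts := LoadResourcesFromHelmCharts(ctx, "./charts")
// for file := range workloads {
//     fmt.Printf("%s comes from chart %q at %s\n", file, charts[file].Name, charts[file].Path)
// }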
// If the contents at given path is a Kustomize Directory, LoadResourcesFromKustomizeDirectory will
@@ -284,11 +293,11 @@ func convertYamlToJson(i interface{}) interface{} {
}
func IsYaml(filePath string) bool {
return StringInSlice(YAML_PREFIX, strings.ReplaceAll(filepath.Ext(filePath), ".", "")) != ValueNotFound
return slices.Contains(YAML_PREFIX, strings.ReplaceAll(filepath.Ext(filePath), ".", ""))
}
func IsJson(filePath string) bool {
return StringInSlice(JSON_PREFIX, strings.ReplaceAll(filepath.Ext(filePath), ".", "")) != ValueNotFound
return slices.Contains(JSON_PREFIX, strings.ReplaceAll(filepath.Ext(filePath), ".", ""))
}
func glob(root, pattern string, onlyDirectories bool) ([]string, error) {

View File

@@ -53,7 +53,8 @@ func TestLoadResourcesFromHelmCharts(t *testing.T) {
w := workloads[0]
assert.True(t, localworkload.IsTypeLocalWorkload(w.GetObject()), "Expected localworkload as object type")
assert.Equal(t, "kubescape", sourceToChartName[file])
assert.Equal(t, "kubescape", sourceToChartName[file].Name)
assert.Equal(t, helmChartPath(), sourceToChartName[file].Path)
switch filepath.Base(file) {
case "serviceaccount.yaml":

View File

@@ -78,12 +78,12 @@ func (drp *DownloadReleasedPolicy) ListControls() ([]string, error) {
}
var controlsFrameworksList [][]string
for _, control := range controls {
controlsFrameworksList = append(controlsFrameworksList, control.FrameworkNames)
controlsFrameworksList = append(controlsFrameworksList, drp.gs.GetOpaFrameworkListByControlID(control.ControlID))
}
controlsNamesWithIDsandFrameworksList := make([]string, len(controlsIDsList))
// by design, all slices have the same length
for i := range controlsIDsList {
controlsNamesWithIDsandFrameworksList[i] = fmt.Sprintf("%v|%v|%v", controlsIDsList[i], controlsNamesList[i], strings.Join(controlsFrameworksList[i], ","))
controlsNamesWithIDsandFrameworksList[i] = fmt.Sprintf("%v|%v|%v", controlsIDsList[i], controlsNamesList[i], strings.Join(controlsFrameworksList[i], ", "))
}
return controlsNamesWithIDsandFrameworksList, nil
}

View File

@@ -6,6 +6,7 @@ import (
"testing"
"github.com/armosec/armoapi-go/armotypes"
"github.com/armosec/armoapi-go/identifiers"
jsoniter "github.com/json-iterator/go"
"github.com/kubescape/kubescape/v2/internal/testutils"
"github.com/kubescape/opa-utils/reporthandling"
@@ -152,7 +153,7 @@ func mockExceptions() []armotypes.PostureExceptionPolicy {
Actions: []armotypes.PostureExceptionPolicyActions{
"alertOnly",
},
Resources: []armotypes.PortalDesignator{
Resources: []identifiers.PortalDesignator{
{
DesignatorType: "Attributes",
Attributes: map[string]string{
@@ -187,7 +188,7 @@ func mockExceptions() []armotypes.PostureExceptionPolicy {
Actions: []armotypes.PostureExceptionPolicyActions{
"alertOnly",
},
Resources: []armotypes.PortalDesignator{
Resources: []identifiers.PortalDesignator{
{
DesignatorType: "Attributes",
Attributes: map[string]string{
@@ -237,7 +238,7 @@ func mockCustomerConfig(cluster, scope string) func() *armotypes.CustomerConfig
Attributes: map[string]interface{}{
"label": "value",
},
Scope: armotypes.PortalDesignator{
Scope: identifiers.PortalDesignator{
DesignatorType: "Attributes",
Attributes: map[string]string{
"kind": "Cluster",

File diff suppressed because it is too large

View File

@@ -6,6 +6,7 @@
},
"creationTime": "",
"description": "Testing MITRE for Kubernetes as suggested by microsoft in https://www.microsoft.com/security/blog/wp-content/uploads/2020/04/k8s-matrix.png",
"typeTags": ["compliance"],
"controls": [
{
"guid": "",

View File

@@ -6,6 +6,7 @@
},
"creationTime": "",
"description": "Implement NSA security advices for K8s ",
"typeTags": ["compliance"],
"controls": [
{
"guid": "",

View File

@@ -25789,7 +25789,7 @@
},
{
"guid": "",
"name": "exclude-kubescape-host-scanner-resources",
"name": "exclude-host-scanner-resources",
"attributes": {
"systemException": true
},
@@ -25804,7 +25804,7 @@
"attributes": {
"kind": "DaemonSet",
"name": "host-scanner",
"namespace": "kubescape-host-scanner"
"namespace": "kubescape"
}
}
],

View File

@@ -67,6 +67,12 @@ func errAuth(resp *http.Response) error {
}
func readString(rdr io.Reader, sizeHint int64) (string, error) {
// a negative size hint means there is no sized body to read, so return an empty string
if sizeHint < 0 {
return "", nil
}
var b strings.Builder
b.Grow(int(sizeHint))

View File

@@ -1,6 +1,7 @@
package getter
import (
"io"
"testing"
"github.com/stretchr/testify/require"
@@ -43,3 +44,56 @@ func TestIsNativeFramework(t *testing.T) {
require.Truef(t, isNativeFramework("nSa"), "expected nsa to be native (case insensitive)")
require.Falsef(t, isNativeFramework("foo"), "expected framework to be custom")
}
func Test_readString(t *testing.T) {
type args struct {
rdr io.Reader
sizeHint int64
}
tests := []struct {
name string
args args
want string
wantErr bool
}{
{
name: "should return empty string if sizeHint is negative",
args: args{
rdr: nil,
sizeHint: -1,
},
want: "",
wantErr: false,
},
{
name: "should return empty string if sizeHint is zero",
args: args{
rdr: &io.LimitedReader{},
sizeHint: 0,
},
want: "",
wantErr: false,
},
{
name: "should return empty string if sizeHint is positive",
args: args{
rdr: &io.LimitedReader{},
sizeHint: 1,
},
want: "",
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := readString(tt.args.rdr, tt.args.sizeHint)
if (err != nil) != tt.wantErr {
t.Errorf("readString() error = %v, wantErr %v", err, tt.wantErr)
return
}
if got != tt.want {
t.Errorf("readString() = %v, want %v", got, tt.want)
}
})
}
}

View File

@@ -3,6 +3,7 @@ package cautils
import (
"github.com/kubescape/k8s-interface/workloadinterface"
"github.com/kubescape/opa-utils/reporthandling"
helpersv1 "github.com/kubescape/opa-utils/reporthandling/helpers/v1"
"github.com/kubescape/opa-utils/reporthandling/results/v1/reportsummary"
)
@@ -54,10 +55,8 @@ func controlReportV2ToV1(opaSessionObj *OPASessionObj, frameworkName string, con
crv1.Remediation = crv2.Remediation
rulesv1 := map[string]reporthandling.RuleReport{}
iter := crv2.ListResourcesIDs().All()
for iter.HasNext() {
resourceID := iter.Next()
l := helpersv1.GetAllListsFromPool()
for resourceID := range crv2.ListResourcesIDs(l).All() {
if result, ok := opaSessionObj.ResourcesResult[resourceID]; ok {
for _, rulev2 := range result.ListRulesOfControl(crv2.GetID(), "") {
@@ -104,6 +103,7 @@ func controlReportV2ToV1(opaSessionObj *OPASessionObj, frameworkName string, con
}
}
}
helpersv1.PutAllListsToPool(l)
if len(rulesv1) > 0 {
for i := range rulesv1 {
crv1.RuleReports = append(crv1.RuleReports, rulesv1[i])

View File

@@ -8,13 +8,13 @@ import (
"path/filepath"
"strings"
"github.com/armosec/armoapi-go/armotypes"
giturl "github.com/kubescape/go-git-url"
"github.com/kubescape/go-logger"
"github.com/kubescape/go-logger/helpers"
"github.com/kubescape/k8s-interface/k8sinterface"
"github.com/kubescape/kubescape/v2/core/cautils/getter"
apisv1 "github.com/kubescape/opa-utils/httpserver/apis/v1"
"github.com/kubescape/opa-utils/objectsenvelopes"
"github.com/kubescape/opa-utils/reporthandling"
reporthandlingv2 "github.com/kubescape/opa-utils/reporthandling/v2"
@@ -87,51 +87,59 @@ func (bpf *BoolPtrFlag) Set(val string) error {
// TODO - UPDATE
type ViewTypes string
type EnvScopeTypes string
type ManageClusterTypes string
const (
ResourceViewType ViewTypes = "resource"
SecurityViewType ViewTypes = "security"
ControlViewType ViewTypes = "control"
)
type PolicyIdentifier struct {
Identifier string // policy Identifier e.g. c-0012 for control, nsa,mitre for frameworks
Kind apisv1.NotificationPolicyKind // policy kind e.g. Framework,Control,Rule
Designators armotypes.PortalDesignator
Identifier string // policy identifier, e.g. c-0012 for a control; nsa, mitre for frameworks
Kind apisv1.NotificationPolicyKind // policy kind, e.g. Framework, Control, Rule
}
type ScanInfo struct {
Getters // TODO - remove from object
PolicyIdentifier []PolicyIdentifier // TODO - remove from object
UseExceptions string // Load file with exceptions configuration
ControlsInputs string // Load file with inputs for controls
AttackTracks string // Load file with attack tracks
UseFrom []string // Load framework from local file (instead of download). Use when running offline
UseDefault bool // Load framework from cached file (instead of download). Use when running offline
UseArtifactsFrom string // Load artifacts from local path. Use when running offline
VerboseMode bool // Display all of the input resources and not only failed resources
View string // Display all of the input resources and not only failed resources
Format string // Format results (table, json, junit ...)
Output string // Store results in an output file, Output file name
FormatVersion string // Output object can be different between versions, this is for testing and backward compatibility
CustomClusterName string // Set the custom name of the cluster
ExcludedNamespaces string // used for host scanner namespace
IncludeNamespaces string //
InputPatterns []string // Yaml files input patterns
Silent bool // Silent mode - Do not print progress logs
FailThreshold float32 // Failure score threshold
FailThresholdSeverity string // Severity at and above which the command should fail
Submit bool // Submit results to Kubescape Cloud BE
CreateAccount bool // Create account in Kubescape Cloud BE if no account found in local cache
ScanID string // Report id of the current scan
HostSensorEnabled BoolPtrFlag // Deploy Kubescape K8s host scanner to collect data from certain controls
HostSensorYamlPath string // Path to hostsensor file
Local bool // Do not submit results
Credentials Credentials // account ID
KubeContext string // context name
FrameworkScan bool // false if scanning control
ScanAll bool // true if scan all frameworks
OmitRawResources bool // true if omit raw resources from the output
PrintAttackTree bool // true if print attack tree
Getters // TODO - remove from object
PolicyIdentifier []PolicyIdentifier // TODO - remove from object
UseExceptions string // Load file with exceptions configuration
ControlsInputs string // Load file with inputs for controls
AttackTracks string // Load file with attack tracks
UseFrom []string // Load framework from local file (instead of download). Use when running offline
UseDefault bool // Load framework from cached file (instead of download). Use when running offline
UseArtifactsFrom string // Load artifacts from local path. Use when running offline
VerboseMode bool // Display all of the input resources and not only failed resources
View string // Display all of the input resources and not only failed resources
Format string // Format results (table, json, junit ...)
Output string // Store results in an output file, Output file name
FormatVersion string // Output object can be different between versions, this is for testing and backward compatibility
CustomClusterName string // Set the custom name of the cluster
ExcludedNamespaces string // used for host scanner namespace
IncludeNamespaces string //
InputPatterns []string // Yaml files input patterns
Silent bool // Silent mode - Do not print progress logs
FailThreshold float32 // DEPRECATED - Failure score threshold
ComplianceThreshold float32 // Compliance score threshold
FailThresholdSeverity string // Severity at and above which the command should fail
Submit bool // Submit results to Kubescape Cloud BE
CreateAccount bool // Create account in Kubescape Cloud BE if no account found in local cache
ScanID string // Report id of the current scan
HostSensorEnabled BoolPtrFlag // Deploy Kubescape K8s host scanner to collect data from certain controls
HostSensorYamlPath string // Path to hostsensor file
Local bool // Do not submit results
Credentials Credentials // account ID
KubeContext string // context name
FrameworkScan bool // false if scanning control
ScanAll bool // true if scan all frameworks
OmitRawResources bool // true if omit raw resources from the output
PrintAttackTree bool // true if print attack tree
ScanObject *objectsenvelopes.ScanObject // identifies a single resource (k8s object) to be scanned
ScanType ScanTypes
ScanImages bool
ChartPath string
FilePath string
}
type Getters struct {
@@ -203,6 +211,10 @@ func (scanInfo *ScanInfo) Formats() []string {
}
}
func (scanInfo *ScanInfo) SetScanType(scanType ScanTypes) {
scanInfo.ScanType = scanType
}
func (scanInfo *ScanInfo) SetPolicyIdentifiers(policies []string, kind apisv1.NotificationPolicyKind) {
for _, policy := range policies {
if !scanInfo.contains(policy) {
@@ -250,6 +262,7 @@ func scanInfoToScanMetadata(ctx context.Context, scanInfo *ScanInfo) *reporthand
metadata.ScanMetadata.KubescapeVersion = BuildNumber
metadata.ScanMetadata.VerboseMode = scanInfo.VerboseMode
metadata.ScanMetadata.FailThreshold = scanInfo.FailThreshold
metadata.ScanMetadata.ComplianceThreshold = scanInfo.ComplianceThreshold
metadata.ScanMetadata.HostScanner = scanInfo.HostSensorEnabled.GetBool()
metadata.ScanMetadata.VerboseMode = scanInfo.VerboseMode
metadata.ScanMetadata.ControlsInputs = scanInfo.ControlsInputs
@@ -339,11 +352,30 @@ func setContextMetadata(ctx context.Context, contextMetadata *reporthandlingv2.C
BasePath: getAbsPath(input),
HostName: getHostname(),
}
// add repo context for submitting
contextMetadata.RepoContextMetadata = &reporthandlingv2.RepoContextMetadata{
Provider: "none",
Repo: fmt.Sprintf("path@%s", getAbsPath(input)),
Owner: getHostname(),
Branch: "none",
DefaultBranch: "none",
LocalRootPath: getAbsPath(input),
}
case ContextFile:
contextMetadata.FileContextMetadata = &reporthandlingv2.FileContextMetadata{
FilePath: getAbsPath(input),
HostName: getHostname(),
}
// add repo context for submitting
contextMetadata.RepoContextMetadata = &reporthandlingv2.RepoContextMetadata{
Provider: "none",
Repo: fmt.Sprintf("file@%s", getAbsPath(input)),
Owner: getHostname(),
Branch: "none",
DefaultBranch: "none",
LocalRootPath: getAbsPath(input),
}
case ContextGitLocal:
// local
context, err := metadataGitLocal(input)

View File

@@ -2,11 +2,12 @@ package cautils
import (
"fmt"
"os"
"sort"
"strconv"
"strings"
)
const ValueNotFound = -1
func ConvertLabelsToString(labels map[string]string) string {
labelsStr := ""
delimiter := ""
@@ -34,11 +35,31 @@ func ConvertStringToLabels(labelsStr string) map[string]string {
return labels
}
func StringInSlice(strSlice []string, str string) int {
for i := range strSlice {
if strSlice[i] == str {
return i
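// StringSlicesAreEqual reports whether a and b contain the same strings,
// ignoring order. Note that it sorts both input slices in place as a side effect.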
func StringSlicesAreEqual(a, b []string) bool {
if len(a) != len(b) {
return false
}
sort.Strings(a)
sort.Strings(b)
for i := range a {
if a[i] != b[i] {
return false
}
}
return ValueNotFound
return true
}
func ParseIntEnvVar(varName string, defaultValue int) (int, error) {
varValue, exists := os.LookupEnv(varName)
if !exists {
return defaultValue, nil
}
intValue, err := strconv.Atoi(varValue)
if err != nil {
return defaultValue, fmt.Errorf("failed to parse %s env var as int: %w", varName, err)
}
return intValue, nil
}
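// A usage sketch (the variable name is hypothetical); note that the default
// is returned even when parsing fails, alongside the error:
//
// workers, err := ParseIntEnvVar("KS_WORKER_COUNT", 4)
// if err != nil {
//     fmt.Println(err) // workers is still 4 here
// }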

View File

@@ -2,8 +2,11 @@ package cautils
import (
"fmt"
"os"
"strings"
"testing"
"github.com/stretchr/testify/assert"
)
func TestConvertLabelsToString(t *testing.T) {
@@ -33,3 +36,102 @@ func TestConvertStringToLabels(t *testing.T) {
t.Errorf("%s != %s", fmt.Sprintf("%v", rstrMap), fmt.Sprintf("%v", strMap))
}
}
func TestParseIntEnvVar(t *testing.T) {
testCases := []struct {
expectedErr string
name string
varName string
varValue string
defaultValue int
expected int
}{
{
name: "Variable does not exist",
varName: "DOES_NOT_EXIST",
varValue: "",
defaultValue: 123,
expected: 123,
expectedErr: "",
},
{
name: "Variable exists and is a valid integer",
varName: "MY_VAR",
varValue: "456",
defaultValue: 123,
expected: 456,
expectedErr: "",
},
{
name: "Variable exists but is not a valid integer",
varName: "MY_VAR",
varValue: "not_an_integer",
defaultValue: 123,
expected: 123,
expectedErr: "failed to parse MY_VAR env var as int",
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
if tc.varValue != "" {
os.Setenv(tc.varName, tc.varValue)
} else {
os.Unsetenv(tc.varName)
}
actual, err := ParseIntEnvVar(tc.varName, tc.defaultValue)
if tc.expectedErr != "" {
assert.NotNil(t, err)
assert.ErrorContains(t, err, tc.expectedErr)
} else {
assert.Nil(t, err)
}
assert.Equalf(t, tc.expected, actual, "unexpected result")
})
}
}
func TestStringSlicesAreEqual(t *testing.T) {
tt := []struct {
name string
a []string
b []string
want bool
}{
{
name: "equal unsorted slices",
a: []string{"foo", "bar", "baz"},
b: []string{"baz", "foo", "bar"},
want: true,
},
{
name: "equal sorted slices",
a: []string{"bar", "baz", "foo"},
b: []string{"bar", "baz", "foo"},
want: true,
},
{
name: "unequal slices",
a: []string{"foo", "bar", "baz"},
b: []string{"foo", "bar", "qux"},
want: false,
},
{
name: "different length slices",
a: []string{"foo", "bar", "baz"},
b: []string{"foo", "bar"},
want: false,
},
}
for _, tc := range tt {
t.Run(tc.name, func(t *testing.T) {
got := StringSlicesAreEqual(tc.a, tc.b)
if got != tc.want {
t.Errorf("StringSlicesAreEqual(%v, %v) = %v; want %v", tc.a, tc.b, got, tc.want)
}
})
}
}

View File

@@ -31,7 +31,7 @@ type IVersionCheckHandler interface {
func NewIVersionCheckHandler(ctx context.Context) IVersionCheckHandler {
if BuildNumber == "" {
logger.L().Ctx(ctx).Warning("unknown build number, this might affect your scan results. Please make sure you are updated to latest version")
logger.L().Ctx(ctx).Warning("Unknown build number, this might affect your scan results. Please make sure you are updated to latest version")
}
if v, ok := os.LookupEnv(CLIENT_ENV); ok && v != "" {

View File

@@ -32,9 +32,9 @@ var (
}
)
func MapKSResource(ksResourceMap *KSResources, resources []string) []string {
func MapExternalResource(externalResourceMap ExternalResources, resources []string) []string {
var hostResources []string
for k := range *ksResourceMap {
for k := range externalResourceMap {
for _, resource := range resources {
if strings.Contains(k, resource) {
hostResources = append(hostResources, k)
@@ -44,16 +44,16 @@ func MapKSResource(ksResourceMap *KSResources, resources []string) []string {
return hostResources
}
func MapHostResources(ksResourceMap *KSResources) []string {
return MapKSResource(ksResourceMap, HostSensorResources)
func MapHostResources(externalResourceMap ExternalResources) []string {
return MapExternalResource(externalResourceMap, HostSensorResources)
}
func MapImageVulnResources(ksResourceMap *KSResources) []string {
return MapKSResource(ksResourceMap, ImageVulnResources)
func MapImageVulnResources(externalResourceMap ExternalResources) []string {
return MapExternalResource(externalResourceMap, ImageVulnResources)
}
func MapCloudResources(ksResourceMap *KSResources) []string {
return MapKSResource(ksResourceMap, CloudResources)
func MapCloudResources(externalResourceMap ExternalResources) []string {
return MapExternalResource(externalResourceMap, CloudResources)
}
func SetInfoMapForResources(info string, resources []string, errorMap map[string]apis.StatusInfo) {

View File

@@ -9,7 +9,7 @@ import (
func (ks *Kubescape) SetCachedConfig(setConfig *metav1.SetConfig) error {
tenant := getTenantConfig(nil, "", "", getKubernetesApi())
tenant := getTenantConfig(nil, "", "", nil)
if setConfig.Account != "" {
tenant.GetConfigObj().AccountID = setConfig.Account
@@ -45,6 +45,6 @@ func (ks *Kubescape) ViewCachedConfig(viewConfig *metav1.ViewConfig) error {
func (ks *Kubescape) DeleteCachedConfig(ctx context.Context, deleteConfig *metav1.DeleteConfig) error {
tenant := getTenantConfig(nil, "", "", getKubernetesApi()) // change k8sinterface
tenant := getTenantConfig(nil, "", "", nil) // change k8sinterface
return tenant.DeleteCachedConfig(ctx)
}

View File

@@ -65,13 +65,13 @@ func getRBACHandler(tenantConfig cautils.ITenantConfig, k8s *k8sinterface.Kubern
return nil
}
func getReporter(ctx context.Context, tenantConfig cautils.ITenantConfig, reportID string, submit, fwScan bool, scanningContext cautils.ScanningContext) reporter.IReport {
func getReporter(ctx context.Context, tenantConfig cautils.ITenantConfig, reportID string, submit, fwScan bool, scanInfo cautils.ScanInfo) reporter.IReport {
_, span := otel.Tracer("").Start(ctx, "getReporter")
defer span.End()
if submit {
submitData := reporterv2.SubmitContextScan
if scanningContext != cautils.ContextCluster {
if scanInfo.GetScanningContext() != cautils.ContextCluster {
submitData = reporterv2.SubmitContextRepository
}
return reporterv2.NewReportEventReceiver(tenantConfig.GetConfigObj(), reportID, submitData)
@@ -81,7 +81,8 @@ func getReporter(ctx context.Context, tenantConfig cautils.ITenantConfig, report
return reporterv2.NewReportMock("", "")
}
var message string
if !fwScan {
if !fwScan && scanInfo.ScanType != cautils.ScanTypeWorkload {
message = "Kubescape does not submit scan results when scanning controls"
}
@@ -94,11 +95,12 @@ func getResourceHandler(ctx context.Context, scanInfo *cautils.ScanInfo, tenantC
if len(scanInfo.InputPatterns) > 0 || k8s == nil {
// scanInfo.HostSensor.SetBool(false)
return resourcehandler.NewFileResourceHandler(ctx, scanInfo.InputPatterns, registryAdaptors)
return resourcehandler.NewFileResourceHandler()
}
getter.GetKSCloudAPIConnector()
rbacObjects := getRBACHandler(tenantConfig, k8s, scanInfo.Submit)
return resourcehandler.NewK8sResourceHandler(k8s, getFieldSelector(scanInfo), hostSensorHandler, rbacObjects, registryAdaptors)
return resourcehandler.NewK8sResourceHandler(k8s, hostSensorHandler, rbacObjects, registryAdaptors)
}
// getHostSensorHandler yields a IHostSensor that knows how to collect a host's scanned resources.
@@ -133,17 +135,6 @@ func getHostSensorHandler(ctx context.Context, scanInfo *cautils.ScanInfo, k8s *
}
}
func getFieldSelector(scanInfo *cautils.ScanInfo) resourcehandler.IFieldSelector {
if scanInfo.IncludeNamespaces != "" {
return resourcehandler.NewIncludeSelector(scanInfo.IncludeNamespaces)
}
if scanInfo.ExcludedNamespaces != "" {
return resourcehandler.NewExcludeSelector(scanInfo.ExcludedNamespaces)
}
return &resourcehandler.EmptySelector{}
}
func policyIdentifierIdentities(pi []cautils.PolicyIdentifier) string {
policiesIdentities := ""
for i := range pi {
@@ -183,13 +174,13 @@ func setSubmitBehavior(scanInfo *cautils.ScanInfo, tenantConfig cautils.ITenantC
return
}
scanningContext := scanInfo.GetScanningContext()
if scanningContext == cautils.ContextFile || scanningContext == cautils.ContextDir {
if scanInfo.Local {
scanInfo.Submit = false
return
}
if scanInfo.Local {
// do not submit single resource scan to BE
if scanInfo.ScanObject != nil {
scanInfo.Submit = false
return
}
@@ -286,12 +277,12 @@ func getAttackTracksGetter(ctx context.Context, attackTracks, accountID string,
}
// getUIPrinter returns a printer that will be used to print to the programs UI (terminal)
func getUIPrinter(ctx context.Context, verboseMode bool, formatVersion string, attackTree bool, viewType cautils.ViewTypes) printer.IPrinter {
func GetUIPrinter(ctx context.Context, scanInfo *cautils.ScanInfo) printer.IPrinter {
var p printer.IPrinter
if helpers.ToLevel(logger.L().GetLevel()) >= helpers.WarningLevel {
p = &printerv2.SilentPrinter{}
} else {
p = printerv2.NewPrettyPrinter(verboseMode, formatVersion, attackTree, viewType)
p = printerv2.NewPrettyPrinter(scanInfo.VerboseMode, scanInfo.FormatVersion, scanInfo.PrintAttackTree, cautils.ViewTypes(scanInfo.View), scanInfo.ScanType, scanInfo.InputPatterns)
// Since the UI of the program is a CLI (Stdout), it means that it should always print to Stdout
p.SetWriter(ctx, os.Stdout.Name())

View File

@@ -81,7 +81,14 @@ func Test_getUIPrinter(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
logger.L().SetLevel(tt.args.loggerLevel.String())
got := getUIPrinter(tt.args.ctx, tt.args.verboseMode, tt.args.formatVersion, tt.args.printAttack, tt.args.viewType)
scanInfo := &cautils.ScanInfo{
FormatVersion: tt.args.formatVersion,
VerboseMode: tt.args.verboseMode,
PrintAttackTree: tt.args.printAttack,
View: string(tt.args.viewType),
}
got := GetUIPrinter(tt.args.ctx, scanInfo)
assert.Equal(t, tt.want.structType, reflect.TypeOf(got).String())

View File

@@ -11,6 +11,7 @@ import (
metav1 "github.com/kubescape/kubescape/v2/core/meta/datastructures/v1"
"github.com/kubescape/kubescape/v2/core/pkg/resultshandling/printer"
v2 "github.com/kubescape/kubescape/v2/core/pkg/resultshandling/printer/v2"
"github.com/kubescape/kubescape/v2/core/pkg/resultshandling/printer/v2/prettyprinter/tableprinter/utils"
"github.com/olekukonko/tablewriter"
)
@@ -87,19 +88,27 @@ func prettyPrintListFormat(ctx context.Context, targetPolicy string, policies []
return
}
header := fmt.Sprintf("Supported %s", targetPolicy)
policyTable := tablewriter.NewWriter(printer.GetWriter(ctx, ""))
policyTable.SetAutoWrapText(true)
header := fmt.Sprintf("Supported %s", targetPolicy)
policyTable.SetHeader([]string{header})
policyTable.SetHeaderLine(true)
policyTable.SetRowLine(true)
policyTable.SetAlignment(tablewriter.ALIGN_CENTER)
policyTable.SetUnicodeHV(tablewriter.Regular, tablewriter.Regular)
data := v2.Matrix{}
controlRows := generatePolicyRows(policies)
var headerColors []tablewriter.Colors
for range controlRows[0] {
headerColors = append(headerColors, tablewriter.Colors{tablewriter.Bold, tablewriter.FgHiYellowColor})
}
policyTable.SetHeaderColor(headerColors...)
data = append(data, controlRows...)
policyTable.SetAlignment(tablewriter.ALIGN_CENTER)
policyTable.AppendBulk(data)
policyTable.Render()
}
@@ -112,13 +121,29 @@ func jsonListFormat(_ context.Context, _ string, policies []string) {
func prettyPrintControls(ctx context.Context, policies []string) {
controlsTable := tablewriter.NewWriter(printer.GetWriter(ctx, ""))
controlsTable.SetAutoWrapText(true)
controlsTable.SetHeader([]string{"Control ID", "Control Name", "Docs", "Frameworks"})
controlsTable.SetAutoWrapText(false)
controlsTable.SetHeaderLine(true)
controlsTable.SetRowLine(true)
data := v2.Matrix{}
controlsTable.SetUnicodeHV(tablewriter.Regular, tablewriter.Regular)
controlRows := generateControlRows(policies)
short := utils.CheckShortTerminalWidth(controlRows, []string{"Control ID", "Control Name", "Docs", "Frameworks"})
if short {
controlsTable.SetAutoWrapText(false)
controlsTable.SetHeader([]string{"Controls"})
controlRows = shortFormatControlRows(controlRows)
} else {
controlsTable.SetHeader([]string{"Control ID", "Control Name", "Docs", "Frameworks"})
}
var headerColors []tablewriter.Colors
for range controlRows[0] {
headerColors = append(headerColors, tablewriter.Colors{tablewriter.Bold, tablewriter.FgHiYellowColor})
}
controlsTable.SetHeaderColor(headerColors...)
data := v2.Matrix{}
data = append(data, controlRows...)
controlsTable.AppendBulk(data)
@@ -134,7 +159,7 @@ func generateControlRows(policies []string) [][]string {
docs := cautils.GetControlLink(id)
currentRow := []string{id, control, docs, framework}
currentRow := []string{id, control, docs, strings.Replace(framework, " ", "\n", -1)}
rows = append(rows, currentRow)
}
@@ -151,3 +176,11 @@ func generatePolicyRows(policies []string) [][]string {
}
return rows
}
func shortFormatControlRows(controlRows [][]string) [][]string {
rows := [][]string{}
for _, controlRow := range controlRows {
rows = append(rows, []string{fmt.Sprintf("Control ID"+strings.Repeat(" ", 3)+": %+v\nControl Name"+strings.Repeat(" ", 1)+": %+v\nDocs"+strings.Repeat(" ", 9)+": %+v\nFrameworks"+strings.Repeat(" ", 3)+": %+v", controlRow[0], controlRow[1], controlRow[2], strings.Replace(controlRow[3], "\n", " ", -1))})
}
return rows
}
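// With a narrow terminal, each control collapses into a single multi-line
// table cell, e.g. (values illustrative):
//
// Control ID   : C-0001
// Control Name : Forbidden Container Registries
// Docs         : https://hub.armosec.io/docs/c-0001
// Frameworks   : NSA, MITRE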

View File

@@ -6,7 +6,9 @@ import (
"github.com/kubescape/go-logger"
"github.com/kubescape/go-logger/helpers"
"github.com/kubescape/go-logger/iconlogger"
"github.com/kubescape/k8s-interface/k8sinterface"
"github.com/kubescape/k8s-interface/workloadinterface"
"github.com/kubescape/kubescape/v2/core/cautils"
"github.com/kubescape/kubescape/v2/core/cautils/getter"
"github.com/kubescape/kubescape/v2/core/pkg/hostsensorutils"
@@ -17,8 +19,10 @@ import (
"github.com/kubescape/kubescape/v2/core/pkg/resultshandling"
"github.com/kubescape/kubescape/v2/core/pkg/resultshandling/printer"
"github.com/kubescape/kubescape/v2/core/pkg/resultshandling/reporter"
"github.com/kubescape/kubescape/v2/pkg/imagescan"
apisv1 "github.com/kubescape/opa-utils/httpserver/apis/v1"
"go.opentelemetry.io/otel"
"golang.org/x/exp/slices"
"github.com/kubescape/opa-utils/resources"
)
@@ -27,9 +31,9 @@ type componentInterfaces struct {
tenantConfig cautils.ITenantConfig
resourceHandler resourcehandler.IResourceHandler
report reporter.IReport
outputPrinters []printer.IPrinter
uiPrinter printer.IPrinter
hostSensorHandler hostsensorutils.IHostSensor
outputPrinters []printer.IPrinter
}
func getInterfaces(ctx context.Context, scanInfo *cautils.ScanInfo) componentInterfaces {
@@ -80,10 +84,7 @@ func getInterfaces(ctx context.Context, scanInfo *cautils.ScanInfo) componentInt
// ================== setup registry adaptors ======================================
registryAdaptors, err := resourcehandler.NewRegistryAdaptors()
if err != nil {
logger.L().Ctx(ctx).Error("failed to initialize registry adaptors", helpers.Error(err))
}
registryAdaptors, _ := resourcehandler.NewRegistryAdaptors()
// ================== setup resource collector object ======================================
@@ -92,19 +93,12 @@ func getInterfaces(ctx context.Context, scanInfo *cautils.ScanInfo) componentInt
// ================== setup reporter & printer objects ======================================
// reporting behavior - setup reporter
reportHandler := getReporter(ctx, tenantConfig, scanInfo.ScanID, scanInfo.Submit, scanInfo.FrameworkScan, scanInfo.GetScanningContext())
reportHandler := getReporter(ctx, tenantConfig, scanInfo.ScanID, scanInfo.Submit, scanInfo.FrameworkScan, *scanInfo)
// setup printers
formats := scanInfo.Formats()
outputPrinters := GetOutputPrinters(scanInfo, ctx)
outputPrinters := make([]printer.IPrinter, 0)
for _, format := range formats {
printerHandler := resultshandling.NewPrinter(ctx, format, scanInfo.FormatVersion, scanInfo.PrintAttackTree, scanInfo.VerboseMode, cautils.ViewTypes(scanInfo.View))
printerHandler.SetWriter(ctx, scanInfo.Output)
outputPrinters = append(outputPrinters, printerHandler)
}
uiPrinter := getUIPrinter(ctx, scanInfo.VerboseMode, scanInfo.FormatVersion, scanInfo.PrintAttackTree, cautils.ViewTypes(scanInfo.View))
uiPrinter := GetUIPrinter(ctx, scanInfo)
// ================== return interface ======================================
@@ -118,10 +112,22 @@ func getInterfaces(ctx context.Context, scanInfo *cautils.ScanInfo) componentInt
}
}
func GetOutputPrinters(scanInfo *cautils.ScanInfo, ctx context.Context) []printer.IPrinter {
formats := scanInfo.Formats()
outputPrinters := make([]printer.IPrinter, 0)
for _, format := range formats {
printerHandler := resultshandling.NewPrinter(ctx, format, scanInfo.FormatVersion, scanInfo.PrintAttackTree, scanInfo.VerboseMode, cautils.ViewTypes(scanInfo.View))
printerHandler.SetWriter(ctx, scanInfo.Output)
outputPrinters = append(outputPrinters, printerHandler)
}
return outputPrinters
}
func (ks *Kubescape) Scan(ctx context.Context, scanInfo *cautils.ScanInfo) (*resultshandling.ResultsHandler, error) {
ctxInit, spanInit := otel.Tracer("").Start(ctx, "initialization")
logger.L().Info("Kubescape scanner starting")
logger.InitLogger(iconlogger.LoggerName)
logger.L().Start("Kubescape scanner initializing")
// ===================== Initialization =====================
scanInfo.Init(ctxInit) // initialize scan info
@@ -149,21 +155,32 @@ func (ks *Kubescape) Scan(ctx context.Context, scanInfo *cautils.ScanInfo) (*res
// remove host scanner components
defer func() {
if err := interfaces.hostSensorHandler.TearDown(); err != nil {
logger.L().Ctx(ctx).Error("failed to tear down host scanner", helpers.Error(err))
logger.L().Ctx(ctx).StopError("Failed to tear down host scanner", helpers.Error(err))
}
}()
logger.L().StopSuccess("Initialized scanner")
resultsHandling := resultshandling.NewResultsHandler(interfaces.report, interfaces.outputPrinters, interfaces.uiPrinter)
// ===================== policies & resources =====================
ctxPolicies, spanPolicies := otel.Tracer("").Start(ctxInit, "policies & resources")
policyHandler := policyhandler.NewPolicyHandler(interfaces.resourceHandler)
scanData, err := policyHandler.CollectResources(ctxPolicies, scanInfo.PolicyIdentifier, scanInfo, cautils.NewProgressHandler(""))
// ===================== policies =====================
ctxPolicies, spanPolicies := otel.Tracer("").Start(ctxInit, "policies")
policyHandler := policyhandler.NewPolicyHandler()
scanData, err := policyHandler.CollectPolicies(ctxPolicies, scanInfo.PolicyIdentifier, scanInfo)
if err != nil {
spanInit.End()
return resultsHandling, err
}
spanPolicies.End()
// ===================== resources =====================
ctxResources, spanResources := otel.Tracer("").Start(ctxInit, "resources")
err = resourcehandler.CollectResources(ctxResources, interfaces.resourceHandler, scanInfo.PolicyIdentifier, scanData, cautils.NewProgressHandler(""), scanInfo)
if err != nil {
spanInit.End()
return resultsHandling, err
}
spanResources.End()
spanInit.End()
// ========================= opa testing =====================
@@ -172,20 +189,28 @@ func (ks *Kubescape) Scan(ctx context.Context, scanInfo *cautils.ScanInfo) (*res
deps := resources.NewRegoDependenciesData(k8sinterface.GetK8sConfig(), interfaces.tenantConfig.GetContextName())
reportResults := opaprocessor.NewOPAProcessor(scanData, deps)
if err := reportResults.ProcessRulesListener(ctxOpa, cautils.NewProgressHandler("")); err != nil {
if err := reportResults.ProcessRulesListener(ctxOpa, cautils.NewProgressHandler(""), scanInfo); err != nil {
// TODO - do something
return resultsHandling, fmt.Errorf("%w", err)
}
// ======================== prioritization ===================
_, spanPrioritization := otel.Tracer("").Start(ctxOpa, "prioritization")
if priotizationHandler, err := resourcesprioritization.NewResourcesPrioritizationHandler(ctxOpa, scanInfo.Getters.AttackTracksGetter, scanInfo.PrintAttackTree); err != nil {
logger.L().Ctx(ctx).Warning("failed to get attack tracks, this may affect the scanning results", helpers.Error(err))
} else if err := priotizationHandler.PrioritizeResources(scanData); err != nil {
return resultsHandling, fmt.Errorf("%w", err)
if scanInfo.PrintAttackTree || isPrioritizationScanType(scanInfo.ScanType) {
_, spanPrioritization := otel.Tracer("").Start(ctxOpa, "prioritization")
if prioritizationHandler, err := resourcesprioritization.NewResourcesPrioritizationHandler(ctxOpa, scanInfo.Getters.AttackTracksGetter, scanInfo.PrintAttackTree); err != nil {
logger.L().Ctx(ctx).Warning("failed to get attack tracks, this may affect the scanning results", helpers.Error(err))
} else if err := prioritizationHandler.PrioritizeResources(scanData); err != nil {
return resultsHandling, fmt.Errorf("%w", err)
} else if isPrioritizationScanType(scanInfo.ScanType) {
// set top workloads only when prioritization actually succeeded
scanData.SetTopWorkloads()
}
spanPrioritization.End()
}
spanPrioritization.End()
if scanInfo.ScanImages {
scanImages(ctx, scanInfo.ScanType, scanData, resultsHandling)
}
// ========================= results handling =====================
resultsHandling.SetData(scanData)
@@ -195,3 +220,62 @@ func (ks *Kubescape) Scan(ctx context.Context, scanInfo *cautils.ScanInfo) (*res
return resultsHandling, nil
}
func scanImages(ctx context.Context, scanType cautils.ScanTypes, scanData *cautils.OPASessionObj, resultsHandling *resultshandling.ResultsHandler) {
imagesToScan := []string{}
if scanType == cautils.ScanTypeWorkload {
containers, err := workloadinterface.NewWorkloadObj(scanData.SingleResourceScan.GetObject()).GetContainers()
if err != nil {
logger.L().Error("failed to get containers", helpers.Error(err))
return
}
for _, container := range containers {
if !slices.Contains(imagesToScan, container.Image) {
imagesToScan = append(imagesToScan, container.Image)
}
}
} else {
for _, workload := range scanData.AllResources {
containers, err := workloadinterface.NewWorkloadObj(workload.GetObject()).GetContainers()
if err != nil {
logger.L().Error(fmt.Sprintf("failed to get containers for kind: %s, name: %s, namespace: %s", workload.GetKind(), workload.GetName(), workload.GetNamespace()), helpers.Error(err))
continue
}
for _, container := range containers {
if !slices.Contains(imagesToScan, container.Image) {
imagesToScan = append(imagesToScan, container.Image)
}
}
}
}
dbCfg, _ := imagescan.NewDefaultDBConfig()
svc := imagescan.NewScanService(dbCfg)
for _, img := range imagesToScan {
logger.L().Start("Scanning", helpers.String("image", img))
if err := scanSingleImage(ctx, img, svc, resultsHandling); err != nil {
logger.L().StopError("failed to scan", helpers.String("image", img), helpers.Error(err))
continue
}
logger.L().StopSuccess("Scanned successfully", helpers.String("image", img))
}
}
func scanSingleImage(ctx context.Context, img string, svc imagescan.Service, resultsHandling *resultshandling.ResultsHandler) error {
scanResults, err := svc.Scan(ctx, img, imagescan.RegistryCredentials{})
if err != nil {
return err
}
resultsHandling.ImageScanData = append(resultsHandling.ImageScanData, cautils.ImageScanData{
Image: img,
PresenterConfig: scanResults,
})
return nil
}
func isPrioritizationScanType(scanType cautils.ScanTypes) bool {
return scanType == cautils.ScanTypeCluster || scanType == cautils.ScanTypeRepo
}

core/metrics/metrics.go (new file, 59 lines)
View File

@@ -0,0 +1,59 @@
package metrics
import (
"context"
"strings"
"sync"
"github.com/kubescape/go-logger"
"github.com/kubescape/go-logger/helpers"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/metric"
)
const (
METER_NAME = "github.com/kubescape/kubescape/v2"
METRIC_NAME_PREFIX = "kubescape"
)
var initOnce sync.Once
// Metrics are defined here
var (
kubernetesResourcesCount metric.Int64UpDownCounter
workerNodesCount metric.Int64UpDownCounter
)
// Init initializes the metrics
func Init() {
initOnce.Do(func() {
var err error
meterProvider := otel.GetMeterProvider()
meter := meterProvider.Meter(METER_NAME)
metricName := func(name string) string {
return strings.Join([]string{METRIC_NAME_PREFIX, name}, "_")
}
if kubernetesResourcesCount, err = meter.Int64UpDownCounter(metricName("kubernetes_resources_count")); err != nil {
logger.L().Error("failed to register instrument", helpers.Error(err))
}
if workerNodesCount, err = meter.Int64UpDownCounter(metricName("worker_nodes_count")); err != nil {
logger.L().Error("failed to register instrument", helpers.Error(err))
}
})
}
// UpdateKubernetesResourcesCount updates the kubernetes resources count metric
func UpdateKubernetesResourcesCount(ctx context.Context, value int64) {
if kubernetesResourcesCount != nil {
kubernetesResourcesCount.Add(ctx, value)
}
}
// UpdateWorkerNodesCount updates the worker nodes count metric
func UpdateWorkerNodesCount(ctx context.Context, value int64) {
if workerNodesCount != nil {
workerNodesCount.Add(ctx, value)
}
}
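// A usage sketch (counts are illustrative; ctx is any context.Context):
//
// metrics.Init()
// metrics.UpdateKubernetesResourcesCount(ctx, 120)
// metrics.UpdateWorkerNodesCount(ctx, 3)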

View File

@@ -4,6 +4,7 @@ import (
"encoding/json"
"github.com/armosec/armoapi-go/armotypes"
"github.com/armosec/armoapi-go/identifiers"
"github.com/kubescape/k8s-interface/workloadinterface"
"github.com/kubescape/opa-utils/reporthandling"
)
@@ -38,9 +39,17 @@ func MockFramework_0006_0013() *reporthandling.Framework {
Name: "framework-0006-0013",
},
}
c06 := &reporthandling.Control{}
c06 := &reporthandling.Control{ScanningScope: &reporthandling.ScanningScope{
Matches: []reporthandling.ScanningScopeType{
reporthandling.ScopeCluster,
},
}}
json.Unmarshal([]byte(mockControl_0006), c06)
c13 := &reporthandling.Control{}
c13 := &reporthandling.Control{ScanningScope: &reporthandling.ScanningScope{
Matches: []reporthandling.ScanningScopeType{
reporthandling.ScopeCluster,
},
}}
json.Unmarshal([]byte(mockControl_0013), c13)
fw.Controls = []reporthandling.Control{*c06, *c13}
return fw
@@ -53,7 +62,11 @@ func MockFramework_0044() *reporthandling.Framework {
Name: "framework-0044",
},
}
c44 := &reporthandling.Control{}
c44 := &reporthandling.Control{ScanningScope: &reporthandling.ScanningScope{
Matches: []reporthandling.ScanningScopeType{
reporthandling.ScopeCluster,
},
}}
json.Unmarshal([]byte(mockControl_0044), c44)
fw.Controls = []reporthandling.Control{*c44}
@@ -73,11 +86,11 @@ func MockExceptionAllKinds(policy *armotypes.PosturePolicy) *armotypes.PostureEx
return &armotypes.PostureExceptionPolicy{
PosturePolicies: []armotypes.PosturePolicy{*policy},
Actions: []armotypes.PostureExceptionPolicyActions{armotypes.AlertOnly},
Resources: []armotypes.PortalDesignator{
Resources: []identifiers.PortalDesignator{
{
DesignatorType: armotypes.DesignatorAttributes,
DesignatorType: identifiers.DesignatorAttributes,
Attributes: map[string]string{
armotypes.AttributeKind: ".*",
identifiers.AttributeKind: ".*",
},
},
},

View File

@@ -3,7 +3,7 @@ package containerscan
import (
"strings"
"github.com/armosec/armoapi-go/armotypes"
"github.com/armosec/armoapi-go/identifiers"
)
func (layer *ScanResultLayer) GetFilesByPackage(pkgname string) (files *PkgFiles) {
@@ -24,11 +24,11 @@ func (layer *ScanResultLayer) GetPackagesNames() []string {
return pkgsNames
}
func (scanresult *ScanResultReport) GetDesignatorsNContext() (*armotypes.PortalDesignator, []armotypes.ArmoContext) {
designatorsObj := armotypes.AttributesDesignatorsFromWLID(scanresult.WLID)
func (scanresult *ScanResultReport) GetDesignatorsNContext() (*identifiers.PortalDesignator, []identifiers.ArmoContext) {
designatorsObj := identifiers.AttributesDesignatorsFromWLID(scanresult.WLID)
designatorsObj.Attributes["containerName"] = scanresult.ContainerName
designatorsObj.Attributes["customerGUID"] = scanresult.CustomerGUID
contextObj := armotypes.DesignatorToArmoContext(designatorsObj, "designators")
contextObj := identifiers.DesignatorToArmoContext(designatorsObj, "designators")
return designatorsObj, contextObj
}

View File

@@ -1,7 +1,7 @@
package containerscan
import (
"github.com/armosec/armoapi-go/armotypes"
"github.com/armosec/armoapi-go/identifiers"
cautils "github.com/armosec/utils-k8s-go/armometadata"
)
@@ -69,8 +69,8 @@ func (scanresult *ScanResultReport) Summarize() *ElasticContainerScanSummaryResu
ListOfDangerousArtifcats: scanresult.ListOfDangerousArtifcats,
}
summary.Cluster = designatorsObj.Attributes[armotypes.AttributeCluster]
summary.Namespace = designatorsObj.Attributes[armotypes.AttributeNamespace]
summary.Cluster = designatorsObj.Attributes[identifiers.AttributeCluster]
summary.Namespace = designatorsObj.Attributes[identifiers.AttributeNamespace]
imageInfo, e2 := cautils.ImageTagToImageInfo(scanresult.ImgTag)
if e2 == nil {

View File

@@ -1,10 +1,12 @@
package containerscan
import "github.com/armosec/armoapi-go/armotypes"
import (
"github.com/armosec/armoapi-go/identifiers"
)
type ElasticContainerVulnerabilityResult struct {
Designators armotypes.PortalDesignator `json:"designators"`
Context []armotypes.ArmoContext `json:"context"`
Designators identifiers.PortalDesignator `json:"designators"`
Context []identifiers.ArmoContext `json:"context"`
WLID string `json:"wlid"`
ContainerScanID string `json:"containersScanID"`
@@ -35,8 +37,8 @@ type SeverityStats struct {
}
type ElasticContainerScanSeveritySummary struct {
Designators armotypes.PortalDesignator `json:"designators"`
Context []armotypes.ArmoContext `json:"context"`
Designators identifiers.PortalDesignator `json:"designators"`
Context []identifiers.ArmoContext `json:"context"`
SeverityStats
CustomerGUID string `json:"customerGUID"`
@@ -57,8 +59,8 @@ type ElasticContainerScanSeveritySummary struct {
type ElasticContainerScanSummaryResult struct {
SeverityStats
Designators armotypes.PortalDesignator `json:"designators"`
Context []armotypes.ArmoContext `json:"context"`
Designators identifiers.PortalDesignator `json:"designators"`
Context []identifiers.ArmoContext `json:"context"`
CustomerGUID string `json:"customerGUID"`
ContainerScanID string `json:"containersScanID"`

View File

@@ -20,7 +20,7 @@ type FixHandler struct {
// ResourceFixInfo is a struct that holds the information about the resource that needs to be fixed
type ResourceFixInfo struct {
YamlExpressions map[string]*armotypes.FixPath
YamlExpressions map[string]armotypes.FixPath
Resource *reporthandling.Resource
FilePath string
DocumentIndex int

View File

@@ -73,19 +73,17 @@ func isSupportedScanningTarget(report *reporthandlingv2.PostureReport) error {
}
func getLocalPath(report *reporthandlingv2.PostureReport) string {
if report.Metadata.ScanMetadata.ScanningTarget == reporthandlingv2.GitLocal {
switch report.Metadata.ScanMetadata.ScanningTarget {
case reporthandlingv2.GitLocal:
return report.Metadata.ContextMetadata.RepoContextMetadata.LocalRootPath
}
if report.Metadata.ScanMetadata.ScanningTarget == reporthandlingv2.Directory {
case reporthandlingv2.Directory:
return report.Metadata.ContextMetadata.DirectoryContextMetadata.BasePath
}
if report.Metadata.ScanMetadata.ScanningTarget == reporthandlingv2.File {
case reporthandlingv2.File:
return filepath.Dir(report.Metadata.ContextMetadata.FileContextMetadata.FilePath)
default:
return ""
}
return ""
}
func (h *FixHandler) buildResourcesMap() map[string]*reporthandling.Resource {
@@ -155,7 +153,7 @@ func (h *FixHandler) PrepareResourcesToFix(ctx context.Context) []ResourceFixInf
rfi := ResourceFixInfo{
FilePath: absolutePath,
Resource: resourceObj,
YamlExpressions: make(map[string]*armotypes.FixPath, 0),
YamlExpressions: make(map[string]armotypes.FixPath, 0),
DocumentIndex: documentIndex,
}
@@ -185,7 +183,7 @@ func (h *FixHandler) PrintExpectedChanges(resourcesToFix []ResourceFixInfo) {
i := 1
for _, fixPath := range resourceFixInfo.YamlExpressions {
sb.WriteString(fmt.Sprintf("\t%d) %s = %s\n", i, (*fixPath).Path, (*fixPath).Value))
sb.WriteString(fmt.Sprintf("\t%d) %s = %s\n", i, fixPath.Path, fixPath.Value))
i++
}
sb.WriteString("\n------\n")
@@ -201,14 +199,14 @@ func (h *FixHandler) ApplyChanges(ctx context.Context, resourcesToFix []Resource
fileYamlExpressions := h.getFileYamlExpressions(resourcesToFix)
for filepath, yamlExpression := range fileYamlExpressions {
fileAsString, err := getFileString(filepath)
fileAsString, err := GetFileString(filepath)
if err != nil {
errors = append(errors, err)
continue
}
fixedYamlString, err := h.ApplyFixToContent(ctx, fileAsString, yamlExpression)
fixedYamlString, err := ApplyFixToContent(ctx, fileAsString, yamlExpression)
if err != nil {
errors = append(errors, fmt.Errorf("Failed to fix file %s: %w ", filepath, err))
@@ -242,7 +240,8 @@ func (h *FixHandler) getFilePathAndIndex(filePathWithIndex string) (filePath str
}
}
func (h *FixHandler) ApplyFixToContent(ctx context.Context, yamlAsString, yamlExpression string) (fixedString string, err error) {
func ApplyFixToContent(ctx context.Context, yamlAsString, yamlExpression string) (fixedString string, err error) {
yamlAsString = sanitizeYaml(yamlAsString)
newline := determineNewlineSeparator(yamlAsString)
yamlLines := strings.Split(yamlAsString, newline)
@@ -264,6 +263,7 @@ func (h *FixHandler) ApplyFixToContent(ctx context.Context, yamlAsString, yamlEx
fixedYamlLines := getFixedYamlLines(yamlLines, fixInfo, newline)
fixedString = getStringFromSlice(fixedYamlLines, newline)
fixedString = revertSanitizeYaml(fixedString)
return fixedString, nil
}
@@ -301,8 +301,8 @@ func (rfi *ResourceFixInfo) addYamlExpressionsFromResourceAssociatedControl(docu
continue
}
yamlExpression := fixPathToValidYamlExpression(rulePaths.FixPath.Path, rulePaths.FixPath.Value, documentIndex)
rfi.YamlExpressions[yamlExpression] = &rulePaths.FixPath
yamlExpression := FixPathToValidYamlExpression(rulePaths.FixPath.Path, rulePaths.FixPath.Value, documentIndex)
rfi.YamlExpressions[yamlExpression] = rulePaths.FixPath
}
}
}
@@ -317,7 +317,7 @@ func reduceYamlExpressions(resource *ResourceFixInfo) string {
return strings.Join(expressions, " | ")
}
func fixPathToValidYamlExpression(fixPath, value string, documentIndexInYaml int) string {
func FixPathToValidYamlExpression(fixPath, value string, documentIndexInYaml int) string {
isStringValue := true
if _, err := strconv.ParseBool(value); err == nil {
isStringValue = false
@@ -340,7 +340,7 @@ func joinStrings(inputStrings ...string) string {
return strings.Join(inputStrings, "")
}
func getFileString(filepath string) (string, error) {
func GetFileString(filepath string) (string, error) {
bytes, err := os.ReadFile(filepath)
if err != nil {
@@ -368,3 +368,28 @@ func determineNewlineSeparator(contents string) string {
return unixNewline
}
}
// sanitizeYaml receives a YAML file as a string, sanitizes it and returns the result
//
// Callers should remember to call the corresponding revertSanitizeYaml function.
//
// It applies the following sanitization:
//
// - Since `yaml/v3` fails to serialize documents starting with a document
// separator, we comment it out to be compatible.
func sanitizeYaml(fileAsString string) string {
if strings.HasPrefix(fileAsString, "---") {
fileAsString = "# " + fileAsString
}
return fileAsString
}
// revertSanitizeYaml receives a sanitized YAML file as a string and reverts the applied sanitization
//
// For sanitization details, refer to the sanitizeYaml() function.
func revertSanitizeYaml(fixedYamlString string) string {
if strings.HasPrefix(fixedYamlString, "# ---") {
fixedYamlString = fixedYamlString[2:]
}
return fixedYamlString
}
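
A minimal round-trip sketch of the two helpers above, assuming it lives in the same package (the example function itself is illustrative):

package fixhandler

import "fmt"

// ExampleSanitizeRoundTrip: sanitizeYaml comments out a leading document
// separator so yaml/v3 can serialize the document; revertSanitizeYaml
// restores the separator once the fix has been applied.
func ExampleSanitizeRoundTrip() {
	in := "---\napiVersion: v1\nkind: Pod\n"
	sanitized := sanitizeYaml(in)             // "# ---\napiVersion: ..."
	restored := revertSanitizeYaml(sanitized) // "---\napiVersion: ..."
	fmt.Println(restored == in)               // true
}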

View File

@@ -101,6 +101,13 @@ func getTestCases() []indentationTestCase {
"inserts/tc-11-01-expected.yaml",
},
// Starts with ---
{
"inserts/tc-12-00-begin-with-document-separator.yaml",
"select(di==0).spec.containers[0].securityContext.allowPrivilegeEscalation |= false",
"inserts/tc-12-01-expected.yaml",
},
// Removal Scenarios
{
"removals/tc-01-00-input.yaml",
@@ -118,10 +125,10 @@ func getTestCases() []indentationTestCase {
"removals/tc-03-01-expected.yaml",
},
{
"removes/tc-04-00-input.yaml",
"removals/tc-04-00-input.yaml",
`del(select(di==0).spec.containers[0].securityContext) |
del(select(di==1).spec.containers[1])`,
"removes/tc-04-01-expected.yaml",
"removals/tc-04-01-expected.yaml",
},
// Replace Scenarios
@@ -162,6 +169,12 @@ func getTestCases() []indentationTestCase {
select(di==0).spec.securityContext.runAsRoot |= false`,
"hybrids/tc-04-01-expected.yaml",
},
{
"hybrids/tc-05-00-input-leading-doc-separator.yaml",
`del(select(di==0).spec.containers[0].securityContext) |
select(di==0).spec.securityContext.runAsRoot |= false`,
"hybrids/tc-05-01-expected.yaml",
},
}
return indentationTestCases
@@ -169,22 +182,28 @@ func getTestCases() []indentationTestCase {
func TestApplyFixKeepsFormatting(t *testing.T) {
testCases := getTestCases()
getTestDataPath := func(filename string) string {
currentFile := "testdata/" + filename
return filepath.Join(testutils.CurrentDir(), currentFile)
}
for _, tc := range testCases {
t.Run(tc.inputFile, func(t *testing.T) {
getTestDataPath := func(filename string) string {
currentFile := "testdata/" + filename
return filepath.Join(testutils.CurrentDir(), currentFile)
inputFilename := getTestDataPath(tc.inputFile)
input, err := os.ReadFile(inputFilename)
if err != nil {
t.Fatalf(`Unable to open file %s due to: %v`, inputFilename, err)
}
expectedFilename := getTestDataPath(tc.expectedFile)
wantRaw, err := os.ReadFile(expectedFilename)
if err != nil {
t.Fatalf(`Unable to open file %s due to: %v`, expectedFilename, err)
}
input, _ := os.ReadFile(getTestDataPath(tc.inputFile))
wantRaw, _ := os.ReadFile(getTestDataPath(tc.expectedFile))
want := string(wantRaw)
expression := tc.yamlExpression
h, _ := NewFixHandlerMock()
got, _ := h.ApplyFixToContent(context.TODO(), string(input), expression)
fileAsString := string(input)
got, _ := ApplyFixToContent(context.TODO(), fileAsString, expression)
assert.Equalf(
t, want, got,
@@ -241,7 +260,7 @@ func Test_fixPathToValidYamlExpression(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := fixPathToValidYamlExpression(tt.args.fixPath, tt.args.value, tt.args.documentIndexInYaml); got != tt.want {
if got := FixPathToValidYamlExpression(tt.args.fixPath, tt.args.value, tt.args.documentIndexInYaml); got != tt.want {
t.Errorf("fixPathToValidYamlExpression() = %v, want %v", got, tt.want)
}
})

View File

@@ -0,0 +1,22 @@
# Fix to Apply:
# REMOVE:
# "del(select(di==0).spec.containers[0].securityContext)"
# INSERT:
# select(di==0).spec.securityContext.runAsRoot: false
---
apiVersion: v1
kind: Pod
metadata:
name: insert_to_mapping_node_1
spec:
containers:
- name: nginx_container
image: nginx
securityContext:
runAsRoot: true

View File

@@ -0,0 +1,22 @@
# Fix to Apply:
# REMOVE:
# "del(select(di==0).spec.containers[0].securityContext)"
# INSERT:
# select(di==0).spec.securityContext.runAsRoot: false
---
apiVersion: v1
kind: Pod
metadata:
name: insert_to_mapping_node_1
spec:
containers:
- name: nginx_container
image: nginx
securityContext:
runAsRoot: false

View File

@@ -0,0 +1,10 @@
---
apiVersion: v1
kind: Pod
metadata:
name: begin-with-document-separator
spec:
containers:
- name: nginx_container
image: nginx

View File

@@ -0,0 +1,12 @@
---
apiVersion: v1
kind: Pod
metadata:
name: begin-with-document-separator
spec:
containers:
- name: nginx_container
image: nginx
securityContext:
allowPrivilegeEscalation: false

View File

@@ -23,8 +23,6 @@ func decodeDocumentRoots(yamlAsString string) ([]yaml.Node, error) {
var node yaml.Node
err := dec.Decode(&node)
nodes = append(nodes, node)
if errors.Is(err, io.EOF) {
break
}
@@ -32,6 +30,8 @@ func decodeDocumentRoots(yamlAsString string) ([]yaml.Node, error) {
return nil, fmt.Errorf("Cannot Decode File as YAML")
}
nodes = append(nodes, node)
}
return nodes, nil

View File

@@ -6,13 +6,13 @@ metadata:
k8s-app: kubescape-host-scanner
kubernetes.io/metadata.name: kubescape-host-scanner
tier: kubescape-host-scanner-control-plane
name: kubescape-host-scanner
name: kubescape
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: host-scanner
namespace: kubescape-host-scanner
namespace: kubescape
labels:
app: host-scanner
k8s-app: kubescape-host-scanner
@@ -32,7 +32,7 @@ spec:
- operator: Exists
containers:
- name: host-sensor
image: quay.io/kubescape/host-scanner:v1.0.54
image: quay.io/kubescape/host-scanner:v1.0.61
securityContext:
allowPrivilegeEscalation: true
privileged: true
@@ -52,12 +52,17 @@ spec:
volumeMounts:
- mountPath: /host_fs
name: host-filesystem
readinessProbe:
startupProbe:
httpGet:
path: /kernelVersion
path: /readyz
port: 7888
initialDelaySeconds: 1
periodSeconds: 1
failureThreshold: 30
periodSeconds: 1
livenessProbe:
httpGet:
path: /healthz
port: 7888
periodSeconds: 10
terminationGracePeriodSeconds: 120
dnsPolicy: ClusterFirstWithHostNet
automountServiceAccountToken: false

View File

@@ -17,6 +17,7 @@ import (
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/watch"
)
@@ -51,7 +52,7 @@ func NewHostSensorHandler(k8sObj *k8sinterface.KubernetesApi, hostSensorYAMLFile
if hostSensorYAMLFile != "" {
d, err := loadHostSensorFromFile(hostSensorYAMLFile)
if err != nil {
return nil, fmt.Errorf("failed to load host-scan yaml file, reason: %w", err)
return nil, fmt.Errorf("failed to load host-scanner yaml file, reason: %w", err)
}
hostSensorYAML = d
}
@@ -82,7 +83,10 @@ func (hsh *HostSensorHandler) Init(ctx context.Context) error {
// store pod names
// make sure all pods are running; after X seconds, treat them as running anyway and log an error for the pods not yet running
logger.L().Info("Installing host scanner")
logger.L().Debug("The host scanner is a DaemonSet that runs on each node in the cluster. The DaemonSet will be running in its own namespace and will be deleted once the scan is completed. If you do not wish to install the host scanner, please run the scan without the --enable-host-scan flag.")
// log is used to avoid log duplication
// coming from the different host-scanner instances
log := NewLogCoupling()
cautils.StartSpinner()
defer cautils.StopSpinner()
@@ -91,9 +95,9 @@ func (hsh *HostSensorHandler) Init(ctx context.Context) error {
return fmt.Errorf("failed to apply host scanner YAML, reason: %v", err)
}
hsh.populatePodNamesToNodeNames(ctx)
hsh.populatePodNamesToNodeNames(ctx, log)
if err := hsh.checkPodForEachNode(); err != nil {
logger.L().Ctx(ctx).Warning("failed to validate host-sensor pods status", helpers.Error(err))
logger.L().Ctx(ctx).Warning(failedToValidateHostSensorPodStatus, helpers.Error(err))
}
return nil
@@ -131,7 +135,7 @@ func (hsh *HostSensorHandler) applyYAML(ctx context.Context) error {
}
// Get namespace name
namespaceName := ""
namespaceName := cautils.GetConfigMapNamespace()
for i := range workloads {
if workloads[i].GetKind() == "Namespace" {
namespaceName = workloads[i].GetName()
@@ -149,6 +153,7 @@ func (hsh *HostSensorHandler) applyYAML(ctx context.Context) error {
}
// set namespace in all objects
if w.GetKind() != "Namespace" {
logger.L().Debug("Setting namespace", helpers.String("kind", w.GetKind()), helpers.String("name", w.GetName()), helpers.String("namespace", namespaceName))
w.SetNamespace(namespaceName)
}
// Get container port
@@ -156,7 +161,7 @@ func (hsh *HostSensorHandler) applyYAML(ctx context.Context) error {
containers, err := w.GetContainers()
if err != nil {
if erra := hsh.tearDownNamespace(namespaceName); erra != nil {
logger.L().Ctx(ctx).Warning("failed to tear down namespace", helpers.Error(erra))
logger.L().Ctx(ctx).Warning(failedToTeardownNamespace, helpers.Error(erra))
}
return fmt.Errorf("container not found in DaemonSet: %v", err)
}
@@ -180,7 +185,7 @@ func (hsh *HostSensorHandler) applyYAML(ctx context.Context) error {
}
if e != nil {
if erra := hsh.tearDownNamespace(namespaceName); erra != nil {
logger.L().Ctx(ctx).Warning("failed to tear down namespace", helpers.Error(erra))
logger.L().Ctx(ctx).Warning(failedToTeardownNamespace, helpers.Error(erra))
}
return fmt.Errorf("failed to create/update YAML, reason: %v", e)
}
@@ -190,14 +195,14 @@ func (hsh *HostSensorHandler) applyYAML(ctx context.Context) error {
b, err := json.Marshal(newWorkload.GetObject())
if err != nil {
if erra := hsh.tearDownNamespace(namespaceName); erra != nil {
logger.L().Ctx(ctx).Warning("failed to tear down namespace", helpers.Error(erra))
logger.L().Ctx(ctx).Warning(failedToTeardownNamespace, helpers.Error(erra))
}
return fmt.Errorf("failed to Marshal YAML of DaemonSet, reason: %v", err)
}
var ds appsv1.DaemonSet
if err := json.Unmarshal(b, &ds); err != nil {
if erra := hsh.tearDownNamespace(namespaceName); erra != nil {
logger.L().Ctx(ctx).Warning("failed to tear down namespace", helpers.Error(erra))
logger.L().Ctx(ctx).Warning(failedToTeardownNamespace, helpers.Error(erra))
}
return fmt.Errorf("failed to Unmarshal YAML of DaemonSet, reason: %v", err)
}
@@ -228,7 +233,7 @@ func (hsh *HostSensorHandler) checkPodForEachNode() error {
hsh.podListLock.RLock()
podsMap := hsh.hostSensorPodNames
hsh.podListLock.RUnlock()
return fmt.Errorf("host-sensor pods number (%d) differ than nodes number (%d) after deadline exceeded. Kubescape will take data only from the pods below: %v",
return fmt.Errorf("host-scanner pods number (%d) differ than nodes number (%d) after deadline exceeded. Kubescape will take data only from the pods below: %v",
podsNum, len(nodesList.Items), podsMap)
}
time.Sleep(100 * time.Millisecond)
@@ -238,7 +243,7 @@ func (hsh *HostSensorHandler) checkPodForEachNode() error {
}
// initiating routine to keep pod list updated
func (hsh *HostSensorHandler) populatePodNamesToNodeNames(ctx context.Context) {
func (hsh *HostSensorHandler) populatePodNamesToNodeNames(ctx context.Context, log *LogsMap) {
go func() {
var watchRes watch.Interface
var err error
@@ -247,10 +252,10 @@ func (hsh *HostSensorHandler) populatePodNamesToNodeNames(ctx context.Context) {
LabelSelector: fmt.Sprintf("name=%s", hsh.daemonSet.Spec.Template.Labels["name"]),
})
if err != nil {
logger.L().Ctx(ctx).Warning("failed to watch over DaemonSet pods - are we missing watch pods permissions?", helpers.Error(err))
logger.L().Ctx(ctx).Warning(failedToWatchOverDaemonSetPods, helpers.Error(err))
}
if watchRes == nil {
logger.L().Ctx(ctx).Error("failed to watch over DaemonSet pods, will not be able to get host-sensor data")
logger.L().Ctx(ctx).Error("failed to watch over DaemonSet pods, will not be able to get host-scanner data")
return
}
@@ -259,12 +264,12 @@ func (hsh *HostSensorHandler) populatePodNamesToNodeNames(ctx context.Context) {
if !ok {
continue
}
go hsh.updatePodInListAtomic(ctx, eve.Type, pod)
go hsh.updatePodInListAtomic(ctx, eve.Type, pod, log)
}
}()
}
func (hsh *HostSensorHandler) updatePodInListAtomic(ctx context.Context, eventType watch.EventType, podObj *corev1.Pod) {
func (hsh *HostSensorHandler) updatePodInListAtomic(ctx context.Context, eventType watch.EventType, podObj *corev1.Pod, log *LogsMap) {
hsh.podListLock.Lock()
defer hsh.podListLock.Unlock()
@@ -285,10 +290,11 @@ func (hsh *HostSensorHandler) updatePodInListAtomic(ctx context.Context, eventTy
len(podObj.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchFields[0].Values) > 0 {
nodeName = podObj.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchFields[0].Values[0]
}
logger.L().Ctx(ctx).Warning("One host-sensor pod is unable to schedule on node. We will fail to collect the data from this node",
helpers.String("message", podObj.Status.Conditions[0].Message),
helpers.String("nodeName", nodeName),
helpers.String("podName", podObj.ObjectMeta.Name))
if !log.isDuplicated(oneHostSensorPodIsUnabledToSchedule) {
logger.L().Ctx(ctx).Warning(oneHostSensorPodIsUnabledToSchedule,
helpers.String("message", podObj.Status.Conditions[0].Message))
log.update(oneHostSensorPodIsUnabledToSchedule)
}
if nodeName != "" {
hsh.hostSensorUnscheduledPodNames[podObj.ObjectMeta.Name] = nodeName
}
@@ -301,14 +307,84 @@ func (hsh *HostSensorHandler) updatePodInListAtomic(ctx context.Context, eventTy
}
}
// tearDownHostScanner manages the host-scanner deletion.
func (hsh *HostSensorHandler) tearDownHostScanner(namespace string) error {
client := hsh.k8sObj.KubernetesClient
// delete host-scanner DaemonSet
err := client.AppsV1().
DaemonSets(namespace).
Delete(
hsh.k8sObj.Context,
hsh.daemonSet.Name,
metav1.DeleteOptions{
GracePeriodSeconds: &hsh.gracePeriod,
},
)
if err != nil {
return fmt.Errorf("failed to delete host-scanner DaemonSet: %v", err)
}
// wait for DaemonSet to be deleted
err = hsh.waitHostScannerDeleted(hsh.k8sObj.Context)
if err != nil {
return fmt.Errorf("failed to delete host-scanner DaemonSet: %v", err)
}
return nil
}
// tearDownNamespace manages the given namespace's deletion.
// First, it checks whether the namespace was already present before installing the host-scanner;
// in that case it skips the deletion.
// If it was not, it patches the namespace to remove the finalizers,
// and finally deletes it.
func (hsh *HostSensorHandler) tearDownNamespace(namespace string) error {
// if namespace was already present on kubernetes (before installing host-scanner),
// then we shouldn't delete it.
if hsh.namespaceWasPresent() {
return nil
}
if err := hsh.k8sObj.KubernetesClient.CoreV1().Namespaces().Delete(hsh.k8sObj.Context, namespace, metav1.DeleteOptions{GracePeriodSeconds: &hsh.gracePeriod}); err != nil {
return fmt.Errorf("failed to delete host-sensor namespace: %v", err)
// to make it more readable we store the object client in a variable
client := hsh.k8sObj.KubernetesClient
// prepare patch json to remove finalizers from namespace
patchData := `
[
{
"op": "replace",
"path": "/metadata/finalizers",
"value": []
}
]
`
// patch namespace object removing finalizers
_, err := client.CoreV1().
Namespaces().
Patch(
hsh.k8sObj.Context,
namespace,
types.JSONPatchType,
[]byte(patchData),
metav1.PatchOptions{},
)
if err != nil {
return fmt.Errorf("failed to remove finalizers from Namespace: %v", err)
}
// delete namespace object
err = client.CoreV1().
Namespaces().
Delete(
hsh.k8sObj.Context,
namespace,
metav1.DeleteOptions{
GracePeriodSeconds: &hsh.gracePeriod,
},
)
if err != nil {
return fmt.Errorf("failed to delete %s Namespace: %v", namespace, err)
}
return nil
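
The finalizer-removal patch above is a plain RFC 6902 JSON Patch document; an equivalent construction from a typed slice (a sketch, not part of this changeset; names are illustrative) would look like:

package hostsensorutils

import "encoding/json"

// jsonPatchOp mirrors one RFC 6902 operation; building the patch from a
// typed slice avoids hand-written JSON strings.
type jsonPatchOp struct {
	Op    string      `json:"op"`
	Path  string      `json:"path"`
	Value interface{} `json:"value"`
}

func removeFinalizersPatch() ([]byte, error) {
	return json.Marshal([]jsonPatchOp{
		{Op: "replace", Path: "/metadata/finalizers", Value: []string{}},
	})
}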
@@ -317,14 +393,13 @@ func (hsh *HostSensorHandler) tearDownNamespace(namespace string) error {
func (hsh *HostSensorHandler) TearDown() error {
namespace := hsh.GetNamespace()
// delete DaemonSet
if err := hsh.k8sObj.KubernetesClient.AppsV1().DaemonSets(hsh.GetNamespace()).Delete(hsh.k8sObj.Context, hsh.daemonSet.Name, metav1.DeleteOptions{GracePeriodSeconds: &hsh.gracePeriod}); err != nil {
return fmt.Errorf("failed to delete host-sensor daemonset: %v", err)
if err := hsh.tearDownHostScanner(namespace); err != nil {
return fmt.Errorf("failed to delete host-scanner DaemonSet: %v", err)
}
// delete Namespace
if err := hsh.tearDownNamespace(namespace); err != nil {
return fmt.Errorf("failed to delete host-sensor daemonset: %v", err)
return fmt.Errorf("failed to delete host-scanner Namespace: %v", err)
}
// TODO: wait for termination? may take up to 120 seconds!!!
return nil
}
@@ -344,3 +419,32 @@ func loadHostSensorFromFile(hostSensorYAMLFile string) (string, error) {
// TODO - Add file validation
return string(dat), err
}
// waitHostScannerDeleted watches for host-scanner deletion.
// If it fails, it returns an error.
func (hsh *HostSensorHandler) waitHostScannerDeleted(ctx context.Context) error {
labelSelector := fmt.Sprintf("name=%s", hsh.daemonSet.Name)
opts := metav1.ListOptions{
TypeMeta: metav1.TypeMeta{},
LabelSelector: labelSelector,
FieldSelector: "",
}
watcher, err := hsh.k8sObj.KubernetesClient.CoreV1().
Pods(hsh.daemonSet.Namespace).
Watch(ctx, opts)
if err != nil {
return err
}
defer watcher.Stop()
for {
select {
case event := <-watcher.ResultChan():
if event.Type == watch.Deleted {
return nil
}
case <-ctx.Done():
return nil
}
}
}
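
For readers unfamiliar with the client-go watch idiom used here, the core of waitHostScannerDeleted reduces to the pattern below (a sketch; like the function above, it deliberately returns nil when the context is cancelled):

package hostsensorutils

import (
	"context"

	"k8s.io/apimachinery/pkg/watch"
)

// waitDeleted drains watch events until a Deleted event arrives or the
// context ends (treated as a non-error, matching the code above).
func waitDeleted(ctx context.Context, watcher watch.Interface) error {
	defer watcher.Stop()
	for {
		select {
		case event := <-watcher.ResultChan():
			if event.Type == watch.Deleted {
				return nil
			}
		case <-ctx.Done():
			return nil
		}
	}
}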

View File

@@ -34,14 +34,14 @@ func TestHostSensorHandler(t *testing.T) {
})
t.Run("should return namespace", func(t *testing.T) {
require.Equal(t, "kubescape-host-scanner", h.GetNamespace())
require.Equal(t, "kubescape", h.GetNamespace())
})
t.Run("should collect resources from pods - happy path", func(t *testing.T) {
envelope, status, err := h.CollectResources(ctx)
require.NoError(t, err)
require.Len(t, envelope, 11*2) // has cloud provider, no control plane requested
require.Len(t, envelope, 9*2) // has cloud provider, no control plane requested
require.Len(t, status, 0)
foundControl, foundProvider := false, false
@@ -91,7 +91,7 @@ func TestHostSensorHandler(t *testing.T) {
envelope, status, err := h.CollectResources(ctx)
require.NoError(t, err)
require.Len(t, envelope, 12*2) // has empty cloud provider, has control plane info
require.Len(t, envelope, 10*2) // has empty cloud provider, has control plane info
require.Len(t, status, 0)
foundControl, foundProvider := false, false
@@ -141,37 +141,6 @@ func TestHostSensorHandler(t *testing.T) {
})
})
t.Run("should build host sensor with error in response from /kubeletConfigurations", func(t *testing.T) {
k8s := NewKubernetesApiMock(WithNode(mockNode1()),
WithPod(mockPod1()),
WithPod(mockPod2()),
WithResponses(mockResponsesNoCloudProvider()),
WithErrorResponse(RestURL{"http", "pod1", "7888", "/kubeletConfigurations"}), // this endpoint will return an error from this pod
)
h, err := NewHostSensorHandler(k8s, "")
require.NoError(t, err)
require.NotNil(t, h)
t.Run("should initialize host sensor", func(t *testing.T) {
require.NoError(t, h.Init(ctx))
w, err := k8s.KubernetesClient.CoreV1().Pods(h.daemonSet.Namespace).Watch(ctx, metav1.ListOptions{})
require.NoError(t, err)
w.Stop()
require.Len(t, h.hostSensorPodNames, 2)
})
t.Run("should collect resources from pods, with some errors", func(t *testing.T) {
envelope, status, err := h.CollectResources(ctx)
require.NoError(t, err)
require.Len(t, envelope, 12*2-1) // one resource is missing
require.Len(t, status, 0) // error is not reported in status: this is due to the worker pool not bubbling up errors
})
})
t.Run("should FAIL to build host sensor because there are no nodes", func(t *testing.T) {
h, err := NewHostSensorHandler(NewKubernetesApiMock(), "")
require.Error(t, err)

View File

@@ -10,12 +10,9 @@ import (
"sync"
logger "github.com/kubescape/go-logger"
"github.com/kubescape/go-logger/helpers"
"github.com/kubescape/k8s-interface/k8sinterface"
"github.com/kubescape/opa-utils/objectsenvelopes/hostsensor"
"github.com/kubescape/opa-utils/reporthandling/apis"
"sigs.k8s.io/yaml"
)
// getPodList clones the internal list of pods being watched as a map of pod names.
@@ -87,12 +84,17 @@ func (hsh *HostSensorHandler) sendAllPodsHTTPGETRequest(ctx context.Context, pat
podList := hsh.getPodList()
res := make([]hostsensor.HostSensorDataEnvelope, 0, len(podList))
var wg sync.WaitGroup
// initialization of the channels
hsh.workerPool.init(len(podList))
// log is used to avoid log duplication
// coming from the different host-scanner instances
log := NewLogCoupling()
hsh.workerPool.hostSensorApplyJobs(podList, path, requestKind)
hsh.workerPool.hostSensorGetResults(&res)
hsh.workerPool.createWorkerPool(ctx, hsh, &wg)
hsh.workerPool.createWorkerPool(ctx, hsh, &wg, log)
hsh.workerPool.waitForDone(&wg)
return res, nil
@@ -165,27 +167,6 @@ func (hsh *HostSensorHandler) getCloudProviderInfo(ctx context.Context) ([]hosts
return hsh.sendAllPodsHTTPGETRequest(ctx, "/cloudProviderInfo", CloudProviderInfo)
}
// getKubeletCommandLine returns the list of kubelet command lines.
func (hsh *HostSensorHandler) getKubeletCommandLine(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
// loop over pods and port-forward it to each of them
resps, err := hsh.sendAllPodsHTTPGETRequest(ctx, "/kubeletCommandLine", KubeletCommandLine)
if err != nil {
return resps, err
}
for resp := range resps {
var data = make(map[string]interface{})
data["fullCommand"] = string(resps[resp].Data)
resBytesMarshal, err := json.Marshal(data)
// TODO catch error
if err == nil {
resps[resp].Data = stdjson.RawMessage(resBytesMarshal)
}
}
return resps, nil
}
// getCNIInfo returns the list of CNI metadata
func (hsh *HostSensorHandler) getCNIInfo(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
// loop over pods and port-forward it to each of them
@@ -204,22 +185,6 @@ func (hsh *HostSensorHandler) getOsReleaseFile(ctx context.Context) ([]hostsenso
return hsh.sendAllPodsHTTPGETRequest(ctx, "/osRelease", "OsReleaseFile")
}
// getKubeletConfigurations returns the list of kubelet configurations.
func (hsh *HostSensorHandler) getKubeletConfigurations(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
// loop over pods and port-forward it to each of them
res, err := hsh.sendAllPodsHTTPGETRequest(ctx, "/kubeletConfigurations", "KubeletConfiguration") // empty kind, will be overridden
for resIdx := range res {
jsonBytes, ery := yaml.YAMLToJSON(res[resIdx].Data)
if ery != nil {
logger.L().Ctx(ctx).Warning("failed to convert kubelet configurations from yaml to json", helpers.Error(ery))
continue
}
res[resIdx].SetData(jsonBytes)
}
return res, err
}
// hasCloudProviderInfo iterates over the []hostsensor.HostSensorDataEnvelope list to find info about the cloud provider.
//
// If information is found, it returns true; otherwise it returns false.
@@ -259,14 +224,6 @@ func (hsh *HostSensorHandler) CollectResources(ctx context.Context) ([]hostsenso
Query func(context.Context) ([]hostsensor.HostSensorDataEnvelope, error)
}{
// queries to the deployed host-scanner
{
Resource: KubeletConfiguration,
Query: hsh.getKubeletConfigurations,
},
{
Resource: KubeletCommandLine,
Query: hsh.getKubeletCommandLine,
},
{
Resource: OsReleaseFile,
Query: hsh.getOsReleaseFile,

View File

@@ -43,22 +43,23 @@ func (wp *workerPool) init(noOfPods ...int) {
}
// The worker takes a job out of the chan, executes the request, and pushes the result to the results chan
func (wp *workerPool) hostSensorWorker(ctx context.Context, hsh *HostSensorHandler, wg *sync.WaitGroup) {
func (wp *workerPool) hostSensorWorker(ctx context.Context, hsh *HostSensorHandler, wg *sync.WaitGroup, log *LogsMap) {
defer wg.Done()
for job := range wp.jobs {
hostSensorDataEnvelope, err := hsh.getResourcesFromPod(job.podName, job.nodeName, job.requestKind, job.path)
if err != nil {
logger.L().Ctx(ctx).Warning("failed to get data", helpers.String("path", job.path), helpers.String("podName", job.podName), helpers.Error(err))
if err != nil && !log.isDuplicated(failedToGetData) {
logger.L().Ctx(ctx).Warning(failedToGetData, helpers.String("path", job.path), helpers.Error(err))
log.update(failedToGetData)
continue
}
wp.results <- hostSensorDataEnvelope
}
}
func (wp *workerPool) createWorkerPool(ctx context.Context, hsh *HostSensorHandler, wg *sync.WaitGroup) {
func (wp *workerPool) createWorkerPool(ctx context.Context, hsh *HostSensorHandler, wg *sync.WaitGroup, log *LogsMap) {
for i := 0; i < noOfWorkers; i++ {
wg.Add(1)
go wp.hostSensorWorker(ctx, hsh, wg)
go wp.hostSensorWorker(ctx, hsh, wg, log)
}
}
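
createWorkerPool above follows the standard fixed-worker fan-out/fan-in shape; a self-contained sketch of the same pattern, with generic string jobs standing in for the real host-scanner HTTP requests:

package hostsensorutils

import "sync"

// runPool starts a fixed number of workers that drain the jobs channel and
// push results; the caller must close jobs so the workers can exit.
func runPool(workers int, jobs <-chan string, results chan<- string) {
	var wg sync.WaitGroup
	for i := 0; i < workers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for job := range jobs {
				results <- "done: " + job // stand-in for the real HTTP GET
			}
		}()
	}
	wg.Wait()
	close(results)
}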

View File

@@ -0,0 +1,51 @@
package hostsensorutils
import "sync"
type LogsMap struct {
// use sync.Mutex to avoid read/write
// access issues in multi-threaded environments.
sync.Mutex
usedLogs map[string]int
}
// NewLogCoupling returns an empty LogsMap struct ready to be used.
func NewLogCoupling() *LogsMap {
return &LogsMap{
usedLogs: make(map[string]int),
}
}
// update adds the logContent to the internal map
// and sets its occurrence to 1 (if it has never been used before),
// or increments its value otherwise.
func (lm *LogsMap) update(logContent string) {
lm.Lock()
_, ok := lm.usedLogs[logContent]
if !ok {
lm.usedLogs[logContent] = 1
} else {
lm.usedLogs[logContent]++
}
lm.Unlock()
}
// isDuplicated checks whether logContent is already present in the internal map.
// It returns true if logContent already exists, false otherwise.
func (lm *LogsMap) isDuplicated(logContent string) bool {
lm.Lock()
_, ok := lm.usedLogs[logContent]
lm.Unlock()
return ok
}
// getOccurrence retrieves the number of times logContent has been used.
func (lm *LogsMap) getOccurrence(logContent string) int {
lm.Lock()
occurrence, ok := lm.usedLogs[logContent]
lm.Unlock()
if !ok {
return 0
}
return occurrence
}
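
A short usage sketch of LogsMap as a once-only warning gate, mirroring how the worker above guards failedToGetData (the helper and message here are illustrative):

package hostsensorutils

import "fmt"

// warnOnce emits a given warning only the first time it is seen.
func warnOnce(lm *LogsMap, msg string) {
	if !lm.isDuplicated(msg) {
		fmt.Println("warning:", msg) // stand-in for logger.L().Warning
		lm.update(msg)
	}
}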

View File

@@ -0,0 +1,100 @@
package hostsensorutils
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestLogsMap_Update(t *testing.T) {
t.Parallel()
tests := []struct {
name string
logs []string
expectedLog string
expected int
}{
{
name: "test_1",
logs: []string{
"log_1",
"log_1",
"log_1",
},
expectedLog: "log_1",
expected: 3,
},
{
name: "test_2",
logs: []string{},
expectedLog: "log_2",
expected: 0,
},
{
name: "test_3",
logs: []string{
"log_3",
},
expectedLog: "log_3",
expected: 1,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
lm := NewLogCoupling()
for _, log := range tt.logs {
lm.update(log)
}
if !assert.Equal(t, lm.getOccurrence(tt.expectedLog), tt.expected) {
t.Log("log occurrences are different")
}
})
}
}
func TestLogsMap_IsDuplicated(t *testing.T) {
t.Parallel()
tests := []struct {
name string
logs []string
expectedLog string
expected bool
}{
{
name: "test_1",
logs: []string{
"log_1",
"log_1",
"log_1",
},
expectedLog: "log_1",
expected: true,
},
{
name: "test_2",
logs: []string{
"log_1",
"log_1",
},
expectedLog: "log_2",
expected: false,
},
{
name: "test_3",
logs: []string{},
expectedLog: "log_3",
expected: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
lm := NewLogCoupling()
for _, log := range tt.logs {
lm.update(log)
}
if !assert.Equal(t, lm.isDuplicated(tt.expectedLog), tt.expected) {
t.Log("duplication value differ from expected")
}
})
}
}

View File

@@ -0,0 +1,10 @@
package hostsensorutils
// messages used for warnings
var (
failedToGetData = "failed to get data"
failedToTeardownNamespace = "failed to teardown Namespace"
oneHostSensorPodIsUnabledToSchedule = "One host-sensor pod is unable to schedule on a node. We will fail to collect data from this node"
failedToWatchOverDaemonSetPods = "failed to watch over DaemonSet pods"
failedToValidateHostSensorPodStatus = "failed to validate host-scanner pods status"
)

View File

@@ -4,7 +4,7 @@ import (
"context"
"github.com/google/go-containerregistry/pkg/name"
"github.com/sigstore/cosign/pkg/cosign"
"github.com/sigstore/cosign/v2/pkg/cosign"
)
func has_signature(img string) bool {

View File

@@ -6,12 +6,12 @@ import (
"fmt"
"github.com/google/go-containerregistry/pkg/name"
"github.com/sigstore/cosign/cmd/cosign/cli/options"
"github.com/sigstore/cosign/cmd/cosign/cli/sign"
"github.com/sigstore/cosign/pkg/cosign"
"github.com/sigstore/cosign/pkg/cosign/pkcs11key"
ociremote "github.com/sigstore/cosign/pkg/oci/remote"
sigs "github.com/sigstore/cosign/pkg/signature"
"github.com/sigstore/cosign/v2/cmd/cosign/cli/options"
"github.com/sigstore/cosign/v2/cmd/cosign/cli/sign"
"github.com/sigstore/cosign/v2/pkg/cosign"
"github.com/sigstore/cosign/v2/pkg/cosign/pkcs11key"
ociremote "github.com/sigstore/cosign/v2/pkg/oci/remote"
sigs "github.com/sigstore/cosign/v2/pkg/signature"
)
// VerifyCommand verifies a signature on a supplied container image

View File

@@ -1,49 +1,42 @@
package opaprocessor
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
)
func Test_verify(t *testing.T) {
type args struct {
img string
key string
}
tests := []struct {
name string
args args
want bool
wantErr assert.ErrorAssertionFunc
}{
{
"valid signature",
args{
img: "hisu/cosign-tests:signed",
key: "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEGnMCUU0jGe6r4mPsPuyTXf61PE4e\nNwB/31SvUMmnoyd/1UxSqd+MRPXPU6pcub4k6E9G9SprVCuf6Sydcbyiqw==\n-----END PUBLIC KEY-----",
},
true,
assert.NoError,
},
{
"no signature",
args{
img: "hisu/cosign-tests:unsigned",
key: "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEGnMCUU0jGe6r4mPsPuyTXf61PE4e\nNwB/31SvUMmnoyd/1UxSqd+MRPXPU6pcub4k6E9G9SprVCuf6Sydcbyiqw==\n-----END PUBLIC KEY-----",
},
false,
assert.Error,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := verify(tt.args.img, tt.args.key)
if !tt.wantErr(t, err, fmt.Sprintf("verify(%v, %v)", tt.args.img, tt.args.key)) {
return
}
assert.Equalf(t, tt.want, got, "verify(%v, %v)", tt.args.img, tt.args.key)
})
}
}
// func Test_verify(t *testing.T) {
// type args struct {
// img string
// key string
// }
// tests := []struct {
// name string
// args args
// want bool
// wantErr assert.ErrorAssertionFunc
// }{
// {
// "valid signature",
// args{
// img: "hisu/cosign-tests:signed",
// key: "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEGnMCUU0jGe6r4mPsPuyTXf61PE4e\nNwB/31SvUMmnoyd/1UxSqd+MRPXPU6pcub4k6E9G9SprVCuf6Sydcbyiqw==\n-----END PUBLIC KEY-----",
// },
// true,
// assert.NoError,
// },
// {
// "no signature",
// args{
// img: "hisu/cosign-tests:unsigned",
// key: "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEGnMCUU0jGe6r4mPsPuyTXf61PE4e\nNwB/31SvUMmnoyd/1UxSqd+MRPXPU6pcub4k6E9G9SprVCuf6Sydcbyiqw==\n-----END PUBLIC KEY-----",
// },
// false,
// assert.Error,
// },
// }
// for _, tt := range tests {
// t.Run(tt.name, func(t *testing.T) {
// got, err := verify(tt.args.img, tt.args.key)
// if !tt.wantErr(t, err, fmt.Sprintf("verify(%v, %v)", tt.args.img, tt.args.key)) {
// return
// }
// assert.Equalf(t, tt.want, got, "verify(%v, %v)", tt.args.img, tt.args.key)
// })
// }
// }

View File

@@ -0,0 +1,13 @@
package opaprocessor
import (
"github.com/docker/distribution/reference"
)
func normalize_image_name(img string) (string, error) {
name, err := reference.ParseNormalizedNamed(img)
if err != nil {
return "", err
}
return name.String(), nil
}

View File

@@ -0,0 +1,28 @@
package opaprocessor
import (
"testing"
"github.com/stretchr/testify/assert"
)
func Test_normalize_name(t *testing.T) {
tests := []struct {
name string
img string
want string
}{
{
name: "Normalize image name",
img: "nginx",
want: "docker.io/library/nginx",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
name, _ := normalize_image_name(tt.img)
assert.Equal(t, tt.want, name, tt.name)
})
}
}

View File

@@ -3,9 +3,7 @@ package opaprocessor
import (
"context"
"fmt"
"runtime"
"sync"
"time"
"github.com/armosec/armoapi-go/armotypes"
logger "github.com/kubescape/go-logger"
@@ -23,7 +21,6 @@ import (
"github.com/open-policy-agent/opa/rego"
"github.com/open-policy-agent/opa/storage"
"go.opentelemetry.io/otel"
"golang.org/x/sync/errgroup"
)
const ScoreConfigPath = "/resources/config"
@@ -58,8 +55,9 @@ func NewOPAProcessor(sessionObj *cautils.OPASessionObj, regoDependenciesData *re
}
}
func (opap *OPAProcessor) ProcessRulesListener(ctx context.Context, progressListener IJobProgressNotificationClient) error {
opap.OPASessionObj.AllPolicies = ConvertFrameworksToPolicies(opap.Policies, cautils.BuildNumber)
func (opap *OPAProcessor) ProcessRulesListener(ctx context.Context, progressListener IJobProgressNotificationClient, ScanInfo *cautils.ScanInfo) error {
scanningScope := cautils.GetScanningScope(ScanInfo)
opap.OPASessionObj.AllPolicies = ConvertFrameworksToPolicies(opap.Policies, cautils.BuildNumber, opap.ExcludedRules, scanningScope)
ConvertFrameworksToSummaryDetails(&opap.Report.SummaryDetails, opap.Policies, opap.OPASessionObj.AllPolicies)
@@ -86,130 +84,56 @@ func (opap *OPAProcessor) Process(ctx context.Context, policies *cautils.Policie
opap.loggerStartScanning()
defer opap.loggerDoneScanning()
cautils.StartSpinner()
defer cautils.StopSpinner()
if progressListener != nil {
progressListener.Start(len(policies.Controls))
defer progressListener.Stop()
}
// results to collect from controls being processed in parallel
type results struct {
resourceAssociatedControl map[string]resourcesresults.ResourceAssociatedControl
allResources map[string]workloadinterface.IMetadata
}
resultsChan := make(chan results)
controlsGroup, groupCtx := errgroup.WithContext(ctx)
controlsGroup.SetLimit(2 * runtime.NumCPU())
allResources := make(map[string]workloadinterface.IMetadata, max(len(opap.AllResources), heuristicAllocResources))
for k, v := range opap.AllResources {
allResources[k] = v
}
var resultsCollector sync.WaitGroup
resultsCollector.Add(1)
go func() {
// collects the results from processing all rules for all controls.
//
// NOTE: since policies.Controls is a map, iterating over it doesn't guarantee any
// specific ordering. Therefore, if a conflict is possible on resources, e.g. 2 rules,
// referencing the same resource, the eventual result of the merge is not guaranteed to be
// stable. This behavior is consistent with the previous (unparallelized) processing.
defer resultsCollector.Done()
for result := range resultsChan {
// merge both maps in parallel
var merger sync.WaitGroup
merger.Add(1)
go func() {
// merge all resources
defer merger.Done()
for k, v := range result.allResources {
allResources[k] = v
}
}()
merger.Add(1)
go func() {
defer merger.Done()
// update resources with latest results
for resourceID, controlResult := range result.resourceAssociatedControl {
result, found := opap.ResourcesResult[resourceID]
if !found {
result = resourcesresults.Result{ResourceID: resourceID}
}
result.AssociatedControls = append(result.AssociatedControls, controlResult)
opap.ResourcesResult[resourceID] = result
}
}()
merger.Wait()
}
}()
// processes rules for all controls in parallel
for _, controlToPin := range policies.Controls {
for _, toPin := range policies.Controls {
if progressListener != nil {
progressListener.ProgressJob(1, fmt.Sprintf("Control: %s", controlToPin.ControlID))
progressListener.ProgressJob(1, fmt.Sprintf("Control: %s", toPin.ControlID))
}
control := controlToPin
control := toPin
controlsGroup.Go(func() error {
resourceAssociatedControl, allResourcesFromControl, err := opap.processControl(groupCtx, &control)
if err != nil {
logger.L().Ctx(groupCtx).Warning(err.Error())
resourcesAssociatedControl, err := opap.processControl(ctx, &control)
if err != nil {
logger.L().Ctx(ctx).Warning(err.Error())
}
if len(resourcesAssociatedControl) == 0 {
continue
}
// update resources with latest results
for resourceID, controlResult := range resourcesAssociatedControl {
if _, ok := opap.ResourcesResult[resourceID]; !ok {
opap.ResourcesResult[resourceID] = resourcesresults.Result{ResourceID: resourceID}
}
select {
case resultsChan <- results{
resourceAssociatedControl: resourceAssociatedControl,
allResources: allResourcesFromControl,
}:
case <-groupCtx.Done(): // interrupted (NOTE: at this moment, this never happens since errors are muted)
return groupCtx.Err()
}
return nil
})
t := opap.ResourcesResult[resourceID]
t.AssociatedControls = append(t.AssociatedControls, controlResult)
opap.ResourcesResult[resourceID] = t
}
}
// wait for all results from all rules to be collected
err := controlsGroup.Wait()
close(resultsChan)
resultsCollector.Wait()
if err != nil {
return err
}
// merge the final result in resources
for k, v := range allResources {
opap.AllResources[k] = v
}
opap.Report.ReportGenerationTime = time.Now().UTC()
return nil
}
func (opap *OPAProcessor) loggerStartScanning() {
targetScan := opap.OPASessionObj.Metadata.ScanMetadata.ScanningTarget
if reporthandlingv2.Cluster == targetScan {
logger.L().Info("Scanning", helpers.String(targetScan.String(), cautils.ClusterName))
logger.L().Start("Scanning", helpers.String(targetScan.String(), cautils.ClusterName))
} else {
logger.L().Info("Scanning " + targetScan.String())
logger.L().Start("Scanning " + targetScan.String())
}
}
func (opap *OPAProcessor) loggerDoneScanning() {
targetScan := opap.OPASessionObj.Metadata.ScanMetadata.ScanningTarget
if reporthandlingv2.Cluster == targetScan {
logger.L().Success("Done scanning", helpers.String(targetScan.String(), cautils.ClusterName))
logger.L().StopSuccess("Done scanning", helpers.String(targetScan.String(), cautils.ClusterName))
} else {
logger.L().Success("Done scanning " + targetScan.String())
logger.L().StopSuccess("Done scanning " + targetScan.String())
}
}
@@ -217,22 +141,16 @@ func (opap *OPAProcessor) loggerDoneScanning() {
//
// NOTE: the call to processControl no longer mutates the state of the current OPAProcessor instance,
// but returns a map instead, to be merged by the caller.
func (opap *OPAProcessor) processControl(ctx context.Context, control *reporthandling.Control) (map[string]resourcesresults.ResourceAssociatedControl, map[string]workloadinterface.IMetadata, error) {
resourcesAssociatedControl := make(map[string]resourcesresults.ResourceAssociatedControl, heuristicAllocControls)
allResources := make(map[string]workloadinterface.IMetadata, heuristicAllocResources)
func (opap *OPAProcessor) processControl(ctx context.Context, control *reporthandling.Control) (map[string]resourcesresults.ResourceAssociatedControl, error) {
resourcesAssociatedControl := make(map[string]resourcesresults.ResourceAssociatedControl)
for i := range control.Rules {
resourceAssociatedRule, allResourcesFromRule, err := opap.processRule(ctx, &control.Rules[i], control.FixedInput)
resourceAssociatedRule, err := opap.processRule(ctx, &control.Rules[i], control.FixedInput)
if err != nil {
logger.L().Ctx(ctx).Warning(err.Error())
continue
}
// merge all resources for all processed rules in this control
for k, v := range allResourcesFromRule {
allResources[k] = v
}
// append failed rules to controls
for resourceID, ruleResponse := range resourceAssociatedRule {
var controlResult resourcesresults.ResourceAssociatedControl
@@ -254,85 +172,101 @@ func (opap *OPAProcessor) processControl(ctx context.Context, control *reporthan
}
}
return resourcesAssociatedControl, allResources, nil
return resourcesAssociatedControl, nil
}
// processRule processes a single policy rule, with some extra fixed control inputs.
//
// NOTE: processRule no longer mutates the state of the current OPAProcessor instance,
// and returns a map instead, to be merged by the caller.
func (opap *OPAProcessor) processRule(ctx context.Context, rule *reporthandling.PolicyRule, fixedControlInputs map[string][]string) (map[string]*resourcesresults.ResourceAssociatedRule, map[string]workloadinterface.IMetadata, error) {
func (opap *OPAProcessor) processRule(ctx context.Context, rule *reporthandling.PolicyRule, fixedControlInputs map[string][]string) (map[string]*resourcesresults.ResourceAssociatedRule, error) {
resources := make(map[string]*resourcesresults.ResourceAssociatedRule)
ruleRegoDependenciesData := opap.makeRegoDeps(rule.ConfigInputs, fixedControlInputs)
inputResources, err := reporthandling.RegoResourcesAggregator(
rule,
getAllSupportedObjects(opap.K8SResources, opap.ArmoResource, opap.AllResources, rule), // NOTE: this uses the initial snapshot of AllResources
)
if err != nil {
return nil, nil, fmt.Errorf("error getting aggregated k8sObjects: %w", err)
}
if len(inputResources) == 0 {
return nil, nil, nil // no resources found for testing
}
inputRawResources := workloadinterface.ListMetaToMap(inputResources)
// the failed resources are a subgroup of the enumeratedData, so we store the enumeratedData like it was the input data
enumeratedData, err := opap.enumerateData(ctx, rule, inputRawResources)
if err != nil {
return nil, nil, err
}
inputResources = objectsenvelopes.ListMapToMeta(enumeratedData)
resources := make(map[string]*resourcesresults.ResourceAssociatedRule, len(inputResources))
allResources := make(map[string]workloadinterface.IMetadata, len(inputResources))
for i, inputResource := range inputResources {
resources[inputResource.GetID()] = &resourcesresults.ResourceAssociatedRule{
Name: rule.Name,
ControlConfigurations: ruleRegoDependenciesData.PostureControlInputs,
Status: apis.StatusPassed,
resourcesPerNS := getAllSupportedObjects(opap.K8SResources, opap.ExternalResources, opap.AllResources, rule)
for i := range resourcesPerNS {
resourceToScan := resourcesPerNS[i]
if _, ok := resourcesPerNS[clusterScope]; ok && i != clusterScope {
resourceToScan = append(resourceToScan, resourcesPerNS[clusterScope]...)
}
inputResources, err := reporthandling.RegoResourcesAggregator(
rule,
resourceToScan, // NOTE: this uses the initial snapshot of AllResources
)
if err != nil {
continue
}
allResources[inputResource.GetID()] = inputResources[i]
}
ruleResponses, err := opap.runOPAOnSingleRule(ctx, rule, inputRawResources, ruleData, ruleRegoDependenciesData)
if err != nil {
return resources, allResources, err
}
if len(inputResources) == 0 {
continue // no resources found for testing
}
// ruleResponse to ruleResult
for _, ruleResponse := range ruleResponses {
failedResources := objectsenvelopes.ListMapToMeta(ruleResponse.GetFailedResources())
for _, failedResource := range failedResources {
var ruleResult *resourcesresults.ResourceAssociatedRule
if r, found := resources[failedResource.GetID()]; found {
ruleResult = r
} else {
ruleResult = &resourcesresults.ResourceAssociatedRule{
Paths: make([]armotypes.PosturePaths, 0, len(ruleResponse.FailedPaths)+len(ruleResponse.FixPaths)+1),
inputRawResources := workloadinterface.ListMetaToMap(inputResources)
// the failed resources are a subgroup of the enumeratedData, so we store the enumeratedData like it was the input data
enumeratedData, err := opap.enumerateData(ctx, rule, inputRawResources)
if err != nil {
continue
}
inputResources = objectsenvelopes.ListMapToMeta(enumeratedData)
for i, inputResource := range inputResources {
resources[inputResource.GetID()] = &resourcesresults.ResourceAssociatedRule{
Name: rule.Name,
ControlConfigurations: ruleRegoDependenciesData.PostureControlInputs,
Status: apis.StatusPassed,
}
opap.AllResources[inputResource.GetID()] = inputResources[i]
}
ruleResponses, err := opap.runOPAOnSingleRule(ctx, rule, inputRawResources, ruleData, ruleRegoDependenciesData)
if err != nil {
continue
// return resources, allResources, err
}
// ruleResponse to ruleResult
for _, ruleResponse := range ruleResponses {
failedResources := objectsenvelopes.ListMapToMeta(ruleResponse.GetFailedResources())
for _, failedResource := range failedResources {
var ruleResult *resourcesresults.ResourceAssociatedRule
if r, found := resources[failedResource.GetID()]; found {
ruleResult = r
} else {
ruleResult = &resourcesresults.ResourceAssociatedRule{
Paths: make([]armotypes.PosturePaths, 0, len(ruleResponse.FailedPaths)+len(ruleResponse.FixPaths)+1),
}
}
}
ruleResult.SetStatus(apis.StatusFailed, nil)
for _, failedPath := range ruleResponse.FailedPaths {
ruleResult.Paths = append(ruleResult.Paths, armotypes.PosturePaths{FailedPath: failedPath})
}
ruleResult.SetStatus(apis.StatusFailed, nil)
for _, failedPath := range ruleResponse.FailedPaths {
ruleResult.Paths = append(ruleResult.Paths, armotypes.PosturePaths{FailedPath: failedPath})
}
for _, fixPath := range ruleResponse.FixPaths {
ruleResult.Paths = append(ruleResult.Paths, armotypes.PosturePaths{FixPath: fixPath})
}
for _, fixPath := range ruleResponse.FixPaths {
ruleResult.Paths = append(ruleResult.Paths, armotypes.PosturePaths{FixPath: fixPath})
}
if ruleResponse.FixCommand != "" {
ruleResult.Paths = append(ruleResult.Paths, armotypes.PosturePaths{FixCommand: ruleResponse.FixCommand})
}
if ruleResponse.FixCommand != "" {
ruleResult.Paths = append(ruleResult.Paths, armotypes.PosturePaths{FixCommand: ruleResponse.FixCommand})
}
// if ruleResponse has relatedObjects, add it to ruleResult
if len(ruleResponse.RelatedObjects) > 0 {
for _, relatedObject := range ruleResponse.RelatedObjects {
wl := objectsenvelopes.NewObject(relatedObject.Object)
if wl != nil {
ruleResult.RelatedResourcesIDs = append(ruleResult.RelatedResourcesIDs, wl.GetID())
}
}
}
resources[failedResource.GetID()] = ruleResult
resources[failedResource.GetID()] = ruleResult
}
}
}
return resources, allResources, nil
return resources, nil
}
func (opap *OPAProcessor) runOPAOnSingleRule(ctx context.Context, rule *reporthandling.PolicyRule, k8sObjects []map[string]interface{}, getRuleData func(*reporthandling.PolicyRule) string, ruleRegoDependenciesData resources.RegoDependenciesData) ([]reporthandling.RuleResponse, error) {
@@ -355,6 +289,7 @@ func (opap *OPAProcessor) runRegoOnK8s(ctx context.Context, rule *reporthandling
// register signature verification methods for the OPA ast engine (since these are package level symbols, we do it only once)
rego.RegisterBuiltin2(cosignVerifySignatureDeclaration, cosignVerifySignatureDefinition)
rego.RegisterBuiltin1(cosignHasSignatureDeclaration, cosignHasSignatureDefinition)
rego.RegisterBuiltin1(imageNameNormalizeDeclaration, imageNameNormalizeDefinition)
})
modules[rule.Name] = getRuleData(rule)
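
For context on the builtin registrations above, a minimal sketch of the OPA custom-builtin shape (the builtin name and body here are illustrative, not the project's):

package opaprocessor

import (
	"github.com/open-policy-agent/opa/ast"
	"github.com/open-policy-agent/opa/rego"
	"github.com/open-policy-agent/opa/types"
)

// registerExampleBuiltin registers a string->string builtin: a declaration
// (name plus type signature) paired with an implementation over AST terms.
func registerExampleBuiltin() {
	rego.RegisterBuiltin1(
		&rego.Function{
			Name: "example.echo",
			Decl: types.NewFunction(types.Args(types.S), types.S),
		},
		func(_ rego.BuiltinContext, a *ast.Term) (*ast.Term, error) {
			s, ok := a.Value.(ast.String)
			if !ok {
				return nil, nil // non-string input: builtin is undefined
			}
			return ast.StringTerm(string(s)), nil
		},
	)
}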

View File

@@ -1,23 +1,176 @@
package opaprocessor
import (
"archive/zip"
"context"
_ "embed"
"encoding/json"
"fmt"
"io"
"os"
"runtime"
"testing"
"time"
"github.com/armosec/armoapi-go/armotypes"
"github.com/kubescape/kubescape/v2/core/cautils"
"github.com/kubescape/kubescape/v2/core/mocks"
"github.com/kubescape/opa-utils/reporthandling"
"github.com/kubescape/opa-utils/reporthandling/results/v1/resourcesresults"
"github.com/kubescape/opa-utils/resources"
"github.com/stretchr/testify/assert"
"github.com/kubescape/k8s-interface/workloadinterface"
// _ "k8s.io/client-go/plugin/pkg/client/auth"
)
func NewOPAProcessorMock() *OPAProcessor {
return &OPAProcessor{}
var (
//go:embed testdata/opaSessionObjMock.json
opaSessionObjMockData string
//go:embed testdata/opaSessionObjMock1.json
opaSessionObjMockData1 string
//go:embed testdata/regoDependenciesDataMock.json
regoDependenciesData string
allResourcesMockData []byte
//go:embed testdata/resourcesMock1.json
resourcesMock1 []byte
)
func unzipAllResourcesTestDataAndSetVar(zipFilePath, destFilePath string) error {
archive, err := zip.OpenReader(zipFilePath)
if err != nil {
return err
}
os.RemoveAll(destFilePath)
f := archive.File[0]
dstFile, err := os.OpenFile(destFilePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode())
if err != nil {
return err
}
fileInArchive, err := f.Open()
if err != nil {
return err
}
_, err = io.Copy(dstFile, fileInArchive) //nolint:gosec
dstFile.Close()
fileInArchive.Close()
archive.Close()
file, err := os.Open(destFilePath)
if err != nil {
panic(err)
}
allResourcesMockData, err = io.ReadAll(file)
if err != nil {
panic(err)
}
file.Close()
return nil
}
func NewOPAProcessorMock(opaSessionObjMock string, resourcesMock []byte) *OPAProcessor {
opap := &OPAProcessor{}
if err := json.Unmarshal([]byte(regoDependenciesData), &opap.regoDependenciesData); err != nil {
panic(err)
}
// no err check because Unmarshal will fail on AllResources field (expected)
json.Unmarshal([]byte(opaSessionObjMock), &opap.OPASessionObj)
opap.AllResources = make(map[string]workloadinterface.IMetadata)
allResources := make(map[string]map[string]interface{})
if err := json.Unmarshal(resourcesMock, &allResources); err != nil {
panic(err)
}
for i := range allResources {
opap.AllResources[i] = workloadinterface.NewWorkloadObj(allResources[i])
}
return opap
}
func monitorHeapSpace(maxHeap *uint64, quitChan chan bool) {
for {
select {
case <-quitChan:
return
default:
var m runtime.MemStats
runtime.ReadMemStats(&m)
heapSpace := m.HeapAlloc
if heapSpace > *maxHeap {
*maxHeap = heapSpace
}
time.Sleep(100 * time.Millisecond)
}
}
}
/*
goarch: arm64
pkg: github.com/kubescape/kubescape/v2/core/pkg/opaprocessor
BenchmarkProcess/opaprocessor.Process_1-8 1 29714096083 ns/op 22309913416 B/op 498183685 allocs/op
--- BENCH: BenchmarkProcess/opaprocessor.Process_1-8
processorhandler_test.go:178: opaprocessor.Process_1_max_heap_space_gb: 2.85
processorhandler_test.go:179: opaprocessor.Process_1_execution_time_sec: 29.714054
BenchmarkProcess/opaprocessor.Process_4-8 1 25574892875 ns/op 22037035032 B/op 498167263 allocs/op
--- BENCH: BenchmarkProcess/opaprocessor.Process_4-8
processorhandler_test.go:178: opaprocessor.Process_4_max_heap_space_gb: 6.76
processorhandler_test.go:179: opaprocessor.Process_4_execution_time_sec: 25.574884
BenchmarkProcess/opaprocessor.Process_8-8 1 16534461291 ns/op 22308814384 B/op 498167171 allocs/op
--- BENCH: BenchmarkProcess/opaprocessor.Process_8-8
processorhandler_test.go:178: opaprocessor.Process_8_max_heap_space_gb: 9.47
processorhandler_test.go:179: opaprocessor.Process_8_execution_time_sec: 16.534455
BenchmarkProcess/opaprocessor.Process_16-8 1 18924050500 ns/op 22179562272 B/op 498167367 allocs/op
--- BENCH: BenchmarkProcess/opaprocessor.Process_16-8
processorhandler_test.go:178: opaprocessor.Process_16_max_heap_space_gb: 11.69
processorhandler_test.go:179: opaprocessor.Process_16_execution_time_sec: 16.321493
*/
func BenchmarkProcess(b *testing.B) {
b.SetParallelism(1)
// since all resources JSON is a large file, we need to unzip it and set the variable before running the benchmark
unzipAllResourcesTestDataAndSetVar("testdata/allResourcesMock.json.zip", "testdata/allResourcesMock.json")
maxGoRoutinesArr := []int{1, 4, 8, 16}
for _, maxGoRoutines := range maxGoRoutinesArr {
testName := fmt.Sprintf("opaprocessor.Process_%d", maxGoRoutines)
b.Run(testName, func(b *testing.B) {
// setup
opap := NewOPAProcessorMock(opaSessionObjMockData, allResourcesMockData)
b.ResetTimer()
var maxHeap uint64
quitChan := make(chan bool)
go monitorHeapSpace(&maxHeap, quitChan)
// test
opap.Process(context.Background(), opap.OPASessionObj.AllPolicies, nil)
// teardown
quitChan <- true
b.Log(fmt.Sprintf("%s_max_heap_space_gb: %.2f", testName, float64(maxHeap)/(1024*1024*1024)))
b.Log(fmt.Sprintf("%s_execution_time_sec: %f", testName, b.Elapsed().Seconds()))
})
}
}
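For reference, the numbers in the comment block above come from the standard Go benchmark tooling. A command along the following lines should reproduce them; the relative package path is an assumption inferred from the pkg: line in the benchmark output and may need adjusting to the repository layout:

go test -run '^$' -bench BenchmarkProcess -benchtime 1x ./core/pkg/opaprocessor/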
func TestProcessResourcesResult(t *testing.T) {
// set k8s
@@ -32,10 +185,11 @@ func TestProcessResourcesResult(t *testing.T) {
opaSessionObj := cautils.NewOPASessionObjMock()
opaSessionObj.Policies = frameworks
policies := ConvertFrameworksToPolicies(opaSessionObj.Policies, "")
scanningScope := cautils.GetScanningScope(&cautils.ScanInfo{InputPatterns: []string{""}})
policies := ConvertFrameworksToPolicies(opaSessionObj.Policies, "", nil, scanningScope)
ConvertFrameworksToSummaryDetails(&opaSessionObj.Report.SummaryDetails, opaSessionObj.Policies, policies)
opaSessionObj.K8SResources = &k8sResources
opaSessionObj.K8SResources = k8sResources
opaSessionObj.AllResources[deployment.GetID()] = deployment
opap := NewOPAProcessor(opaSessionObj, resources.NewRegoDependenciesDataMock())
@@ -44,18 +198,18 @@ func TestProcessResourcesResult(t *testing.T) {
assert.Equal(t, 1, len(opaSessionObj.ResourcesResult))
res := opaSessionObj.ResourcesResult[deployment.GetID()]
assert.Equal(t, 2, res.ListControlsIDs(nil).All().Len())
assert.Equal(t, 1, len(res.ListControlsIDs(nil).Failed()))
assert.Equal(t, 1, len(res.ListControlsIDs(nil).Passed()))
assert.Equal(t, 2, res.ListControlsIDs(nil).Len())
assert.Equal(t, 1, res.ListControlsIDs(nil).Failed())
assert.Equal(t, 1, res.ListControlsIDs(nil).Passed())
assert.True(t, res.GetStatus(nil).IsFailed())
assert.False(t, res.GetStatus(nil).IsPassed())
assert.Equal(t, deployment.GetID(), opaSessionObj.ResourcesResult[deployment.GetID()].ResourceID)
opap.updateResults(context.TODO())
res = opaSessionObj.ResourcesResult[deployment.GetID()]
assert.Equal(t, 2, res.ListControlsIDs(nil).All().Len())
assert.Equal(t, 1, len(res.ListControlsIDs(nil).Failed()))
assert.Equal(t, 1, len(res.ListControlsIDs(nil).Passed()))
assert.Equal(t, 2, res.ListControlsIDs(nil).Len())
assert.Equal(t, 1, res.ListControlsIDs(nil).Failed())
assert.Equal(t, 1, res.ListControlsIDs(nil).Passed())
assert.True(t, res.GetStatus(nil).IsFailed())
assert.False(t, res.GetStatus(nil).IsPassed())
assert.Equal(t, deployment.GetID(), opaSessionObj.ResourcesResult[deployment.GetID()].ResourceID)
@@ -68,32 +222,114 @@ func TestProcessResourcesResult(t *testing.T) {
assert.Equal(t, 0, summaryDetails.NumberOfResources().Skipped())
// test resource listing
assert.Equal(t, 1, summaryDetails.ListResourcesIDs().All().Len())
assert.Equal(t, 1, len(summaryDetails.ListResourcesIDs().Failed()))
assert.Equal(t, 0, len(summaryDetails.ListResourcesIDs().Passed()))
assert.Equal(t, 0, len(summaryDetails.ListResourcesIDs().Skipped()))
assert.Equal(t, 1, summaryDetails.ListResourcesIDs(nil).Len())
assert.Equal(t, 1, summaryDetails.ListResourcesIDs(nil).Failed())
assert.Equal(t, 0, summaryDetails.ListResourcesIDs(nil).Passed())
assert.Equal(t, 0, summaryDetails.ListResourcesIDs(nil).Skipped())
// test control listing
assert.Equal(t, res.ListControlsIDs(nil).All().Len(), summaryDetails.NumberOfControls().All())
assert.Equal(t, len(res.ListControlsIDs(nil).Passed()), summaryDetails.NumberOfControls().Passed())
assert.Equal(t, len(res.ListControlsIDs(nil).Skipped()), summaryDetails.NumberOfControls().Skipped())
assert.Equal(t, len(res.ListControlsIDs(nil).Failed()), summaryDetails.NumberOfControls().Failed())
assert.Equal(t, res.ListControlsIDs(nil).Len(), summaryDetails.NumberOfControls().All())
assert.Equal(t, res.ListControlsIDs(nil).Passed(), summaryDetails.NumberOfControls().Passed())
assert.Equal(t, res.ListControlsIDs(nil).Skipped(), summaryDetails.NumberOfControls().Skipped())
assert.Equal(t, res.ListControlsIDs(nil).Failed(), summaryDetails.NumberOfControls().Failed())
assert.True(t, summaryDetails.GetStatus().IsFailed())
opaSessionObj.Exceptions = []armotypes.PostureExceptionPolicy{*mocks.MockExceptionAllKinds(&armotypes.PosturePolicy{FrameworkName: frameworks[0].Name})}
opap.updateResults(context.TODO())
res = opaSessionObj.ResourcesResult[deployment.GetID()]
assert.Equal(t, 2, res.ListControlsIDs(nil).All().Len())
assert.Equal(t, 2, len(res.ListControlsIDs(nil).Passed()))
assert.Equal(t, 2, res.ListControlsIDs(nil).Len())
assert.Equal(t, 2, res.ListControlsIDs(nil).Passed())
assert.True(t, res.GetStatus(nil).IsPassed())
assert.False(t, res.GetStatus(nil).IsFailed())
assert.Equal(t, deployment.GetID(), opaSessionObj.ResourcesResult[deployment.GetID()].ResourceID)
// test resource listing
summaryDetails = opaSessionObj.Report.SummaryDetails
assert.Equal(t, 1, summaryDetails.ListResourcesIDs().All().Len())
assert.Equal(t, 1, len(summaryDetails.ListResourcesIDs().Failed()))
assert.Equal(t, 0, len(summaryDetails.ListResourcesIDs().Passed()))
assert.Equal(t, 0, len(summaryDetails.ListResourcesIDs().Skipped()))
assert.Equal(t, 1, summaryDetails.ListResourcesIDs(nil).Len())
assert.Equal(t, 1, summaryDetails.ListResourcesIDs(nil).Failed())
assert.Equal(t, 0, summaryDetails.ListResourcesIDs(nil).Passed())
assert.Equal(t, 0, summaryDetails.ListResourcesIDs(nil).Skipped())
}
// don't parallelize this test because it uses a global variable - allResourcesMockData
func TestProcessRule(t *testing.T) {
testCases := []struct {
name string
rule reporthandling.PolicyRule
resourcesMock []byte
opaSessionObjMock string
expectedResult map[string]*resourcesresults.ResourceAssociatedRule
}{
{
name: "TestRelatedResourcesIDs",
rule: reporthandling.PolicyRule{
PortalBase: armotypes.PortalBase{
Name: "exposure-to-internet",
Attributes: map[string]interface{}{
"armoBuiltin": true,
},
},
Rule: "package armo_builtins\n\n# Checks if NodePort or LoadBalancer is connected to a workload to expose something\ndeny[msga] {\n service := input[_]\n service.kind == \"Service\"\n is_exposed_service(service)\n \n wl := input[_]\n spec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Pod\", \"Job\", \"CronJob\"}\n spec_template_spec_patterns[wl.kind]\n wl_connected_to_service(wl, service)\n failPath := [\"spec.type\"]\n msga := {\n \"alertMessage\": sprintf(\"workload '%v' is exposed through service '%v'\", [wl.metadata.name, service.metadata.name]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 7,\n \"fixPaths\": [],\n \"failedPaths\": [],\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n \"relatedObjects\": [{\n \"object\": service,\n \"failedPaths\": failPath,\n }]\n }\n}\n\n# Checks if Ingress is connected to a service and a workload to expose something\ndeny[msga] {\n ingress := input[_]\n ingress.kind == \"Ingress\"\n \n svc := input[_]\n svc.kind == \"Service\"\n # avoid duplicate alerts\n # if service is already exposed through NodePort or LoadBalancer workload will fail on that\n not is_exposed_service(svc)\n\n wl := input[_]\n spec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Pod\", \"Job\", \"CronJob\"}\n spec_template_spec_patterns[wl.kind]\n wl_connected_to_service(wl, svc)\n\n result := svc_connected_to_ingress(svc, ingress)\n \n msga := {\n \"alertMessage\": sprintf(\"workload '%v' is exposed through ingress '%v'\", [wl.metadata.name, ingress.metadata.name]),\n \"packagename\": \"armo_builtins\",\n \"failedPaths\": [],\n \"fixPaths\": [],\n \"alertScore\": 7,\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n \"relatedObjects\": [{\n \"object\": ingress,\n \"failedPaths\": result,\n }]\n }\n} \n\n# ====================================================================================\n\nis_exposed_service(svc) {\n svc.spec.type == \"NodePort\"\n}\n\nis_exposed_service(svc) {\n svc.spec.type == \"LoadBalancer\"\n}\n\nwl_connected_to_service(wl, svc) {\n count({x | svc.spec.selector[x] == wl.metadata.labels[x]}) == count(svc.spec.selector)\n}\n\nwl_connected_to_service(wl, svc) {\n wl.spec.selector.matchLabels == svc.spec.selector\n}\n\n# check if service is connected to ingress\nsvc_connected_to_ingress(svc, ingress) = result {\n rule := ingress.spec.rules[i]\n paths := rule.http.paths[j]\n svc.metadata.name == paths.backend.service.name\n result := [sprintf(\"ingress.spec.rules[%d].http.paths[%d].backend.service.name\", [i,j])]\n}\n\n",
Match: []reporthandling.RuleMatchObjects{
{
APIGroups: []string{""},
APIVersions: []string{"v1"},
Resources: []string{"Pod", "Service"},
},
{
APIGroups: []string{"apps"},
APIVersions: []string{"v1"},
Resources: []string{"Deployment", "ReplicaSet", "DaemonSet", "StatefulSet"},
},
{
APIGroups: []string{"batch"},
APIVersions: []string{"*"},
Resources: []string{"Job", "CronJob"},
},
{
APIGroups: []string{"extensions", "networking.k8s.io"},
APIVersions: []string{"v1beta1", "v1"},
Resources: []string{"Ingress"},
},
},
Description: "fails in case the running workload has binded Service or Ingress that are exposing it on Internet.",
Remediation: "",
RuleQuery: "armo_builtins",
RuleLanguage: reporthandling.RegoLanguage,
},
resourcesMock: resourcesMock1,
opaSessionObjMock: opaSessionObjMockData1,
expectedResult: map[string]*resourcesresults.ResourceAssociatedRule{
"/v1/default/Pod/fake-pod-1-22gck": {
Name: "exposure-to-internet",
ControlConfigurations: map[string][]string{},
Status: "failed",
SubStatus: "",
Paths: nil,
Exception: nil,
RelatedResourcesIDs: []string{
"/v1/default/Service/fake-service-1",
},
},
"/v1/default/Service/fake-service-1": {
Name: "exposure-to-internet",
ControlConfigurations: map[string][]string{},
Status: "passed",
SubStatus: "",
Paths: nil,
Exception: nil,
RelatedResourcesIDs: nil,
},
},
},
}
for _, tc := range testCases {
// the all-resources JSON is a large file, so unzip it and set the variable before running the test
unzipAllResourcesTestDataAndSetVar("testdata/allResourcesMock.json.zip", "testdata/allResourcesMock.json")
opap := NewOPAProcessorMock(tc.opaSessionObjMock, tc.resourcesMock)
resources, err := opap.processRule(context.Background(), &tc.rule, nil)
assert.NoError(t, err)
assert.Equal(t, tc.expectedResult, resources)
}
}

View File

@@ -16,6 +16,10 @@ import (
"go.opentelemetry.io/otel"
)
const clusterScope = "clusterScope"
var largeClusterSize int = -1
// updateResults updates the results objects and report objects. This is a critical function - DO NOT CHANGE
//
// The function:
@@ -25,6 +29,10 @@ import (
func (opap *OPAProcessor) updateResults(ctx context.Context) {
_, span := otel.Tracer("").Start(ctx, "OPAProcessor.updateResults")
defer span.End()
defer logger.L().Ctx(ctx).Success("Done aggregating results")
cautils.StartSpinner()
defer cautils.StopSpinner()
// remove data from all objects
for i := range opap.AllResources {
@@ -87,14 +95,21 @@ func isEmptyResources(counters reportsummary.ICounters) bool {
return counters.Failed() == 0 && counters.Skipped() == 0 && counters.Passed() == 0
}
func getAllSupportedObjects(k8sResources *cautils.K8SResources, ksResources *cautils.KSResources, allResources map[string]workloadinterface.IMetadata, rule *reporthandling.PolicyRule) []workloadinterface.IMetadata {
k8sObjects := []workloadinterface.IMetadata{}
k8sObjects = append(k8sObjects, getKubernetesObjects(k8sResources, allResources, rule.Match)...)
k8sObjects = append(k8sObjects, getKSObjects(ksResources, allResources, rule.DynamicMatch)...)
func getAllSupportedObjects(k8sResources cautils.K8SResources, externalResources cautils.ExternalResources, allResources map[string]workloadinterface.IMetadata, rule *reporthandling.PolicyRule) map[string][]workloadinterface.IMetadata {
k8sObjects := getKubernetesObjects(k8sResources, allResources, rule.Match)
externalObjs := getKubenetesObjectsFromExternalResources(externalResources, allResources, rule.DynamicMatch)
if len(externalObjs) > 0 {
l, ok := k8sObjects[clusterScope]
if !ok {
l = []workloadinterface.IMetadata{}
}
l = append(l, externalObjs...)
k8sObjects[clusterScope] = l
}
return k8sObjects
}
func getKSObjects(k8sResources *cautils.KSResources, allResources map[string]workloadinterface.IMetadata, match []reporthandling.RuleMatchObjects) []workloadinterface.IMetadata {
func getKubenetesObjectsFromExternalResources(externalResources cautils.ExternalResources, allResources map[string]workloadinterface.IMetadata, match []reporthandling.RuleMatchObjects) []workloadinterface.IMetadata {
k8sObjects := []workloadinterface.IMetadata{}
for m := range match {
@@ -103,7 +118,7 @@ func getKSObjects(k8sResources *cautils.KSResources, allResources map[string]wor
for _, resource := range match[m].Resources {
groupResources := k8sinterface.ResourceGroupToString(groups, version, resource)
for _, groupResource := range groupResources {
if k8sObj, ok := (*k8sResources)[groupResource]; ok {
if k8sObj, ok := externalResources[groupResource]; ok {
for i := range k8sObj {
k8sObjects = append(k8sObjects, allResources[k8sObj[i]])
}
@@ -114,11 +129,11 @@ func getKSObjects(k8sResources *cautils.KSResources, allResources map[string]wor
}
}
return filterOutChildResources(k8sObjects, match)
return k8sObjects
}
func getKubernetesObjects(k8sResources *cautils.K8SResources, allResources map[string]workloadinterface.IMetadata, match []reporthandling.RuleMatchObjects) []workloadinterface.IMetadata {
k8sObjects := []workloadinterface.IMetadata{}
func getKubernetesObjects(k8sResources cautils.K8SResources, allResources map[string]workloadinterface.IMetadata, match []reporthandling.RuleMatchObjects) map[string][]workloadinterface.IMetadata {
k8sObjects := map[string][]workloadinterface.IMetadata{}
for m := range match {
for _, groups := range match[m].APIGroups {
@@ -126,14 +141,18 @@ func getKubernetesObjects(k8sResources *cautils.K8SResources, allResources map[s
for _, resource := range match[m].Resources {
groupResources := k8sinterface.ResourceGroupToString(groups, version, resource)
for _, groupResource := range groupResources {
if k8sObj, ok := (*k8sResources)[groupResource]; ok {
/*
if k8sObj == nil {
// logger.L().Debug("skipping", helpers.String("resource", groupResource))
if k8sObj, ok := k8sResources[groupResource]; ok {
for i := range k8sObj {
obj := allResources[k8sObj[i]]
ns := getNamespaceName(obj, len(allResources))
l, ok := k8sObjects[ns]
if !ok {
l = []workloadinterface.IMetadata{}
}
*/
for i := range k8sObj {
k8sObjects = append(k8sObjects, allResources[k8sObj[i]])
l = append(l, obj)
k8sObjects[ns] = l
}
}
}
@@ -142,34 +161,9 @@ func getKubernetesObjects(k8sResources *cautils.K8SResources, allResources map[s
}
}
return filterOutChildResources(k8sObjects, match)
return k8sObjects
// return filterOutChildResources(k8sObjects, match)
}
// filterOutChildResources filters out child resources if the parent resource is in the list
func filterOutChildResources(objects []workloadinterface.IMetadata, match []reporthandling.RuleMatchObjects) []workloadinterface.IMetadata {
response := []workloadinterface.IMetadata{}
owners := []string{}
for m := range match {
owners = append(owners, match[m].Resources...)
}
for i := range objects {
if !k8sinterface.IsTypeWorkload(objects[i].GetObject()) {
response = append(response, objects[i])
continue
}
w := workloadinterface.NewWorkloadObj(objects[i].GetObject())
ownerReferences, err := w.GetOwnerReferences()
if err != nil || len(ownerReferences) == 0 {
response = append(response, w)
} else if !k8sinterface.IsStringInSlice(owners, ownerReferences[0].Kind) {
response = append(response, w)
}
}
return response
}
func getRuleDependencies(ctx context.Context) (map[string]string, error) {
modules := resources.LoadRegoModules()
if len(modules) == 0 {
@@ -240,3 +234,30 @@ func ruleData(rule *reporthandling.PolicyRule) string {
func ruleEnumeratorData(rule *reporthandling.PolicyRule) string {
return rule.ResourceEnumerator
}
func getNamespaceName(obj workloadinterface.IMetadata, clusterSize int) string {
if !isLargeCluster(clusterSize) {
return clusterScope
}
// if the resource is in namespace scope, get the namespace
if k8sinterface.IsResourceInNamespaceScope(obj.GetKind()) {
return obj.GetNamespace()
}
if obj.GetKind() == "Namespace" {
return obj.GetName()
}
return clusterScope
}
// isLargeCluster returns true if the cluster size is larger than the largeClusterSize
// This code is a workaround for large clusters. The final solution will be to scan resources individually
func isLargeCluster(clusterSize int) bool {
if largeClusterSize < 0 {
// initialize large cluster size
largeClusterSize, _ = cautils.ParseIntEnvVar("LARGE_CLUSTER_SIZE", 2500)
}
return clusterSize > largeClusterSize
}
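To make the bucketing concrete, here is a minimal, hedged sketch of how getNamespaceName groups resources, assuming the default LARGE_CLUSTER_SIZE of 2500 from isLargeCluster above; the Pod literal and the cluster sizes are hypothetical:

// illustrative only: a namespaced object bucketed by getNamespaceName
pod := workloadinterface.NewWorkloadObj(map[string]interface{}{
	"apiVersion": "v1",
	"kind":       "Pod",
	"metadata":   map[string]interface{}{"name": "demo", "namespace": "default"},
})
fmt.Println(getNamespaceName(pod, 100))  // "clusterScope": small cluster, one shared bucket
fmt.Println(getNamespaceName(pod, 5000)) // "default": large cluster, bucketed per namespace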

Binary file not shown.

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1 @@
{"clusterName":"kwok-kwok-cluster","postureControlInputs":{"cpu_limit_max":[],"cpu_limit_min":[],"cpu_request_max":[],"cpu_request_min":[],"imageRepositoryAllowList":[],"insecureCapabilities":["SETPCAP","NET_ADMIN","NET_RAW","SYS_MODULE","SYS_RAWIO","SYS_PTRACE","SYS_ADMIN","SYS_BOOT","MAC_OVERRIDE","MAC_ADMIN","PERFMON","ALL","BPF"],"k8sRecommendedLabels":["app.kubernetes.io/name","app.kubernetes.io/instance","app.kubernetes.io/version","app.kubernetes.io/component","app.kubernetes.io/part-of","app.kubernetes.io/managed-by","app.kubernetes.io/created-by"],"listOfDangerousArtifacts":["bin/bash","sbin/sh","bin/ksh","bin/tcsh","bin/zsh","usr/bin/scsh","bin/csh","bin/busybox","usr/bin/busybox"],"max_critical_vulnerabilities":["5"],"max_high_vulnerabilities":["10"],"memory_limit_max":[],"memory_limit_min":[],"memory_request_max":[],"memory_request_min":[],"publicRegistries":[],"recommendedLabels":["app","tier","phase","version","owner","env"],"sensitiveInterfaces":["nifi","argo-server","weave-scope-app","kubeflow","kubernetes-dashboard","jenkins","prometheus-deployment"],"sensitiveKeyNames":["aws_access_key_id","aws_secret_access_key","azure_batchai_storage_account","azure_batchai_storage_key","azure_batch_account","azure_batch_key","secret","key","password","pwd","token","jwt","bearer","credential"],"sensitiveValues":["BEGIN \\w+ PRIVATE KEY","PRIVATE KEY","eyJhbGciO","JWT","Bearer","_key_","_secret_"],"sensitiveValuesAllowed":[],"servicesNames":["nifi-service","argo-server","minio","postgres","workflow-controller-metrics","weave-scope-app","kubernetes-dashboard"],"trustedCosignPublicKeys":[],"untrustedRegistries":[],"wlKnownNames":["coredns","kube-proxy","event-exporter-gke","kube-dns","17-default-backend","metrics-server","ca-audit","ca-dashboard-aggregator","ca-notification-server","ca-ocimage","ca-oracle","ca-posture","ca-rbac","ca-vuln-scan","ca-webhook","ca-websocket","clair-clair"]},"dataControlInputs":null,"k8sconfig":{"token":"","ip":"","host":"https://127.0.0.1:32766","port":"","crtfile":"","clientcrtfile":"/Users/amirmalka/.kwok/clusters/kwok-cluster/pki/admin.crt","clientkeyfile":"/Users/amirmalka/.kwok/clusters/kwok-cluster/pki/admin.key"}}

View File

@@ -0,0 +1,220 @@
{
"/v1/default/Pod/fake-pod-1-22gck": {
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"annotations": {
"kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"kind\":\"Pod\",\"metadata\":{\"annotations\":{},\"name\":\"fake-pod-1-22gck\",\"namespace\":\"default\"},\"spec\":{\"containers\":[{\"image\":\"redis\",\"name\":\"fake-pod-1-22gck\",\"volumeMounts\":[{\"mountPath\":\"/etc/foo\",\"name\":\"foo\",\"readOnly\":true}]}],\"volumes\":[{\"name\":\"foo\",\"secret\":{\"optional\":true,\"secretName\":\"mysecret\"}}]}}\n"
},
"creationTimestamp": "2023-06-22T07:47:38Z",
"name": "fake-pod-1-22gck",
"namespace": "default",
"resourceVersion": "1087189",
"uid": "046753fa-c7b6-46dd-ae18-dd68b8b20cd3",
"labels": {"app": "argo-server"}
},
"spec": {
"containers": [
{
"image": "redis",
"imagePullPolicy": "Always",
"name": "fake-pod-1-22gck",
"resources": {},
"terminationMessagePath": "/dev/termination-log",
"terminationMessagePolicy": "File",
"volumeMounts": [
{
"mountPath": "/etc/foo",
"name": "foo",
"readOnly": true
},
{
"mountPath": "/var/run/secrets/kubernetes.io/serviceaccount",
"name": "kube-api-access-lrpxm",
"readOnly": true
}
]
}
],
"dnsPolicy": "ClusterFirst",
"enableServiceLinks": true,
"nodeName": "minikube-yiscah",
"preemptionPolicy": "PreemptLowerPriority",
"priority": 0,
"restartPolicy": "Always",
"schedulerName": "default-scheduler",
"securityContext": {},
"serviceAccount": "default",
"serviceAccountName": "default",
"terminationGracePeriodSeconds": 30,
"tolerations": [
{
"effect": "NoExecute",
"key": "node.kubernetes.io/not-ready",
"operator": "Exists",
"tolerationSeconds": 300
},
{
"effect": "NoExecute",
"key": "node.kubernetes.io/unreachable",
"operator": "Exists",
"tolerationSeconds": 300
}
],
"volumes": [
{
"name": "foo",
"secret": {
"defaultMode": 420,
"optional": true,
"secretName": "mysecret"
}
},
{
"name": "kube-api-access-lrpxm",
"projected": {
"defaultMode": 420,
"sources": [
{
"serviceAccountToken": {
"expirationSeconds": 3607,
"path": "token"
}
},
{
"configMap": {
"items": [
{
"key": "ca.crt",
"path": "ca.crt"
}
],
"name": "kube-root-ca.crt"
}
},
{
"downwardAPI": {
"items": [
{
"fieldRef": {
"apiVersion": "v1",
"fieldPath": "metadata.namespace"
},
"path": "namespace"
}
]
}
}
]
}
}
]
},
"status": {
"conditions": [
{
"lastProbeTime": null,
"lastTransitionTime": "2023-06-22T07:47:38Z",
"status": "True",
"type": "Initialized"
},
{
"lastProbeTime": null,
"lastTransitionTime": "2023-07-18T05:07:57Z",
"status": "True",
"type": "Ready"
},
{
"lastProbeTime": null,
"lastTransitionTime": "2023-07-18T05:07:57Z",
"status": "True",
"type": "ContainersReady"
},
{
"lastProbeTime": null,
"lastTransitionTime": "2023-06-22T07:47:38Z",
"status": "True",
"type": "PodScheduled"
}
],
"containerStatuses": [
{
"containerID": "docker://a3a1aac00031c6ab85f75cfa17d14ebd71ab15f1fc5c82a262449621a77d7a7e",
"image": "redis:latest",
"imageID": "docker-pullable://redis@sha256:08a82d4bf8a8b4dd94e8f5408cdbad9dd184c1cf311d34176cd3e9972c43f872",
"lastState": {
"terminated": {
"containerID": "docker://1ae623f4faf8cda5dabdc65c342752dfdf1675cb173b46875596c2eb0dae472f",
"exitCode": 255,
"finishedAt": "2023-07-18T05:03:55Z",
"reason": "Error",
"startedAt": "2023-07-17T16:32:35Z"
}
},
"name": "fake-pod-1-22gck",
"ready": true,
"restartCount": 9,
"started": true,
"state": {
"running": {
"startedAt": "2023-07-18T05:07:56Z"
}
}
}
],
"hostIP": "192.168.85.2",
"phase": "Running",
"podIP": "10.244.1.131",
"podIPs": [
{
"ip": "10.244.1.131"
}
],
"qosClass": "BestEffort",
"startTime": "2023-06-22T07:47:38Z"
}
},
"/v1/default/Service/fake-service-1": {
"apiVersion": "v1",
"kind": "Service",
"metadata": {
"annotations": {
"kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"kind\":\"Service\",\"metadata\":{\"annotations\":{},\"name\":\"fake-service-1\",\"namespace\":\"default\"},\"spec\":{\"clusterIP\":\"10.96.0.11\",\"ports\":[{\"port\":80,\"protocol\":\"TCP\",\"targetPort\":9376}],\"selector\":{\"app\":\"argo-server\"},\"type\":\"LoadBalancer\"},\"status\":{\"loadBalancer\":{\"ingress\":[{\"ip\":\"192.0.2.127\"}]}}}\n"
},
"creationTimestamp": "2023-07-09T06:22:27Z",
"name": "fake-service-1",
"namespace": "default",
"resourceVersion": "981856",
"uid": "dd629eb1-6779-4298-a70f-0bdbd046d409"
},
"spec": {
"allocateLoadBalancerNodePorts": true,
"clusterIP": "10.96.0.11",
"clusterIPs": [
"10.96.0.11"
],
"externalTrafficPolicy": "Cluster",
"internalTrafficPolicy": "Cluster",
"ipFamilies": [
"IPv4"
],
"ipFamilyPolicy": "SingleStack",
"ports": [
{
"nodePort": 30706,
"port": 80,
"protocol": "TCP",
"targetPort": 9376
}
],
"selector": {
"app": "argo-server"
},
"sessionAffinity": "None",
"type": "LoadBalancer"
},
"status": {
"loadBalancer": {}
}
}
}

File diff suppressed because one or more lines are too long

View File

@@ -7,17 +7,19 @@ import (
"github.com/kubescape/go-logger/helpers"
"github.com/kubescape/kubescape/v2/core/cautils"
"github.com/kubescape/opa-utils/reporthandling"
"github.com/kubescape/opa-utils/reporthandling/apis"
"github.com/kubescape/opa-utils/reporthandling/results/v1/reportsummary"
"github.com/open-policy-agent/opa/ast"
"github.com/open-policy-agent/opa/rego"
"github.com/open-policy-agent/opa/topdown/builtins"
"github.com/open-policy-agent/opa/types"
"golang.org/x/exp/slices"
)
// ConvertFrameworksToPolicies convert list of frameworks to list of policies
func ConvertFrameworksToPolicies(frameworks []reporthandling.Framework, version string) *cautils.Policies {
func ConvertFrameworksToPolicies(frameworks []reporthandling.Framework, version string, excludedRules map[string]bool, scanningScope reporthandling.ScanningScopeType) *cautils.Policies {
policies := cautils.NewPolicies()
policies.Set(frameworks, version)
policies.Set(frameworks, version, excludedRules, scanningScope)
return policies
}
@@ -37,12 +39,19 @@ func ConvertFrameworksToSummaryDetails(summaryDetails *reportsummary.SummaryDeta
ScoreFactor: frameworks[i].Controls[j].BaseScore,
Description: frameworks[i].Controls[j].Description,
Remediation: frameworks[i].Controls[j].Remediation,
Category: frameworks[i].Controls[j].Category,
}
if frameworks[i].Controls[j].GetActionRequiredAttribute() == string(apis.SubStatusManualReview) {
c.Status = apis.StatusSkipped
c.StatusInfo.InnerStatus = apis.StatusSkipped
c.StatusInfo.SubStatus = apis.SubStatusManualReview
c.StatusInfo.InnerInfo = string(apis.SubStatusManualReviewInfo)
}
controls[frameworks[i].Controls[j].ControlID] = c
summaryDetails.Controls[id] = c
}
}
if cautils.StringInSlice(policies.Frameworks, frameworks[i].Name) != cautils.ValueNotFound {
if slices.Contains(policies.Frameworks, frameworks[i].Name) {
summaryDetails.Frameworks = append(summaryDetails.Frameworks, reportsummary.FrameworkSummary{
Name: frameworks[i].Name,
Controls: controls,
@@ -86,3 +95,17 @@ var cosignHasSignatureDefinition = func(bctx rego.BuiltinContext, a *ast.Term) (
}
return ast.BooleanTerm(has_signature(string(aStr))), nil
}
var imageNameNormalizeDeclaration = &rego.Function{
Name: "image.parse_normalized_name",
Decl: types.NewFunction(types.Args(types.S), types.S),
Memoize: true,
}
var imageNameNormalizeDefinition = func(bctx rego.BuiltinContext, a *ast.Term) (*ast.Term, error) {
aStr, err := builtins.StringOperand(a.Value, 1)
if err != nil {
return nil, fmt.Errorf("invalid parameter type: %v", err)
}
normalizedName, err := normalize_image_name(string(aStr))
return ast.StringTerm(normalizedName), err
}

View File

@@ -5,6 +5,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/kubescape/kubescape/v2/core/cautils"
"github.com/kubescape/kubescape/v2/core/mocks"
"github.com/kubescape/opa-utils/reporthandling"
"github.com/kubescape/opa-utils/reporthandling/results/v1/reportsummary"
@@ -13,18 +14,31 @@ import (
func TestConvertFrameworksToPolicies(t *testing.T) {
fw0 := mocks.MockFramework_0006_0013()
fw1 := mocks.MockFramework_0044()
policies := ConvertFrameworksToPolicies([]reporthandling.Framework{*fw0, *fw1}, "")
scanningScope := cautils.GetScanningScope(&cautils.ScanInfo{InputPatterns: []string{""}})
policies := ConvertFrameworksToPolicies([]reporthandling.Framework{*fw0, *fw1}, "", nil, scanningScope)
assert.Equal(t, 2, len(policies.Frameworks))
assert.Equal(t, 3, len(policies.Controls))
// with excluded rules map
excludedRulesMap := map[string]bool{
"alert-rw-hostpath": true,
}
fw0 = mocks.MockFramework_0006_0013()
fw1 = mocks.MockFramework_0044()
policies = ConvertFrameworksToPolicies([]reporthandling.Framework{*fw0, *fw1}, "", excludedRulesMap, scanningScope)
assert.Equal(t, 2, len(policies.Frameworks))
assert.Equal(t, 2, len(policies.Controls))
}
func TestInitializeSummaryDetails(t *testing.T) {
fw0 := mocks.MockFramework_0006_0013()
fw1 := mocks.MockFramework_0044()
scanningScope := cautils.GetScanningScope(&cautils.ScanInfo{InputPatterns: []string{""}})
summaryDetails := reportsummary.SummaryDetails{}
frameworks := []reporthandling.Framework{*fw0, *fw1}
policies := ConvertFrameworksToPolicies([]reporthandling.Framework{*fw0, *fw1}, "")
policies := ConvertFrameworksToPolicies([]reporthandling.Framework{*fw0, *fw1}, "", nil, scanningScope)
ConvertFrameworksToSummaryDetails(&summaryDetails, frameworks, policies)
assert.Equal(t, 2, len(summaryDetails.Frameworks))
assert.Equal(t, 3, len(summaryDetails.Controls))
// assert.Equal(t, 3, len(summaryDetails.Controls))
}

View File

@@ -0,0 +1,73 @@
package policyhandler
import (
"sync"
"time"
)
// TimedCache provides functionality for managing a timed cache.
// The timed cache holds a value for a specified time duration (TTL).
// After the TTL has passed, the value is invalidated.
//
// The cache is thread safe.
type TimedCache[T any] struct {
value T
isSet bool
ttl time.Duration
expiration int64
mutex sync.RWMutex
}
func NewTimedCache[T any](ttl time.Duration) *TimedCache[T] {
cache := &TimedCache[T]{
ttl: ttl,
isSet: false,
}
// start the invalidate task only when the ttl is greater than 0 (cache is enabled)
if ttl > 0 {
go cache.invalidateTask()
}
return cache
}
func (c *TimedCache[T]) Set(value T) {
c.mutex.Lock()
defer c.mutex.Unlock()
// cache is disabled
if c.ttl == 0 {
return
}
c.isSet = true
c.value = value
c.expiration = time.Now().Add(c.ttl).UnixNano()
}
func (c *TimedCache[T]) Get() (T, bool) {
c.mutex.RLock()
defer c.mutex.RUnlock()
if !c.isSet || time.Now().UnixNano() > c.expiration {
return c.value, false
}
return c.value, true
}
func (c *TimedCache[T]) invalidateTask() {
for {
<-time.After(c.ttl)
if time.Now().UnixNano() > c.expiration {
c.Invalidate()
}
}
}
func (c *TimedCache[T]) Invalidate() {
c.mutex.Lock()
defer c.mutex.Unlock()
c.isSet = false
}
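For orientation, a short usage sketch of the cache above; the values are hypothetical, and the test file below exercises the same semantics:

// cache a framework list for 30 seconds
cache := NewTimedCache[[]string](30 * time.Second)
cache.Set([]string{"nsa", "mitre"})
if frameworks, ok := cache.Get(); ok {
	fmt.Println("cache hit:", frameworks) // valid until the 30-second TTL elapses
}

// a TTL of 0 disables the cache: Set is a no-op and Get always reports a miss
disabled := NewTimedCache[int](0)
disabled.Set(42)
if _, ok := disabled.Get(); !ok {
	fmt.Println("disabled cache always misses")
}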

View File

@@ -0,0 +1,75 @@
package policyhandler
import (
"testing"
"time"
)
func TestTimedCache(t *testing.T) {
tests := []struct {
name string
// value ttl
ttl time.Duration
// value to set
value int
// time to wait before checking if value exists
wait time.Duration
// number of times to check if value exists (with wait in between)
checks int
// should the value exist in cache
exists bool
// expected cache value
wantVal int
}{
{
name: "value exists before ttl",
ttl: time.Second * 5,
value: 42,
wait: time.Second * 1,
checks: 2,
exists: true,
wantVal: 42,
},
{
name: "value does not exist after ttl",
ttl: time.Second * 3,
value: 55,
wait: time.Second * 4,
checks: 1,
exists: false,
},
{
name: "cache is disabled (ttl = 0) always returns false",
ttl: 0,
value: 55,
wait: 0,
checks: 1,
exists: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
cache := NewTimedCache[int](tt.ttl)
cache.Set(tt.value)
for i := 0; i < tt.checks; i++ {
// Wait for the specified duration
time.Sleep(tt.wait)
// Get the value from the cache
value, exists := cache.Get()
// Check if value exists
if exists != tt.exists {
t.Errorf("Expected exists to be %v, got %v", tt.exists, exists)
}
// Check value
if exists && value != tt.wantVal {
t.Errorf("Expected value to be %d, got %d", tt.wantVal, value)
}
}
})
}
}

View File

@@ -2,9 +2,11 @@ package policyhandler
import (
"context"
"encoding/json"
"fmt"
"strings"
"github.com/armosec/armoapi-go/armotypes"
logger "github.com/kubescape/go-logger"
"github.com/kubescape/go-logger/helpers"
"github.com/kubescape/kubescape/v2/core/cautils"
@@ -14,46 +16,130 @@ import (
"go.opentelemetry.io/otel"
)
func (policyHandler *PolicyHandler) getPolicies(ctx context.Context, policyIdentifier []cautils.PolicyIdentifier, policiesAndResources *cautils.OPASessionObj) error {
const (
PoliciesCacheTtlEnvVar = "POLICIES_CACHE_TTL"
)
var policyHandlerInstance *PolicyHandler
// PolicyHandler
type PolicyHandler struct {
getters *cautils.Getters
cachedPolicyIdentifiers *TimedCache[[]string]
cachedFrameworks *TimedCache[[]reporthandling.Framework]
cachedExceptions *TimedCache[[]armotypes.PostureExceptionPolicy]
cachedControlInputs *TimedCache[map[string][]string]
}
// NewPolicyHandler creates and returns an instance of the `PolicyHandler`. The function initializes the `PolicyHandler` only if it hasn't been previously created.
// The PolicyHandler supports caching of downloaded policies and exceptions by setting the `POLICIES_CACHE_TTL` environment variable (default is no caching).
func NewPolicyHandler() *PolicyHandler {
if policyHandlerInstance == nil {
cacheTtl := getPoliciesCacheTtl()
policyHandlerInstance = &PolicyHandler{
cachedPolicyIdentifiers: NewTimedCache[[]string](cacheTtl),
cachedFrameworks: NewTimedCache[[]reporthandling.Framework](cacheTtl),
cachedExceptions: NewTimedCache[[]armotypes.PostureExceptionPolicy](cacheTtl),
cachedControlInputs: NewTimedCache[map[string][]string](cacheTtl),
}
}
return policyHandlerInstance
}
func (policyHandler *PolicyHandler) CollectPolicies(ctx context.Context, policyIdentifier []cautils.PolicyIdentifier, scanInfo *cautils.ScanInfo) (*cautils.OPASessionObj, error) {
opaSessionObj := cautils.NewOPASessionObj(ctx, nil, nil, scanInfo)
policyHandler.getters = &scanInfo.Getters
// get policies, exceptions and controls inputs
policies, exceptions, controlInputs, err := policyHandler.getPolicies(ctx, policyIdentifier)
if err != nil {
return opaSessionObj, err
}
opaSessionObj.Policies = policies
opaSessionObj.Exceptions = exceptions
opaSessionObj.RegoInputData.PostureControlInputs = controlInputs
return opaSessionObj, nil
}
func (policyHandler *PolicyHandler) getPolicies(ctx context.Context, policyIdentifier []cautils.PolicyIdentifier) (policies []reporthandling.Framework, exceptions []armotypes.PostureExceptionPolicy, controlInputs map[string][]string, err error) {
ctx, span := otel.Tracer("").Start(ctx, "policyHandler.getPolicies")
defer span.End()
logger.L().Info("Downloading/Loading policy definitions")
cautils.StartSpinner()
defer cautils.StopSpinner()
logger.L().Start("Loading policies")
policies, err := policyHandler.getScanPolicies(ctx, policyIdentifier)
// get policies
policies, err = policyHandler.getScanPolicies(ctx, policyIdentifier)
if err != nil {
return err
return nil, nil, nil, err
}
if len(policies) == 0 {
return fmt.Errorf("failed to download policies: '%s'. Make sure the policy exist and you spelled it correctly. For more information, please feel free to contact ARMO team", strings.Join(policyIdentifierToSlice(policyIdentifier), ", "))
return nil, nil, nil, fmt.Errorf("failed to download policies: '%s'. Make sure the policy exist and you spelled it correctly. For more information, please feel free to contact ARMO team", strings.Join(policyIdentifierToSlice(policyIdentifier), ", "))
}
policiesAndResources.Policies = policies
logger.L().StopSuccess("Loaded policies")
logger.L().Start("Loading exceptions")
// get exceptions
exceptionPolicies, err := policyHandler.getters.ExceptionsGetter.GetExceptions(cautils.ClusterName)
if err == nil {
policiesAndResources.Exceptions = exceptionPolicies
} else {
if exceptions, err = policyHandler.getExceptions(); err != nil {
logger.L().Ctx(ctx).Warning("failed to load exceptions", helpers.Error(err))
}
logger.L().StopSuccess("Loaded exceptions")
logger.L().Start("Loading account configurations")
// get account configuration
controlsInputs, err := policyHandler.getters.ControlsInputsGetter.GetControlsInputs(cautils.ClusterName)
if err == nil {
policiesAndResources.RegoInputData.PostureControlInputs = controlsInputs
} else {
if controlInputs, err = policyHandler.getControlInputs(); err != nil {
logger.L().Ctx(ctx).Warning(err.Error())
}
cautils.StopSpinner()
logger.L().Success("Downloaded/Loaded policy")
return nil
logger.L().StopSuccess("Loaded account configurations")
return policies, exceptions, controlInputs, nil
}
// getScanPolicies gets policies from the cache or downloads them. It returns an error if the policies could not be downloaded.
func (policyHandler *PolicyHandler) getScanPolicies(ctx context.Context, policyIdentifier []cautils.PolicyIdentifier) ([]reporthandling.Framework, error) {
policyIdentifiersSlice := policyIdentifierToSlice(policyIdentifier)
// check if policies are cached
if cachedPolicies, policiesExist := policyHandler.cachedFrameworks.Get(); policiesExist {
// check if the cached policies are the same as the requested policies, otherwise download the policies
if cachedIdentifiers, identifiersExist := policyHandler.cachedPolicyIdentifiers.Get(); identifiersExist && cautils.StringSlicesAreEqual(cachedIdentifiers, policyIdentifiersSlice) {
logger.L().Info("Using cached policies")
return deepCopyPolicies(cachedPolicies)
}
logger.L().Debug("Cached policies are not the same as the requested policies")
policyHandler.cachedPolicyIdentifiers.Invalidate()
policyHandler.cachedFrameworks.Invalidate()
}
policies, err := policyHandler.downloadScanPolicies(ctx, policyIdentifier)
if err == nil {
policyHandler.cachedFrameworks.Set(policies)
policyHandler.cachedPolicyIdentifiers.Set(policyIdentifiersSlice)
}
return policies, err
}
func deepCopyPolicies(src []reporthandling.Framework) ([]reporthandling.Framework, error) {
data, err := json.Marshal(src)
if err != nil {
return nil, err
}
var dst []reporthandling.Framework
err = json.Unmarshal(data, &dst)
if err != nil {
return nil, err
}
return dst, nil
}
func (policyHandler *PolicyHandler) downloadScanPolicies(ctx context.Context, policyIdentifier []cautils.PolicyIdentifier) ([]reporthandling.Framework, error) {
frameworks := []reporthandling.Framework{}
switch getScanKind(policyIdentifier) {
@@ -102,10 +188,30 @@ func (policyHandler *PolicyHandler) getScanPolicies(ctx context.Context, policyI
return frameworks, nil
}
func policyIdentifierToSlice(rules []cautils.PolicyIdentifier) []string {
s := []string{}
for i := range rules {
s = append(s, fmt.Sprintf("%s: %s", rules[i].Kind, rules[i].Identifier))
func (policyHandler *PolicyHandler) getExceptions() ([]armotypes.PostureExceptionPolicy, error) {
if cachedExceptions, exist := policyHandler.cachedExceptions.Get(); exist {
logger.L().Info("Using cached exceptions")
return cachedExceptions, nil
}
return s
exceptions, err := policyHandler.getters.ExceptionsGetter.GetExceptions(cautils.ClusterName)
if err == nil {
policyHandler.cachedExceptions.Set(exceptions)
}
return exceptions, err
}
func (policyHandler *PolicyHandler) getControlInputs() (map[string][]string, error) {
if cachedControlInputs, exist := policyHandler.cachedControlInputs.Get(); exist {
logger.L().Info("Using cached control inputs")
return cachedControlInputs, nil
}
controlInputs, err := policyHandler.getters.ControlsInputsGetter.GetControlsInputs(cautils.ClusterName)
if err == nil {
policyHandler.cachedControlInputs.Set(controlInputs)
}
return controlInputs, err
}
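Note that getPoliciesCacheTtl, called from NewPolicyHandler above, is not part of this fragment. A plausible sketch, assuming the TTL is given in seconds via POLICIES_CACHE_TTL and that an unset or non-positive value disables caching (cautils.ParseIntEnvVar is the same helper used by isLargeCluster):

func getPoliciesCacheTtl() time.Duration {
	// caching is disabled (TTL 0) unless the env var is set to a positive integer
	seconds, err := cautils.ParseIntEnvVar(PoliciesCacheTtlEnvVar, 0)
	if err != nil || seconds <= 0 {
		return 0
	}
	return time.Duration(seconds) * time.Second
}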

Some files were not shown because too many files have changed in this diff