Compare commits

...

175 Commits

Author SHA1 Message Date
David Wertenteil
55dbafb9b0 Merge branch 'master' of github.com:dwertent/kubescape 2022-08-01 10:03:31 +03:00
David Wertenteil
a6c19bc286 adding pprof 2022-08-01 09:49:50 +03:00
David Wertenteil
c7450adc77 Merge remote-tracking branch 'armosec/dev' 2022-08-01 09:06:39 +03:00
David Wertenteil
54b502629f Merge branch 'dev' into master 2022-07-31 18:07:10 +03:00
David Wertenteil
8a25d0d293 Merge pull request #576 from amirmalka/dev
helm chart name fix
2022-07-31 18:01:03 +03:00
David Wertenteil
a4af46fcf9 cleaning the readme 2022-07-31 17:35:44 +03:00
David Wertenteil
e9d3b573b3 adding heap api 2022-07-31 15:47:11 +03:00
Amir Malka
32922c6263 helm chart name fix 2022-07-25 15:25:07 +03:00
Moshe Rappaport
5abca6711e Merge pull request #574 from Moshe-Rappaport-CA/dev
Fix get account credentials from ENV
2022-07-25 10:43:41 +03:00
David Wertenteil
875b98415b Merge pull request #571 from amirmalka/dev
Support windows build
2022-07-25 10:26:48 +03:00
Moshe-Rappaport-CA
d577b1a135 Merge remote-tracking branch 'upstream/dev' into dev 2022-07-25 10:20:18 +03:00
David Wertenteil
2cd52e43b0 Merge pull request #573 from amirmalka/include-helm-chart-name
include helm chart name
2022-07-25 09:09:43 +03:00
Moshe-Rappaport-CA
69bdc358eb Fix support account from ENV 2022-07-24 15:52:10 +03:00
Amir Malka
2b2034f2da include helm chart name 2022-07-24 15:10:45 +03:00
Amir Malka
fb114a17a3 Support windows build 2022-07-21 18:51:37 +03:00
David Wertenteil
948681b82e Merge pull request #569 from dwertent/master
Remove URLs from python build file
2022-07-19 11:15:14 +03:00
David Wertenteil
5bd532dd57 remove URLs from python build file 2022-07-19 10:50:46 +03:00
David Wertenteil
aef74d6480 use exteranl go logger 2022-07-18 23:59:56 +03:00
David Wertenteil
7b38b5dc96 Merge pull request #566 from dwertent/master
Fixed repo scanning relative path scanning
2022-07-18 19:00:03 +03:00
David Wertenteil
9f5d9fe36b update go sum 2022-07-18 18:50:15 +03:00
David Wertenteil
643d0620d7 Support relative paths in repo scanning 2022-07-18 18:22:49 +03:00
David Wertenteil
8ecc1839a0 Merge remote-tracking branch 'armosec/dev' 2022-07-18 17:31:16 +03:00
David Wertenteil
31aeae8bd1 Merge pull request #565 from amirmalka/dev
added commit information per file
2022-07-18 17:02:12 +03:00
Amir Malka
26bbcae0bd updated README 2022-07-18 16:44:59 +03:00
Amir Malka
0feca50ebb fix README 2022-07-18 15:39:01 +03:00
Amir Malka
895f330e14 Replaced install_dependencie with Makefile, updated readme 2022-07-18 13:42:58 +03:00
Amir Malka
30f454de08 updated workflow to run also on PR to dev 2022-07-18 11:01:43 +03:00
Amir Malka
8347fa7874 update build 2022-07-17 18:56:20 +03:00
Amir Malka
6b5d335519 update submodule commit 2022-07-17 18:53:17 +03:00
Amir Malka
44f0473a09 fix tests 2022-07-17 18:35:33 +03:00
Amir Malka
a6bae01476 missing tag for go test 2022-07-17 17:58:37 +03:00
Amir Malka
c356246f82 update build process with libgit2 2022-07-17 17:48:19 +03:00
Amir Malka
a6d9badc5f remove file commit information for windows 2022-07-17 12:12:40 +03:00
Amir Malka
5bf179810b use git2go to get file commit information 2022-07-17 10:08:09 +03:00
David Wertenteil
fbb75d6dd1 update go deps 2022-07-12 09:34:13 +03:00
David Wertenteil
ac9db6706c Merge pull request #563 from armosec/decrease-max-report-size
Decrease max report size
2022-07-11 16:27:22 +03:00
Amir Malka
9b489f1e5c Decrease max report size
align max report size with BE configuration
2022-07-11 14:56:29 +03:00
David Wertenteil
ab788eaaa2 Merge pull request #562 from dwertent/master
Fixed URLs
2022-07-10 14:02:10 +03:00
David Wertenteil
826106090b update URL 2022-07-10 14:01:02 +03:00
David Wertenteil
e6e9a74766 fixed tests 2022-07-10 13:57:46 +03:00
David Wertenteil
5fee3efb35 Merge remote-tracking branch 'armosec/dev' 2022-07-10 13:46:34 +03:00
David Wertenteil
a3d77a76aa printing error when failed to load exceptions 2022-07-10 09:53:34 +03:00
David Wertenteil
3a958294f3 Merge pull request #555 from Moshe-Rappaport-CA/dev
Working with worker pool in host sensor
2022-07-07 11:30:45 +03:00
Moshe-Rappaport-CA
086a518a53 Refactor for code aesthetics 2022-07-07 10:38:19 +03:00
David Wertenteil
705fabb32b update dev and stage urls 2022-07-07 07:59:52 +03:00
Moshe-Rappaport-CA
3d37a6ac2f Modify the ScanningTarget to be a cluster or file 2022-07-06 17:08:03 +03:00
Moshe-Rappaport-CA
dd79e348d3 go mod tidy 2022-07-06 11:56:24 +03:00
Moshe-Rappaport-CA
a913d3eb32 Merge remote-tracking branch 'upstream/dev' into dev 2022-07-06 11:40:50 +03:00
Moshe-Rappaport-CA
f32049bdb3 Change "[info] Scanning. cluster:” to be dynamic by targetScan 2022-07-06 11:40:13 +03:00
David Wertenteil
bbcc7a502d Merge pull request #545 from slashben/dev
Replacing the documentation links to the new URL
2022-07-06 11:12:46 +03:00
David Wertenteil
1a94004de4 Merge pull request #557 from dwertent/master
Fixed windows support, update limit size
2022-07-05 16:27:18 +03:00
David Wertenteil
424f2cc403 update report size limit to 4mb 2022-07-05 15:13:02 +03:00
Moshe-Rappaport-CA
bae960fd5b Edit the noOfWorkers to be the minimum between the number of pods and 10 2022-07-05 14:38:58 +03:00
David Wertenteil
eab77a9e61 Merge pull request #556 from dwertent/master
Helm scanning support
2022-07-05 12:16:18 +03:00
David Wertenteil
fc78d9143b use filepath join in unitests 2022-07-05 11:44:07 +03:00
David Wertenteil
034dbca30c update go mod 2022-07-05 11:20:44 +03:00
David Wertenteil
a41adc6c9e update readme 2022-07-05 11:13:39 +03:00
David Wertenteil
bd170938c5 update readme 2022-07-05 11:12:58 +03:00
David Wertenteil
e91a73a32e validate no workloads found 2022-07-05 11:11:17 +03:00
David Wertenteil
099886e1bb mixed merge 2022-07-05 10:34:33 +03:00
David Wertenteil
c05dc8d7ae Merge pull request #548 from amirmalka/dev
helm chart scanning
2022-07-05 09:53:55 +03:00
David Wertenteil
3cebfb3065 Merge branch 'dev' into dev 2022-07-05 09:51:32 +03:00
Moshe-Rappaport-CA
4e9f4a8010 Merge remote-tracking branch 'upstream/dev' into dev 2022-07-03 15:52:42 +03:00
Moshe-Rappaport-CA
94d99da821 Ignore a case where the chan is closed 2022-07-03 15:51:07 +03:00
Ben Hirschberg
ee1b596358 Merge branch 'armosec:dev' into dev 2022-06-30 22:19:02 +03:00
David Wertenteil
ed5abd5791 Merge pull request #553 from dwertent/master
Handling edge cases when scanning files
2022-06-30 19:49:43 +03:00
David Wertenteil
898b847211 fixed printer 2022-06-30 19:41:29 +03:00
David Wertenteil
889dd15772 handke invalide files 2022-06-30 19:33:43 +03:00
Moshe-Rappaport-CA
81f0cecb79 Merge remote-tracking branch 'upstream/dev' into dev 2022-06-29 11:02:11 +03:00
David Wertenteil
d46d77411b Merge pull request #549 from armosec/fileutils-update
remove error when reading yaml
2022-06-29 09:09:27 +03:00
Amir Malka
ee4f4d8af1 remove error when reading yaml
Remove an error if we try to read YAML file which is not a map[string]interface{}
2022-06-28 11:31:11 +03:00
Amir Malka
ea1426a24b helm chart scanning 2022-06-27 17:28:25 +03:00
Moshe-Rappaport-CA
120677a91f support in wokerpool in host sensor 2022-06-22 18:40:00 +03:00
shm12
bd78e4c4de Merge pull request #546 from shm12/dev
New host sensor resources
2022-06-22 13:44:07 +03:00
shm12
e3f70b6cd6 Added host sensor new resources 2022-06-21 17:55:18 +03:00
Benyamin Hirschberg
616712cf79 Replacing the documentation links to the new URL 2022-06-20 17:59:21 +03:00
David Wertenteil
a5007df1bc Merge pull request #544 from dwertent/master
fixed docker version
2022-06-19 14:43:49 +03:00
David Wertenteil
d6720b67ed fixed docker version 2022-06-19 14:40:48 +03:00
Amir Malka
2261fd6adb Merge pull request #541 from dwertent/master
Final fixes
2022-06-19 14:14:01 +03:00
David Wertenteil
9334ad6991 fixed typo 2022-06-19 14:11:04 +03:00
David Wertenteil
7b5e4143c3 fixed test for win 2022-06-19 12:47:42 +03:00
David Wertenteil
e63e5502cd fixed test 2022-06-19 11:55:47 +03:00
David Wertenteil
154794e774 fixed test 2022-06-19 11:46:43 +03:00
David Wertenteil
4aa71725dd ignore empty file 2022-06-19 11:19:29 +03:00
David Wertenteil
9bd2e7fea4 upgrade go version 2022-06-19 09:05:32 +03:00
David Wertenteil
e6d3e7d7da fixed test 2022-06-19 08:55:56 +03:00
David Wertenteil
35bb15b5df Merge pull request #540 from 06kellyjac/update_and_install_details
update deps and add nix/nixos install details
2022-06-19 08:55:04 +03:00
David Wertenteil
e54d61fd87 Merge pull request #532 from vladklokun/print-results-to-html
feat: Print results to html
2022-06-19 08:48:30 +03:00
06kellyjac
f28b2836c7 add nixos/nix and go install instructions 2022-06-17 18:44:58 +01:00
06kellyjac
d196f1f327 update dependencies 2022-06-17 18:41:53 +01:00
David Wertenteil
995f90ca53 update pkg 2022-06-16 16:41:25 +03:00
David Wertenteil
c1da380c9b fixed test 2022-06-16 12:13:55 +03:00
David Wertenteil
77f77b8c7d update opa pkg 2022-06-16 12:06:52 +03:00
David Wertenteil
b3c1aec461 fixed unitests 2022-06-15 13:43:45 +03:00
David Wertenteil
ef242b52bb adding file path to wl 2022-06-15 13:38:20 +03:00
Vlad Klokun
af1d5694dc feat: add HTML as an output for scan results 2022-06-14 16:01:07 +03:00
David Wertenteil
a5ac47ff6d Merge pull request #536 from dwertent/master
fixed go mod
2022-06-13 16:13:18 +03:00
David Wertenteil
b03a4974c4 fixed go mod 2022-06-13 15:56:46 +03:00
David Wertenteil
8385cd0bd7 Merge pull request #535 from dwertent/master
fixed build files
2022-06-12 17:40:25 +03:00
David Wertenteil
ca67aa7f5f fixed build files 2022-06-12 17:37:30 +03:00
David Wertenteil
9b9ed514c8 Merge pull request #530 from dwertent/master
beta URL support
2022-06-12 17:34:59 +03:00
David Wertenteil
11b7f6ab2f do not submit invalide account ID 2022-06-12 17:31:44 +03:00
David Wertenteil
021f2074b8 validating slice length 2022-06-12 13:54:45 +03:00
shm12
c1ba2d4b3c Added gitignore for vscode git history files 2022-06-09 16:43:49 +03:00
shm12
141ad17ece Fixed web URL in git repo scanning 2022-06-09 16:42:39 +03:00
shm12
74c81e2270 Fixed relative path in git repo scan 2022-06-09 16:42:03 +03:00
shm12
7bb124b6fe Fix backward competability of file scanning 2022-06-09 15:53:28 +03:00
shm12
8a8ff10b19 Added default pattern, and type filtering in listFiles 2022-06-09 14:55:00 +03:00
shm12
1eef32dd8e Use clone in remote git repositories 2022-06-09 14:53:48 +03:00
David Wertenteil
d7b5dd416d ignore last commit 2022-06-09 10:42:58 +03:00
David Wertenteil
536a94de45 adding git data to file 2022-06-08 16:26:02 +03:00
David Wertenteil
d8ef471eb2 support installation of a fixed version 2022-06-08 13:28:48 +03:00
David Wertenteil
ff07a80078 moved testdata repo 2022-06-07 18:10:32 +03:00
David Wertenteil
310d31a3b1 beta url support 2022-06-07 18:08:38 +03:00
David Wertenteil
8a1ef7da87 submit git scanning 2022-06-07 17:39:02 +03:00
David Wertenteil
c142779ee8 adding client build 2022-06-07 09:20:32 +03:00
David Wertenteil
640f366c7e adding grafana dashboard 2022-05-31 15:58:59 +03:00
David Wertenteil
9f36c1d6de supporting github.repository_owner 2022-05-29 08:57:56 +03:00
David Wertenteil
b3c8c078a8 Merge pull request #519 from amirmalka/dev
implemented LocalGitRepository for working with a local git folder
2022-05-29 08:43:45 +03:00
David Wertenteil
3ff2b0d6ff Merge pull request #523 from dwertent/master
Support client ID and secret key flags
2022-05-26 11:50:35 +03:00
David Wertenteil
35b2b350a0 print submit error 2022-05-26 11:42:24 +03:00
David Wertenteil
046ea1d79f support secret key and account ID from cmd 2022-05-26 11:01:26 +03:00
Rotem Refael
3081508863 Merge pull request #522 from dwertent/master
Adding http request logs
2022-05-25 17:56:50 +03:00
David Wertenteil
4a757c1bf1 adding logs 2022-05-25 17:26:12 +03:00
David Wertenteil
dec4bcca00 Merge pull request #521 from dwertent/master
Do not submit results every scan with Prometheus
2022-05-24 10:18:20 +03:00
Amir Malka
5443039b8c updated commit date to time.Time and added length checking for remote URLs 2022-05-24 10:09:04 +03:00
David Wertenteil
95e68f49f3 do not submit results every scan with Prometheus 2022-05-24 09:33:40 +03:00
Amir Malka
7e90956b50 implemented LocalGitRepository for working with a local git folder 2022-05-23 17:11:17 +03:00
rcohencyberarmor
0c84c8f1f3 Merge pull request #518 from dwertent/master
Image vuln data integration
2022-05-23 10:51:31 +03:00
David Wertenteil
56b3239e30 loading from file fallback 2022-05-23 10:10:41 +03:00
David Wertenteil
f8e85941da update loading customer config 2022-05-23 09:47:40 +03:00
David Wertenteil
15081aa9c3 update auth url 2022-05-22 15:45:55 +03:00
David Wertenteil
ac03a2bda3 load data from config.json 2022-05-22 15:21:06 +03:00
rcohencyberarmor
b7ffa22f3a Merge pull request #517 from dwertent/master
httphandler using channel for queueing requests
2022-05-22 11:22:31 +03:00
David Wertenteil
ac5e7069da update readme 2022-05-22 10:59:48 +03:00
David Wertenteil
5a83f38bca send prometheus triggering to queue 2022-05-22 09:57:30 +03:00
David Wertenteil
3abd59e290 use channels for triggering scan 2022-05-19 14:18:11 +03:00
David Wertenteil
d08fdf2e9e update status busy to support more than one req 2022-05-19 12:01:28 +03:00
David Wertenteil
bad2f54e72 Merge pull request #515 from dwertent/master
fixed triggerd all frameworks
2022-05-18 17:29:55 +03:00
David Wertenteil
fc9b713851 fixed triggerd all frameworks 2022-05-18 17:28:46 +03:00
rcohencyberarmor
245200840d Merge pull request #514 from dwertent/master
using Buildx in githubactions
2022-05-18 16:11:47 +03:00
David Wertenteil
3f87610e8c using Buildx in githubactions 2022-05-18 16:09:29 +03:00
David Wertenteil
c285cb1bcc Merge pull request #513 from dwertent/master
REST api support
2022-05-18 15:16:00 +03:00
David Wertenteil
63968b564b update k8s-interface pkg 2022-05-18 14:36:55 +03:00
David Wertenteil
e237c48186 merged 2022-05-18 14:24:53 +03:00
David Wertenteil
622b121535 adding scan request log 2022-05-18 13:22:33 +03:00
Bezbran
20774d4a40 Merge pull request #510 from Daniel-GrunbergerCA/master
Set number of worker nodes based on scheduable nodes (based on taints) & set status to 'skipped' when there are no image vulns
2022-05-18 09:40:51 +03:00
DanielGrunbergerCA
7bb6bb85ec go mod 2022-05-18 09:35:58 +03:00
DanielGrunbergerCA
da908a84bc update k8s-iface for http handler 2022-05-18 09:35:07 +03:00
DanielGrunbergerCA
b515e259c0 Merge remote-tracking branch 'upstream/dev' 2022-05-18 09:33:38 +03:00
DanielGrunbergerCA
facd551518 update k8s-interface version 2022-05-18 09:33:13 +03:00
David Wertenteil
0fc569d9d9 fixed import 2022-05-18 00:35:45 +03:00
David Wertenteil
da27a27ad5 adding status rest api 2022-05-18 00:34:15 +03:00
DanielGrunbergerCA
5d4a20f622 fix test 2022-05-17 16:01:03 +03:00
DanielGrunbergerCA
70b15a373b unit test for isEmptyImgVulns 2022-05-17 15:32:49 +03:00
DanielGrunbergerCA
01353f81b3 unit test for isMaterNodeTaints 2022-05-16 17:29:29 +03:00
DanielGrunbergerCA
22f10b6581 go mod 2022-05-16 17:02:55 +03:00
DanielGrunbergerCA
785178ffb1 show skipped for scan without imgvuln 2022-05-16 16:55:37 +03:00
DanielGrunbergerCA
f9b5c58402 pull worker nodes based on taints 2022-05-16 16:36:00 +03:00
David Wertenteil
8ed6d63ce5 Merge pull request #509 from Daniel-GrunbergerCA/fix-eks
Fix eks and support http for all endpoints
2022-05-16 15:22:49 +03:00
DanielGrunbergerCA
990a7c2052 update go mod 2022-05-16 14:28:09 +03:00
DanielGrunbergerCA
09b0c09472 support http and https for all endpoints 2022-05-16 14:13:04 +03:00
DanielGrunbergerCA
f83c38b58e update k8s-interface 2022-05-16 11:51:55 +03:00
DanielGrunbergerCA
51e600797a Merge remote-tracking branch 'upstream/dev' into fix-eks 2022-05-16 11:43:40 +03:00
David Wertenteil
39d6d1fd26 Merge pull request #506 from dwertent/master
Adding `view` flag
2022-05-11 23:11:48 +03:00
David Wertenteil
2dff63b101 rm format-vers flag from examples 2022-05-11 08:22:31 +03:00
David Wertenteil
b928892f0a erge remote-tracking branch 'armosec/dev' 2022-05-11 08:20:32 +03:00
David Wertenteil
c0188ea51d Merge pull request #505 from amirmalka/dev
Updated readme - Lens extension
2022-05-10 15:48:54 +03:00
DanielGrunbergerCA
6382edeb6e Merge remote-tracking branch 'upstream/dev' 2022-05-10 09:05:18 +03:00
Amir Malka
61d6c2dd1f Updated readme - Lens extension 2022-05-09 18:03:01 +03:00
Amir Malka
44194f0b4e Updated readme - Lens extension 2022-05-09 16:46:50 +03:00
DanielGrunbergerCA
7103c7d32c fix url 2022-05-04 11:11:29 +03:00
DanielGrunbergerCA
b4e1663cd1 make parse func 2022-05-03 16:24:19 +03:00
shm12
47412c89ca Merge pull request #501 from dwertent/master
Report ks version
2022-05-03 14:45:56 +03:00
DanielGrunbergerCA
60ec6e8294 support env with http 2022-05-03 12:58:33 +03:00
135 changed files with 6727 additions and 2135 deletions


@@ -28,29 +28,76 @@ jobs:
matrix:
os: [ubuntu-latest, macos-latest, windows-latest]
steps:
- uses: actions/checkout@v1
- name: Set up Go
uses: actions/setup-go@v2
- uses: actions/checkout@v3
with:
go-version: 1.17
submodules: recursive
- name: Cache Go modules (Linux)
if: matrix.os == 'ubuntu-latest'
uses: actions/cache@v3
with:
path: |
~/.cache/go-build
~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: Cache Go modules (macOS)
if: matrix.os == 'macos-latest'
uses: actions/cache@v3
with:
path: |
~/Library/Caches/go-build
~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: Cache Go modules (Windows)
if: matrix.os == 'windows-latest'
uses: actions/cache@v3
with:
path: |
~\AppData\Local\go-build
~\go\pkg\mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version: 1.18
# - name: Test cmd pkg
# run: cd cmd && go test -v ./...
- name: Install MSYS2 & libgit2 (Windows)
shell: cmd
run: .\build.bat all
if: matrix.os == 'windows-latest'
- name: Install libgit2 (Linux/macOS)
run: make libgit2
if: matrix.os != 'windows-latest'
- name: Test core pkg
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: go test -v ./...
run: go test -tags=static -v ./...
- name: Test httphandler pkg
run: cd httphandler && go test -v ./...
run: cd httphandler && go test -tags=static -v ./...
- name: Build
env:
RELEASE: v2.0.${{ github.run_number }}
CLIENT: release
ArmoBEServer: api.armo.cloud
ArmoAuthServer: auth.armo.cloud
ArmoERServer: report.armo.cloud
ArmoWebsite: portal.armo.cloud
CGO_ENABLED: 0
CGO_ENABLED: 1
run: python3 --version && python3 build.py
- name: Smoke Testing
@@ -92,6 +139,8 @@ jobs:
steps:
- uses: actions/checkout@v2
with:
submodules: recursive
- name: Set image version
id: image-version
@@ -101,28 +150,28 @@ jobs:
id: image-name
run: echo '::set-output name=IMAGE_NAME::quay.io/${{ github.repository_owner }}/kubescape'
- name: Build the Docker image
run: docker build . --file build/Dockerfile --tag ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }} --build-arg image_version=${{ steps.image-version.outputs.IMAGE_VERSION }}
- name: Re-Tag Image to latest
run: docker tag ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }} ${{ steps.image-name.outputs.IMAGE_NAME }}:latest
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Quay.io
env: # Or as an environment variable
env:
QUAY_PASSWORD: ${{ secrets.QUAYIO_REGISTRY_PASSWORD }}
QUAY_USERNAME: ${{ secrets.QUAYIO_REGISTRY_USERNAME }}
run: docker login -u="${QUAY_USERNAME}" -p="${QUAY_PASSWORD}" quay.io
- name: Build the Docker image
run: docker buildx build . --file build/Dockerfile --tag ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }} --tag ${{ steps.image-name.outputs.IMAGE_NAME }}:latest --build-arg image_version=${{ steps.image-version.outputs.IMAGE_VERSION }} --build-arg client=image-release --push --platform linux/amd64,linux/arm64
# - name: Login to GitHub Container Registry
# uses: docker/login-action@v1
# with:
# registry: ghcr.io
# username: ${{ github.actor }}
# password: ${{ secrets.GITHUB_TOKEN }}
- name: Push Docker image
run: |
docker push ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }}
docker push ${{ steps.image-name.outputs.IMAGE_NAME }}:latest
# TODO - Wait for casign to support fixed tags -> https://github.com/sigstore/cosign/issues/1424
# - name: Install cosign
# uses: sigstore/cosign-installer@main


@@ -11,12 +11,47 @@ jobs:
matrix:
os: [ubuntu-latest, macos-latest, windows-latest]
steps:
- uses: actions/checkout@v1
- uses: actions/checkout@v3
with:
submodules: recursive
- name: Cache Go modules (Linux)
if: matrix.os == 'ubuntu-latest'
uses: actions/cache@v3
with:
path: |
~/.cache/go-build
~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: Cache Go modules (macOS)
if: matrix.os == 'macos-latest'
uses: actions/cache@v3
with:
path: |
~/Library/Caches/go-build
~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: Cache Go modules (Windows)
if: matrix.os == 'windows-latest'
uses: actions/cache@v3
with:
path: |
~\AppData\Local\go-build
~\go\pkg\mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: Set up Go
uses: actions/setup-go@v2
uses: actions/setup-go@v3
with:
go-version: 1.17
go-version: 1.18
# - name: Test cmd pkg
# run: cd cmd && go test -v ./...
@@ -29,22 +64,32 @@ jobs:
# - name: Test cmd pkg
# run: cd cmd && go test -v ./...
- name: Install MSYS2 & libgit2 (Windows)
shell: cmd
run: .\build.bat all
if: matrix.os == 'windows-latest'
- name: Install libgit2 (Linux/macOS)
run: make libgit2
if: matrix.os != 'windows-latest'
- name: Test core pkg
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: go test -v ./...
run: go test -tags=static -v ./...
- name: Test httphandler pkg
run: cd httphandler && go test -v ./...
run: cd httphandler && go test -tags=static -v ./...
- name: Build
env:
RELEASE: v2.0.${{ github.run_number }}
CLIENT: release-dev
ArmoBEServer: api.armo.cloud
ArmoAuthServer: auth.armo.cloud
ArmoERServer: report.armo.cloud
ArmoWebsite: portal.armo.cloud
CGO_ENABLED: 0
CGO_ENABLED: 1
run: python3 --version && python3 build.py
- name: Smoke Testing
@@ -71,6 +116,8 @@ jobs:
steps:
- uses: actions/checkout@v2
with:
submodules: recursive
- name: Set image version
id: image-version
@@ -80,21 +127,17 @@ jobs:
id: image-name
run: echo '::set-output name=IMAGE_NAME::quay.io/${{ github.repository_owner }}/kubescape'
- name: Build the Docker image
run: docker build . --file build/Dockerfile --tag ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }} --build-arg image_version=${{ steps.image-version.outputs.IMAGE_VERSION }}
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Quay.io
env:
QUAY_PASSWORD: ${{ secrets.QUAYIO_REGISTRY_PASSWORD }}
QUAY_USERNAME: ${{ secrets.QUAYIO_REGISTRY_USERNAME }}
run: docker login -u="${QUAY_USERNAME}" -p="${QUAY_PASSWORD}" quay.io
# - name: Login to GitHub Container Registry
# uses: docker/login-action@v1
# with:
# registry: ghcr.io
# username: ${{ github.actor }}
# password: ${{ secrets.GITHUB_TOKEN }}
- name: Push Docker image
run: |
docker push ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }}
- name: Build the Docker image
run: docker buildx build . --file build/Dockerfile --tag ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }} --build-arg image_version=${{ steps.image-version.outputs.IMAGE_VERSION }} --build-arg client=image-dev --push --platform linux/amd64,linux/arm64


@@ -1,48 +0,0 @@
name: master-pr
on:
pull_request:
branches: [ master ]
types: [ edited, opened, synchronize, reopened ]
jobs:
build:
name: Create cross-platform build
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ubuntu-latest, macos-latest, windows-latest]
steps:
- uses: actions/checkout@v1
- name: Set up Go
uses: actions/setup-go@v2
with:
go-version: 1.17
# - name: Test cmd pkg
# run: cd cmd && go test -v ./...
- name: Test core pkg
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: go test -v ./...
- name: Test httphandler pkg
run: cd httphandler && go test -v ./...
- name: Build
env:
RELEASE: v2.0.${{ github.run_number }}
ArmoBEServer: api.armo.cloud
ArmoAuthServer: auth.armo.cloud
ArmoERServer: report.armo.cloud
ArmoWebsite: portal.armo.cloud
CGO_ENABLED: 0
run: python3 --version && python3 build.py
- name: Smoke Testing
env:
RELEASE: v2.0.${{ github.run_number }}
KUBESCAPE_SKIP_UPDATE_CHECK: "true"
run: python3 smoke_testing/init.py ${PWD}/build/${{ matrix.os }}/kubescape

.github/workflows/pr_checks.yaml (new file)

@@ -0,0 +1,93 @@
name: pr-checks
on:
pull_request:
branches: [ master, dev ]
types: [ edited, opened, synchronize, reopened ]
jobs:
build:
name: Create cross-platform build
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ubuntu-latest, macos-latest, windows-latest]
steps:
- uses: actions/checkout@v3
with:
submodules: recursive
- name: Cache Go modules (Linux)
if: matrix.os == 'ubuntu-latest'
uses: actions/cache@v3
with:
path: |
~/.cache/go-build
~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: Cache Go modules (macOS)
if: matrix.os == 'macos-latest'
uses: actions/cache@v3
with:
path: |
~/Library/Caches/go-build
~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: Cache Go modules (Windows)
if: matrix.os == 'windows-latest'
uses: actions/cache@v3
with:
path: |
~\AppData\Local\go-build
~\go\pkg\mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version: 1.18
- name: Install MSYS2 & libgit2 (Windows)
shell: cmd
run: .\build.bat all
if: matrix.os == 'windows-latest'
- name: Install libgit2 (Linux/macOS)
run: make libgit2
if: matrix.os != 'windows-latest'
# - name: Test cmd pkg
# run: cd cmd && go test -v ./...
- name: Test core pkg
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: go test -tags=static -v ./...
- name: Test httphandler pkg
run: cd httphandler && go test -tags=static -v ./...
- name: Build
env:
RELEASE: v2.0.${{ github.run_number }}
CLIENT: test
ArmoBEServer: api.armo.cloud
ArmoAuthServer: auth.armo.cloud
ArmoERServer: report.armo.cloud
ArmoWebsite: portal.armo.cloud
CGO_ENABLED: 1
run: python3 --version && python3 build.py
- name: Smoke Testing
env:
RELEASE: v2.0.${{ github.run_number }}
KUBESCAPE_SKIP_UPDATE_CHECK: "true"
run: python3 smoke_testing/init.py ${PWD}/build/${{ matrix.os }}/kubescape

.gitignore

@@ -4,4 +4,5 @@
*vender*
*.pyc*
.idea
.history
ca.srl

.gitmodules (new file)

@@ -0,0 +1,3 @@
[submodule "git2go"]
path = git2go
url = https://github.com/libgit2/git2go.git

Makefile (new file)

@@ -0,0 +1,20 @@
.PHONY: test all build libgit2
# default task invoked while running make
all: libgit2 build
export CGO_ENABLED=1
# build and install libgit2
libgit2:
git submodule update --init --recursive
cd git2go; make install-static
# go build tags
TAGS = "static"
build:
go build -v -tags=$(TAGS) .
test:
go test -v -tags=$(TAGS) ./...
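
The `static` tag defined here (and passed as `-tags=static` by the CI workflows, `build.py`, and the VS Code settings shown later in the README diff) is an ordinary Go build constraint; git2go presumably uses it to select its statically linked libgit2 bindings. A minimal, hypothetical illustration of how such a tag gates a file (this is not the actual git2go source):

```go
//go:build static
// +build static

// Hypothetical illustration only: a file guarded by the "static" build tag.
// It compiles only when the build or test command passes -tags=static,
// which is why every target above and every CI test/build step in this
// changeset carries that flag.
package gitinfo

// StaticLibgit2 marks builds linked against the statically built libgit2.
const StaticLibgit2 = true
```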

README.md

@@ -28,7 +28,7 @@ Kubescape integrates natively with other DevOps tools, including Jenkins, Circle
# TL;DR
## Install:
```
```sh
curl -s https://raw.githubusercontent.com/armosec/kubescape/master/install.sh | /bin/bash
```
@@ -36,9 +36,13 @@ curl -s https://raw.githubusercontent.com/armosec/kubescape/master/install.sh |
[Install on macOS](#install-on-macos)
[Install on NixOS or Linux/macOS via nix](#install-on-nixos-or-with-nix-community)
[Install using Go](#install-using-go)
## Run:
```
kubescape scan --submit --enable-host-scan --format-version v2 --verbose
```sh
kubescape scan --submit --enable-host-scan --verbose
```
<img src="docs/summary.png">
@@ -70,7 +74,7 @@ Want to contribute? Want to discuss something? Have an issue?
# Options and examples
[Kubescape docs](https://hub.armo.cloud/docs)
[Kubescape docs](https://hub.armosec.io/docs)
## Playground
* [Kubescape playground](https://www.katacoda.com/pathaksaiyam/scenarios/kubescape)
@@ -85,6 +89,8 @@ Want to contribute? Want to discuss something? Have an issue?
* [Configure and run customized frameworks](https://youtu.be/12Sanq_rEhs)
* Customize controls configurations. [Kubescape CLI](https://youtu.be/955psg6TVu4), [Kubescape SaaS](https://youtu.be/lIMVSVhH33o)
<details><summary>Windows</summary>
## Install on Windows
**Requires powershell v5.0+**
@@ -98,15 +104,55 @@ Note: if you get an error you might need to change the execution policy (i.e. en
``` powershell
Set-ExecutionPolicy RemoteSigned -scope CurrentUser
```
</details>
<details><summary>MacOS</summary>
## Install on macOS
1. ```
1. ```sh
brew tap armosec/kubescape
```
2. ```
2. ```sh
brew install kubescape
```
</details>
<details><summary>Nix/NixOS</summary>
## Install on NixOS or with nix (Community)
Direct issues installing `kubescape` via `nix` through the channels mentioned [here](https://nixos.wiki/wiki/Support)
You can use `nix` on Linux or macOS and on other platforms unofficially.
Try it out in an ephemeral shell: `nix-shell -p kubescape`
Install declarative as usual
NixOS:
```nix
# your other config ...
environment.systemPackages = with pkgs; [
# your other packages ...
kubescape
];
```
home-manager:
```nix
# your other config ...
home.packages = with pkgs; [
# your other packages ...
kubescape
];
```
Or to your profile (not preferred): `nix-env --install -A nixpkgs.kubescape`
</details>
## Usage & Examples
@@ -118,7 +164,7 @@ Set-ExecutionPolicy RemoteSigned -scope CurrentUser
kubescape scan --submit --enable-host-scan --verbose
```
> Read [here](https://hub.armo.cloud/docs/host-sensor) more about the `enable-host-scan` flag
> Read [here](https://hub.armosec.io/docs/host-sensor) more about the `enable-host-scan` flag
#### Scan a running Kubernetes cluster with [`nsa`](https://www.nsa.gov/Press-Room/News-Highlights/Article/Article/2716980/nsa-cisa-release-kubernetes-hardening-guidance/) framework and submit results to the [Kubescape SaaS version](https://portal.armo.cloud/)
```
@@ -132,7 +178,7 @@ kubescape scan framework mitre --submit
```
#### Scan a running Kubernetes cluster with a specific control using the control name or control ID. [List of controls](https://hub.armo.cloud/docs/controls)
#### Scan a running Kubernetes cluster with a specific control using the control name or control ID. [List of controls](https://hub.armosec.io/docs/controls)
```
kubescape scan control "Privileged container"
```
@@ -147,14 +193,14 @@ kubescape scan --include-namespaces development,staging,production
kubescape scan --exclude-namespaces kube-system,kube-public
```
#### Scan local `yaml`/`json` files before deploying. [Take a look at the demonstration](https://youtu.be/Ox6DaR7_4ZI)
#### Scan local `yaml`/`json` files before deploying. [Take a look at the demonstration](https://youtu.be/Ox6DaR7_4ZI) Submit the results in case the directory is a git repo. [docs](https://hub.armosec.io/docs/repository-scanning)
```
kubescape scan *.yaml
kubescape scan *.yaml --submit
```
#### Scan kubernetes manifest files from a public github repository
#### Scan kubernetes manifest files from a git repository [and submit the results](https://hub.armosec.io/docs/repository-scanning)
```
kubescape scan https://github.com/armosec/kubescape
kubescape scan https://github.com/armosec/kubescape --submit
```
#### Display all scanned resources (including the resources who passed)
@@ -193,16 +239,11 @@ kubescape scan --format prometheus
kubescape scan --exceptions examples/exceptions/exclude-kube-namespaces.json
```
#### Scan Helm charts - Render the helm chart using [`helm template`](https://helm.sh/docs/helm/helm_template/) and pass to stdout
#### Scan Helm charts
```
helm template [NAME] [CHART] [flags] --dry-run | kubescape scan -
kubescape scan </path/to/directory> --submit
```
e.g.
```
helm template bitnami/mysql --generate-name --dry-run | kubescape scan -
```
> Kubescape will load the default values file
### Offline/Air-gaped Environment Support
@@ -238,36 +279,10 @@ kubescape scan framework nsa --use-from /path/nsa.json
```
## Scan Periodically using Helm - Contributed by [@yonahd](https://github.com/yonahd)
[Please follow the instructions here](https://hub.armo.cloud/docs/installation-of-armo-in-cluster)
## Scan Periodically using Helm
[Please follow the instructions here](https://hub.armosec.io/docs/installation-of-armo-in-cluster)
[helm chart repo](https://github.com/armosec/armo-helm)
## Scan using docker image
Official Docker image `quay.io/armosec/kubescape`
```
docker run -v "$(pwd)/example.yaml:/app/example.yaml quay.io/armosec/kubescape scan /app/example.yaml
```
If you wish, you can [build the docker image on your own](build/README.md)
# Submit data manually
Use the `submit` command if you wish to submit data manually
## Submit scan results manually
> Support forward compatibility by using the `--format-version v2` flag
First, scan your cluster using the `json` format flag: `kubescape scan framework <name> --format json --format-version v2 --output path/to/results.json`.
Now you can submit the results to the Kubescape SaaS version -
```
kubescape submit results path/to/results.json
```
# Integrations
## VS Code Extension
@@ -276,6 +291,113 @@ kubescape submit results path/to/results.json
Scan the YAML files while writing them using the [vs code extension](https://github.com/armosec/vscode-kubescape/blob/master/README.md)
## Lens Extension
View Kubescape scan results directly in [Lens IDE](https://k8slens.dev/) using kubescape [Lens extension](https://github.com/armosec/lens-kubescape/blob/master/README.md)
# Building Kubescape
<details><summary>Windows</summary>
## Windows
1. Install MSYS2 & build libgit _(needed only for the first time)_
```
build.bat all
```
> You can install MSYS2 separately by running `build.bat install` and build libgit2 separately by running `build.bat build`
2. Build kubescape
```
make build
```
OR
```
go build -tags=static .
```
</details>
<details><summary>Linux / MacOS</summary>
## Linux / MacOS
1. Install libgit2 dependency _(needed only for the first time)_
```
make libgit2
```
> `cmake` is required to build libgit2. You can install it by running `sudo apt-get install cmake` (Linux) or `brew install cmake` (macOS)
2. Build kubescape
```
make build
```
OR
```
go build -tags=static .
```
3. Test
```
make test
```
</details>
## VS code configuration samples
You can use the samples files below to setup your VS code environment for building and debugging purposes.
<details><summary>.vscode/settings.json</summary>
```json5
// .vscode/settings.json
{
"go.testTags": "static",
"go.buildTags": "static",
"go.toolsEnvVars": {
"CGO_ENABLED": "1"
}
}
```
</details>
<details><summary>.vscode/launch.json</summary>
```json5
// .vscode/launch.json
{
"version": "0.2.0",
"configurations": [
{
"name": "Launch Package",
"type": "go",
"request": "launch",
"mode": "auto",
"program": "${workspaceFolder}/main.go",
"args": [
"scan",
"--logger",
"debug"
],
"buildFlags": "-tags=static"
}
]
}
```
</details>
# Under the hood

build.bat (new file)

@@ -0,0 +1,51 @@
@ECHO OFF
IF "%1"=="install" goto Install
IF "%1"=="build" goto Build
IF "%1"=="all" goto All
IF "%1"=="" goto Error ELSE goto Error
:Install
if exist C:\MSYS64\ (
echo "MSYS2 already installed"
) else (
mkdir temp_install & cd temp_install
echo "Downloading MSYS2..."
curl -L https://github.com/msys2/msys2-installer/releases/download/2022-06-03/msys2-x86_64-20220603.exe > msys2-x86_64-20220603.exe
echo "Installing MSYS2..."
msys2-x86_64-20220603.exe install --root C:\MSYS64 --confirm-command
cd .. && rmdir /s /q temp_install
)
echo "Adding MSYS2 to path..."
SET "PATH=C:\MSYS64\mingw64\bin;C:\MSYS64\usr\bin;%PATH%"
echo %PATH%
echo "Installing MSYS2 packages..."
pacman -S --needed --noconfirm make
pacman -S --needed --noconfirm mingw-w64-x86_64-cmake
pacman -S --needed --noconfirm mingw-w64-x86_64-gcc
pacman -S --needed --noconfirm mingw-w64-x86_64-pkg-config
pacman -S --needed --noconfirm msys2-w32api-runtime
IF "%1"=="all" GOTO Build
GOTO End
:Build
SET "PATH=C:\MSYS2\mingw64\bin;C:\MSYS2\usr\bin;%PATH%"
make libgit2
GOTO End
:All
GOTO Install
:Error
echo "Error: Unknown option"
GOTO End
:End


@@ -5,79 +5,65 @@ import platform
import subprocess
BASE_GETTER_CONST = "github.com/armosec/kubescape/v2/core/cautils/getter"
BE_SERVER_CONST = BASE_GETTER_CONST + ".ArmoBEURL"
ER_SERVER_CONST = BASE_GETTER_CONST + ".ArmoERURL"
WEBSITE_CONST = BASE_GETTER_CONST + ".ArmoFEURL"
AUTH_SERVER_CONST = BASE_GETTER_CONST + ".armoAUTHURL"
def checkStatus(status, msg):
def check_status(status, msg):
if status != 0:
sys.stderr.write(msg)
exit(status)
def getBuildDir():
currentPlatform = platform.system()
buildDir = "./build/"
def get_build_dir():
current_platform = platform.system()
build_dir = "./build/"
if currentPlatform == "Windows": buildDir += "windows-latest"
elif currentPlatform == "Linux": buildDir += "ubuntu-latest"
elif currentPlatform == "Darwin": buildDir += "macos-latest"
else: raise OSError("Platform %s is not supported!" % (currentPlatform))
if current_platform == "Windows": build_dir += "windows-latest"
elif current_platform == "Linux": build_dir += "ubuntu-latest"
elif current_platform == "Darwin": build_dir += "macos-latest"
else: raise OSError("Platform %s is not supported!" % (current_platform))
return buildDir
return build_dir
def getPackageName():
packageName = "kubescape"
# if platform.system() == "Windows": packageName += ".exe"
def get_package_name():
package_name = "kubescape"
# if platform.system() == "Windows": package_name += ".exe"
return packageName
return package_name
def main():
print("Building Kubescape")
# print environment variables
# print(os.environ)
# Set some variables
packageName = getPackageName()
buildUrl = "github.com/armosec/kubescape/v2/core/cautils.BuildNumber"
releaseVersion = os.getenv("RELEASE")
ArmoBEServer = os.getenv("ArmoBEServer")
ArmoERServer = os.getenv("ArmoERServer")
ArmoWebsite = os.getenv("ArmoWebsite")
ArmoAuthServer = os.getenv("ArmoAuthServer")
package_name = get_package_name()
build_url = "github.com/armosec/kubescape/v2/core/cautils.BuildNumber"
release_version = os.getenv("RELEASE")
client_var = "github.com/armosec/kubescape/v2/core/cautils.Client"
client_name = os.getenv("CLIENT")
# Create build directory
buildDir = getBuildDir()
build_dir = get_build_dir()
ks_file = os.path.join(buildDir, packageName)
ks_file = os.path.join(build_dir, package_name)
hash_file = ks_file + ".sha256"
if not os.path.isdir(buildDir):
os.makedirs(buildDir)
if not os.path.isdir(build_dir):
os.makedirs(build_dir)
# Build kubescape
ldflags = "-w -s"
if releaseVersion:
ldflags += " -X {}={}".format(buildUrl, releaseVersion)
if ArmoBEServer:
ldflags += " -X {}={}".format(BE_SERVER_CONST, ArmoBEServer)
if ArmoERServer:
ldflags += " -X {}={}".format(ER_SERVER_CONST, ArmoERServer)
if ArmoWebsite:
ldflags += " -X {}={}".format(WEBSITE_CONST, ArmoWebsite)
if ArmoAuthServer:
ldflags += " -X {}={}".format(AUTH_SERVER_CONST, ArmoAuthServer)
if release_version:
ldflags += " -X {}={}".format(build_url, release_version)
if client_name:
ldflags += " -X {}={}".format(client_var, client_name)
build_command = ["go", "build", "-o", ks_file, "-ldflags" ,ldflags]
build_command = ["go", "build", "-tags=static", "-o", ks_file, "-ldflags" ,ldflags]
print("Building kubescape and saving here: {}".format(ks_file))
print("Build command: {}".format(" ".join(build_command)))
status = subprocess.call(build_command)
checkStatus(status, "Failed to build kubescape")
check_status(status, "Failed to build kubescape")
sha256 = hashlib.sha256()
with open(ks_file, "rb") as kube:
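
The `-X` pairs that `build.py` appends to `ldflags` target plain package-level variables under `core/cautils`; the Go linker overwrites their values at link time, which is how `RELEASE` and `CLIENT` reach the binary without being hard-coded. The declarations themselves are not part of this diff; they presumably look roughly like this:

```go
// Assumed sketch of the variables build.py injects via
//   -X github.com/armosec/kubescape/v2/core/cautils.BuildNumber=<RELEASE>
//   -X github.com/armosec/kubescape/v2/core/cautils.Client=<CLIENT>
// Only the import path and the two names come from build.py; the comments
// on possible values come from the CI workflows in this changeset.
package cautils

var (
	BuildNumber string // release version, e.g. v2.0.<run number>; used as the CLI Version in cmd/root
	Client      string // build channel, e.g. release, release-dev, test, image-release, image-dev
)
```

When the corresponding environment variables are unset, `build.py` simply omits the `-X` flags and the variables stay empty strings.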


@@ -1,23 +1,28 @@
FROM golang:1.17-alpine as builder
#ENV GOPROXY=https://goproxy.io,direct
FROM golang:1.18-alpine as builder
ARG image_version
ARG client
ENV RELEASE=$image_version
ENV CLIENT=$client
ENV GO111MODULE=
ENV CGO_ENABLED=0
ENV CGO_ENABLED=1
# Install required python/pip
ENV PYTHONUNBUFFERED=1
RUN apk add --update --no-cache python3 && ln -sf python3 /usr/bin/python
RUN apk add --update --no-cache python3 git openssl-dev musl-dev gcc make cmake pkgconfig && ln -sf python3 /usr/bin/python
RUN python3 -m ensurepip
RUN pip3 install --no-cache --upgrade pip setuptools
WORKDIR /work
ADD . .
# install libgit2
WORKDIR /work
RUN rm -rf git2go && make libgit2
# build kubescape server
WORKDIR /work/httphandler
RUN python build.py


@@ -1,9 +1,9 @@
package config
import (
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/meta"
v1 "github.com/armosec/kubescape/v2/core/meta/datastructures/v1"
logger "github.com/dwertent/go-logger"
"github.com/spf13/cobra"
)


@@ -4,9 +4,9 @@ import (
"fmt"
"strings"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/meta"
metav1 "github.com/armosec/kubescape/v2/core/meta/datastructures/v1"
logger "github.com/dwertent/go-logger"
"github.com/spf13/cobra"
)


@@ -3,9 +3,9 @@ package config
import (
"os"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/meta"
v1 "github.com/armosec/kubescape/v2/core/meta/datastructures/v1"
logger "github.com/dwertent/go-logger"
"github.com/spf13/cobra"
)


@@ -24,7 +24,9 @@ func GetDeleteCmd(ks meta.IKubescape) *cobra.Command {
Run: func(cmd *cobra.Command, args []string) {
},
}
deleteCmd.PersistentFlags().StringVarP(&deleteInfo.Account, "account", "", "", "Armo portal account ID. Default will load account ID from configMap or config file")
deleteCmd.PersistentFlags().StringVarP(&deleteInfo.Credentials.Account, "account", "", "", "Kubescape SaaS account ID. Default will load account ID from cache")
deleteCmd.PersistentFlags().StringVarP(&deleteInfo.Credentials.ClientID, "client-id", "", "", "Kubescape SaaS client ID. Default will load client ID from cache, read more - https://hub.armosec.io/docs/authentication")
deleteCmd.PersistentFlags().StringVarP(&deleteInfo.Credentials.SecretKey, "secret-key", "", "", "Kubescape SaaS secret key. Default will load secret key from cache, read more - https://hub.armosec.io/docs/authentication")
deleteCmd.AddCommand(getExceptionsCmd(ks, &deleteInfo))


@@ -4,9 +4,9 @@ import (
"fmt"
"strings"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/meta"
v1 "github.com/armosec/kubescape/v2/core/meta/datastructures/v1"
logger "github.com/dwertent/go-logger"
"github.com/spf13/cobra"
)
@@ -26,7 +26,7 @@ func getExceptionsCmd(ks meta.IKubescape, deleteInfo *v1.Delete) *cobra.Command
if len(exceptionsNames) == 0 {
logger.L().Fatal("missing exceptions names")
}
if err := ks.DeleteExceptions(&v1.DeleteExceptions{Account: deleteInfo.Account, Exceptions: exceptionsNames}); err != nil {
if err := ks.DeleteExceptions(&v1.DeleteExceptions{Credentials: deleteInfo.Credentials, Exceptions: exceptionsNames}); err != nil {
logger.L().Fatal(err.Error())
}
},


@@ -6,10 +6,10 @@ import (
"strings"
"github.com/armosec/kubescape/v2/core/cautils"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/core"
"github.com/armosec/kubescape/v2/core/meta"
v1 "github.com/armosec/kubescape/v2/core/meta/datastructures/v1"
logger "github.com/dwertent/go-logger"
"github.com/spf13/cobra"
)
@@ -72,7 +72,10 @@ func GeDownloadCmd(ks meta.IKubescape) *cobra.Command {
return nil
},
}
downloadCmd.PersistentFlags().StringVarP(&downloadInfo.Account, "account", "", "", "Armo portal account ID. Default will load account ID from configMap or config file")
downloadCmd.PersistentFlags().StringVarP(&downloadInfo.Credentials.Account, "account", "", "", "Kubescape SaaS account ID. Default will load account ID from cache")
downloadCmd.PersistentFlags().StringVarP(&downloadInfo.Credentials.ClientID, "client-id", "", "", "Kubescape SaaS client ID. Default will load client ID from cache, read more - https://hub.armosec.io/docs/authentication")
downloadCmd.PersistentFlags().StringVarP(&downloadInfo.Credentials.SecretKey, "secret-key", "", "", "Kubescape SaaS secret key. Default will load secret key from cache, read more - https://hub.armosec.io/docs/authentication")
downloadCmd.Flags().StringVarP(&downloadInfo.Path, "output", "o", "", "Output file. If not specified, will save in `~/.kubescape/<policy name>.json`")
return downloadCmd


@@ -5,10 +5,10 @@ import (
"strings"
"github.com/armosec/kubescape/v2/core/cautils"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/core"
"github.com/armosec/kubescape/v2/core/meta"
v1 "github.com/armosec/kubescape/v2/core/meta/datastructures/v1"
logger "github.com/dwertent/go-logger"
"github.com/spf13/cobra"
)
@@ -27,7 +27,7 @@ var (
kubescape list controls --id
Control documentation:
https://hub.armo.cloud/docs/controls
https://hub.armosec.io/docs/controls
`
)
@@ -59,7 +59,9 @@ func GetListCmd(ks meta.IKubescape) *cobra.Command {
return nil
},
}
listCmd.PersistentFlags().StringVar(&listPolicies.Account, "account", "", "Armo portal account ID. Default will load account ID from configMap or config file")
listCmd.PersistentFlags().StringVarP(&listPolicies.Credentials.Account, "account", "", "", "Kubescape SaaS account ID. Default will load account ID from cache")
listCmd.PersistentFlags().StringVarP(&listPolicies.Credentials.ClientID, "client-id", "", "", "Kubescape SaaS client ID. Default will load client ID from cache, read more - https://hub.armosec.io/docs/authentication")
listCmd.PersistentFlags().StringVarP(&listPolicies.Credentials.SecretKey, "secret-key", "", "", "Kubescape SaaS secret key. Default will load secret key from cache, read more - https://hub.armosec.io/docs/authentication")
listCmd.PersistentFlags().StringVar(&listPolicies.Format, "format", "pretty-print", "output format. supported: 'pretty-printer'/'json'")
listCmd.PersistentFlags().BoolVarP(&listPolicies.ListIDs, "id", "", false, "List control ID's instead of controls names")


@@ -14,10 +14,10 @@ import (
"github.com/armosec/kubescape/v2/cmd/version"
"github.com/armosec/kubescape/v2/core/cautils"
"github.com/armosec/kubescape/v2/core/cautils/getter"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
"github.com/armosec/kubescape/v2/core/core"
"github.com/armosec/kubescape/v2/core/meta"
logger "github.com/dwertent/go-logger"
"github.com/dwertent/go-logger/helpers"
"github.com/spf13/cobra"
)
@@ -47,9 +47,7 @@ func getRootCmd(ks meta.IKubescape) *cobra.Command {
rootCmd := &cobra.Command{
Use: "kubescape",
Version: cautils.BuildNumber,
Short: "Kubescape is a tool for testing Kubernetes security posture",
Long: `Based on NSA \ MITRE ATT&CK® and other frameworks specifications`,
Short: "Kubescape is a tool for testing Kubernetes security posture. Docs: https://hub.armosec.io/docs",
Example: ksExamples,
}
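
The import rewrites in these command files swap the in-tree `core/cautils/logger` package for the external `github.com/dwertent/go-logger` module without touching the call sites. A minimal sketch of that call pattern, limited to the calls actually visible in the diffs (`logger.L()`, `helpers.Error`); the rest of the module's API is assumed:

```go
package main

import (
	"errors"

	logger "github.com/dwertent/go-logger"
	"github.com/dwertent/go-logger/helpers"
)

func main() {
	// Same shape as the call sites kept by the refactor, e.g.
	//   logger.L().Error("failed setting account ID", helpers.Error(err))
	//   logger.L().Fatal(err.Error())
	err := errors.New("kubescape config map not found")
	logger.L().Error("failed setting account ID", helpers.Error(err))
}
```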


@@ -6,8 +6,8 @@ import (
"strings"
"github.com/armosec/kubescape/v2/core/cautils/getter"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
logger "github.com/dwertent/go-logger"
"github.com/dwertent/go-logger/helpers"
"github.com/mattn/go-isatty"
)


@@ -9,10 +9,10 @@ import (
apisv1 "github.com/armosec/opa-utils/httpserver/apis/v1"
"github.com/armosec/kubescape/v2/core/cautils"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
"github.com/armosec/kubescape/v2/core/meta"
"github.com/armosec/opa-utils/reporthandling"
logger "github.com/dwertent/go-logger"
"github.com/dwertent/go-logger/helpers"
"github.com/enescakir/emoji"
"github.com/spf13/cobra"
)
@@ -31,7 +31,7 @@ var (
Run 'kubescape list controls' for the list of supported controls
Control documentation:
https://hub.armo.cloud/docs/controls
https://hub.armosec.io/docs/controls
`
)
@@ -59,7 +59,7 @@ func getControlCmd(ks meta.IKubescape, scanInfo *cautils.ScanInfo) *cobra.Comman
RunE: func(cmd *cobra.Command, args []string) error {
// flagValidationControl(scanInfo)
scanInfo.PolicyIdentifier = []reporthandling.PolicyIdentifier{}
scanInfo.PolicyIdentifier = []cautils.PolicyIdentifier{}
if len(args) == 0 {
scanInfo.ScanAll = true


@@ -9,9 +9,10 @@ import (
apisv1 "github.com/armosec/opa-utils/httpserver/apis/v1"
"github.com/armosec/kubescape/v2/core/cautils"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
"github.com/armosec/kubescape/v2/core/meta"
logger "github.com/dwertent/go-logger"
"github.com/dwertent/go-logger/helpers"
"github.com/enescakir/emoji"
"github.com/spf13/cobra"
)


@@ -63,14 +63,16 @@ func GetScanCommand(ks meta.IKubescape) *cobra.Command {
},
}
scanCmd.PersistentFlags().StringVarP(&scanInfo.Account, "account", "", "", "ARMO portal account ID. Default will load account ID from configMap or config file")
scanCmd.PersistentFlags().StringVarP(&scanInfo.Credentials.Account, "account", "", "", "Kubescape SaaS account ID. Default will load account ID from cache")
scanCmd.PersistentFlags().StringVarP(&scanInfo.Credentials.ClientID, "client-id", "", "", "Kubescape SaaS client ID. Default will load client ID from cache, read more - https://hub.armosec.io/docs/authentication")
scanCmd.PersistentFlags().StringVarP(&scanInfo.Credentials.SecretKey, "secret-key", "", "", "Kubescape SaaS secret key. Default will load secret key from cache, read more - https://hub.armosec.io/docs/authentication")
scanCmd.PersistentFlags().StringVarP(&scanInfo.KubeContext, "kube-context", "", "", "Kube context. Default will use the current-context")
scanCmd.PersistentFlags().StringVar(&scanInfo.ControlsInputs, "controls-config", "", "Path to an controls-config obj. If not set will download controls-config from ARMO management portal")
scanCmd.PersistentFlags().StringVar(&scanInfo.UseExceptions, "exceptions", "", "Path to an exceptions obj. If not set will download exceptions from ARMO management portal")
scanCmd.PersistentFlags().StringVar(&scanInfo.UseArtifactsFrom, "use-artifacts-from", "", "Load artifacts from local directory. If not used will download them")
scanCmd.PersistentFlags().StringVarP(&scanInfo.ExcludedNamespaces, "exclude-namespaces", "e", "", "Namespaces to exclude from scanning. Recommended: kube-system,kube-public")
scanCmd.PersistentFlags().Float32VarP(&scanInfo.FailThreshold, "fail-threshold", "t", 100, "Failure threshold is the percent above which the command fails and returns exit code 1")
scanCmd.PersistentFlags().StringVarP(&scanInfo.Format, "format", "f", "pretty-printer", `Output format. Supported formats: "pretty-printer","json","junit","prometheus","pdf"`)
scanCmd.PersistentFlags().StringVarP(&scanInfo.Format, "format", "f", "pretty-printer", `Output format. Supported formats: "pretty-printer", "json", "junit", "prometheus", "pdf", "html"`)
scanCmd.PersistentFlags().StringVar(&scanInfo.IncludeNamespaces, "include-namespaces", "", "scan specific namespaces. e.g: --include-namespaces ns-a,ns-b")
scanCmd.PersistentFlags().BoolVarP(&scanInfo.Local, "keep-local", "", false, "If you do not want your Kubescape results reported to ARMO backend. Use this flag if you ran with the '--submit' flag in the past and you do not want to submit your current scan results")
scanCmd.PersistentFlags().StringVarP(&scanInfo.Output, "output", "o", "", "Output file. Print output to file and not stdout")
@@ -91,7 +93,7 @@ func GetScanCommand(ks meta.IKubescape) *cobra.Command {
scanCmd.PersistentFlags().MarkHidden("silent") // this flag should be deprecated since we added the --logger support
// scanCmd.PersistentFlags().MarkHidden("format-version") // meant for testing different output approaches and not for common use
hostF := scanCmd.PersistentFlags().VarPF(&scanInfo.HostSensorEnabled, "enable-host-scan", "", "Deploy ARMO K8s host-sensor daemonset in the scanned cluster. Deleting it right after we collecting the data. Required to collect valuable data from cluster nodes for certain controls. Yaml file: https://raw.githubusercontent.com/armosec/kubescape/master/hostsensorutils/hostsensor.yaml")
hostF := scanCmd.PersistentFlags().VarPF(&scanInfo.HostSensorEnabled, "enable-host-scan", "", "Deploy ARMO K8s host-sensor daemonset in the scanned cluster. Deleting it right after we collecting the data. Required to collect valuable data from cluster nodes for certain controls. Yaml file: https://github.com/armosec/kubescape/blob/master/core/pkg/hostsensorutils/hostsensor.yaml")
hostF.NoOptDefVal = "true"
hostF.DefValue = "false, for no TTY in stdin"


@@ -3,9 +3,9 @@ package submit
import (
"fmt"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/meta"
metav1 "github.com/armosec/kubescape/v2/core/meta/datastructures/v1"
logger "github.com/dwertent/go-logger"
"github.com/spf13/cobra"
)
@@ -21,7 +21,7 @@ func getExceptionsCmd(ks meta.IKubescape, submitInfo *metav1.Submit) *cobra.Comm
return nil
},
Run: func(cmd *cobra.Command, args []string) {
if err := ks.SubmitExceptions(submitInfo.Account, args[0]); err != nil {
if err := ks.SubmitExceptions(&submitInfo.Credentials, args[0]); err != nil {
logger.L().Fatal(err.Error())
}
},


@@ -4,11 +4,11 @@ import (
"github.com/armosec/k8s-interface/k8sinterface"
"github.com/armosec/kubescape/v2/core/cautils"
"github.com/armosec/kubescape/v2/core/cautils/getter"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
"github.com/armosec/kubescape/v2/core/meta"
"github.com/armosec/kubescape/v2/core/meta/cliinterfaces"
v1 "github.com/armosec/kubescape/v2/core/meta/datastructures/v1"
logger "github.com/dwertent/go-logger"
"github.com/dwertent/go-logger/helpers"
reporterv1 "github.com/armosec/kubescape/v2/core/pkg/resultshandling/reporter/v1"
@@ -27,7 +27,7 @@ func getRBACCmd(ks meta.IKubescape, submitInfo *v1.Submit) *cobra.Command {
k8s := k8sinterface.NewKubernetesApi()
// get config
clusterConfig := getTenantConfig(submitInfo.Account, "", k8s)
clusterConfig := getTenantConfig(&submitInfo.Credentials, "", k8s)
if err := clusterConfig.SetTenant(); err != nil {
logger.L().Error("failed setting account ID", helpers.Error(err))
}
@@ -60,9 +60,9 @@ func getKubernetesApi() *k8sinterface.KubernetesApi {
}
return k8sinterface.NewKubernetesApi()
}
func getTenantConfig(Account, clusterName string, k8s *k8sinterface.KubernetesApi) cautils.ITenantConfig {
func getTenantConfig(credentials *cautils.Credentials, clusterName string, k8s *k8sinterface.KubernetesApi) cautils.ITenantConfig {
if !k8sinterface.IsConnectedToCluster() || k8s == nil {
return cautils.NewLocalConfig(getter.GetArmoAPIConnector(), Account, clusterName)
return cautils.NewLocalConfig(getter.GetArmoAPIConnector(), credentials, clusterName)
}
return cautils.NewClusterConfig(k8s, getter.GetArmoAPIConnector(), Account, clusterName)
return cautils.NewClusterConfig(k8s, getter.GetArmoAPIConnector(), credentials, clusterName)
}
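
Throughout the command files above, the single `Account` string argument is replaced by a `Credentials` value whose `Account`, `ClientID`, and `SecretKey` fields are bound to the `--account`, `--client-id`, and `--secret-key` flags and then handed to `getTenantConfig`, `NewLocalConfig`, and `NewClusterConfig`. The struct definition is not part of this diff; judging by the field accesses it is presumably close to:

```go
// Assumed shape of cautils.Credentials; only the three field names are
// confirmed by the flag bindings and call sites in this changeset.
package cautils

type Credentials struct {
	Account   string // Kubescape SaaS account ID (--account)
	ClientID  string // Kubescape SaaS client ID (--client-id)
	SecretKey string // Kubescape SaaS secret key (--secret-key)
}
```

In the `clusterconfig` hunks that follow, `updateCredentials(lc.configObj, credentials)` presumably copies any non-empty field over the cached config, replacing the old direct `customerGUID` override.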


@@ -7,14 +7,14 @@ import (
"time"
"github.com/armosec/k8s-interface/workloadinterface"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
"github.com/armosec/kubescape/v2/core/meta"
"github.com/armosec/kubescape/v2/core/meta/cliinterfaces"
v1 "github.com/armosec/kubescape/v2/core/meta/datastructures/v1"
"github.com/armosec/kubescape/v2/core/pkg/resultshandling/reporter"
reporterv1 "github.com/armosec/kubescape/v2/core/pkg/resultshandling/reporter/v1"
reporterv2 "github.com/armosec/kubescape/v2/core/pkg/resultshandling/reporter/v2"
logger "github.com/dwertent/go-logger"
"github.com/dwertent/go-logger/helpers"
"github.com/armosec/opa-utils/reporthandling"
"github.com/google/uuid"
@@ -69,7 +69,7 @@ func getResultsCmd(ks meta.IKubescape, submitInfo *v1.Submit) *cobra.Command {
k8s := getKubernetesApi()
// get config
clusterConfig := getTenantConfig(submitInfo.Account, "", k8s)
clusterConfig := getTenantConfig(&submitInfo.Credentials, "", k8s)
if err := clusterConfig.SetTenant(); err != nil {
logger.L().Error("failed setting account ID", helpers.Error(err))
}


@@ -20,7 +20,9 @@ func GetSubmitCmd(ks meta.IKubescape) *cobra.Command {
Run: func(cmd *cobra.Command, args []string) {
},
}
submitCmd.PersistentFlags().StringVarP(&submitInfo.Account, "account", "", "", "Armo portal account ID. Default will load account ID from configMap or config file")
submitCmd.PersistentFlags().StringVarP(&submitInfo.Credentials.Account, "account", "", "", "Kubescape SaaS account ID. Default will load account ID from cache")
submitCmd.PersistentFlags().StringVarP(&submitInfo.Credentials.ClientID, "client-id", "", "", "Kubescape SaaS client ID. Default will load client ID from cache, read more - https://hub.armosec.io/docs/authentication")
submitCmd.PersistentFlags().StringVarP(&submitInfo.Credentials.SecretKey, "secret-key", "", "", "Kubescape SaaS secret key. Default will load secret key from cache, read more - https://hub.armosec.io/docs/authentication")
submitCmd.AddCommand(getExceptionsCmd(ks, &submitInfo))
submitCmd.AddCommand(getResultsCmd(ks, &submitInfo))

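Usage note (hedged): only the flag names above come from this diff; the subcommand name and file argument below are inferred from the getExceptionsCmd constructor and are therefore illustrative. With these flags, credentials can be supplied explicitly instead of being read from the cached config, e.g.:

kubescape submit exceptions ./exceptions.json --account <ACCOUNT_ID> --client-id <CLIENT_ID> --secret-key <SECRET_KEY>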
View File

@@ -11,7 +11,7 @@ import (
"github.com/armosec/k8s-interface/k8sinterface"
"github.com/armosec/kubescape/v2/core/cautils/getter"
"github.com/armosec/kubescape/v2/core/cautils/logger"
logger "github.com/dwertent/go-logger"
corev1 "k8s.io/api/core/v1"
)
@@ -69,7 +69,10 @@ type ITenantConfig interface {
// getters
GetContextName() string
GetAccountID() string
GetTennatEmail() string
GetTenantEmail() string
GetToken() string
GetClientID() string
GetSecretKey() string
GetConfigObj() *ConfigObj
// GetBackendAPI() getter.IBackend
// GenerateURL()
@@ -87,8 +90,7 @@ type LocalConfig struct {
}
func NewLocalConfig(
backendAPI getter.IBackend, customerGUID, clusterName string) *LocalConfig {
var configObj *ConfigObj
backendAPI getter.IBackend, credentials *Credentials, clusterName string) *LocalConfig {
lc := &LocalConfig{
backendAPI: backendAPI,
@@ -96,20 +98,14 @@ func NewLocalConfig(
}
// get from configMap
if existsConfigFile() { // get from file
configObj, _ = loadConfigFromFile()
} else {
configObj = &ConfigObj{}
}
if configObj != nil {
lc.configObj = configObj
}
if customerGUID != "" {
lc.configObj.AccountID = customerGUID // override config customerGUID
loadConfigFromFile(lc.configObj)
}
updateCredentials(lc.configObj, credentials)
if clusterName != "" {
lc.configObj.ClusterName = AdoptClusterName(clusterName) // override config clusterName
}
getAccountFromEnv(lc.configObj)
lc.backendAPI.SetAccountID(lc.configObj.AccountID)
lc.backendAPI.SetClientID(lc.configObj.ClientID)
@@ -119,9 +115,12 @@ func NewLocalConfig(
}
func (lc *LocalConfig) GetConfigObj() *ConfigObj { return lc.configObj }
func (lc *LocalConfig) GetTennatEmail() string { return lc.configObj.CustomerAdminEMail }
func (lc *LocalConfig) GetTenantEmail() string { return lc.configObj.CustomerAdminEMail }
func (lc *LocalConfig) GetAccountID() string { return lc.configObj.AccountID }
func (lc *LocalConfig) GetClientID() string { return lc.configObj.ClientID }
func (lc *LocalConfig) GetSecretKey() string { return lc.configObj.SecretKey }
func (lc *LocalConfig) GetContextName() string { return lc.configObj.ClusterName }
func (lc *LocalConfig) GetToken() string { return lc.configObj.Token }
func (lc *LocalConfig) IsConfigFound() bool { return existsConfigFile() }
func (lc *LocalConfig) SetTenant() error {
@@ -190,8 +189,8 @@ type ClusterConfig struct {
configObj *ConfigObj
}
func NewClusterConfig(k8s *k8sinterface.KubernetesApi, backendAPI getter.IBackend, customerGUID, clusterName string) *ClusterConfig {
var configObj *ConfigObj
func NewClusterConfig(k8s *k8sinterface.KubernetesApi, backendAPI getter.IBackend, credentials *Credentials, clusterName string) *ClusterConfig {
// var configObj *ConfigObj
c := &ClusterConfig{
k8s: k8s,
backendAPI: backendAPI,
@@ -200,23 +199,20 @@ func NewClusterConfig(k8s *k8sinterface.KubernetesApi, backendAPI getter.IBacken
configMapNamespace: getConfigMapNamespace(),
}
// get from configMap
// first, load from configMap
if c.existsConfigMap() {
configObj, _ = c.loadConfigFromConfigMap()
c.loadConfigFromConfigMap()
}
if configObj == nil && existsConfigFile() { // get from file
configObj, _ = loadConfigFromFile()
}
if configObj != nil {
c.configObj = configObj
}
if customerGUID != "" {
c.configObj.AccountID = customerGUID // override config customerGUID
// second, load from file
if existsConfigFile() { // get from file
loadConfigFromFile(c.configObj)
}
updateCredentials(c.configObj, credentials)
if clusterName != "" {
c.configObj.ClusterName = AdoptClusterName(clusterName) // override config clusterName
}
getAccountFromEnv(c.configObj)
if c.configObj.ClusterName == "" {
c.configObj.ClusterName = AdoptClusterName(k8sinterface.GetContextName())
@@ -234,7 +230,10 @@ func NewClusterConfig(k8s *k8sinterface.KubernetesApi, backendAPI getter.IBacken
func (c *ClusterConfig) GetConfigObj() *ConfigObj { return c.configObj }
func (c *ClusterConfig) GetDefaultNS() string { return c.configMapNamespace }
func (c *ClusterConfig) GetAccountID() string { return c.configObj.AccountID }
func (c *ClusterConfig) GetTennatEmail() string { return c.configObj.CustomerAdminEMail }
func (c *ClusterConfig) GetClientID() string { return c.configObj.ClientID }
func (c *ClusterConfig) GetSecretKey() string { return c.configObj.SecretKey }
func (c *ClusterConfig) GetTenantEmail() string { return c.configObj.CustomerAdminEMail }
func (c *ClusterConfig) GetToken() string { return c.configObj.Token }
func (c *ClusterConfig) IsConfigFound() bool { return existsConfigFile() || c.existsConfigMap() }
func (c *ClusterConfig) SetTenant() error {
@@ -282,18 +281,26 @@ func (c *ClusterConfig) ToMapString() map[string]interface{} {
}
return m
}
func (c *ClusterConfig) loadConfigFromConfigMap() (*ConfigObj, error) {
func (c *ClusterConfig) loadConfigFromConfigMap() error {
configMap, err := c.k8s.KubernetesClient.CoreV1().ConfigMaps(c.configMapNamespace).Get(context.Background(), c.configMapName, metav1.GetOptions{})
if err != nil {
return nil, err
return err
}
if bData, err := json.Marshal(configMap.Data); err == nil {
return readConfig(bData)
}
return nil, nil
return loadConfigFromData(c.configObj, configMap.Data)
}
func loadConfigFromData(co *ConfigObj, data map[string]string) error {
var e error
if jsonConf, ok := data["config.json"]; ok {
e = readConfig([]byte(jsonConf), co)
}
if bData, err := json.Marshal(data); err == nil {
e = readConfig(bData, co)
}
return e
}
func (c *ClusterConfig) existsConfigMap() bool {
_, err := c.k8s.KubernetesClient.CoreV1().ConfigMaps(c.configMapNamespace).Get(context.Background(), c.configMapName, metav1.GetOptions{})
// TODO - check if has customerGUID
@@ -411,28 +418,27 @@ func (c *ClusterConfig) updateConfigData(configMap *corev1.ConfigMap) {
}
}
}
func loadConfigFromFile() (*ConfigObj, error) {
func loadConfigFromFile(configObj *ConfigObj) error {
dat, err := os.ReadFile(ConfigFileFullPath())
if err != nil {
return nil, err
return err
}
return readConfig(dat)
return readConfig(dat, configObj)
}
func readConfig(dat []byte) (*ConfigObj, error) {
func readConfig(dat []byte, configObj *ConfigObj) error {
if len(dat) == 0 {
return nil, nil
return nil
}
configObj := &ConfigObj{}
if err := json.Unmarshal(dat, configObj); err != nil {
return nil, err
return err
}
if configObj.AccountID == "" {
configObj.AccountID = configObj.CustomerGUID
}
configObj.CustomerGUID = ""
return configObj, nil
return nil
}
// Check if the customer is submitted
@@ -479,15 +485,34 @@ func getConfigMapNamespace() string {
return "default"
}
func getAccountFromEnv(configObj *ConfigObj) {
func getAccountFromEnv(credentials *Credentials) {
// load from env
if accountID := os.Getenv("KS_ACCOUNT_ID"); accountID != "" {
configObj.AccountID = accountID
if accountID := os.Getenv("KS_ACCOUNT_ID"); credentials.Account == "" && accountID != "" {
credentials.Account = accountID
}
if clientID := os.Getenv("KS_CLIENT_ID"); clientID != "" {
configObj.ClientID = clientID
if clientID := os.Getenv("KS_CLIENT_ID"); credentials.ClientID == "" && clientID != "" {
credentials.ClientID = clientID
}
if secretKey := os.Getenv("KS_SECRET_KEY"); secretKey != "" {
configObj.SecretKey = secretKey
if secretKey := os.Getenv("KS_SECRET_KEY"); credentials.SecretKey == "" && secretKey != "" {
credentials.SecretKey = secretKey
}
}
func updateCredentials(configObj *ConfigObj, credentials *Credentials) {
if credentials == nil {
credentials = &Credentials{}
}
getAccountFromEnv(credentials)
if credentials.Account != "" {
configObj.AccountID = credentials.Account // override config Account
}
if credentials.ClientID != "" {
configObj.ClientID = credentials.ClientID // override config ClientID
}
if credentials.SecretKey != "" {
configObj.SecretKey = credentials.SecretKey // override config SecretKey
}
}
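A minimal sketch of the resulting credential precedence, assuming only the types and helpers defined in this diff (ConfigObj, Credentials, updateCredentials): an explicit CLI value wins, the matching KS_* environment variable fills in any field left empty, and anything still unset keeps the value loaded from the config file or ConfigMap.

package cautils // sketch only; all referenced identifiers are defined in this diff

import "os"

// credentialPrecedenceSketch demonstrates the precedence implemented by updateCredentials:
// CLI flag > KS_* environment variable > value already loaded from file/ConfigMap.
// All literal values are illustrative.
func credentialPrecedenceSketch() {
	cfg := &ConfigObj{AccountID: "from-file", ClientID: "from-file", SecretKey: "from-file"}

	os.Setenv("KS_CLIENT_ID", "from-env")       // env is consulted only for credential fields left empty
	creds := &Credentials{Account: "from-flag"} // e.g. populated by the --account flag

	updateCredentials(cfg, creds)

	// cfg.AccountID == "from-flag" (flag overrides the persisted value)
	// cfg.ClientID  == "from-env"  (no flag given, so the environment variable wins)
	// cfg.SecretKey == "from-file" (neither flag nor environment variable was provided)
}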

View File

@@ -0,0 +1,193 @@
package cautils
import (
"encoding/json"
"testing"
"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
)
func mockConfigObj() *ConfigObj {
return &ConfigObj{
AccountID: "aaa",
ClientID: "bbb",
SecretKey: "ccc",
ClusterName: "ddd",
CustomerAdminEMail: "ab@cd",
Token: "eee",
}
}
func mockLocalConfig() *LocalConfig {
return &LocalConfig{
backendAPI: nil,
configObj: mockConfigObj(),
}
}
func mockClusterConfig() *ClusterConfig {
return &ClusterConfig{
backendAPI: nil,
configObj: mockConfigObj(),
}
}
func TestConfig(t *testing.T) {
co := mockConfigObj()
cop := ConfigObj{}
assert.NoError(t, json.Unmarshal(co.Config(), &cop))
assert.Equal(t, co.AccountID, cop.AccountID)
assert.Equal(t, co.ClientID, cop.ClientID)
assert.Equal(t, co.SecretKey, cop.SecretKey)
assert.Equal(t, "", cop.ClusterName) // Not copied to bytes
assert.Equal(t, "", cop.CustomerAdminEMail) // Not copied to bytes
assert.Equal(t, "", cop.Token) // Not copied to bytes
}
func TestITenantConfig(t *testing.T) {
var lc ITenantConfig
var c ITenantConfig
lc = mockLocalConfig()
c = mockClusterConfig()
co := mockConfigObj()
// test LocalConfig methods
assert.Equal(t, co.AccountID, lc.GetAccountID())
assert.Equal(t, co.ClientID, lc.GetClientID())
assert.Equal(t, co.SecretKey, lc.GetSecretKey())
assert.Equal(t, co.ClusterName, lc.GetContextName())
assert.Equal(t, co.CustomerAdminEMail, lc.GetTenantEmail())
assert.Equal(t, co.Token, lc.GetToken())
// test ClusterConfig methods
assert.Equal(t, co.AccountID, c.GetAccountID())
assert.Equal(t, co.ClientID, c.GetClientID())
assert.Equal(t, co.SecretKey, c.GetSecretKey())
assert.Equal(t, co.ClusterName, c.GetContextName())
assert.Equal(t, co.CustomerAdminEMail, c.GetTenantEmail())
assert.Equal(t, co.Token, c.GetToken())
}
func TestUpdateConfigData(t *testing.T) {
c := mockClusterConfig()
configMap := &corev1.ConfigMap{}
c.updateConfigData(configMap)
assert.Equal(t, c.GetAccountID(), configMap.Data["accountID"])
assert.Equal(t, c.GetClientID(), configMap.Data["clientID"])
assert.Equal(t, c.GetSecretKey(), configMap.Data["secretKey"])
}
func TestReadConfig(t *testing.T) {
com := mockConfigObj()
co := &ConfigObj{}
b, e := json.Marshal(com)
assert.NoError(t, e)
readConfig(b, co)
assert.Equal(t, com.AccountID, co.AccountID)
assert.Equal(t, com.ClientID, co.ClientID)
assert.Equal(t, com.SecretKey, co.SecretKey)
assert.Equal(t, com.ClusterName, co.ClusterName)
assert.Equal(t, com.CustomerAdminEMail, co.CustomerAdminEMail)
assert.Equal(t, com.Token, co.Token)
}
func TestLoadConfigFromData(t *testing.T) {
// use case: all data is in base config
{
c := mockClusterConfig()
co := mockConfigObj()
configMap := &corev1.ConfigMap{}
c.updateConfigData(configMap)
c.configObj = &ConfigObj{}
loadConfigFromData(c.configObj, configMap.Data)
assert.Equal(t, c.GetAccountID(), co.AccountID)
assert.Equal(t, c.GetClientID(), co.ClientID)
assert.Equal(t, c.GetSecretKey(), co.SecretKey)
assert.Equal(t, c.GetContextName(), co.ClusterName)
assert.Equal(t, c.GetTenantEmail(), co.CustomerAdminEMail)
assert.Equal(t, c.GetToken(), co.Token)
}
// use case: all data is in config.json
{
c := mockClusterConfig()
co := mockConfigObj()
configMap := &corev1.ConfigMap{
Data: make(map[string]string),
}
configMap.Data["config.json"] = string(c.GetConfigObj().Config())
c.configObj = &ConfigObj{}
loadConfigFromData(c.configObj, configMap.Data)
assert.Equal(t, c.GetAccountID(), co.AccountID)
assert.Equal(t, c.GetClientID(), co.ClientID)
assert.Equal(t, c.GetSecretKey(), co.SecretKey)
}
// use case: some data is in config.json
{
c := mockClusterConfig()
configMap := &corev1.ConfigMap{
Data: make(map[string]string),
}
// add to map
configMap.Data["clientID"] = c.configObj.ClientID
configMap.Data["secretKey"] = c.configObj.SecretKey
// delete the content
c.configObj.ClientID = ""
c.configObj.SecretKey = ""
configMap.Data["config.json"] = string(c.GetConfigObj().Config())
loadConfigFromData(c.configObj, configMap.Data)
assert.NotEmpty(t, c.GetAccountID())
assert.NotEmpty(t, c.GetClientID())
assert.NotEmpty(t, c.GetSecretKey())
}
// use case: some data is in config.json
{
c := mockClusterConfig()
configMap := &corev1.ConfigMap{
Data: make(map[string]string),
}
c.configObj.AccountID = "tttt"
// add to map
configMap.Data["accountID"] = mockConfigObj().AccountID
configMap.Data["clientID"] = c.configObj.ClientID
configMap.Data["secretKey"] = c.configObj.SecretKey
// delete the content
c.configObj.ClientID = ""
c.configObj.SecretKey = ""
configMap.Data["config.json"] = string(c.GetConfigObj().Config())
loadConfigFromData(c.configObj, configMap.Data)
assert.Equal(t, mockConfigObj().AccountID, c.GetAccountID())
assert.NotEmpty(t, c.GetClientID())
assert.NotEmpty(t, c.GetSecretKey())
}
}

View File

@@ -19,7 +19,7 @@ type OPASessionObj struct {
Policies []reporthandling.Framework // list of frameworks to scan
AllResources map[string]workloadinterface.IMetadata // all scanned resources, map[<resource ID>]<resource>
ResourcesResult map[string]resourcesresults.Result // resources scan results, map[<resource ID>]<resource result>
ResourceSource map[string]string // resources sources, map[<resource ID>]<resource source>
ResourceSource map[string]reporthandling.Source // resources sources, map[<resource ID>]<resource source>
PostureReport *reporthandling.PostureReport // scan results v1 - Remove
Report *reporthandlingv2.PostureReport // scan results v2 - Remove
Exceptions []armotypes.PostureExceptionPolicy // list of exceptions to apply on scan results
@@ -39,7 +39,7 @@ func NewOPASessionObj(frameworks []reporthandling.Framework, k8sResources *K8SRe
ResourcesResult: make(map[string]resourcesresults.Result),
InfoMap: make(map[string]apis.StatusInfo),
ResourceToControlsMap: make(map[string][]string),
ResourceSource: make(map[string]string),
ResourceSource: make(map[string]reporthandling.Source),
SessionID: scanInfo.ScanID,
PostureReport: &reporthandling.PostureReport{
ClusterName: ClusterName,
@@ -62,6 +62,11 @@ func NewOPASessionObjMock() *OPASessionObj {
ReportID: "",
JobID: "",
},
Metadata: &reporthandlingv2.Metadata{
ScanMetadata: reporthandlingv2.ScanMetadata{
ScanningTarget: 0,
},
},
}
}

View File

@@ -9,8 +9,12 @@ import (
"strings"
"github.com/armosec/k8s-interface/workloadinterface"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/dwertent/go-logger/helpers"
"github.com/armosec/opa-utils/objectsenvelopes"
"github.com/armosec/opa-utils/objectsenvelopes/localworkload"
logger "github.com/dwertent/go-logger"
"gopkg.in/yaml.v2"
)
@@ -26,23 +30,55 @@ const (
JSON_FILE_FORMAT FileFormat = "json"
)
func LoadResourcesFromFiles(inputPatterns []string) (map[string][]workloadinterface.IMetadata, error) {
files, errs := listFiles(inputPatterns)
// LoadResourcesFromHelmCharts scans a given path (recursively) for helm charts, renders their templates and returns a map of workloads and a map of chart names
func LoadResourcesFromHelmCharts(basePath string) (map[string][]workloadinterface.IMetadata, map[string]string) {
directories, _ := listDirs(basePath)
helmDirectories := make([]string, 0)
for _, dir := range directories {
if ok, _ := IsHelmDirectory(dir); ok {
helmDirectories = append(helmDirectories, dir)
}
}
sourceToWorkloads := map[string][]workloadinterface.IMetadata{}
sourceToChartName := map[string]string{}
for _, helmDir := range helmDirectories {
chart, err := NewHelmChart(helmDir)
if err == nil {
wls, errs := chart.GetWorkloadsWithDefaultValues()
if len(errs) > 0 {
logger.L().Error(fmt.Sprintf("Rendering of Helm chart template failed: %v", errs))
continue
}
chartName := chart.GetName()
for k, v := range wls {
sourceToWorkloads[k] = v
sourceToChartName[k] = chartName
}
}
}
return sourceToWorkloads, sourceToChartName
}
func LoadResourcesFromFiles(input, rootPath string) map[string][]workloadinterface.IMetadata {
files, errs := listFiles(input)
if len(errs) > 0 {
logger.L().Error(fmt.Sprintf("%v", errs))
}
if len(files) == 0 {
return nil, nil
return nil
}
workloads, errs := loadFiles(files)
workloads, errs := loadFiles(rootPath, files)
if len(errs) > 0 {
logger.L().Error(fmt.Sprintf("%v", errs))
}
return workloads, nil
return workloads
}
func loadFiles(filePaths []string) (map[string][]workloadinterface.IMetadata, []error) {
func loadFiles(rootPath string, filePaths []string) (map[string][]workloadinterface.IMetadata, []error) {
workloads := make(map[string][]workloadinterface.IMetadata, 0)
errs := []error{}
for i := range filePaths {
@@ -51,15 +87,30 @@ func loadFiles(filePaths []string) (map[string][]workloadinterface.IMetadata, []
errs = append(errs, err)
continue
}
if len(f) == 0 {
continue // empty file
}
w, e := ReadFile(f, GetFileFormat(filePaths[i]))
errs = append(errs, e...)
if w != nil {
if _, ok := workloads[filePaths[i]]; !ok {
workloads[filePaths[i]] = []workloadinterface.IMetadata{}
if e != nil {
logger.L().Debug("failed to read file", helpers.String("file", filePaths[i]), helpers.Error(e))
}
if len(w) != 0 {
path := filePaths[i]
if _, ok := workloads[path]; !ok {
workloads[path] = []workloadinterface.IMetadata{}
}
wSlice := workloads[filePaths[i]]
wSlice = append(wSlice, w...)
workloads[filePaths[i]] = wSlice
wSlice := workloads[path]
for j := range w {
lw := localworkload.NewLocalWorkload(w[j].GetObject())
if relPath, err := filepath.Rel(rootPath, path); err == nil {
lw.SetPath(relPath)
} else {
lw.SetPath(path)
}
wSlice = append(wSlice, lw)
}
workloads[path] = wSlice
}
}
return workloads, errs
@@ -68,46 +119,65 @@ func loadFiles(filePaths []string) (map[string][]workloadinterface.IMetadata, []
func loadFile(filePath string) ([]byte, error) {
return os.ReadFile(filePath)
}
func ReadFile(fileContent []byte, fileFromat FileFormat) ([]workloadinterface.IMetadata, []error) {
func ReadFile(fileContent []byte, fileFormat FileFormat) ([]workloadinterface.IMetadata, error) {
switch fileFromat {
switch fileFormat {
case YAML_FILE_FORMAT:
return readYamlFile(fileContent)
case JSON_FILE_FORMAT:
return readJsonFile(fileContent)
default:
return nil, nil // []error{fmt.Errorf("file extension %s not supported", fileFromat)}
return nil, nil
}
}
func listFiles(patterns []string) ([]string, []error) {
files := []string{}
errs := []error{}
for i := range patterns {
if strings.HasPrefix(patterns[i], "http") {
continue
}
if !filepath.IsAbs(patterns[i]) {
o, _ := os.Getwd()
patterns[i] = filepath.Join(o, patterns[i])
}
if IsFile(patterns[i]) {
files = append(files, patterns[i])
} else {
f, err := glob(filepath.Split(patterns[i])) //filepath.Glob(patterns[i])
if err != nil {
errs = append(errs, err)
} else {
files = append(files, f...)
}
}
}
return files, errs
// listFiles returns the list of absolute file paths matching the given pattern, together with any errors encountered while listing
func listFiles(pattern string) ([]string, []error) {
return listFilesOrDirectories(pattern, false)
}
func readYamlFile(yamlFile []byte) ([]workloadinterface.IMetadata, []error) {
// listDirs returns the list of absolute directory paths matching the given pattern, together with any errors encountered while listing
func listDirs(pattern string) ([]string, []error) {
return listFilesOrDirectories(pattern, true)
}
func listFilesOrDirectories(pattern string, onlyDirectories bool) ([]string, []error) {
var paths []string
errs := []error{}
if !filepath.IsAbs(pattern) {
o, _ := os.Getwd()
pattern = filepath.Join(o, pattern)
}
if !onlyDirectories && IsFile(pattern) {
paths = append(paths, pattern)
return paths, errs
}
root, shouldMatch := filepath.Split(pattern)
if IsDir(pattern) {
root = pattern
shouldMatch = "*"
}
if shouldMatch == "" {
shouldMatch = "*"
}
f, err := glob(root, shouldMatch, onlyDirectories)
if err != nil {
errs = append(errs, err)
} else {
paths = append(paths, f...)
}
return paths, errs
}
func readYamlFile(yamlFile []byte) ([]workloadinterface.IMetadata, error) {
defer recover()
r := bytes.NewReader(yamlFile)
dec := yaml.NewDecoder(r)
yamlObjs := []workloadinterface.IMetadata{}
@@ -126,19 +196,17 @@ func readYamlFile(yamlFile []byte) ([]workloadinterface.IMetadata, []error) {
yamlObjs = append(yamlObjs, o)
}
}
} else {
errs = append(errs, fmt.Errorf("failed to convert yaml file to map[string]interface, file content: %v", j))
}
}
return yamlObjs, errs
return yamlObjs, nil
}
func readJsonFile(jsonFile []byte) ([]workloadinterface.IMetadata, []error) {
func readJsonFile(jsonFile []byte) ([]workloadinterface.IMetadata, error) {
workloads := []workloadinterface.IMetadata{}
var jsonObj interface{}
if err := json.Unmarshal(jsonFile, &jsonObj); err != nil {
return workloads, []error{err}
return workloads, err
}
convertJsonToWorkload(jsonObj, &workloads)
@@ -184,21 +252,40 @@ func IsJson(filePath string) bool {
return StringInSlice(JSON_PREFIX, strings.ReplaceAll(filepath.Ext(filePath), ".", "")) != ValueNotFound
}
func glob(root, pattern string) ([]string, error) {
func glob(root, pattern string, onlyDirectories bool) ([]string, error) {
var matches []string
err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
// listing only directories
if onlyDirectories {
if info.IsDir() {
if matched, err := filepath.Match(pattern, filepath.Base(path)); err != nil {
return err
} else if matched {
matches = append(matches, path)
}
}
return nil
}
// listing only files
if info.IsDir() {
return nil
}
fileFormat := GetFileFormat(path)
if !(fileFormat == JSON_FILE_FORMAT || fileFormat == YAML_FILE_FORMAT) {
return nil
}
if matched, err := filepath.Match(pattern, filepath.Base(path)); err != nil {
return err
} else if matched {
matches = append(matches, path)
}
return nil
})
if err != nil {
@@ -206,6 +293,8 @@ func glob(root, pattern string) ([]string, error) {
}
return matches, nil
}
// IsFile checks if a given path is a file
func IsFile(name string) bool {
if fi, err := os.Stat(name); err == nil {
if fi.Mode().IsRegular() {
@@ -215,6 +304,16 @@ func IsFile(name string) bool {
return false
}
// IsDir checks if a given path is a directory
func IsDir(name string) bool {
if info, err := os.Stat(name); err == nil {
if info.IsDir() {
return true
}
}
return false
}
func GetFileFormat(filePath string) FileFormat {
if IsYaml(filePath) {
return YAML_FILE_FORMAT

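For orientation, a usage sketch of the reworked loader (hedged: the repository root below is hypothetical, and the package path is assumed from the surrounding core/cautils file paths). The second argument is the root used to compute the relative paths stored on each LocalWorkload; when filepath.Rel fails, the absolute path is kept.

package main // usage sketch only; the LoadResourcesFromFiles signature is the one added above

import (
	"fmt"

	"github.com/armosec/kubescape/v2/core/cautils"
)

func main() {
	root := "/home/user/myrepo" // hypothetical directory containing YAML/JSON manifests

	// Keys are the scanned file paths; each workload is wrapped as a LocalWorkload whose
	// path is stored relative to root.
	workloads := cautils.LoadResourcesFromFiles(root, root)
	for path, wls := range workloads {
		fmt.Printf("%s: %d workloads\n", path, len(wls))
	}
}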
View File

@@ -6,59 +6,100 @@ import (
"strings"
"testing"
"github.com/armosec/opa-utils/objectsenvelopes/localworkload"
"github.com/stretchr/testify/assert"
)
func onlineBoutiquePath() string {
o, _ := os.Getwd()
return filepath.Join(filepath.Dir(o), "../examples/online-boutique/*")
return filepath.Join(filepath.Dir(o), "..", "examples", "online-boutique")
}
func helmChartPath() string {
o, _ := os.Getwd()
return filepath.Join(filepath.Dir(o), "..", "examples", "helm_chart")
}
func TestListFiles(t *testing.T) {
filesPath := onlineBoutiquePath()
files, errs := listFiles([]string{filesPath})
files, errs := listFiles(filesPath)
assert.Equal(t, 0, len(errs))
assert.Equal(t, 12, len(files))
}
func TestLoadResourcesFromFiles(t *testing.T) {
workloads, err := LoadResourcesFromFiles([]string{onlineBoutiquePath()})
assert.NoError(t, err)
workloads := LoadResourcesFromFiles(onlineBoutiquePath(), "")
assert.Equal(t, 12, len(workloads))
for i, w := range workloads {
switch filepath.Base(i) {
case "adservice.yaml":
assert.Equal(t, 2, len(w))
assert.Equal(t, "apps/v1//Deployment/adservice", w[0].GetID())
assert.Equal(t, "/v1//Service/adservice", w[1].GetID())
assert.Equal(t, "apps/v1//Deployment/adservice", getRelativePath(w[0].GetID()))
assert.Equal(t, "/v1//Service/adservice", getRelativePath(w[1].GetID()))
}
}
}
func TestLoadResourcesFromHelmCharts(t *testing.T) {
sourceToWorkloads, sourceToChartName := LoadResourcesFromHelmCharts(helmChartPath())
assert.Equal(t, 6, len(sourceToWorkloads))
for file, workloads := range sourceToWorkloads {
assert.Equalf(t, 1, len(workloads), "expected 1 workload in file %s", file)
w := workloads[0]
assert.True(t, localworkload.IsTypeLocalWorkload(w.GetObject()), "Expected localworkload as object type")
assert.Equal(t, "kubescape", sourceToChartName[file])
switch filepath.Base(file) {
case "serviceaccount.yaml":
assert.Equal(t, "/v1//ServiceAccount/kubescape-discovery", getRelativePath(w.GetID()))
case "clusterrole.yaml":
assert.Equal(t, "rbac.authorization.k8s.io/v1//ClusterRole/-kubescape", getRelativePath(w.GetID()))
case "cronjob.yaml":
assert.Equal(t, "batch/v1//CronJob/-kubescape", getRelativePath(w.GetID()))
case "role.yaml":
assert.Equal(t, "rbac.authorization.k8s.io/v1//Role/-kubescape", getRelativePath(w.GetID()))
case "rolebinding.yaml":
assert.Equal(t, "rbac.authorization.k8s.io/v1//RoleBinding/-kubescape", getRelativePath(w.GetID()))
case "clusterrolebinding.yaml":
assert.Equal(t, "rbac.authorization.k8s.io/v1//ClusterRoleBinding/-kubescape", getRelativePath(w.GetID()))
default:
assert.Failf(t, "missing case for file: %s", filepath.Base(file))
}
}
}
func TestLoadFiles(t *testing.T) {
files, _ := listFiles([]string{onlineBoutiquePath()})
_, err := loadFiles(files)
files, _ := listFiles(onlineBoutiquePath())
_, err := loadFiles("", files)
assert.Equal(t, 0, len(err))
}
func TestListDirs(t *testing.T) {
dirs, _ := listDirs(filepath.Join(onlineBoutiquePath(), "adservice.yaml"))
assert.Equal(t, 0, len(dirs))
expectedDirs := []string{filepath.Join("examples", "helm_chart"), filepath.Join("examples", "helm_chart", "templates")}
dirs, _ = listDirs(helmChartPath())
assert.Equal(t, len(expectedDirs), len(dirs))
for i := range expectedDirs {
assert.Contains(t, dirs[i], expectedDirs[i])
}
}
func TestLoadFile(t *testing.T) {
files, _ := listFiles([]string{strings.Replace(onlineBoutiquePath(), "*", "adservice.yaml", 1)})
files, _ := listFiles(filepath.Join(onlineBoutiquePath(), "adservice.yaml"))
assert.Equal(t, 1, len(files))
_, err := loadFile(files[0])
assert.NoError(t, err)
}
func TestMapResources(t *testing.T) {
// policyHandler := &PolicyHandler{}
// k8sResources, err := policyHandler.loadResources(opaSessionObj.Frameworks, scanInfo)
// files, _ := listFiles([]string{onlineBoutiquePath()})
// bb, err := loadFile(files[0])
// if len(err) > 0 {
// t.Errorf("%v", err)
// }
// for i := range bb {
// t.Errorf("%s", bb[i].ToString())
// }
func getRelativePath(p string) string {
pp := strings.SplitAfter(p, "api=")
return pp[1]
}

View File

@@ -10,9 +10,9 @@ import (
"time"
"github.com/armosec/armoapi-go/armotypes"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
"github.com/armosec/opa-utils/reporthandling"
logger "github.com/dwertent/go-logger"
"github.com/dwertent/go-logger/helpers"
)
// =======================================================================================================================
@@ -23,19 +23,19 @@ var (
// ATTENTION!!!
// Changes to these URL variable names, or to how they are used, affect the build process! BE CAREFUL
armoERURL = "report.armo.cloud"
armoBEURL = "api.armo.cloud"
armoFEURL = "portal.armo.cloud"
armoAUTHURL = "auth.armo.cloud"
armoBEURL = "api.armosec.io"
armoFEURL = "cloud.armosec.io"
armoAUTHURL = "auth.armosec.io"
armoStageERURL = "report-ks.eustage2.cyberarmorsoft.com"
armoStageBEURL = "api-stage.armo.cloud"
armoStageFEURL = "armoui.eustage2.cyberarmorsoft.com"
armoStageAUTHURL = "eggauth.eustage2.cyberarmorsoft.com"
armoStageBEURL = "api-stage.armosec.io"
armoStageFEURL = "armoui-stage.armosec.io"
armoStageAUTHURL = "eggauth-stage.armosec.io"
armoDevERURL = "report.eudev3.cyberarmorsoft.com"
armoDevBEURL = "api-dev.armo.cloud"
armoDevFEURL = "armoui-dev.eudev3.cyberarmorsoft.com"
armoDevAUTHURL = "eggauth.eudev3.cyberarmorsoft.com"
armoDevBEURL = "api-dev.armosec.io"
armoDevFEURL = "cloud-dev.armosec.io"
armoDevAUTHURL = "eggauth-dev.armosec.io"
)
// Armo API for downloading policies
@@ -147,7 +147,8 @@ func (armoAPI *ArmoAPI) IsLoggedIn() bool { return armoAPI.loggedIn
func (armoAPI *ArmoAPI) GetClientID() string { return armoAPI.clientID }
func (armoAPI *ArmoAPI) GetSecretKey() string { return armoAPI.secretKey }
func (armoAPI *ArmoAPI) GetFrontendURL() string { return armoAPI.feURL }
func (armoAPI *ArmoAPI) GetAPIURL() string { return armoAPI.apiURL }
func (armoAPI *ArmoAPI) GetApiURL() string { return armoAPI.apiURL }
func (armoAPI *ArmoAPI) GetAuthURL() string { return armoAPI.authURL }
func (armoAPI *ArmoAPI) GetReportReceiverURL() string { return armoAPI.erURL }
func (armoAPI *ArmoAPI) SetAccountID(accountID string) { armoAPI.accountID = accountID }
func (armoAPI *ArmoAPI) SetClientID(clientID string) { armoAPI.clientID = clientID }

View File

@@ -13,8 +13,7 @@ var NativeFrameworks = []string{"nsa", "mitre", "armobest", "devopsbest"}
func (armoAPI *ArmoAPI) getFrameworkURL(frameworkName string) string {
u := url.URL{}
u.Scheme = "https"
u.Host = armoAPI.apiURL
u.Scheme, u.Host = parseHost(armoAPI.GetApiURL())
u.Path = "api/v1/armoFrameworks"
q := u.Query()
q.Add("customerGUID", armoAPI.getCustomerGUIDFallBack())
@@ -31,8 +30,7 @@ func (armoAPI *ArmoAPI) getFrameworkURL(frameworkName string) string {
func (armoAPI *ArmoAPI) getListFrameworkURL() string {
u := url.URL{}
u.Scheme = "https"
u.Host = armoAPI.apiURL
u.Scheme, u.Host = parseHost(armoAPI.GetApiURL())
u.Path = "api/v1/armoFrameworks"
q := u.Query()
q.Add("customerGUID", armoAPI.getCustomerGUIDFallBack())
@@ -42,8 +40,7 @@ func (armoAPI *ArmoAPI) getListFrameworkURL() string {
}
func (armoAPI *ArmoAPI) getExceptionsURL(clusterName string) string {
u := url.URL{}
u.Scheme = "https"
u.Host = armoAPI.apiURL
u.Scheme, u.Host = parseHost(armoAPI.GetApiURL())
u.Path = "api/v1/armoPostureExceptions"
q := u.Query()
@@ -58,8 +55,7 @@ func (armoAPI *ArmoAPI) getExceptionsURL(clusterName string) string {
func (armoAPI *ArmoAPI) exceptionsURL(exceptionsPolicyName string) string {
u := url.URL{}
u.Scheme = "https"
u.Host = armoAPI.apiURL
u.Scheme, u.Host = parseHost(armoAPI.GetApiURL())
u.Path = "api/v1/postureExceptionPolicy"
q := u.Query()
@@ -81,8 +77,7 @@ func (armoAPI *ArmoAPI) getAccountConfigDefault(clusterName string) string {
func (armoAPI *ArmoAPI) getAccountConfig(clusterName string) string {
u := url.URL{}
u.Scheme = "https"
u.Host = armoAPI.apiURL
u.Scheme, u.Host = parseHost(armoAPI.GetApiURL())
u.Path = "api/v1/armoCustomerConfiguration"
q := u.Query()
@@ -97,24 +92,21 @@ func (armoAPI *ArmoAPI) getAccountConfig(clusterName string) string {
func (armoAPI *ArmoAPI) getAccountURL() string {
u := url.URL{}
u.Scheme = "https"
u.Host = armoAPI.apiURL
u.Scheme, u.Host = parseHost(armoAPI.GetApiURL())
u.Path = "api/v1/createTenant"
return u.String()
}
func (armoAPI *ArmoAPI) getApiToken() string {
u := url.URL{}
u.Scheme = "https"
u.Host = armoAPI.authURL
u.Path = "frontegg/identity/resources/auth/v1/api-token"
u.Scheme, u.Host = parseHost(armoAPI.GetAuthURL())
u.Path = "identity/resources/auth/v1/api-token"
return u.String()
}
func (armoAPI *ArmoAPI) getOpenidCustomers() string {
u := url.URL{}
u.Scheme = "https"
u.Host = armoAPI.apiURL
u.Scheme, u.Host = parseHost(armoAPI.GetApiURL())
u.Path = "api/v1/openid_customers"
return u.String()
}
@@ -173,3 +165,12 @@ func (armoAPI *ArmoAPI) getCustomerGUIDFallBack() string {
}
return "11111111-1111-1111-1111-111111111111"
}
func parseHost(host string) (string, string) {
if strings.HasPrefix(host, "http://") {
return "http", strings.Replace(host, "http://", "", 1)
}
// default scheme
return "https", strings.Replace(host, "https://", "", 1)
}
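A short sketch of what parseHost enables (hedged: the package name and ArmoAPI receiver are assumed from the surrounding getter code, and the host values are illustrative): API hosts may now be configured with or without an explicit scheme, and an http:// prefix is preserved for non-TLS endpoints instead of being treated as part of the host.

package getter // sketch only; parseHost and GetApiURL are the ones defined above

import "net/url"

func frameworksURLSketch(armoAPI *ArmoAPI) string {
	u := url.URL{}
	// "api.armosec.io" and "https://api.armosec.io" both yield ("https", "api.armosec.io");
	// "http://localhost:7555" yields ("http", "localhost:7555").
	u.Scheme, u.Host = parseHost(armoAPI.GetApiURL())
	u.Path = "api/v1/armoFrameworks"
	return u.String()
}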

core/cautils/helmchart.go Normal file
View File

@@ -0,0 +1,92 @@
package cautils
import (
"path/filepath"
"strings"
"github.com/armosec/k8s-interface/workloadinterface"
"github.com/armosec/opa-utils/objectsenvelopes/localworkload"
logger "github.com/dwertent/go-logger"
"github.com/dwertent/go-logger/helpers"
helmchart "helm.sh/helm/v3/pkg/chart"
helmloader "helm.sh/helm/v3/pkg/chart/loader"
helmchartutil "helm.sh/helm/v3/pkg/chartutil"
helmengine "helm.sh/helm/v3/pkg/engine"
)
type HelmChart struct {
chart *helmchart.Chart
path string
}
func IsHelmDirectory(path string) (bool, error) {
return helmchartutil.IsChartDir(path)
}
func NewHelmChart(path string) (*HelmChart, error) {
chart, err := helmloader.Load(path)
if err != nil {
return nil, err
}
return &HelmChart{
chart: chart,
path: path,
}, nil
}
func (hc *HelmChart) GetName() string {
return hc.chart.Name()
}
func (hc *HelmChart) GetDefaultValues() map[string]interface{} {
return hc.chart.Values
}
// GetWorkloads renders chart template using the default values and returns a map of source file to its workloads
func (hc *HelmChart) GetWorkloadsWithDefaultValues() (map[string][]workloadinterface.IMetadata, []error) {
return hc.GetWorkloads(hc.GetDefaultValues())
}
// GetWorkloads renders chart template using the provided values and returns a map of source (absolute) file path to its workloads
func (hc *HelmChart) GetWorkloads(values map[string]interface{}) (map[string][]workloadinterface.IMetadata, []error) {
vals, err := helmchartutil.ToRenderValues(hc.chart, values, helmchartutil.ReleaseOptions{}, nil)
if err != nil {
return nil, []error{err}
}
sourceToFile, err := helmengine.Render(hc.chart, vals)
if err != nil {
return nil, []error{err}
}
workloads := make(map[string][]workloadinterface.IMetadata, 0)
errs := []error{}
for path, renderedYaml := range sourceToFile {
if !IsYaml(strings.ToLower(path)) {
continue
}
wls, e := ReadFile([]byte(renderedYaml), YAML_FILE_FORMAT)
if e != nil {
logger.L().Debug("failed to read rendered yaml file", helpers.String("file", path), helpers.Error(e))
}
if len(wls) == 0 {
continue
}
// Separate the base path from the file name. We do not use os.PathSeparator because the paths returned by the helm engine are not OS specific (e.g. mychart/templates/myfile.yaml)
if firstPathSeparatorIndex := strings.Index(path, string("/")); firstPathSeparatorIndex != -1 {
absPath := filepath.Join(hc.path, path[firstPathSeparatorIndex:])
workloads[absPath] = []workloadinterface.IMetadata{}
for i := range wls {
lw := localworkload.NewLocalWorkload(wls[i].GetObject())
lw.SetPath(absPath)
workloads[absPath] = append(workloads[absPath], lw)
}
}
}
return workloads, errs
}
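A usage sketch of the new HelmChart helper (hedged: the chart directory argument is hypothetical; everything else is the API defined above). This mirrors what LoadResourcesFromHelmCharts does per chart, but lets the caller override values before rendering.

package cautils // usage sketch only

import "fmt"

func renderChartSketch(chartDir string) error {
	chart, err := NewHelmChart(chartDir) // fails if chartDir is not a loadable helm chart
	if err != nil {
		return err
	}

	values := chart.GetDefaultValues() // contents of values.yaml; callers may override entries here

	fileToWorkloads, errs := chart.GetWorkloads(values) // absolute template path -> rendered workloads
	if len(errs) > 0 {
		return errs[0]
	}
	for path, workloads := range fileToWorkloads {
		fmt.Printf("chart %s: %s -> %d workloads\n", chart.GetName(), path, len(workloads))
	}
	return nil
}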

View File

@@ -0,0 +1,133 @@
package cautils
import (
_ "embed"
"encoding/json"
"io/ioutil"
"os"
"path/filepath"
"strings"
"testing"
"github.com/armosec/opa-utils/objectsenvelopes/localworkload"
"github.com/stretchr/testify/suite"
)
type HelmChartTestSuite struct {
suite.Suite
helmChartPath string
expectedFiles []string
expectedDefaultValues map[string]interface{}
}
func TestHelmChartTestSuite(t *testing.T) {
suite.Run(t, new(HelmChartTestSuite))
}
func (s *HelmChartTestSuite) SetupSuite() {
o, _ := os.Getwd()
s.helmChartPath = filepath.Join(filepath.Dir(o), "..", "examples", "helm_chart")
s.expectedFiles = []string{
filepath.Join(s.helmChartPath, "templates", "clusterrolebinding.yaml"),
filepath.Join(s.helmChartPath, "templates", "clusterrole.yaml"),
filepath.Join(s.helmChartPath, "templates", "serviceaccount.yaml"),
filepath.Join(s.helmChartPath, "templates", "rolebinding.yaml"),
filepath.Join(s.helmChartPath, "templates", "role.yaml"),
filepath.Join(s.helmChartPath, "templates", "cronjob.yaml"),
}
var obj interface{}
file, _ := ioutil.ReadFile(filepath.Join("testdata", "helm_expected_default_values.json"))
_ = json.Unmarshal([]byte(file), &obj)
s.expectedDefaultValues = obj.(map[string]interface{})
}
func (s *HelmChartTestSuite) TestInvalidHelmDirectory() {
_, err := NewHelmChart("/invalid_path")
s.Error(err)
}
func (s *HelmChartTestSuite) TestValidHelmDirectory() {
chart, err := NewHelmChart(s.helmChartPath)
s.NoError(err)
s.NotNil(chart)
}
func (s *HelmChartTestSuite) TestGetName() {
chart, _ := NewHelmChart(s.helmChartPath)
s.Equal("kubescape", chart.GetName())
}
func (s *HelmChartTestSuite) TestGetDefaultValues() {
chart, _ := NewHelmChart(s.helmChartPath)
values := chart.GetDefaultValues()
valuesJson, _ := json.Marshal(values)
expectedValuesJson, _ := json.Marshal(s.expectedDefaultValues)
s.JSONEq(string(valuesJson), string(expectedValuesJson))
}
func (s *HelmChartTestSuite) TestGetWorkloadsWithOverride() {
chart, err := NewHelmChart(s.helmChartPath)
s.NoError(err, "Expected a valid helm chart")
values := chart.GetDefaultValues()
// Default pullPolicy value = Always
pullPolicyValue := values["image"].(map[string]interface{})["pullPolicy"].(string)
s.Equal(pullPolicyValue, "Always")
// Override default value
values["image"].(map[string]interface{})["pullPolicy"] = "Never"
fileToWorkloads, errs := chart.GetWorkloads(values)
s.Len(errs, 0)
s.Lenf(fileToWorkloads, len(s.expectedFiles), "Expected %d files", len(s.expectedFiles))
for _, expectedFile := range s.expectedFiles {
s.Contains(fileToWorkloads, expectedFile)
s.FileExists(expectedFile)
s.GreaterOrEqualf(len(fileToWorkloads[expectedFile]), 1, "Expected at least one workload in %q", expectedFile)
for i := range fileToWorkloads[expectedFile] {
pathInWorkload := fileToWorkloads[expectedFile][i].(*localworkload.LocalWorkload).GetPath()
s.Equal(pathInWorkload, expectedFile, "Expected GetPath() to return a valid path on workload")
}
if strings.Contains(expectedFile, "cronjob.yaml") {
jsonBytes, _ := json.Marshal(fileToWorkloads[expectedFile][0].GetObject())
s.Contains(string(jsonBytes), "\"imagePullPolicy\":\"Never\"", "Expected the overridden value of imagePullPolicy to be 'Never'")
}
}
}
func (s *HelmChartTestSuite) TestGetWorkloadsMissingValue() {
chart, _ := NewHelmChart(s.helmChartPath)
values := chart.GetDefaultValues()
delete(values, "image")
fileToWorkloads, errs := chart.GetWorkloads(values)
s.Nil(fileToWorkloads)
s.Len(errs, 1, "Expected an error due to missing value")
expectedErrMsg := "<.Values.image.repository>: nil pointer"
s.Containsf(errs[0].Error(), expectedErrMsg, "expected error containing %q, got %q", expectedErrMsg, errs[0])
}
func (s *HelmChartTestSuite) TestIsHelmDirectory() {
ok, err := IsHelmDirectory(s.helmChartPath)
s.True(ok)
s.NoError(err)
o, _ := os.Getwd()
nonHelmDir := filepath.Join(filepath.Dir(o), "../examples/online-boutique")
ok, err = IsHelmDirectory(nonHelmDir)
s.False(ok)
s.Contains(err.Error(), "no Chart.yaml exists in directory")
}

View File

@@ -0,0 +1,246 @@
package cautils
import (
"fmt"
"path"
"strings"
"time"
"github.com/armosec/go-git-url/apis"
gitv5 "github.com/go-git/go-git/v5"
configv5 "github.com/go-git/go-git/v5/config"
plumbingv5 "github.com/go-git/go-git/v5/plumbing"
git2go "github.com/libgit2/git2go/v33"
)
type LocalGitRepository struct {
goGitRepo *gitv5.Repository
git2GoRepo *git2go.Repository
head *plumbingv5.Reference
config *configv5.Config
fileToLastCommit map[string]*git2go.Commit
}
func NewLocalGitRepository(path string) (*LocalGitRepository, error) {
goGitRepo, err := gitv5.PlainOpenWithOptions(path, &gitv5.PlainOpenOptions{DetectDotGit: true})
if err != nil {
return nil, err
}
head, err := goGitRepo.Head()
if err != nil {
return nil, err
}
if !head.Name().IsBranch() {
return nil, fmt.Errorf("current HEAD reference is not a branch")
}
config, err := goGitRepo.Config()
if err != nil {
return nil, err
}
if len(config.Remotes) == 0 {
return nil, fmt.Errorf("no remotes found")
}
l := &LocalGitRepository{
goGitRepo: goGitRepo,
head: head,
config: config,
}
if repoRoot, err := l.GetRootDir(); err == nil {
git2GoRepo, err := git2go.OpenRepository(repoRoot)
if err != nil {
return l, err
}
l.git2GoRepo = git2GoRepo
}
return l, nil
}
// GetBranchName get current branch name
func (g *LocalGitRepository) GetBranchName() string {
return g.head.Name().Short()
}
// GetRemoteUrl get default remote URL
func (g *LocalGitRepository) GetRemoteUrl() (string, error) {
branchName := g.GetBranchName()
if branchRef, branchFound := g.config.Branches[branchName]; branchFound {
remoteName := branchRef.Remote
if len(g.config.Remotes[remoteName].URLs) == 0 {
return "", fmt.Errorf("expected to find URLs for remote '%s', branch '%s'", remoteName, branchName)
}
return g.config.Remotes[remoteName].URLs[0], nil
}
const defaultRemoteName string = "origin"
if len(g.config.Remotes[defaultRemoteName].URLs) == 0 {
return "", fmt.Errorf("expected to find URLs for remote '%s'", defaultRemoteName)
}
return g.config.Remotes[defaultRemoteName].URLs[0], nil
}
// GetName get origin name without the .git suffix
func (g *LocalGitRepository) GetName() (string, error) {
originUrl, err := g.GetRemoteUrl()
if err != nil {
return "", err
}
baseName := path.Base(originUrl)
// remove .git
return strings.TrimSuffix(baseName, ".git"), nil
}
// GetLastCommit get latest commit object
func (g *LocalGitRepository) GetLastCommit() (*apis.Commit, error) {
cIter, err := g.goGitRepo.Log(&gitv5.LogOptions{})
if err != nil {
return nil, err
}
commit, err := cIter.Next()
defer cIter.Close()
if err != nil {
return nil, err
}
return &apis.Commit{
SHA: commit.Hash.String(),
Author: apis.Committer{
Name: commit.Author.Name,
Email: commit.Author.Email,
Date: commit.Author.When,
},
Message: commit.Message,
Committer: apis.Committer{},
Files: []apis.Files{},
}, nil
}
func (g *LocalGitRepository) getAllCommits() ([]*git2go.Commit, error) {
logItr, itrErr := g.git2GoRepo.Walk()
if itrErr != nil {
return nil, itrErr
}
pushErr := logItr.PushHead()
if pushErr != nil {
return nil, pushErr
}
var allCommits []*git2go.Commit
err := logItr.Iterate(func(commit *git2go.Commit) bool {
if commit != nil {
allCommits = append(allCommits, commit)
return true
}
return false
})
if err != nil {
return nil, err
}
return allCommits, nil
}
func (g *LocalGitRepository) GetFileLastCommit(filePath string) (*apis.Commit, error) {
if len(g.fileToLastCommit) == 0 {
filePathToCommitTime := map[string]time.Time{}
filePathToCommit := map[string]*git2go.Commit{}
allCommits, _ := g.getAllCommits()
// builds a map of all files to their last commit
for _, commit := range allCommits {
// Ignore merge commits (2+ parents)
if commit.ParentCount() <= 1 {
tree, err := commit.Tree()
if err != nil {
continue
}
// ParentCount can be either 1 or 0 (initial commit)
// In case it's the initial commit, prevTree is nil
var prevTree *git2go.Tree
if commit.ParentCount() == 1 {
prevCommit := commit.Parent(0)
prevTree, err = prevCommit.Tree()
if err != nil {
continue
}
}
diff, err := g.git2GoRepo.DiffTreeToTree(prevTree, tree, nil)
if err != nil {
continue
}
numDeltas, err := diff.NumDeltas()
if err != nil {
continue
}
for i := 0; i < numDeltas; i++ {
delta, err := diff.Delta(i)
if err != nil {
continue
}
deltaFilePath := delta.NewFile.Path
commitTime := commit.Author().When
// If we already track a commit for this file but it is older than the current one, override it with the newer commit
if currentCommitTime, exists := filePathToCommitTime[deltaFilePath]; exists {
if currentCommitTime.Before(commitTime) {
filePathToCommitTime[deltaFilePath] = commitTime
filePathToCommit[deltaFilePath] = commit
}
} else {
filePathToCommitTime[deltaFilePath] = commitTime
filePathToCommit[deltaFilePath] = commit
}
}
}
}
g.fileToLastCommit = filePathToCommit
}
if relevantCommit, exists := g.fileToLastCommit[filePath]; exists {
return g.getCommit(relevantCommit), nil
}
return nil, fmt.Errorf("failed to get commit information for file: %s", filePath)
}
func (g *LocalGitRepository) getCommit(commit *git2go.Commit) *apis.Commit {
return &apis.Commit{
SHA: commit.Id().String(),
Author: apis.Committer{
Name: commit.Author().Name,
Email: commit.Author().Email,
Date: commit.Author().When,
},
Message: commit.Message(),
Committer: apis.Committer{},
Files: []apis.Files{},
}
}
func (g *LocalGitRepository) GetRootDir() (string, error) {
wt, err := g.goGitRepo.Worktree()
if err != nil {
return "", fmt.Errorf("failed to get repo root")
}
return wt.Filesystem.Root(), nil
}
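A hedged usage sketch of the commit-lookup API above (repository and file paths are hypothetical; the file path is relative to the repository root, as git stores it). The first GetFileLastCommit call walks the history once, diffing each non-merge commit against its parent, and caches a file-to-last-commit map, so subsequent lookups are cheap.

package cautils // usage sketch only; LocalGitRepository is the type defined above

import "fmt"

func lastCommitSketch() error {
	repo, err := NewLocalGitRepository("/home/user/myrepo") // requires a branch HEAD and at least one remote
	if err != nil {
		return err
	}
	commit, err := repo.GetFileLastCommit("deployments/app.yaml")
	if err != nil {
		return err
	}
	fmt.Println(commit.SHA, commit.Author.Name, commit.Author.Date)
	return nil
}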

View File

@@ -0,0 +1,175 @@
package cautils
import (
"archive/zip"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"testing"
"github.com/stretchr/testify/suite"
)
var TEST_REPOS = [...]string{"localrepo", "withoutremotes"}
type LocalGitRepositoryTestSuite struct {
suite.Suite
archives map[string]*zip.ReadCloser
gitRepositoryPaths map[string]string
destinationPath string
}
func unzipFile(zipPath, destinationFolder string) (*zip.ReadCloser, error) {
archive, err := zip.OpenReader(zipPath)
if err != nil {
return nil, err
}
for _, f := range archive.File {
filePath := filepath.Join(destinationFolder, f.Name)
if !strings.HasPrefix(filePath, filepath.Clean(destinationFolder)+string(os.PathSeparator)) {
return nil, fmt.Errorf("invalid file path")
}
if f.FileInfo().IsDir() {
os.MkdirAll(filePath, os.ModePerm)
continue
}
if err := os.MkdirAll(filepath.Dir(filePath), os.ModePerm); err != nil {
return nil, err
}
dstFile, err := os.OpenFile(filePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode())
if err != nil {
return nil, err
}
fileInArchive, err := f.Open()
if err != nil {
return nil, err
}
if _, err := io.Copy(dstFile, fileInArchive); err != nil {
return nil, err
}
dstFile.Close()
fileInArchive.Close()
}
return archive, err
}
func (s *LocalGitRepositoryTestSuite) SetupSuite() {
s.archives = make(map[string]*zip.ReadCloser)
s.gitRepositoryPaths = make(map[string]string)
destinationPath := filepath.Join(".", "testdata", "temp")
s.destinationPath = destinationPath
os.RemoveAll(destinationPath)
for _, repo := range TEST_REPOS {
zippedFixturePath := filepath.Join(".", "testdata", repo+".git")
gitRepositoryPath := filepath.Join(destinationPath, repo)
archive, err := unzipFile(zippedFixturePath, destinationPath)
if err == nil {
s.archives[repo] = archive
s.gitRepositoryPaths[repo] = gitRepositoryPath
}
}
}
func TestLocalGitRepositoryTestSuite(t *testing.T) {
suite.Run(t, new(LocalGitRepositoryTestSuite))
}
func (s *LocalGitRepositoryTestSuite) TearDownSuite() {
if s.archives != nil {
for _, archive := range s.archives {
if archive != nil {
archive.Close()
}
}
}
os.RemoveAll(s.destinationPath)
}
func (s *LocalGitRepositoryTestSuite) TestInvalidRepositoryPath() {
if _, err := NewLocalGitRepository("/invalidpath"); s.Error(err) {
s.Equal("repository does not exist", err.Error())
}
}
func (s *LocalGitRepositoryTestSuite) TestRepositoryWithoutRemotes() {
if _, err := NewLocalGitRepository(s.gitRepositoryPaths["withoutremotes"]); s.Error(err) {
s.Equal("no remotes found", err.Error())
}
}
func (s *LocalGitRepositoryTestSuite) TestGetBranchName() {
if localRepo, err := NewLocalGitRepository(s.gitRepositoryPaths["localrepo"]); s.NoError(err) {
s.Equal("master", localRepo.GetBranchName())
}
}
func (s *LocalGitRepositoryTestSuite) TestGetName() {
if localRepo, err := NewLocalGitRepository(s.gitRepositoryPaths["localrepo"]); s.NoError(err) {
if name, err := localRepo.GetName(); s.NoError(err) {
s.Equal("localrepo", name)
}
}
}
func (s *LocalGitRepositoryTestSuite) TestGetOriginUrl() {
if localRepo, err := NewLocalGitRepository(s.gitRepositoryPaths["localrepo"]); s.NoError(err) {
if url, err := localRepo.GetRemoteUrl(); s.NoError(err) {
s.Equal("git@github.com:testuser/localrepo", url)
}
}
}
func (s *LocalGitRepositoryTestSuite) TestGetLastCommit() {
if localRepo, err := NewLocalGitRepository(s.gitRepositoryPaths["localrepo"]); s.NoError(err) {
if commit, err := localRepo.GetLastCommit(); s.NoError(err) {
s.Equal("7e09312b8017695fadcd606882e3779f10a5c832", commit.SHA)
s.Equal("Amir Malka", commit.Author.Name)
s.Equal("amirm@armosec.io", commit.Author.Email)
s.Equal("2022-05-22 19:11:57 +0300 +0300", commit.Author.Date.String())
s.Equal("added file B\n", commit.Message)
}
}
}
func (s *LocalGitRepositoryTestSuite) TestGetFileLastCommit() {
s.Run("fileA", func() {
if localRepo, err := NewLocalGitRepository(s.gitRepositoryPaths["localrepo"]); s.NoError(err) {
if commit, err := localRepo.GetFileLastCommit("fileA"); s.NoError(err) {
s.Equal("9fae4be19624297947d2b605cefbff516628612d", commit.SHA)
s.Equal("Amir Malka", commit.Author.Name)
s.Equal("amirm@armosec.io", commit.Author.Email)
s.Equal("2022-05-22 18:55:48 +0300 +0300", commit.Author.Date.String())
s.Equal("added file A\n", commit.Message)
}
}
})
s.Run("fileB", func() {
if localRepo, err := NewLocalGitRepository(s.gitRepositoryPaths["localrepo"]); s.NoError(err) {
if commit, err := localRepo.GetFileLastCommit("dirA/fileB"); s.NoError(err) {
s.Equal("7e09312b8017695fadcd606882e3779f10a5c832", commit.SHA)
s.Equal("Amir Malka", commit.Author.Name)
s.Equal("amirm@armosec.io", commit.Author.Email)
s.Equal("2022-05-22 19:11:57 +0300 +0300", commit.Author.Date.String())
s.Equal("added file B\n", commit.Message)
}
}
})
}

View File

@@ -1,31 +0,0 @@
package helpers
import "time"
type StringObj struct {
key string
value string
}
type ErrorObj struct {
key string
value error
}
type IntObj struct {
key string
value int
}
type InterfaceObj struct {
key string
value interface{}
}
func Error(e error) *ErrorObj { return &ErrorObj{key: "error", value: e} }
func Int(k string, v int) *IntObj { return &IntObj{key: k, value: v} }
func String(k, v string) *StringObj { return &StringObj{key: k, value: v} }
func Interface(k string, v interface{}) *InterfaceObj { return &InterfaceObj{key: k, value: v} }
func Time() *StringObj {
return &StringObj{key: "time", value: time.Now().Format("2006-01-02 15:04:05")}
}

View File

@@ -1,69 +0,0 @@
package helpers
import (
"strings"
)
type Level int8
const (
UnknownLevel Level = iota - 1
DebugLevel
InfoLevel //default
SuccessLevel
WarningLevel
ErrorLevel
FatalLevel
_defaultLevel = InfoLevel
_minLevel = DebugLevel
_maxLevel = FatalLevel
)
func ToLevel(level string) Level {
switch strings.ToLower(level) {
case "debug":
return DebugLevel
case "info":
return InfoLevel
case "success":
return SuccessLevel
case "warning", "warn":
return WarningLevel
case "error":
return ErrorLevel
case "fatal":
return FatalLevel
default:
return UnknownLevel
}
}
func (l Level) String() string {
switch l {
case DebugLevel:
return "debug"
case InfoLevel:
return "info"
case SuccessLevel:
return "success"
case WarningLevel:
return "warning"
case ErrorLevel:
return "error"
case FatalLevel:
return "fatal"
}
return ""
}
func (l Level) Skip(l2 Level) bool {
return l < l2
}
func SupportedLevels() []string {
levels := []string{}
for i := _minLevel; i <= _maxLevel; i++ {
levels = append(levels, i.String())
}
return levels
}

View File

@@ -1,62 +0,0 @@
package helpers
type IDetails interface {
Key() string
Value() interface{}
}
// ======================================================================================
// ============================== String ================================================
// ======================================================================================
// Key
func (s *StringObj) Key() string {
return s.key
}
// Value
func (s *StringObj) Value() interface{} {
return s.value
}
// ======================================================================================
// =============================== Error ================================================
// ======================================================================================
// Key
func (s *ErrorObj) Key() string {
return s.key
}
// Value
func (s *ErrorObj) Value() interface{} {
return s.value
}
// ======================================================================================
// ================================= Int ================================================
// ======================================================================================
// Key
func (s *IntObj) Key() string {
return s.key
}
// Value
func (s *IntObj) Value() interface{} {
return s.value
}
// ======================================================================================
// =========================== Interface ================================================
// ======================================================================================
// Key
func (s *InterfaceObj) Key() string {
return s.key
}
// Value
func (s *InterfaceObj) Value() interface{} {
return s.value
}

View File

@@ -1,81 +0,0 @@
package logger
import (
"os"
"strings"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
"github.com/armosec/kubescape/v2/core/cautils/logger/nonelogger"
"github.com/armosec/kubescape/v2/core/cautils/logger/prettylogger"
"github.com/armosec/kubescape/v2/core/cautils/logger/zaplogger"
)
type ILogger interface {
Fatal(msg string, details ...helpers.IDetails) // print log and exit 1
Error(msg string, details ...helpers.IDetails)
Success(msg string, details ...helpers.IDetails)
Warning(msg string, details ...helpers.IDetails)
Info(msg string, details ...helpers.IDetails)
Debug(msg string, details ...helpers.IDetails)
SetLevel(level string) error
GetLevel() string
SetWriter(w *os.File)
GetWriter() *os.File
LoggerName() string
}
var l ILogger
// Return initialized logger. If logger not initialized, will call InitializeLogger() with the default value
func L() ILogger {
if l == nil {
InitDefaultLogger()
}
return l
}
/* InitLogger initialize desired logger
Use:
InitLogger("<logger name>")
Supported logger names (call ListLoggersNames() for listing supported loggers)
- "zap": Logger from package "go.uber.org/zap"
- "pretty", "colorful": Human friendly colorful logger
- "none", "mock", "empty", "ignore": Logger will not print anything
Default:
- "pretty"
e.g.
InitLogger("none") -> will initialize the mock logger
*/
func InitLogger(loggerName string) {
switch strings.ToLower(loggerName) {
case zaplogger.LoggerName:
l = zaplogger.NewZapLogger()
case prettylogger.LoggerName, "colorful":
l = prettylogger.NewPrettyLogger()
case nonelogger.LoggerName, "mock", "empty", "ignore":
l = nonelogger.NewNoneLogger()
default:
InitDefaultLogger()
}
}
func InitDefaultLogger() {
l = prettylogger.NewPrettyLogger()
}
func DisableColor(flag bool) {
prettylogger.DisableColor(flag)
}
func ListLoggersNames() []string {
return []string{prettylogger.LoggerName, zaplogger.LoggerName, nonelogger.LoggerName}
}
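The in-tree logger package deleted here is replaced by the external module already imported in the hunks above (github.com/dwertent/go-logger). A minimal sketch of a call site after the move, assuming only the functions that appear in this diff (L(), Info, Error, helpers.String, helpers.Error); the message and field values are illustrative.

package main // sketch only

import (
	"errors"

	logger "github.com/dwertent/go-logger"
	"github.com/dwertent/go-logger/helpers"
)

func main() {
	// Same ILogger surface as before: L() returns the initialized logger,
	// helpers.String/helpers.Error build the structured details.
	logger.L().Info("scanning cluster", helpers.String("cluster", "minikube"))
	logger.L().Error("scan failed", helpers.Error(errors.New("example error")))
}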

View File

@@ -1,28 +0,0 @@
package nonelogger
import (
"os"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
)
const LoggerName string = "none"
type NoneLogger struct {
}
func NewNoneLogger() *NoneLogger {
return &NoneLogger{}
}
func (nl *NoneLogger) GetLevel() string { return "" }
func (nl *NoneLogger) LoggerName() string { return LoggerName }
func (nl *NoneLogger) SetWriter(w *os.File) {}
func (nl *NoneLogger) GetWriter() *os.File { return nil }
func (nl *NoneLogger) SetLevel(level string) error { return nil }
func (nl *NoneLogger) Fatal(msg string, details ...helpers.IDetails) {}
func (nl *NoneLogger) Error(msg string, details ...helpers.IDetails) {}
func (nl *NoneLogger) Warning(msg string, details ...helpers.IDetails) {}
func (nl *NoneLogger) Success(msg string, details ...helpers.IDetails) {}
func (nl *NoneLogger) Info(msg string, details ...helpers.IDetails) {}
func (nl *NoneLogger) Debug(msg string, details ...helpers.IDetails) {}

View File

@@ -1,37 +0,0 @@
package prettylogger
import (
"io"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
"github.com/fatih/color"
)
var prefixError = color.New(color.Bold, color.FgHiRed).FprintfFunc()
var prefixWarning = color.New(color.Bold, color.FgHiYellow).FprintfFunc()
var prefixInfo = color.New(color.Bold, color.FgCyan).FprintfFunc()
var prefixSuccess = color.New(color.Bold, color.FgHiGreen).FprintfFunc()
var prefixDebug = color.New(color.Bold, color.FgWhite).FprintfFunc()
var message = color.New().FprintfFunc()
func prefix(l helpers.Level) func(w io.Writer, format string, a ...interface{}) {
switch l {
case helpers.DebugLevel:
return prefixDebug
case helpers.InfoLevel:
return prefixInfo
case helpers.SuccessLevel:
return prefixSuccess
case helpers.WarningLevel:
return prefixWarning
case helpers.ErrorLevel, helpers.FatalLevel:
return prefixError
}
return message
}
func DisableColor(flag bool) {
if flag {
color.NoColor = true
}
}

View File

@@ -1,82 +0,0 @@
package prettylogger
import (
"fmt"
"os"
"sync"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
)
const LoggerName string = "pretty"
type PrettyLogger struct {
writer *os.File
level helpers.Level
mutex sync.Mutex
}
func NewPrettyLogger() *PrettyLogger {
return &PrettyLogger{
writer: os.Stderr, // default to stderr
level: helpers.InfoLevel,
mutex: sync.Mutex{},
}
}
func (pl *PrettyLogger) GetLevel() string { return pl.level.String() }
func (pl *PrettyLogger) SetWriter(w *os.File) { pl.writer = w }
func (pl *PrettyLogger) GetWriter() *os.File { return pl.writer }
func (pl *PrettyLogger) LoggerName() string { return LoggerName }
func (pl *PrettyLogger) SetLevel(level string) error {
pl.level = helpers.ToLevel(level)
if pl.level == helpers.UnknownLevel {
return fmt.Errorf("level '%s' unknown", level)
}
return nil
}
func (pl *PrettyLogger) Fatal(msg string, details ...helpers.IDetails) {
pl.print(helpers.FatalLevel, msg, details...)
os.Exit(1)
}
func (pl *PrettyLogger) Error(msg string, details ...helpers.IDetails) {
pl.print(helpers.ErrorLevel, msg, details...)
}
func (pl *PrettyLogger) Warning(msg string, details ...helpers.IDetails) {
pl.print(helpers.WarningLevel, msg, details...)
}
func (pl *PrettyLogger) Info(msg string, details ...helpers.IDetails) {
pl.print(helpers.InfoLevel, msg, details...)
}
func (pl *PrettyLogger) Debug(msg string, details ...helpers.IDetails) {
pl.print(helpers.DebugLevel, msg, details...)
}
func (pl *PrettyLogger) Success(msg string, details ...helpers.IDetails) {
pl.print(helpers.SuccessLevel, msg, details...)
}
func (pl *PrettyLogger) print(level helpers.Level, msg string, details ...helpers.IDetails) {
if !level.Skip(pl.level) {
pl.mutex.Lock()
prefix(level)(pl.writer, "[%s] ", level.String())
if d := detailsToString(details); d != "" {
msg = fmt.Sprintf("%s. %s", msg, d)
}
message(pl.writer, fmt.Sprintf("%s\n", msg))
pl.mutex.Unlock()
}
}
func detailsToString(details []helpers.IDetails) string {
s := ""
for i := range details {
s += fmt.Sprintf("%s: %v", details[i].Key(), details[i].Value())
if i < len(details)-1 {
s += "; "
}
}
return s
}
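Taken together, print and detailsToString define the on-screen format. A small sketch of the expected output, using the in-tree package before its removal; the exact level prefix text and the key used by helpers.Error come from the helpers package and are assumptions here:

package main

import (
    "errors"

    "github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
    "github.com/armosec/kubescape/v2/core/cautils/logger/prettylogger"
)

func main() {
    prettylogger.DisableColor(true) // plain text output
    pl := prettylogger.NewPrettyLogger()

    pl.Warning("failed to load exceptions",
        helpers.String("cluster", "minikube"),
        helpers.Error(errors.New("timeout")))

    // Expected shape on stderr, per print/detailsToString above:
    //   [warning] failed to load exceptions. cluster: minikube; error: timeout
}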

View File

@@ -1,79 +0,0 @@
package zaplogger
import (
"os"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
const LoggerName string = "zap"
type ZapLogger struct {
zapL *zap.Logger
cfg zap.Config
}
func NewZapLogger() *ZapLogger {
ec := zap.NewProductionEncoderConfig()
ec.EncodeTime = zapcore.RFC3339TimeEncoder
cfg := zap.NewProductionConfig()
cfg.DisableCaller = true
cfg.DisableStacktrace = true
cfg.Encoding = "json"
cfg.EncoderConfig = ec
zapLogger, err := cfg.Build()
if err != nil {
panic(err)
}
return &ZapLogger{
zapL: zapLogger,
cfg: cfg,
}
}
func (zl *ZapLogger) GetLevel() string { return zl.cfg.Level.Level().String() }
func (zl *ZapLogger) SetWriter(w *os.File) {}
func (zl *ZapLogger) GetWriter() *os.File { return nil }
func (zl *ZapLogger) LoggerName() string { return LoggerName }
func (zl *ZapLogger) SetLevel(level string) error {
l := zapcore.Level(1)
err := l.Set(level)
if err == nil {
zl.cfg.Level.SetLevel(l)
}
return err
}
func (zl *ZapLogger) Fatal(msg string, details ...helpers.IDetails) {
zl.zapL.Fatal(msg, detailsToZapFields(details)...)
}
func (zl *ZapLogger) Error(msg string, details ...helpers.IDetails) {
zl.zapL.Error(msg, detailsToZapFields(details)...)
}
func (zl *ZapLogger) Warning(msg string, details ...helpers.IDetails) {
zl.zapL.Warn(msg, detailsToZapFields(details)...)
}
func (zl *ZapLogger) Success(msg string, details ...helpers.IDetails) {
zl.zapL.Info(msg, detailsToZapFields(details)...)
}
func (zl *ZapLogger) Info(msg string, details ...helpers.IDetails) {
zl.zapL.Info(msg, detailsToZapFields(details)...)
}
func (zl *ZapLogger) Debug(msg string, details ...helpers.IDetails) {
zl.zapL.Debug(msg, detailsToZapFields(details)...)
}
func detailsToZapFields(details []helpers.IDetails) []zapcore.Field {
zapFields := []zapcore.Field{}
for i := range details {
zapFields = append(zapFields, zap.Any(details[i].Key(), details[i].Value()))
}
return zapFields
}
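For contrast with the pretty logger, a sketch of what the zap-backed logger emits with the production config above; the field names other than the message come from zap's production defaults and are assumptions:

package main

import (
    "github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
    "github.com/armosec/kubescape/v2/core/cautils/logger/zaplogger"
)

func main() {
    zl := zaplogger.NewZapLogger()
    zl.Info("scan started", helpers.String("cluster", "minikube"))
    // Roughly, on stderr (JSON encoding, RFC3339 timestamps; values are placeholders):
    //   {"level":"info","ts":"2022-08-01T09:00:00+03:00","msg":"scan started","cluster":"minikube"}
}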

View File

@@ -8,6 +8,13 @@ type RootInfo struct {
ArmoBEURLs string // armo url
ArmoBEURLsDep string // armo url
}
type Credentials struct {
Account string
ClientID string
SecretKey string
}
// func (rootInfo *RootInfo) InitLogger() {
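The rest of this changeset threads the new Credentials struct through the CLI and meta layers in place of the bare account string. A rough, hypothetical sketch of constructing it (placeholder values; in practice these would come from flags or environment):

package main

import "github.com/armosec/kubescape/v2/core/cautils"

func main() {
    creds := cautils.Credentials{
        Account:   "11111111-2222-3333-4444-555555555555", // placeholder account GUID
        ClientID:  "",                                      // optional API client ID
        SecretKey: "",                                      // optional API secret key
    }
    // Updated call sites below take a *cautils.Credentials, e.g.
    // getTenantConfig(&creds, "", ...) and (*Kubescape).SubmitExceptions(&creds, path).
    _ = creds
}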

View File

@@ -8,21 +8,37 @@ import (
"path/filepath"
"strings"
"github.com/armosec/armoapi-go/armotypes"
apisv1 "github.com/armosec/opa-utils/httpserver/apis/v1"
giturl "github.com/armosec/go-git-url"
"github.com/armosec/k8s-interface/k8sinterface"
"github.com/armosec/kubescape/v2/core/cautils/getter"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
"github.com/armosec/opa-utils/reporthandling"
reporthandlingv2 "github.com/armosec/opa-utils/reporthandling/v2"
logger "github.com/dwertent/go-logger"
"github.com/dwertent/go-logger/helpers"
"github.com/google/uuid"
)
type ScanningContext string
const (
ScanCluster string = "cluster"
ScanLocalFiles string = "yaml"
ContextCluster ScanningContext = "cluster"
ContextFile ScanningContext = "single-file"
ContextDir ScanningContext = "local-dir"
ContextGitURL ScanningContext = "git-url"
ContextGitLocal ScanningContext = "git-local"
)
const ( // deprecated
ScopeCluster = "cluster"
ScopeYAML = "yaml"
)
const (
// ScanCluster string = "cluster"
// ScanLocalFiles string = "yaml"
localControlInputsFilename string = "controls-inputs.json"
localExceptionsFilename string = "exceptions.json"
)
@@ -77,33 +93,39 @@ const (
ControlViewType ViewTypes = "control"
)
type PolicyIdentifier struct {
Name string // policy name e.g. nsa,mitre,c-0012
Kind apisv1.NotificationPolicyKind // policy kind e.g. Framework,Control,Rule
Designators armotypes.PortalDesignator
}
type ScanInfo struct {
Getters // TODO - remove from object
PolicyIdentifier []reporthandling.PolicyIdentifier // TODO - remove from object
UseExceptions string // Load file with exceptions configuration
ControlsInputs string // Load file with inputs for controls
UseFrom []string // Load framework from local file (instead of download). Use when running offline
UseDefault bool // Load framework from cached file (instead of download). Use when running offline
UseArtifactsFrom string // Load artifacts from local path. Use when running offline
VerboseMode bool // Display all of the input resources and not only failed resources
View string // Display all of the input resources and not only failed resources
Format string // Format results (table, json, junit ...)
Output string // Store results in an output file, Output file name
FormatVersion string // Output object can be different between versions; this is for testing and backward compatibility
ExcludedNamespaces string // used for host scanner namespace
IncludeNamespaces string // DEPRECATED?
InputPatterns []string // Yaml files input patterns
Silent bool // Silent mode - Do not print progress logs
FailThreshold float32 // Failure score threshold
Submit bool // Submit results to Armo BE
ScanID string // Report id of the current scan
HostSensorEnabled BoolPtrFlag // Deploy ARMO K8s host scanner to collect data from certain controls
HostSensorYamlPath string // Path to hostsensor file
Local bool // Do not submit results
Account string // account ID
KubeContext string // context name
FrameworkScan bool // false if scanning control
ScanAll bool // true if scan all frameworks
Getters // TODO - remove from object
PolicyIdentifier []PolicyIdentifier // TODO - remove from object
UseExceptions string // Load file with exceptions configuration
ControlsInputs string // Load file with inputs for controls
UseFrom []string // Load framework from local file (instead of download). Use when running offline
UseDefault bool // Load framework from cached file (instead of download). Use when running offline
UseArtifactsFrom string // Load artifacts from local path. Use when running offline
VerboseMode bool // Display all of the input resources and not only failed resources
View string // Display all of the input resources and not only failed resources
Format string // Format results (table, json, junit ...)
Output string // Store results in an output file, Output file name
FormatVersion string // Output object can be different between versions; this is for testing and backward compatibility
ExcludedNamespaces string // used for host scanner namespace
IncludeNamespaces string //
InputPatterns []string // Yaml files input patterns
Silent bool // Silent mode - Do not print progress logs
FailThreshold float32 // Failure score threshold
Submit bool // Submit results to Armo BE
ScanID string // Report id of the current scan
HostSensorEnabled BoolPtrFlag // Deploy ARMO K8s host scanner to collect data from certain controls
HostSensorYamlPath string // Path to hostsensor file
Local bool // Do not submit results
Credentials Credentials // account ID
KubeContext string // context name
FrameworkScan bool // false if scanning control
ScanAll bool // true if scan all frameworks
}
type Getters struct {
@@ -183,18 +205,18 @@ func (scanInfo *ScanInfo) setOutputFile() {
}
}
func (scanInfo *ScanInfo) GetScanningEnvironment() string {
if len(scanInfo.InputPatterns) != 0 {
return ScanLocalFiles
}
return ScanCluster
}
// func (scanInfo *ScanInfo) GetScanningEnvironment() string {
// if len(scanInfo.InputPatterns) != 0 {
// return ScanLocalFiles
// }
// return ScanCluster
// }
func (scanInfo *ScanInfo) SetPolicyIdentifiers(policies []string, kind apisv1.NotificationPolicyKind) {
for _, policy := range policies {
if !scanInfo.contains(policy) {
newPolicy := reporthandling.PolicyIdentifier{}
newPolicy.Kind = reporthandling.NotificationPolicyKind(kind) // reporthandling.KindFramework
newPolicy := PolicyIdentifier{}
newPolicy.Kind = kind
newPolicy.Name = policy
scanInfo.PolicyIdentifier = append(scanInfo.PolicyIdentifier, newPolicy)
}
@@ -241,71 +263,189 @@ func scanInfoToScanMetadata(scanInfo *ScanInfo) *reporthandlingv2.Metadata {
metadata.ScanMetadata.VerboseMode = scanInfo.VerboseMode
metadata.ScanMetadata.ControlsInputs = scanInfo.ControlsInputs
metadata.ScanMetadata.ScanningTarget = reporthandlingv2.Cluster
if scanInfo.GetScanningEnvironment() == ScanLocalFiles {
metadata.ScanMetadata.ScanningTarget = reporthandlingv2.File
}
inputFiles := ""
if len(scanInfo.InputPatterns) > 0 {
inputFiles = scanInfo.InputPatterns[0]
}
switch GetScanningContext(inputFiles) {
case ContextCluster:
// cluster
metadata.ScanMetadata.ScanningTarget = reporthandlingv2.Cluster
case ContextFile:
// local file
metadata.ScanMetadata.ScanningTarget = reporthandlingv2.File
case ContextGitURL:
// url
metadata.ScanMetadata.ScanningTarget = reporthandlingv2.Repo
case ContextGitLocal:
// local-git
metadata.ScanMetadata.ScanningTarget = reporthandlingv2.GitLocal
case ContextDir:
// directory
metadata.ScanMetadata.ScanningTarget = reporthandlingv2.Directory
}
setContextMetadata(&metadata.ContextMetadata, inputFiles)
return metadata
}
func setContextMetadata(contextMetadata *reporthandlingv2.ContextMetadata, input string) {
func (scanInfo *ScanInfo) GetScanningContext() ScanningContext {
input := ""
if len(scanInfo.InputPatterns) > 0 {
input = scanInfo.InputPatterns[0]
}
return GetScanningContext(input)
}
// GetScanningContext gets the scanning context from the input param
func GetScanningContext(input string) ScanningContext {
// cluster
if input == "" {
contextMetadata.ClusterContextMetadata = &reporthandlingv2.ClusterMetadata{
ContextName: k8sinterface.GetContextName(),
}
return
return ContextCluster
}
// url
if gitParser, err := giturl.NewGitURL(input); err == nil {
if gitParser.GetBranch() == "" {
gitParser.SetDefaultBranch()
}
contextMetadata.RepoContextMetadata = &reporthandlingv2.RepoContextMetadata{
Repo: gitParser.GetRepo(),
Owner: gitParser.GetOwner(),
Branch: gitParser.GetBranch(),
}
return
if _, err := giturl.NewGitURL(input); err == nil {
return ContextGitURL
}
if !filepath.IsAbs(input) {
if !filepath.IsAbs(input) { // parse path
if o, err := os.Getwd(); err == nil {
input = filepath.Join(o, input)
}
}
// local git repo
if _, err := NewLocalGitRepository(input); err == nil {
return ContextGitLocal
}
// single file
if IsFile(input) {
contextMetadata.FileContextMetadata = &reporthandlingv2.FileContextMetadata{
FilePath: input,
HostName: getHostname(),
}
return
return ContextFile
}
// dir/glob
if !IsFile(input) {
return ContextDir
}
func setContextMetadata(contextMetadata *reporthandlingv2.ContextMetadata, input string) {
switch GetScanningContext(input) {
case ContextCluster:
contextMetadata.ClusterContextMetadata = &reporthandlingv2.ClusterMetadata{
ContextName: k8sinterface.GetContextName(),
}
case ContextGitURL:
// url
context, err := metadataGitURL(input)
if err != nil {
logger.L().Warning("in setContextMetadata", helpers.Interface("case", ContextGitURL), helpers.Error(err))
}
contextMetadata.RepoContextMetadata = context
case ContextDir:
contextMetadata.DirectoryContextMetadata = &reporthandlingv2.DirectoryContextMetadata{
BasePath: input,
BasePath: getAbsPath(input),
HostName: getHostname(),
}
return
case ContextFile:
contextMetadata.FileContextMetadata = &reporthandlingv2.FileContextMetadata{
FilePath: getAbsPath(input),
HostName: getHostname(),
}
case ContextGitLocal:
// local
context, err := metadataGitLocal(input)
if err != nil {
logger.L().Warning("in setContextMetadata", helpers.Interface("case", ContextGitURL), helpers.Error(err))
}
contextMetadata.RepoContextMetadata = context
}
}
func metadataGitURL(input string) (*reporthandlingv2.RepoContextMetadata, error) {
context := &reporthandlingv2.RepoContextMetadata{}
gitParser, err := giturl.NewGitAPI(input)
if err != nil {
return context, fmt.Errorf("%w", err)
}
if gitParser.GetBranchName() == "" {
gitParser.SetDefaultBranchName()
}
context.Provider = gitParser.GetProvider()
context.Repo = gitParser.GetRepoName()
context.Owner = gitParser.GetOwnerName()
context.Branch = gitParser.GetBranchName()
context.RemoteURL = gitParser.GetURL().String()
commit, err := gitParser.GetLatestCommit()
if err != nil {
return context, fmt.Errorf("%w", err)
}
context.LastCommit = reporthandling.LastCommit{
Hash: commit.SHA,
Date: commit.Committer.Date,
CommitterName: commit.Committer.Name,
}
return context, nil
}
func metadataGitLocal(input string) (*reporthandlingv2.RepoContextMetadata, error) {
gitParser, err := NewLocalGitRepository(input)
if err != nil {
return nil, fmt.Errorf("%w", err)
}
remoteURL, err := gitParser.GetRemoteUrl()
if err != nil {
return nil, fmt.Errorf("%w", err)
}
context := &reporthandlingv2.RepoContextMetadata{}
gitParserURL, err := giturl.NewGitURL(remoteURL)
if err != nil {
return context, fmt.Errorf("%w", err)
}
gitParserURL.SetBranchName(gitParser.GetBranchName())
context.Provider = gitParserURL.GetProvider()
context.Repo = gitParserURL.GetRepoName()
context.Owner = gitParserURL.GetOwnerName()
context.Branch = gitParserURL.GetBranchName()
context.RemoteURL = gitParserURL.GetURL().String()
commit, err := gitParser.GetLastCommit()
if err != nil {
return context, fmt.Errorf("%w", err)
}
context.LastCommit = reporthandling.LastCommit{
Hash: commit.SHA,
Date: commit.Committer.Date,
CommitterName: commit.Committer.Name,
}
return context, nil
}
func getHostname() string {
if h, e := os.Hostname(); e == nil {
return h
}
return ""
}
func getAbsPath(p string) string {
if !filepath.IsAbs(p) { // parse path
if o, err := os.Getwd(); err == nil {
return filepath.Join(o, p)
}
}
return p
}
// ScanningContextToScanningScope converts the context to the deprecated scope
func ScanningContextToScanningScope(scanningContext ScanningContext) string {
if scanningContext == ContextCluster {
return ScopeCluster
}
return ScopeYAML
}
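A small sketch of how different inputs are expected to resolve through GetScanningContext and back to the deprecated scope; the paths and URL are placeholders, and actual resolution depends on the working directory:

package main

import (
    "fmt"

    "github.com/armosec/kubescape/v2/core/cautils"
)

func main() {
    fmt.Println(cautils.GetScanningContext(""))                                     // "cluster"
    fmt.Println(cautils.GetScanningContext("https://github.com/armosec/kubescape")) // "git-url"
    fmt.Println(cautils.GetScanningContext("deployment.yaml"))                      // "single-file" if the file exists, otherwise "local-dir"
    fmt.Println(cautils.GetScanningContext("./manifests"))                          // "local-dir", or "git-local" when the directory is a git clone

    // Only the cluster context maps back to the deprecated "cluster" scope.
    fmt.Println(cautils.ScanningContextToScanningScope(cautils.ContextGitURL)) // "yaml"
}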

View File

@@ -18,34 +18,6 @@ func TestSetContextMetadata(t *testing.T) {
assert.Nil(t, ctx.HelmContextMetadata)
assert.Nil(t, ctx.RepoContextMetadata)
}
{
ctx := reporthandlingv2.ContextMetadata{}
setContextMetadata(&ctx, "file")
assert.Nil(t, ctx.ClusterContextMetadata)
assert.NotNil(t, ctx.DirectoryContextMetadata)
assert.Nil(t, ctx.FileContextMetadata)
assert.Nil(t, ctx.HelmContextMetadata)
assert.Nil(t, ctx.RepoContextMetadata)
hostName := getHostname()
assert.Contains(t, ctx.DirectoryContextMetadata.BasePath, "file")
assert.Equal(t, hostName, ctx.DirectoryContextMetadata.HostName)
}
{
ctx := reporthandlingv2.ContextMetadata{}
setContextMetadata(&ctx, "scaninfo_test.go")
assert.Nil(t, ctx.ClusterContextMetadata)
assert.Nil(t, ctx.DirectoryContextMetadata)
assert.NotNil(t, ctx.FileContextMetadata)
assert.Nil(t, ctx.HelmContextMetadata)
assert.Nil(t, ctx.RepoContextMetadata)
hostName := getHostname()
assert.Contains(t, ctx.FileContextMetadata.FilePath, "scaninfo_test.go")
assert.Equal(t, hostName, ctx.FileContextMetadata.HostName)
}
{
ctx := reporthandlingv2.ContextMetadata{}
setContextMetadata(&ctx, "https://github.com/armosec/kubescape")
@@ -65,3 +37,11 @@ func TestSetContextMetadata(t *testing.T) {
func TestGetHostname(t *testing.T) {
assert.NotEqual(t, "", getHostname())
}
func TestGetScanningContext(t *testing.T) {
assert.Equal(t, ContextCluster, GetScanningContext(""))
// assert.Equal(t, ContextDir, GetScanningContext("/"))
assert.Equal(t, ContextGitURL, GetScanningContext("https://github.com/armosec/kubescape"))
// assert.Equal(t, ContextFile, GetScanningContext(path.Join(".", "testdata", "localrepo.git")))
// assert.Equal(t, ContextGitLocal, GetScanningContext(path.Join(".", "testdata")))
}

View File

@@ -0,0 +1,40 @@
{
"affinity": {},
"configMap": {
"create": false,
"params": {
"clusterName": "<MyK8sClusterName>",
"customerGUID": "<MyGUID>"
}
},
"fullnameOverride": "",
"image": {
"imageName": "kubescape",
"pullPolicy": "Always",
"repository": "quay.io/armosec",
"tag": "latest"
},
"imagePullSecrets": [],
"nameOverride": "",
"nodeSelector": {},
"podAnnotations": {},
"podSecurityContext": {},
"resources": {
"limits": {
"cpu": "500m",
"memory": "512Mi"
},
"requests": {
"cpu": "200m",
"memory": "256Mi"
}
},
"schedule": "* * 1 * *",
"securityContext": {},
"serviceAccount": {
"annotations": {},
"create": true,
"name": "kubescape-discovery"
},
"tolerations": []
}

BIN
core/cautils/testdata/localrepo.git vendored Normal file

Binary file not shown.

BIN
core/cautils/testdata/withoutremotes.git vendored Normal file

Binary file not shown.

View File

@@ -7,9 +7,10 @@ import (
"os"
"github.com/armosec/kubescape/v2/core/cautils/getter"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
"github.com/armosec/utils-go/boolutils"
logger "github.com/dwertent/go-logger"
"github.com/dwertent/go-logger/helpers"
"golang.org/x/mod/semver"
)
@@ -17,6 +18,7 @@ const SKIP_VERSION_CHECK_DEPRECATED = "KUBESCAPE_SKIP_UPDATE_CHECK"
const SKIP_VERSION_CHECK = "KS_SKIP_UPDATE_CHECK"
var BuildNumber string
var Client string
const UnknownBuildNumber = "unknown"
@@ -48,10 +50,12 @@ type VersionCheckHandler struct {
}
type VersionCheckRequest struct {
Client string `json:"client"` // kubescape
ClientBuild string `json:"clientBuild"` // client build environment
ClientVersion string `json:"clientVersion"` // kubescape version
Framework string `json:"framework"` // framework name
FrameworkVersion string `json:"frameworkVersion"` // framework version
ScanningTarget string `json:"target"` // scanning target- cluster/yaml
ScanningTarget string `json:"target"` // Deprecated
ScanningContext string `json:"context"` // scanning context- cluster/file/gitURL/localGit/dir
}
type VersionCheckResponse struct {
@@ -74,8 +78,12 @@ func NewVersionCheckRequest(buildNumber, frameworkName, frameworkVersion, scanni
if scanningTarget == "" {
scanningTarget = "unknown"
}
if Client == "" {
Client = "local-build"
}
return &VersionCheckRequest{
Client: "kubescape",
ClientBuild: Client,
ClientVersion: buildNumber,
Framework: frameworkName,
FrameworkVersion: frameworkVersion,

View File

@@ -14,7 +14,10 @@ var (
"KernelVersion",
"LinuxSecurityHardeningStatus",
"OpenPortsList",
"LinuxKernelVariables"}
"LinuxKernelVariables",
"KubeletInfo",
"KubeProxyInfo",
}
CloudResources = []string{"ClusterDescribe"}
)

View File

@@ -8,7 +8,7 @@ import (
func (ks *Kubescape) SetCachedConfig(setConfig *metav1.SetConfig) error {
tenant := getTenantConfig("", "", getKubernetesApi())
tenant := getTenantConfig(nil, "", getKubernetesApi())
if setConfig.Account != "" {
tenant.GetConfigObj().AccountID = setConfig.Account
@@ -25,13 +25,13 @@ func (ks *Kubescape) SetCachedConfig(setConfig *metav1.SetConfig) error {
// View cached configurations
func (ks *Kubescape) ViewCachedConfig(viewConfig *metav1.ViewConfig) error {
tenant := getTenantConfig("", "", getKubernetesApi()) // change k8sinterface
tenant := getTenantConfig(nil, "", getKubernetesApi()) // change k8sinterface
fmt.Fprintf(viewConfig.Writer, "%s\n", tenant.GetConfigObj().Config())
return nil
}
func (ks *Kubescape) DeleteCachedConfig(deleteConfig *metav1.DeleteConfig) error {
tenant := getTenantConfig("", "", getKubernetesApi()) // change k8sinterface
tenant := getTenantConfig(nil, "", getKubernetesApi()) // change k8sinterface
return tenant.DeleteCachedConfig()
}

View File

@@ -4,15 +4,15 @@ import (
"fmt"
"github.com/armosec/kubescape/v2/core/cautils/getter"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
v1 "github.com/armosec/kubescape/v2/core/meta/datastructures/v1"
logger "github.com/dwertent/go-logger"
"github.com/dwertent/go-logger/helpers"
)
func (ks *Kubescape) DeleteExceptions(delExceptions *v1.DeleteExceptions) error {
// load cached config
getTenantConfig(delExceptions.Account, "", getKubernetesApi())
getTenantConfig(&delExceptions.Credentials, "", getKubernetesApi())
// login kubescape SaaS
armoAPI := getter.GetArmoAPIConnector()

View File

@@ -8,9 +8,9 @@ import (
"github.com/armosec/armoapi-go/armotypes"
"github.com/armosec/kubescape/v2/core/cautils/getter"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
metav1 "github.com/armosec/kubescape/v2/core/meta/datastructures/v1"
logger "github.com/dwertent/go-logger"
"github.com/dwertent/go-logger/helpers"
)
var downloadFunc = map[string]func(*metav1.DownloadInfo) error{
@@ -80,7 +80,7 @@ func downloadArtifacts(downloadInfo *metav1.DownloadInfo) error {
}
func downloadConfigInputs(downloadInfo *metav1.DownloadInfo) error {
tenant := getTenantConfig(downloadInfo.Account, "", getKubernetesApi())
tenant := getTenantConfig(&downloadInfo.Credentials, "", getKubernetesApi())
controlsInputsGetter := getConfigInputsGetter(downloadInfo.Name, tenant.GetAccountID(), nil)
controlInputs, err := controlsInputsGetter.GetControlsInputs(tenant.GetContextName())
@@ -104,7 +104,7 @@ func downloadConfigInputs(downloadInfo *metav1.DownloadInfo) error {
func downloadExceptions(downloadInfo *metav1.DownloadInfo) error {
var err error
tenant := getTenantConfig(downloadInfo.Account, "", getKubernetesApi())
tenant := getTenantConfig(&downloadInfo.Credentials, "", getKubernetesApi())
exceptionsGetter := getExceptionsGetter("")
exceptions := []armotypes.PostureExceptionPolicy{}
@@ -128,9 +128,9 @@ func downloadExceptions(downloadInfo *metav1.DownloadInfo) error {
func downloadFramework(downloadInfo *metav1.DownloadInfo) error {
tenant := getTenantConfig(downloadInfo.Account, "", getKubernetesApi())
tenant := getTenantConfig(&downloadInfo.Credentials, "", getKubernetesApi())
g := getPolicyGetter(nil, tenant.GetTennatEmail(), true, nil)
g := getPolicyGetter(nil, tenant.GetTenantEmail(), true, nil)
if downloadInfo.Name == "" {
// if framework name not specified - download all frameworks
@@ -170,9 +170,9 @@ func downloadFramework(downloadInfo *metav1.DownloadInfo) error {
func downloadControl(downloadInfo *metav1.DownloadInfo) error {
tenant := getTenantConfig(downloadInfo.Account, "", getKubernetesApi())
tenant := getTenantConfig(&downloadInfo.Credentials, "", getKubernetesApi())
g := getPolicyGetter(nil, tenant.GetTennatEmail(), false, nil)
g := getPolicyGetter(nil, tenant.GetTenantEmail(), false, nil)
if downloadInfo.Name == "" {
// TODO - support

View File

@@ -6,14 +6,15 @@ import (
"github.com/armosec/k8s-interface/k8sinterface"
"github.com/armosec/kubescape/v2/core/cautils"
"github.com/armosec/kubescape/v2/core/cautils/getter"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
"github.com/armosec/kubescape/v2/core/pkg/hostsensorutils"
"github.com/armosec/kubescape/v2/core/pkg/resourcehandler"
"github.com/armosec/kubescape/v2/core/pkg/resultshandling/reporter"
reporterv2 "github.com/armosec/kubescape/v2/core/pkg/resultshandling/reporter/v2"
logger "github.com/dwertent/go-logger"
"github.com/dwertent/go-logger/helpers"
"github.com/google/uuid"
"github.com/armosec/opa-utils/reporthandling"
"github.com/armosec/rbac-utils/rbacscanner"
)
@@ -24,11 +25,11 @@ func getKubernetesApi() *k8sinterface.KubernetesApi {
}
return k8sinterface.NewKubernetesApi()
}
func getTenantConfig(Account, clusterName string, k8s *k8sinterface.KubernetesApi) cautils.ITenantConfig {
func getTenantConfig(credentials *cautils.Credentials, clusterName string, k8s *k8sinterface.KubernetesApi) cautils.ITenantConfig {
if !k8sinterface.IsConnectedToCluster() || k8s == nil {
return cautils.NewLocalConfig(getter.GetArmoAPIConnector(), Account, clusterName)
return cautils.NewLocalConfig(getter.GetArmoAPIConnector(), credentials, clusterName)
}
return cautils.NewClusterConfig(k8s, getter.GetArmoAPIConnector(), Account, clusterName)
return cautils.NewClusterConfig(k8s, getter.GetArmoAPIConnector(), credentials, clusterName)
}
func getExceptionsGetter(useExceptions string) getter.IExceptionsGetter {
@@ -105,7 +106,7 @@ func getFieldSelector(scanInfo *cautils.ScanInfo) resourcehandler.IFieldSelector
return &resourcehandler.EmptySelector{}
}
func policyIdentifierNames(pi []reporthandling.PolicyIdentifier) string {
func policyIdentifierNames(pi []cautils.PolicyIdentifier) string {
policiesNames := ""
for i := range pi {
policiesNames += pi[i].Name
@@ -123,7 +124,6 @@ func policyIdentifierNames(pi []reporthandling.PolicyIdentifier) string {
func setSubmitBehavior(scanInfo *cautils.ScanInfo, tenantConfig cautils.ITenantConfig) {
/*
If "First run (local config not found)" -
Default/keep-local - Do not send report
Submit - Create tenant & Submit report
@@ -140,8 +140,20 @@ func setSubmitBehavior(scanInfo *cautils.ScanInfo, tenantConfig cautils.ITenantC
return
}
scanningContext := scanInfo.GetScanningContext()
if scanningContext == cautils.ContextFile || scanningContext == cautils.ContextDir {
scanInfo.Submit = false
return
}
if tenantConfig.IsConfigFound() { // config found in cache (submitted)
if !scanInfo.Local {
if tenantConfig.GetAccountID() != "" {
if _, err := uuid.Parse(tenantConfig.GetAccountID()); err != nil {
scanInfo.Submit = false
return
}
}
// Submit report
scanInfo.Submit = true
}
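The net effect of the updated block can be condensed into a hypothetical helper (illustration only, not part of the changeset; the first-run behavior is outside this hunk and omitted):

package core

import (
    "github.com/armosec/kubescape/v2/core/cautils"
    "github.com/google/uuid"
)

// submitAllowed is a hypothetical condensation of the logic above: file and
// directory scans are never submitted, and a cached account ID must be a
// well-formed UUID for submission to remain enabled.
func submitAllowed(ctx cautils.ScanningContext, accountID string) bool {
    if ctx == cautils.ContextFile || ctx == cautils.ContextDir {
        return false
    }
    if accountID != "" {
        if _, err := uuid.Parse(accountID); err != nil {
            return false
        }
    }
    return true
}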
@@ -165,20 +177,6 @@ func getPolicyGetter(loadPoliciesFromFile []string, tennatEmail string, framewor
}
// func setGetArmoAPIConnector(scanInfo *cautils.ScanInfo, customerGUID string) {
// g := getter.GetArmoAPIConnector() // download policy from ARMO backend
// g.SetCustomerGUID(customerGUID)
// scanInfo.PolicyGetter = g
// if scanInfo.ScanAll {
// frameworks, err := g.ListCustomFrameworks(customerGUID)
// if err != nil {
// glog.Error("failed to get custom frameworks") // handle error
// return
// }
// scanInfo.SetPolicyIdentifiers(frameworks, reporthandling.KindFramework)
// }
// }
// setConfigInputsGetter sets the config input getter - local file/github release/ArmoAPI
func getConfigInputsGetter(ControlsInputs string, accountID string, downloadReleasedPolicy *getter.DownloadReleasedPolicy) getter.IControlsInputsGetter {
if len(ControlsInputs) > 0 {

View File

@@ -44,16 +44,16 @@ func (ks *Kubescape) List(listPolicies *metav1.ListPolicies) error {
}
func listFrameworks(listPolicies *metav1.ListPolicies) ([]string, error) {
tenant := getTenantConfig(listPolicies.Account, "", getKubernetesApi()) // change k8sinterface
g := getPolicyGetter(nil, tenant.GetTennatEmail(), true, nil)
tenant := getTenantConfig(&listPolicies.Credentials, "", getKubernetesApi()) // change k8sinterface
g := getPolicyGetter(nil, tenant.GetTenantEmail(), true, nil)
return listFrameworksNames(g), nil
}
func listControls(listPolicies *metav1.ListPolicies) ([]string, error) {
tenant := getTenantConfig(listPolicies.Account, "", getKubernetesApi()) // change k8sinterface
tenant := getTenantConfig(&listPolicies.Credentials, "", getKubernetesApi()) // change k8sinterface
g := getPolicyGetter(nil, tenant.GetTennatEmail(), false, nil)
g := getPolicyGetter(nil, tenant.GetTenantEmail(), false, nil)
l := getter.ListName
if listPolicies.ListIDs {
l = getter.ListID
@@ -63,7 +63,7 @@ func listControls(listPolicies *metav1.ListPolicies) ([]string, error) {
func listExceptions(listPolicies *metav1.ListPolicies) ([]string, error) {
// load tenant metav1
getTenantConfig(listPolicies.Account, "", getKubernetesApi())
getTenantConfig(&listPolicies.Credentials, "", getKubernetesApi())
var exceptionsNames []string
armoAPI := getExceptionsGetter("")

View File

@@ -5,13 +5,10 @@ import (
apisv1 "github.com/armosec/opa-utils/httpserver/apis/v1"
"github.com/armosec/armoapi-go/armotypes"
"github.com/armosec/k8s-interface/k8sinterface"
"github.com/armosec/kubescape/v2/core/cautils"
"github.com/armosec/kubescape/v2/core/cautils/getter"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
"github.com/armosec/kubescape/v2/core/pkg/hostsensorutils"
"github.com/armosec/kubescape/v2/core/pkg/opaprocessor"
"github.com/armosec/kubescape/v2/core/pkg/policyhandler"
@@ -19,8 +16,9 @@ import (
"github.com/armosec/kubescape/v2/core/pkg/resultshandling"
"github.com/armosec/kubescape/v2/core/pkg/resultshandling/printer"
"github.com/armosec/kubescape/v2/core/pkg/resultshandling/reporter"
logger "github.com/dwertent/go-logger"
"github.com/dwertent/go-logger/helpers"
"github.com/armosec/opa-utils/reporthandling"
"github.com/armosec/opa-utils/resources"
)
@@ -36,7 +34,7 @@ func getInterfaces(scanInfo *cautils.ScanInfo) componentInterfaces {
// ================== setup k8s interface object ======================================
var k8s *k8sinterface.KubernetesApi
if scanInfo.GetScanningEnvironment() == cautils.ScanCluster {
if scanInfo.GetScanningContext() == cautils.ContextCluster {
k8s = getKubernetesApi()
if k8s == nil {
logger.L().Fatal("failed connecting to Kubernetes cluster")
@@ -45,16 +43,11 @@ func getInterfaces(scanInfo *cautils.ScanInfo) componentInterfaces {
// ================== setup tenant object ======================================
tenantConfig := getTenantConfig(scanInfo.Account, scanInfo.KubeContext, k8s)
tenantConfig := getTenantConfig(&scanInfo.Credentials, scanInfo.KubeContext, k8s)
// Set submit behavior AFTER loading tenant config
setSubmitBehavior(scanInfo, tenantConfig)
// Do not submit yaml scanning
if len(scanInfo.InputPatterns) > 0 {
scanInfo.Submit = false
}
if scanInfo.Submit {
// submit - Create tenant & Submit report
if err := tenantConfig.SetTenant(); err != nil {
@@ -65,7 +58,7 @@ func getInterfaces(scanInfo *cautils.ScanInfo) componentInterfaces {
// ================== version testing ======================================
v := cautils.NewIVersionCheckHandler()
v.CheckLatestVersion(cautils.NewVersionCheckRequest(cautils.BuildNumber, policyIdentifierNames(scanInfo.PolicyIdentifier), "", scanInfo.GetScanningEnvironment()))
v.CheckLatestVersion(cautils.NewVersionCheckRequest(cautils.BuildNumber, policyIdentifierNames(scanInfo.PolicyIdentifier), "", cautils.ScanningContextToScanningScope(scanInfo.GetScanningContext())))
// ================== setup host scanner object ======================================
@@ -126,7 +119,7 @@ func (ks *Kubescape) Scan(scanInfo *cautils.ScanInfo) (*resultshandling.ResultsH
downloadReleasedPolicy := getter.NewDownloadReleasedPolicy() // download config inputs from github release
// set policy getter only after setting the customerGUID
scanInfo.Getters.PolicyGetter = getPolicyGetter(scanInfo.UseFrom, interfaces.tenantConfig.GetTennatEmail(), scanInfo.FrameworkScan, downloadReleasedPolicy)
scanInfo.Getters.PolicyGetter = getPolicyGetter(scanInfo.UseFrom, interfaces.tenantConfig.GetTenantEmail(), scanInfo.FrameworkScan, downloadReleasedPolicy)
scanInfo.Getters.ControlsInputsGetter = getConfigInputsGetter(scanInfo.ControlsInputs, interfaces.tenantConfig.GetAccountID(), downloadReleasedPolicy)
scanInfo.Getters.ExceptionsGetter = getExceptionsGetter(scanInfo.UseExceptions)
@@ -146,7 +139,7 @@ func (ks *Kubescape) Scan(scanInfo *cautils.ScanInfo) (*resultshandling.ResultsH
// ===================== policies & resources =====================
policyHandler := policyhandler.NewPolicyHandler(interfaces.resourceHandler)
scanData, err := collectResources(policyHandler, scanInfo)
scanData, err := policyHandler.CollectResources(scanInfo.PolicyIdentifier, scanInfo)
if err != nil {
return resultsHandling, err
}
@@ -156,7 +149,7 @@ func (ks *Kubescape) Scan(scanInfo *cautils.ScanInfo) (*resultshandling.ResultsH
reportResults := opaprocessor.NewOPAProcessor(scanData, deps)
if err := reportResults.ProcessRulesListenner(); err != nil {
// TODO - do something
return resultsHandling, err
return resultsHandling, fmt.Errorf("%w", err)
}
// ========================= results handling =====================
@@ -168,47 +161,3 @@ func (ks *Kubescape) Scan(scanInfo *cautils.ScanInfo) (*resultshandling.ResultsH
return resultsHandling, nil
}
// TODO - remove function
func collectResources(policyHandler *policyhandler.PolicyHandler, scanInfo *cautils.ScanInfo) (*cautils.OPASessionObj, error) {
policyNotification := &reporthandling.PolicyNotification{
Rules: scanInfo.PolicyIdentifier,
KubescapeNotification: reporthandling.KubescapeNotification{
Designators: armotypes.PortalDesignator{},
NotificationType: reporthandling.TypeExecPostureScan,
},
}
switch policyNotification.KubescapeNotification.NotificationType {
case reporthandling.TypeExecPostureScan:
collectedResources, err := policyHandler.CollectResources(policyNotification, scanInfo)
if err != nil {
return nil, err
}
return collectedResources, nil
default:
return nil, fmt.Errorf("notification type '%s' Unknown", policyNotification.KubescapeNotification.NotificationType)
}
}
// func askUserForHostSensor() bool {
// return false
// if !isatty.IsTerminal(os.Stdin.Fd()) {
// return false
// }
// if ssss, err := os.Stdin.Stat(); err == nil {
// // fmt.Printf("Found stdin type: %s\n", ssss.Mode().Type())
// if ssss.Mode().Type()&(fs.ModeDevice|fs.ModeCharDevice) > 0 { //has TTY
// fmt.Fprintf(os.Stderr, "Would you like to scan K8s nodes? [y/N]. This is required to collect valuable data for certain controls\n")
// fmt.Fprintf(os.Stderr, "Use --enable-host-scan flag to suppress this message\n")
// var b []byte = make([]byte, 1)
// if n, err := os.Stdin.Read(b); err == nil {
// if n > 0 && len(b) > 0 && (b[0] == 'y' || b[0] == 'Y') {
// return true
// }
// }
// }
// }
// return false
// }

View File

@@ -3,9 +3,9 @@ package core
import (
"github.com/armosec/kubescape/v2/core/cautils"
"github.com/armosec/kubescape/v2/core/cautils/getter"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
"github.com/armosec/kubescape/v2/core/meta/cliinterfaces"
logger "github.com/dwertent/go-logger"
"github.com/dwertent/go-logger/helpers"
)
func (ks *Kubescape) Submit(submitInterfaces cliinterfaces.SubmitInterfaces) error {
@@ -29,11 +29,11 @@ func (ks *Kubescape) Submit(submitInterfaces cliinterfaces.SubmitInterfaces) err
return nil
}
func (ks *Kubescape) SubmitExceptions(accountID, excPath string) error {
func (ks *Kubescape) SubmitExceptions(credentials *cautils.Credentials, excPath string) error {
logger.L().Info("submitting exceptions", helpers.String("path", excPath))
// load cached config
tenantConfig := getTenantConfig(accountID, "", getKubernetesApi())
tenantConfig := getTenantConfig(credentials, "", getKubernetesApi())
if err := tenantConfig.SetTenant(); err != nil {
logger.L().Error("failed setting account ID", helpers.Error(err))
}

View File

@@ -1,6 +1,8 @@
package v1
import "github.com/armosec/kubescape/v2/core/cautils"
type DeleteExceptions struct {
Account string
Exceptions []string
Credentials cautils.Credentials
Exceptions []string
}

View File

@@ -1,9 +1,11 @@
package v1
import "github.com/armosec/kubescape/v2/core/cautils"
type DownloadInfo struct {
Path string // directory to save artifact. Default is "~/.kubescape/"
FileName string // can be empty
Target string // type of artifact to download
Name string // name of artifact to download
Account string // AccountID
Path string // directory to save artifact. Default is "~/.kubescape/"
FileName string // can be empty
Target string // type of artifact to download
Name string // name of artifact to download
Credentials cautils.Credentials
}

View File

@@ -1,10 +1,12 @@
package v1
import "github.com/armosec/kubescape/v2/core/cautils"
type ListPolicies struct {
Target string
ListIDs bool
Account string
Format string
Target string
ListIDs bool
Format string
Credentials cautils.Credentials
}
type ListResponse struct {

View File

@@ -1,9 +1,11 @@
package v1
import "github.com/armosec/kubescape/v2/core/cautils"
type Submit struct {
Account string
Credentials cautils.Credentials
}
type Delete struct {
Account string
Credentials cautils.Credentials
}

View File

@@ -15,8 +15,8 @@ type IKubescape interface {
Download(downloadInfo *metav1.DownloadInfo) error // TODO - return downloaded policies
// submit
Submit(submitInterfaces cliinterfaces.SubmitInterfaces) error // TODO - func should receive object
SubmitExceptions(accountID, excPath string) error // TODO - remove
Submit(submitInterfaces cliinterfaces.SubmitInterfaces) error // TODO - func should receive object
SubmitExceptions(credentials *cautils.Credentials, excPath string) error // TODO - remove
// config
SetCachedConfig(setConfig *metav1.SetConfig) error

View File

@@ -11,8 +11,9 @@ import (
"github.com/armosec/k8s-interface/k8sinterface"
"github.com/armosec/k8s-interface/workloadinterface"
"github.com/armosec/kubescape/v2/core/cautils"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
logger "github.com/dwertent/go-logger"
"github.com/dwertent/go-logger/helpers"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -35,6 +36,7 @@ type HostSensorHandler struct {
DaemonSet *appsv1.DaemonSet
podListLock sync.RWMutex
gracePeriod int64
workerPool workerPool
}
func NewHostSensorHandler(k8sObj *k8sinterface.KubernetesApi, hostSensorYAMLFile string) (*HostSensorHandler, error) {
@@ -54,6 +56,7 @@ func NewHostSensorHandler(k8sObj *k8sinterface.KubernetesApi, hostSensorYAMLFile
HostSensorPodNames: map[string]string{},
HostSensorUnscheduledPodNames: map[string]string{},
gracePeriod: int64(15),
workerPool: NewWorkerPool(),
}
// Don't deploy on clusters with no nodes. Some cloud providers prevent termination of K8s objects in clusters with no nodes!
if nodeList, err := k8sObj.KubernetesClient.CoreV1().Nodes().List(k8sObj.Context, metav1.ListOptions{}); err != nil || len(nodeList.Items) == 0 {
@@ -89,7 +92,7 @@ func (hsh *HostSensorHandler) Init() error {
func (hsh *HostSensorHandler) applyYAML() error {
workloads, err := cautils.ReadFile([]byte(hostSensorYAML), cautils.YAML_FILE_FORMAT)
if len(err) != 0 {
if err != nil {
return fmt.Errorf("failed to read YAML files, reason: %v", err)
}

View File

@@ -6,10 +6,11 @@ import (
"sync"
"github.com/armosec/k8s-interface/k8sinterface"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
"github.com/armosec/opa-utils/objectsenvelopes/hostsensor"
"github.com/armosec/opa-utils/reporthandling/apis"
logger "github.com/dwertent/go-logger"
"github.com/dwertent/go-logger/helpers"
"sigs.k8s.io/yaml"
)
@@ -32,7 +33,23 @@ func (hsh *HostSensorHandler) HTTPGetToPod(podName, path string) ([]byte, error)
restProxy := hsh.k8sObj.KubernetesClient.CoreV1().Pods(hsh.DaemonSet.Namespace).ProxyGet("http", podName, fmt.Sprintf("%d", hsh.HostSensorPort), path, map[string]string{})
return restProxy.DoRaw(hsh.k8sObj.Context)
}
func (hsh *HostSensorHandler) getResourcesFromPod(podName, nodeName, resourceKind, path string) (hostsensor.HostSensorDataEnvelope, error) {
// send the request and pack the response as an hostSensorDataEnvelope
resBytes, err := hsh.HTTPGetToPod(podName, path)
if err != nil {
return hostsensor.HostSensorDataEnvelope{}, err
}
hostSensorDataEnvelope := hostsensor.HostSensorDataEnvelope{}
hostSensorDataEnvelope.SetApiVersion(k8sinterface.JoinGroupVersion(hostsensor.GroupHostSensor, hostsensor.Version))
hostSensorDataEnvelope.SetKind(resourceKind)
hostSensorDataEnvelope.SetName(nodeName)
hostSensorDataEnvelope.SetData(resBytes)
return hostSensorDataEnvelope, nil
}
func (hsh *HostSensorHandler) ForwardToPod(podName, path string) ([]byte, error) {
@@ -59,35 +76,26 @@ func (hsh *HostSensorHandler) ForwardToPod(podName, path string) ([]byte, error)
// sendAllPodsHTTPGETRequest fills the raw byte response in the envelope and the node name, but not the GroupVersionKind
// so the caller is responsible to convert the raw data to some structured data and add the GroupVersionKind details
//
// The function produces a worker-pool with a fixed number of workers.
// For each node the request is pushed to the jobs channel, the worker sends the request and pushes the result to the result channel.
// When all workers have finished, the function returns a list of results
func (hsh *HostSensorHandler) sendAllPodsHTTPGETRequest(path, requestKind string) ([]hostsensor.HostSensorDataEnvelope, error) {
podList, err := hsh.getPodList()
if err != nil {
return nil, fmt.Errorf("failed to sendAllPodsHTTPGETRequest: %v", err)
}
res := make([]hostsensor.HostSensorDataEnvelope, 0, len(podList))
resLock := sync.Mutex{}
wg := sync.WaitGroup{}
wg.Add(len(podList))
for podName := range podList {
go func(podName, path string) {
defer wg.Done()
resBytes, err := hsh.HTTPGetToPod(podName, path)
if err != nil {
logger.L().Error("failed to get data", helpers.String("path", path), helpers.String("podName", podName), helpers.Error(err))
} else {
resLock.Lock()
defer resLock.Unlock()
hostSensorDataEnvelope := hostsensor.HostSensorDataEnvelope{}
hostSensorDataEnvelope.SetApiVersion(k8sinterface.JoinGroupVersion(hostsensor.GroupHostSensor, hostsensor.Version))
hostSensorDataEnvelope.SetKind(requestKind)
hostSensorDataEnvelope.SetName(podList[podName])
hostSensorDataEnvelope.SetData(resBytes)
res = append(res, hostSensorDataEnvelope)
}
}(podName, path)
}
wg.Wait()
res := make([]hostsensor.HostSensorDataEnvelope, 0, len(podList))
var wg sync.WaitGroup
// initialization of the channels
hsh.workerPool.init(len(podList))
hsh.workerPool.hostSensorApplyJobs(podList, path, requestKind)
hsh.workerPool.hostSensorGetResults(&res)
hsh.workerPool.createWorkerPool(hsh, &wg)
hsh.workerPool.waitForDone(&wg)
return res, nil
}
@@ -109,6 +117,18 @@ func (hsh *HostSensorHandler) GetLinuxSecurityHardeningStatus() ([]hostsensor.Ho
return hsh.sendAllPodsHTTPGETRequest("/linuxSecurityHardening", "LinuxSecurityHardeningStatus")
}
// return list of KubeletInfo
func (hsh *HostSensorHandler) GetKubeletInfo() ([]hostsensor.HostSensorDataEnvelope, error) {
// loop over pods and port-forward it to each of them
return hsh.sendAllPodsHTTPGETRequest("/kubeletInfo", "KubeletInfo")
}
// return list of KubeProxyInfo
func (hsh *HostSensorHandler) GetKubeProxyInfo() ([]hostsensor.HostSensorDataEnvelope, error) {
// loop over pods and port-forward it to each of them
return hsh.sendAllPodsHTTPGETRequest("/kubeProxyInfo", "KubeProxyInfo")
}
// return list of KubeletCommandLine
func (hsh *HostSensorHandler) GetKubeletCommandLine() ([]hostsensor.HostSensorDataEnvelope, error) {
// loop over pods and port-forward it to each of them
@@ -228,6 +248,27 @@ func (hsh *HostSensorHandler) CollectResources() ([]hostsensor.HostSensorDataEnv
if len(kcData) > 0 {
res = append(res, kcData...)
}
// GetKubeletInfo
kcData, err = hsh.GetKubeletInfo()
if err != nil {
addInfoToMap(KubeletInfo, infoMap, err)
logger.L().Warning(err.Error())
}
if len(kcData) > 0 {
res = append(res, kcData...)
}
// GetKubeProxyInfo
kcData, err = hsh.GetKubeProxyInfo()
if err != nil {
addInfoToMap(KubeProxyInfo, infoMap, err)
logger.L().Warning(err.Error())
}
if len(kcData) > 0 {
res = append(res, kcData...)
}
logger.L().Debug("Done reading information from host scanner")
return res, infoMap, nil
}
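Each entry returned by these collectors is a HostSensorDataEnvelope built exactly as in getResourcesFromPod above. A sketch of assembling one by hand; the node name and payload bytes are placeholders:

package main

import (
    "fmt"

    "github.com/armosec/k8s-interface/k8sinterface"
    "github.com/armosec/opa-utils/objectsenvelopes/hostsensor"
)

func main() {
    e := hostsensor.HostSensorDataEnvelope{}
    e.SetApiVersion(k8sinterface.JoinGroupVersion(hostsensor.GroupHostSensor, hostsensor.Version))
    e.SetKind("KubeletInfo")   // one of the kinds listed in the host-sensor resources map
    e.SetName("worker-node-1") // node name, set per pod in sendAllPodsHTTPGETRequest
    e.SetData([]byte(`{}`))    // raw bytes returned by the host scanner endpoint
    fmt.Printf("%+v\n", e)
}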

View File

@@ -0,0 +1,96 @@
package hostsensorutils
import (
"sync"
"github.com/armosec/opa-utils/objectsenvelopes/hostsensor"
logger "github.com/dwertent/go-logger"
"github.com/dwertent/go-logger/helpers"
)
const noOfWorkers int = 10
type job struct {
podName string
nodeName string
requestKind string
path string
}
type workerPool struct {
jobs chan job
results chan hostsensor.HostSensorDataEnvelope
done chan bool
noOfWorkers int
}
func NewWorkerPool() workerPool {
wp := workerPool{}
wp.noOfWorkers = noOfWorkers
wp.init()
return wp
}
func (wp *workerPool) init(noOfPods ...int) {
if noOfPods != nil && len(noOfPods) > 0 && noOfPods[0] < noOfWorkers {
wp.noOfWorkers = noOfPods[0]
}
// init the channels
wp.jobs = make(chan job, noOfWorkers)
wp.results = make(chan hostsensor.HostSensorDataEnvelope, noOfWorkers)
wp.done = make(chan bool)
}
// The worker takes a job out of the chan, executes the request, and pushes the result to the results chan
func (wp *workerPool) hostSensorWorker(hsh *HostSensorHandler, wg *sync.WaitGroup) {
defer wg.Done()
for job := range wp.jobs {
hostSensorDataEnvelope, err := hsh.getResourcesFromPod(job.podName, job.nodeName, job.requestKind, job.path)
if err != nil {
logger.L().Error("failed to get data", helpers.String("path", job.path), helpers.String("podName", job.podName), helpers.Error(err))
} else {
wp.results <- hostSensorDataEnvelope
}
}
}
func (wp *workerPool) createWorkerPool(hsh *HostSensorHandler, wg *sync.WaitGroup) {
for i := 0; i < noOfWorkers; i++ {
wg.Add(1)
go wp.hostSensorWorker(hsh, wg)
}
}
func (wp *workerPool) waitForDone(wg *sync.WaitGroup) {
// Waiting for workers to finish
wg.Wait()
close(wp.results)
// Waiting for the results to be processed
<-wp.done
}
func (wp *workerPool) hostSensorGetResults(result *[]hostsensor.HostSensorDataEnvelope) {
go func() {
for res := range wp.results {
*result = append(*result, res)
}
wp.done <- true
}()
}
func (wp *workerPool) hostSensorApplyJobs(podList map[string]string, path, requestKind string) {
go func() {
for podName, nodeName := range podList {
job := job{
podName: podName,
nodeName: nodeName,
requestKind: requestKind,
path: path,
}
wp.jobs <- job
}
close(wp.jobs)
}()
}
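Stripped of the host-sensor specifics, the jobs/results/done choreography above follows the standard Go worker-pool shape. A self-contained sketch (names here are illustrative, not from the changeset):

package main

import (
    "fmt"
    "sync"
)

func main() {
    const workers = 3
    jobs := make(chan string, workers)
    results := make(chan string, workers)
    done := make(chan bool)

    // Producer: push one job per pod, then close the jobs channel (hostSensorApplyJobs).
    go func() {
        for _, pod := range []string{"pod-a", "pod-b", "pod-c", "pod-d"} {
            jobs <- pod
        }
        close(jobs)
    }()

    // Collector: drain results into a slice, then signal done (hostSensorGetResults).
    var collected []string
    go func() {
        for r := range results {
            collected = append(collected, r)
        }
        done <- true
    }()

    // Workers: consume jobs until the channel closes (hostSensorWorker / createWorkerPool).
    var wg sync.WaitGroup
    for i := 0; i < workers; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            for pod := range jobs {
                results <- "data-from-" + pod
            }
        }()
    }

    // waitForDone: wait for workers, close results, then wait for the collector.
    wg.Wait()
    close(results)
    <-done

    fmt.Println(collected)
}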

View File

@@ -13,8 +13,10 @@ var (
OpenPortsList = "OpenPortsList"
LinuxKernelVariables = "LinuxKernelVariables"
KubeletCommandLine = "KubeletCommandLine"
KubeletInfo = "KubeletInfo"
KubeProxyInfo = "KubeProxyInfo"
MapResourceToApiGroup = map[string]string{
MapHostSensorResourceToApiGroup = map[string]string{
KubeletConfiguration: "hostdata.kubescape.cloud/v1beta0",
OsReleaseFile: "hostdata.kubescape.cloud/v1beta0",
KubeletCommandLine: "hostdata.kubescape.cloud/v1beta0",
@@ -22,11 +24,13 @@ var (
LinuxSecurityHardeningStatus: "hostdata.kubescape.cloud/v1beta0",
OpenPortsList: "hostdata.kubescape.cloud/v1beta0",
LinuxKernelVariables: "hostdata.kubescape.cloud/v1beta0",
KubeletInfo: "hostdata.kubescape.cloud/v1beta0",
KubeProxyInfo: "hostdata.kubescape.cloud/v1beta0",
}
)
func addInfoToMap(resource string, infoMap map[string]apis.StatusInfo, err error) {
group, version := k8sinterface.SplitApiVersion(MapResourceToApiGroup[resource])
group, version := k8sinterface.SplitApiVersion(MapHostSensorResourceToApiGroup[resource])
r := k8sinterface.JoinResourceTriplets(group, version, resource)
infoMap[r] = apis.StatusInfo{
InnerStatus: apis.StatusSkipped,

View File

@@ -7,17 +7,19 @@ import (
"github.com/armosec/armoapi-go/armotypes"
"github.com/armosec/kubescape/v2/core/cautils"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
"github.com/armosec/kubescape/v2/core/pkg/score"
"github.com/armosec/opa-utils/objectsenvelopes"
"github.com/armosec/opa-utils/reporthandling"
"github.com/armosec/opa-utils/reporthandling/apis"
"github.com/armosec/opa-utils/reporthandling/results/v1/resourcesresults"
logger "github.com/dwertent/go-logger"
"github.com/dwertent/go-logger/helpers"
"github.com/open-policy-agent/opa/storage"
"github.com/armosec/k8s-interface/workloadinterface"
reporthandlingv2 "github.com/armosec/opa-utils/reporthandling/v2"
"github.com/armosec/opa-utils/resources"
"github.com/open-policy-agent/opa/ast"
"github.com/open-policy-agent/opa/rego"
@@ -62,7 +64,7 @@ func (opap *OPAProcessor) ProcessRulesListenner() error {
}
func (opap *OPAProcessor) Process(policies *cautils.Policies) error {
logger.L().Info("Scanning", helpers.String("cluster", cautils.ClusterName))
opap.loggerStartScanning()
cautils.StartSpinner()
@@ -89,11 +91,30 @@ func (opap *OPAProcessor) Process(policies *cautils.Policies) error {
opap.Report.ReportGenerationTime = time.Now().UTC()
cautils.StopSpinner()
logger.L().Success("Done scanning", helpers.String("cluster", cautils.ClusterName))
opap.loggerDoneScanning()
return errs
}
func (opap *OPAProcessor) loggerStartScanning() {
targetScan := opap.OPASessionObj.Metadata.ScanMetadata.ScanningTarget
if reporthandlingv2.Cluster == targetScan {
logger.L().Info("Scanning", helpers.String(targetScan.String(), cautils.ClusterName))
} else {
logger.L().Info("Scanning " + targetScan.String())
}
}
func (opap *OPAProcessor) loggerDoneScanning() {
targetScan := opap.OPASessionObj.Metadata.ScanMetadata.ScanningTarget
if reporthandlingv2.Cluster == targetScan {
logger.L().Success("Done scanning", helpers.String(targetScan.String(), cautils.ClusterName))
} else {
logger.L().Success("Done scanning " + targetScan.String())
}
}
func (opap *OPAProcessor) processControl(control *reporthandling.Control) (map[string]resourcesresults.ResourceAssociatedControl, error) {
var errs error

View File

@@ -2,7 +2,7 @@ package opaprocessor
import (
"github.com/armosec/kubescape/v2/core/cautils"
"github.com/armosec/kubescape/v2/core/cautils/logger"
logger "github.com/dwertent/go-logger"
"github.com/armosec/k8s-interface/k8sinterface"
"github.com/armosec/k8s-interface/workloadinterface"

View File

@@ -3,9 +3,9 @@ package policyhandler
import (
"fmt"
"github.com/armosec/armoapi-go/armotypes"
"github.com/armosec/kubescape/v2/core/cautils"
"github.com/armosec/kubescape/v2/core/pkg/resourcehandler"
"github.com/armosec/opa-utils/reporthandling"
)
// PolicyHandler -
@@ -22,7 +22,7 @@ func NewPolicyHandler(resourceHandler resourcehandler.IResourceHandler) *PolicyH
}
}
func (policyHandler *PolicyHandler) CollectResources(notification *reporthandling.PolicyNotification, scanInfo *cautils.ScanInfo) (*cautils.OPASessionObj, error) {
func (policyHandler *PolicyHandler) CollectResources(policyIdentifier []cautils.PolicyIdentifier, scanInfo *cautils.ScanInfo) (*cautils.OPASessionObj, error) {
opaSessionObj := cautils.NewOPASessionObj(nil, nil, scanInfo)
// validate notification
@@ -30,11 +30,11 @@ func (policyHandler *PolicyHandler) CollectResources(notification *reporthandlin
policyHandler.getters = &scanInfo.Getters
// get policies
if err := policyHandler.getPolicies(notification, opaSessionObj); err != nil {
if err := policyHandler.getPolicies(policyIdentifier, opaSessionObj); err != nil {
return opaSessionObj, err
}
err := policyHandler.getResources(notification, opaSessionObj, scanInfo)
err := policyHandler.getResources(policyIdentifier, opaSessionObj, scanInfo)
if err != nil {
return opaSessionObj, err
}
@@ -46,10 +46,10 @@ func (policyHandler *PolicyHandler) CollectResources(notification *reporthandlin
return opaSessionObj, nil
}
func (policyHandler *PolicyHandler) getResources(notification *reporthandling.PolicyNotification, opaSessionObj *cautils.OPASessionObj, scanInfo *cautils.ScanInfo) error {
func (policyHandler *PolicyHandler) getResources(policyIdentifier []cautils.PolicyIdentifier, opaSessionObj *cautils.OPASessionObj, scanInfo *cautils.ScanInfo) error {
opaSessionObj.Report.ClusterAPIServerInfo = policyHandler.resourceHandler.GetClusterAPIServerInfo()
resourcesMap, allResources, armoResources, err := policyHandler.resourceHandler.GetResources(opaSessionObj, &notification.Designators)
resourcesMap, allResources, armoResources, err := policyHandler.resourceHandler.GetResources(opaSessionObj, &policyIdentifier[0].Designators)
if err != nil {
return err
}
@@ -60,3 +60,10 @@ func (policyHandler *PolicyHandler) getResources(notification *reporthandling.Po
return nil
}
func getDesignator(policyIdentifier []cautils.PolicyIdentifier) *armotypes.PortalDesignator {
if len(policyIdentifier) > 0 {
return &policyIdentifier[0].Designators
}
return &armotypes.PortalDesignator{}
}

View File

@@ -4,25 +4,27 @@ import (
"fmt"
"strings"
apisv1 "github.com/armosec/opa-utils/httpserver/apis/v1"
"github.com/armosec/kubescape/v2/core/cautils"
"github.com/armosec/kubescape/v2/core/cautils/getter"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
"github.com/armosec/opa-utils/reporthandling"
logger "github.com/dwertent/go-logger"
"github.com/dwertent/go-logger/helpers"
)
func (policyHandler *PolicyHandler) getPolicies(notification *reporthandling.PolicyNotification, policiesAndResources *cautils.OPASessionObj) error {
func (policyHandler *PolicyHandler) getPolicies(policyIdentifier []cautils.PolicyIdentifier, policiesAndResources *cautils.OPASessionObj) error {
logger.L().Info("Downloading/Loading policy definitions")
cautils.StartSpinner()
defer cautils.StopSpinner()
policies, err := policyHandler.getScanPolicies(notification)
policies, err := policyHandler.getScanPolicies(policyIdentifier)
if err != nil {
return err
}
if len(policies) == 0 {
return fmt.Errorf("failed to download policies: '%s'. Make sure the policy exist and you spelled it correctly. For more information, please feel free to contact ARMO team", strings.Join(policyIdentifierToSlice(notification.Rules), ", "))
return fmt.Errorf("failed to download policies: '%s'. Make sure the policy exist and you spelled it correctly. For more information, please feel free to contact ARMO team", strings.Join(policyIdentifierToSlice(policyIdentifier), ", "))
}
policiesAndResources.Policies = policies
@@ -31,6 +33,8 @@ func (policyHandler *PolicyHandler) getPolicies(notification *reporthandling.Pol
exceptionPolicies, err := policyHandler.getters.ExceptionsGetter.GetExceptions(cautils.ClusterName)
if err == nil {
policiesAndResources.Exceptions = exceptionPolicies
} else {
logger.L().Error("failed to load exceptions", helpers.Error(err))
}
// get account configuration
@@ -44,12 +48,12 @@ func (policyHandler *PolicyHandler) getPolicies(notification *reporthandling.Pol
return nil
}
func (policyHandler *PolicyHandler) getScanPolicies(notification *reporthandling.PolicyNotification) ([]reporthandling.Framework, error) {
func (policyHandler *PolicyHandler) getScanPolicies(policyIdentifier []cautils.PolicyIdentifier) ([]reporthandling.Framework, error) {
frameworks := []reporthandling.Framework{}
switch getScanKind(notification) {
case reporthandling.KindFramework: // Download frameworks
for _, rule := range notification.Rules {
switch getScanKind(policyIdentifier) {
case apisv1.KindFramework: // Download frameworks
for _, rule := range policyIdentifier {
receivedFramework, err := policyHandler.getters.PolicyGetter.GetFramework(rule.Name)
if err != nil {
return frameworks, policyDownloadError(err)
@@ -63,11 +67,11 @@ func (policyHandler *PolicyHandler) getScanPolicies(notification *reporthandling
}
}
}
case reporthandling.KindControl: // Download controls
case apisv1.KindControl: // Download controls
f := reporthandling.Framework{}
var receivedControl *reporthandling.Control
var err error
for _, rule := range notification.Rules {
for _, rule := range policyIdentifier {
receivedControl, err = policyHandler.getters.PolicyGetter.GetControl(rule.Name)
if err != nil {
return frameworks, policyDownloadError(err)
@@ -89,7 +93,7 @@ func (policyHandler *PolicyHandler) getScanPolicies(notification *reporthandling
return frameworks, nil
}
func policyIdentifierToSlice(rules []reporthandling.PolicyIdentifier) []string {
func policyIdentifierToSlice(rules []cautils.PolicyIdentifier) []string {
s := []string{}
for i := range rules {
s = append(s, fmt.Sprintf("%s: %s", rules[i].Kind, rules[i].Name))

View File

@@ -4,12 +4,14 @@ import (
"fmt"
"strings"
"github.com/armosec/opa-utils/reporthandling"
apisv1 "github.com/armosec/opa-utils/httpserver/apis/v1"
"github.com/armosec/kubescape/v2/core/cautils"
)
func getScanKind(notification *reporthandling.PolicyNotification) reporthandling.NotificationPolicyKind {
if len(notification.Rules) > 0 {
return notification.Rules[0].Kind
func getScanKind(policyIdentifier []cautils.PolicyIdentifier) apisv1.NotificationPolicyKind {
if len(policyIdentifier) > 0 {
return policyIdentifier[0].Kind
}
return "unknown"
}

View File

@@ -5,10 +5,10 @@ import (
"fmt"
"github.com/armosec/kubescape/v2/core/cautils/getter"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
"github.com/armosec/kubescape/v2/core/pkg/containerscan"
"github.com/armosec/kubescape/v2/core/pkg/registryadaptors/registryvulnerabilities"
logger "github.com/dwertent/go-logger"
"github.com/dwertent/go-logger/helpers"
)
func NewArmoAdaptor(armoAPI *getter.ArmoAPI) *ArmoCivAdaptor {
@@ -51,7 +51,7 @@ func (armoCivAdaptor *ArmoCivAdaptor) GetImageVulnerability(imageID *registryvul
pageNumber := 1
request := V2ListRequest{PageSize: &pageSize, PageNum: &pageNumber, InnerFilters: filter, OrderBy: "timestamp:desc"}
requestBody, _ := json.Marshal(request)
requestUrl := fmt.Sprintf("https://%s/api/v1/vulnerability/scanResultsDetails?customerGUID=%s", armoCivAdaptor.armoAPI.GetAPIURL(), armoCivAdaptor.armoAPI.GetAccountID())
requestUrl := fmt.Sprintf("https://%s/api/v1/vulnerability/scanResultsDetails?customerGUID=%s", armoCivAdaptor.armoAPI.GetApiURL(), armoCivAdaptor.armoAPI.GetAccountID())
resp, err := armoCivAdaptor.armoAPI.Post(requestUrl, map[string]string{"Content-Type": "application/json"}, requestBody)
if err != nil {
@@ -83,8 +83,7 @@ func (armoCivAdaptor *ArmoCivAdaptor) GetImageVulnerability(imageID *registryvul
}
func (armoCivAdaptor *ArmoCivAdaptor) DescribeAdaptor() string {
// TODO
return ""
return "armo image vulnerabilities scanner, docs: https://hub.armosec.io/docs/configuration-of-image-vulnerabilities"
}
func (armoCivAdaptor *ArmoCivAdaptor) GetImagesInformation(imageIDs []registryvulnerabilities.ContainerImageIdentifier) ([]registryvulnerabilities.ContainerImageInformation, error) {


@@ -14,7 +14,7 @@ func (armoCivAdaptor *ArmoCivAdaptor) getImageLastScanId(imageID *registryvulner
pageNumber := 1
request := V2ListRequest{PageSize: &pageSize, PageNum: &pageNumber, InnerFilters: filter, OrderBy: "timestamp:desc"}
requestBody, _ := json.Marshal(request)
requestUrl := fmt.Sprintf("https://%s/api/v1/vulnerability/scanResultsSumSummary?customerGUID=%s", armoCivAdaptor.armoAPI.GetAPIURL(), armoCivAdaptor.armoAPI.GetAccountID())
requestUrl := fmt.Sprintf("https://%s/api/v1/vulnerability/scanResultsSumSummary?customerGUID=%s", armoCivAdaptor.armoAPI.GetApiURL(), armoCivAdaptor.armoAPI.GetAccountID())
resp, err := armoCivAdaptor.armoAPI.Post(requestUrl, map[string]string{"Content-Type": "application/json"}, requestBody)
if err != nil {


@@ -2,15 +2,18 @@ package resourcehandler
import (
"fmt"
"os"
"path/filepath"
"github.com/armosec/armoapi-go/armotypes"
"github.com/armosec/k8s-interface/workloadinterface"
"github.com/armosec/opa-utils/reporthandling"
"k8s.io/apimachinery/pkg/version"
"github.com/armosec/k8s-interface/k8sinterface"
"github.com/armosec/kubescape/v2/core/cautils"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
logger "github.com/dwertent/go-logger"
"github.com/dwertent/go-logger/helpers"
)
// FileResourceHandler handle resources from files and URLs
@@ -29,48 +32,139 @@ func NewFileResourceHandler(inputPatterns []string, registryAdaptors *RegistryAd
func (fileHandler *FileResourceHandler) GetResources(sessionObj *cautils.OPASessionObj, designator *armotypes.PortalDesignator) (*cautils.K8SResources, map[string]workloadinterface.IMetadata, *cautils.ArmoResources, error) {
//
// build resources map
// map resources based on framework required resources: map["/group/version/kind"][]<k8s workloads ids>
k8sResources := setK8sResourceMap(sessionObj.Policies)
allResources := map[string]workloadinterface.IMetadata{}
workloadIDToSource := make(map[string]string, 0)
workloadIDToSource := make(map[string]reporthandling.Source, 0)
armoResources := &cautils.ArmoResources{}
workloads := []workloadinterface.IMetadata{}
// load resource from local file system
sourceToWorkloads, err := cautils.LoadResourcesFromFiles(fileHandler.inputPatterns)
if err != nil {
return nil, allResources, nil, err
if len(fileHandler.inputPatterns) == 0 {
return nil, nil, nil, fmt.Errorf("missing input")
}
for source, ws := range sourceToWorkloads {
workloads = append(workloads, ws...)
for i := range ws {
workloadIDToSource[ws[i].GetID()] = source
}
}
logger.L().Debug("files found in local storage", helpers.Int("files", len(sourceToWorkloads)), helpers.Int("workloads", len(workloads)))
path := fileHandler.inputPatterns[0]
// load resources from url
sourceToWorkloads, err = loadResourcesFromUrl(fileHandler.inputPatterns)
clonedRepo, err := cloneGitRepo(&path)
if err != nil {
return nil, allResources, nil, err
}
if clonedRepo != "" {
defer os.RemoveAll(clonedRepo)
}
// Get repo root
repoRoot := ""
gitRepo, err := cautils.NewLocalGitRepository(path)
if err == nil && gitRepo != nil {
repoRoot, _ = gitRepo.GetRootDir()
}
// load resource from local file system
logger.L().Info("Accessing local objects")
cautils.StartSpinner()
sourceToWorkloads := cautils.LoadResourcesFromFiles(path, repoRoot)
// update workloads and workloadIDToSource
for source, ws := range sourceToWorkloads {
workloads = append(workloads, ws...)
relSource, err := filepath.Rel(repoRoot, source)
if err == nil {
source = relSource
}
var filetype string
if cautils.IsYaml(source) {
filetype = reporthandling.SourceTypeYaml
} else if cautils.IsJson(source) {
filetype = reporthandling.SourceTypeJson
} else {
continue
}
var lastCommit reporthandling.LastCommit
if gitRepo != nil {
commitInfo, _ := gitRepo.GetFileLastCommit(source)
if commitInfo != nil {
lastCommit = reporthandling.LastCommit{
Hash: commitInfo.SHA,
Date: commitInfo.Author.Date,
CommitterName: commitInfo.Author.Name,
CommitterEmail: commitInfo.Author.Email,
Message: commitInfo.Message,
}
}
}
workloadSource := reporthandling.Source{
RelativePath: source,
FileType: filetype,
LastCommit: lastCommit,
}
for i := range ws {
workloadIDToSource[ws[i].GetID()] = source
workloadIDToSource[ws[i].GetID()] = workloadSource
}
}
if len(workloads) == 0 {
logger.L().Debug("files found in local storage", helpers.Int("files", len(sourceToWorkloads)), helpers.Int("workloads", len(workloads)))
}
// load resources from helm charts
helmSourceToWorkloads, helmSourceToChartName := cautils.LoadResourcesFromHelmCharts(path)
for source, ws := range helmSourceToWorkloads {
workloads = append(workloads, ws...)
helmChartName := helmSourceToChartName[source]
relSource, err := filepath.Rel(repoRoot, source)
if err == nil {
source = relSource
}
var lastCommit reporthandling.LastCommit
if gitRepo != nil {
commitInfo, _ := gitRepo.GetFileLastCommit(source)
if commitInfo != nil {
lastCommit = reporthandling.LastCommit{
Hash: commitInfo.SHA,
Date: commitInfo.Author.Date,
CommitterName: commitInfo.Author.Name,
CommitterEmail: commitInfo.Author.Email,
Message: commitInfo.Message,
}
}
}
workloadSource := reporthandling.Source{
RelativePath: source,
FileType: reporthandling.SourceTypeHelmChart,
HelmChartName: helmChartName,
LastCommit: lastCommit,
}
for i := range ws {
workloadIDToSource[ws[i].GetID()] = workloadSource
}
}
if len(helmSourceToWorkloads) > 0 {
logger.L().Debug("helm templates found in local storage", helpers.Int("helmTemplates", len(helmSourceToWorkloads)), helpers.Int("workloads", len(workloads)))
}
// addCommitData(fileHandler.inputPatterns[0], workloadIDToSource)
if len(workloads) == 0 {
return nil, allResources, nil, fmt.Errorf("empty list of workloads - no workloads found")
}
logger.L().Debug("files found in git repo", helpers.Int("files", len(sourceToWorkloads)), helpers.Int("workloads", len(workloads)))
sessionObj.ResourceSource = workloadIDToSource
// map all resources: map["/group/version/kind"][]<k8s workloads>
// map all resources: map["/apiVersion/version/kind"][]<k8s workloads>
mappedResources := mapResources(workloads)
// save only relevant resources
@@ -89,39 +183,12 @@ func (fileHandler *FileResourceHandler) GetResources(sessionObj *cautils.OPASess
logger.L().Warning("failed to collect images vulnerabilities", helpers.Error(err))
}
return k8sResources, allResources, armoResources, nil
cautils.StopSpinner()
logger.L().Success("Accessed to local objects")
return k8sResources, allResources, armoResources, nil
}
func (fileHandler *FileResourceHandler) GetClusterAPIServerInfo() *version.Info {
return nil
}
// build resources map
func mapResources(workloads []workloadinterface.IMetadata) map[string][]workloadinterface.IMetadata {
allResources := map[string][]workloadinterface.IMetadata{}
for i := range workloads {
groupVersionResource, err := k8sinterface.GetGroupVersionResource(workloads[i].GetKind())
if err != nil {
// TODO - print warning
continue
}
if k8sinterface.IsTypeWorkload(workloads[i].GetObject()) {
w := workloadinterface.NewWorkloadObj(workloads[i].GetObject())
if groupVersionResource.Group != w.GetGroup() || groupVersionResource.Version != w.GetVersion() {
// TODO - print warning
continue
}
}
resourceTriplets := k8sinterface.JoinResourceTriplets(groupVersionResource.Group, groupVersionResource.Version, groupVersionResource.Resource)
if r, ok := allResources[resourceTriplets]; ok {
allResources[resourceTriplets] = append(r, workloads[i])
} else {
allResources[resourceTriplets] = []workloadinterface.IMetadata{workloads[i]}
}
}
return allResources
}


@@ -0,0 +1,86 @@
package resourcehandler
import (
"fmt"
"path/filepath"
giturl "github.com/armosec/go-git-url"
"github.com/armosec/k8s-interface/k8sinterface"
"github.com/armosec/k8s-interface/workloadinterface"
"github.com/armosec/kubescape/v2/core/cautils"
"github.com/armosec/opa-utils/reporthandling"
logger "github.com/dwertent/go-logger"
"github.com/dwertent/go-logger/helpers"
)
// Clone git repository
func cloneGitRepo(path *string) (string, error) {
var clonedDir string
// Clone git repository if needed
gitURL, err := giturl.NewGitURL(*path)
if err == nil {
logger.L().Info("cloning", helpers.String("repository url", gitURL.GetURL().String()))
cautils.StartSpinner()
clonedDir, err = cloneRepo(gitURL)
cautils.StopSpinner()
if err != nil {
return "", fmt.Errorf("failed to clone git repo '%s', %w", gitURL.GetURL().String(), err)
}
*path = filepath.Join(clonedDir, gitURL.GetPath())
}
return clonedDir, nil
}
// build resources map
func mapResources(workloads []workloadinterface.IMetadata) map[string][]workloadinterface.IMetadata {
allResources := map[string][]workloadinterface.IMetadata{}
for i := range workloads {
groupVersionResource, err := k8sinterface.GetGroupVersionResource(workloads[i].GetKind())
if err != nil {
// TODO - print warning
continue
}
if k8sinterface.IsTypeWorkload(workloads[i].GetObject()) {
w := workloadinterface.NewWorkloadObj(workloads[i].GetObject())
if groupVersionResource.Group != w.GetGroup() || groupVersionResource.Version != w.GetVersion() {
// TODO - print warning
continue
}
}
resourceTriplets := k8sinterface.JoinResourceTriplets(groupVersionResource.Group, groupVersionResource.Version, groupVersionResource.Resource)
if r, ok := allResources[resourceTriplets]; ok {
allResources[resourceTriplets] = append(r, workloads[i])
} else {
allResources[resourceTriplets] = []workloadinterface.IMetadata{workloads[i]}
}
}
return allResources
}
func addCommitData(input string, workloadIDToSource map[string]reporthandling.Source) {
giRepo, err := cautils.NewLocalGitRepository(input)
if err != nil || giRepo == nil {
return
}
for k := range workloadIDToSource {
sourceObj := workloadIDToSource[k]
lastCommit, err := giRepo.GetFileLastCommit(sourceObj.RelativePath)
if err != nil {
continue
}
sourceObj.LastCommit = reporthandling.LastCommit{
Hash: lastCommit.SHA,
Date: lastCommit.Author.Date,
CommitterName: lastCommit.Author.Name,
CommitterEmail: lastCommit.Author.Email,
Message: lastCommit.Message,
}
workloadIDToSource[k] = sourceObj
}
}


@@ -6,11 +6,11 @@ import (
"strings"
"github.com/armosec/kubescape/v2/core/cautils"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
"github.com/armosec/kubescape/v2/core/pkg/hostsensorutils"
"github.com/armosec/opa-utils/objectsenvelopes"
"github.com/armosec/opa-utils/reporthandling/apis"
logger "github.com/dwertent/go-logger"
"github.com/dwertent/go-logger/helpers"
"github.com/armosec/k8s-interface/cloudsupport"
"github.com/armosec/k8s-interface/k8sinterface"
@@ -18,6 +18,7 @@ import (
"github.com/armosec/armoapi-go/armotypes"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
k8slabels "k8s.io/apimachinery/pkg/labels"
@@ -85,6 +86,11 @@ func (k8sHandler *K8sResourceHandler) GetResources(sessionObj *cautils.OPASessio
if len(imgVulnResources) > 0 {
if err := k8sHandler.registryAdaptors.collectImagesVulnerabilities(k8sResourcesMap, allResources, armoResourceMap); err != nil {
logger.L().Warning("failed to collect image vulnerabilities", helpers.Error(err))
cautils.SetInfoMapForResources(fmt.Sprintf("failed to pull image scanning data: %s", err.Error()), imgVulnResources, sessionObj.InfoMap)
} else {
if isEmptyImgVulns(*armoResourceMap) {
cautils.SetInfoMapForResources("image scanning is not configured. for more information: https://hub.armosec.io/docs/configuration-of-image-vulnerabilities", imgVulnResources, sessionObj.InfoMap)
}
}
}
@@ -103,7 +109,7 @@ func (k8sHandler *K8sResourceHandler) GetResources(sessionObj *cautils.OPASessio
sessionObj.InfoMap = infoMap
}
} else {
cautils.SetInfoMapForResources("enable-host-scan flag not used", hostResources, sessionObj.InfoMap)
cautils.SetInfoMapForResources("enable-host-scan flag not used. For more information: https://hub.armosec.io/docs/host-sensor", hostResources, sessionObj.InfoMap)
}
}
@@ -277,7 +283,7 @@ func getCloudProviderDescription(allResources map[string]workloadinterface.IMeta
if err != nil {
// Return error with useful info on how to configure credentials for getting cloud provider info
logger.L().Debug("failed to get descriptive information", helpers.Error(err))
return provider, fmt.Errorf("failed to get %s descriptive information. Read more: https://hub.armo.cloud/docs/kubescape-integration-with-cloud-providers", strings.ToUpper(provider))
return provider, fmt.Errorf("failed to get %s descriptive information. Read more: https://hub.armosec.io/docs/kubescape-integration-with-cloud-providers", strings.ToUpper(provider))
}
allResources[wl.GetID()] = wl
(*armoResourceMap)[fmt.Sprintf("%s/%s", wl.GetApiVersion(), wl.GetKind())] = []string{wl.GetID()}
@@ -287,17 +293,31 @@ func getCloudProviderDescription(allResources map[string]workloadinterface.IMeta
}
func (k8sHandler *K8sResourceHandler) pullWorkerNodesNumber() (int, error) {
// labels used for control plane
listOptions := metav1.ListOptions{
LabelSelector: "!node-role.kubernetes.io/control-plane,!node-role.kubernetes.io/master",
nodesList, err := k8sHandler.k8s.KubernetesClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
scheduableNodes := v1.NodeList{}
if nodesList != nil {
for _, node := range nodesList.Items {
if len(node.Spec.Taints) == 0 {
scheduableNodes.Items = append(scheduableNodes.Items, node)
} else {
if !isMasterNodeTaints(node.Spec.Taints) {
scheduableNodes.Items = append(scheduableNodes.Items, node)
}
}
}
}
nodesList, err := k8sHandler.k8s.KubernetesClient.CoreV1().Nodes().List(context.TODO(), listOptions)
if err != nil {
return 0, err
}
nodesNumber := 0
if nodesList != nil {
nodesNumber = len(nodesList.Items)
}
return nodesNumber, nil
return len(scheduableNodes.Items), nil
}
// NoSchedule taint with empty value is usually applied to controlplane
func isMasterNodeTaints(taints []v1.Taint) bool {
for _, taint := range taints {
if taint.Effect == v1.TaintEffectNoSchedule && taint.Value == "" {
return true
}
}
return false
}
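Illustrative sketch (editor's addition, not from the diff) of how the taint check above classifies nodes: an empty-value NoSchedule taint marks a control-plane node, while a valued NoSchedule taint leaves the node counted as schedulable. The taint keys below are only examples.
func exampleIsMasterNodeTaints() {
	_ = isMasterNodeTaints([]v1.Taint{{Key: "node-role.kubernetes.io/master", Effect: v1.TaintEffectNoSchedule}}) // -> true (empty value)
	_ = isMasterNodeTaints([]v1.Taint{{Key: "dedicated", Value: "gpu", Effect: v1.TaintEffectNoSchedule}})        // -> false (valued taint)
}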


@@ -0,0 +1,532 @@
package resourcehandler
import (
"encoding/json"
"testing"
"github.com/stretchr/testify/assert"
v1 "k8s.io/api/core/v1"
)
func TestIsMasterNodeTaints(t *testing.T) {
noTaintNode := `
{
"apiVersion": "v1",
"kind": "Node",
"metadata": {
"annotations": {
"kubeadm.alpha.kubernetes.io/cri-socket": "/var/run/dockershim.sock",
"node.alpha.kubernetes.io/ttl": "0",
"volumes.kubernetes.io/controller-managed-attach-detach": "true"
},
"creationTimestamp": "2022-05-16T10:52:32Z",
"labels": {
"beta.kubernetes.io/arch": "amd64",
"beta.kubernetes.io/os": "linux",
"kubernetes.io/arch": "amd64",
"kubernetes.io/hostname": "danielg-minikube",
"kubernetes.io/os": "linux",
"minikube.k8s.io/commit": "3e64b11ed75e56e4898ea85f96b2e4af0301f43d",
"minikube.k8s.io/name": "danielg-minikube",
"minikube.k8s.io/updated_at": "2022_05_16T13_52_35_0700",
"minikube.k8s.io/version": "v1.25.1",
"node-role.kubernetes.io/control-plane": "",
"node-role.kubernetes.io/master": "",
"node.kubernetes.io/exclude-from-external-load-balancers": ""
},
"name": "danielg-minikube",
"resourceVersion": "9432",
"uid": "fc4afcb6-4ca4-4038-ba54-5e16065a614a"
},
"spec": {
"podCIDR": "10.244.0.0/24",
"podCIDRs": [
"10.244.0.0/24"
]
},
"status": {
"addresses": [
{
"address": "192.168.49.2",
"type": "InternalIP"
},
{
"address": "danielg-minikube",
"type": "Hostname"
}
],
"allocatable": {
"cpu": "4",
"ephemeral-storage": "94850516Ki",
"hugepages-2Mi": "0",
"memory": "10432976Ki",
"pods": "110"
},
"capacity": {
"cpu": "4",
"ephemeral-storage": "94850516Ki",
"hugepages-2Mi": "0",
"memory": "10432976Ki",
"pods": "110"
},
"conditions": [
{
"lastHeartbeatTime": "2022-05-16T14:14:31Z",
"lastTransitionTime": "2022-05-16T10:52:29Z",
"message": "kubelet has sufficient memory available",
"reason": "KubeletHasSufficientMemory",
"status": "False",
"type": "MemoryPressure"
},
{
"lastHeartbeatTime": "2022-05-16T14:14:31Z",
"lastTransitionTime": "2022-05-16T10:52:29Z",
"message": "kubelet has no disk pressure",
"reason": "KubeletHasNoDiskPressure",
"status": "False",
"type": "DiskPressure"
},
{
"lastHeartbeatTime": "2022-05-16T14:14:31Z",
"lastTransitionTime": "2022-05-16T10:52:29Z",
"message": "kubelet has sufficient PID available",
"reason": "KubeletHasSufficientPID",
"status": "False",
"type": "PIDPressure"
},
{
"lastHeartbeatTime": "2022-05-16T14:14:31Z",
"lastTransitionTime": "2022-05-16T10:52:45Z",
"message": "kubelet is posting ready status",
"reason": "KubeletReady",
"status": "True",
"type": "Ready"
}
],
"daemonEndpoints": {
"kubeletEndpoint": {
"Port": 10250
}
},
"images": [
{
"names": [
"requarks/wiki@sha256:dd83fff15e77843ff934b25c28c865ac000edf7653e5d11adad1dd51df87439d"
],
"sizeBytes": 441083858
},
{
"names": [
"mariadb@sha256:821d0411208eaa88f9e1f0daccd1d534f88d19baf724eb9a2777cbedb10b6c66"
],
"sizeBytes": 400782682
},
{
"names": [
"k8s.gcr.io/etcd@sha256:64b9ea357325d5db9f8a723dcf503b5a449177b17ac87d69481e126bb724c263",
"k8s.gcr.io/etcd:3.5.1-0"
],
"sizeBytes": 292558922
},
{
"names": [
"kubernetesui/dashboard@sha256:ec27f462cf1946220f5a9ace416a84a57c18f98c777876a8054405d1428cc92e",
"kubernetesui/dashboard:v2.3.1"
],
"sizeBytes": 220033604
},
{
"names": [
"k8s.gcr.io/kube-apiserver@sha256:f54681a71cce62cbc1b13ebb3dbf1d880f849112789811f98b6aebd2caa2f255",
"k8s.gcr.io/kube-apiserver:v1.23.1"
],
"sizeBytes": 135162256
},
{
"names": [
"k8s.gcr.io/kube-controller-manager@sha256:a7ed87380108a2d811f0d392a3fe87546c85bc366e0d1e024dfa74eb14468604",
"k8s.gcr.io/kube-controller-manager:v1.23.1"
],
"sizeBytes": 124971684
},
{
"names": [
"k8s.gcr.io/kube-proxy@sha256:e40f3a28721588affcf187f3f246d1e078157dabe274003eaa2957a83f7170c8",
"k8s.gcr.io/kube-proxy:v1.23.1"
],
"sizeBytes": 112327826
},
{
"names": [
"quay.io/armosec/kubescape@sha256:6196f766be50d94b45d903a911f5ee95ac99bc392a1324c3e063bec41efd98ba",
"quay.io/armosec/kubescape:v2.0.153"
],
"sizeBytes": 110345054
},
{
"names": [
"nginx@sha256:f7988fb6c02e0ce69257d9bd9cf37ae20a60f1df7563c3a2a6abe24160306b8d"
],
"sizeBytes": 109129446
},
{
"names": [
"quay.io/armosec/action-trigger@sha256:b93707d10ff86aac8dfa42ad37192d6bcf9aceeb4321b21756e438389c26e07c",
"quay.io/armosec/action-trigger:v0.0.5"
],
"sizeBytes": 65127067
},
{
"names": [
"quay.io/armosec/images-vulnerabilities-scan@sha256:a5f9ddc04a7fdce6d52ef85a21f0de567d8e04d418c2bc5bf5d72b151c997625",
"quay.io/armosec/images-vulnerabilities-scan:v0.0.7"
],
"sizeBytes": 61446712
},
{
"names": [
"quay.io/armosec/images-vulnerabilities-scan@sha256:2f879858da89f6542e3223fb18d6d793810cc2ad6e398b66776475e4218b6af5",
"quay.io/armosec/images-vulnerabilities-scan:v0.0.8"
],
"sizeBytes": 61446528
},
{
"names": [
"quay.io/armosec/cluster-collector@sha256:2c4f733d09f7f4090ace04585230bdfacbbc29a3ade38a2e1233d2c0f730d9b6",
"quay.io/armosec/cluster-collector:v0.0.9"
],
"sizeBytes": 53699576
},
{
"names": [
"k8s.gcr.io/kube-scheduler@sha256:8be4eb1593cf9ff2d91b44596633b7815a3753696031a1eb4273d1b39427fa8c",
"k8s.gcr.io/kube-scheduler:v1.23.1"
],
"sizeBytes": 53488305
},
{
"names": [
"k8s.gcr.io/coredns/coredns@sha256:5b6ec0d6de9baaf3e92d0f66cd96a25b9edbce8716f5f15dcd1a616b3abd590e",
"k8s.gcr.io/coredns/coredns:v1.8.6"
],
"sizeBytes": 46829283
},
{
"names": [
"kubernetesui/metrics-scraper@sha256:36d5b3f60e1a144cc5ada820910535074bdf5cf73fb70d1ff1681537eef4e172",
"kubernetesui/metrics-scraper:v1.0.7"
],
"sizeBytes": 34446077
},
{
"names": [
"gcr.io/k8s-minikube/storage-provisioner@sha256:18eb69d1418e854ad5a19e399310e52808a8321e4c441c1dddad8977a0d7a944",
"gcr.io/k8s-minikube/storage-provisioner:v5"
],
"sizeBytes": 31465472
},
{
"names": [
"quay.io/armosec/notification-server@sha256:b6e9b296cd53bd3b2b42c516d8ab43db998acff1124a57aff8d66b3dd7881979",
"quay.io/armosec/notification-server:v0.0.3"
],
"sizeBytes": 20209940
},
{
"names": [
"quay.io/armosec/kube-host-sensor@sha256:82139d2561039726be060df2878ef023c59df7c536fbd7f6d766af5a99569fee",
"quay.io/armosec/kube-host-sensor:latest"
],
"sizeBytes": 11796788
},
{
"names": [
"k8s.gcr.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db",
"k8s.gcr.io/pause:3.6"
],
"sizeBytes": 682696
}
],
"nodeInfo": {
"architecture": "amd64",
"bootID": "828cbe73-120b-43cf-aae0-9e2d15b8c873",
"containerRuntimeVersion": "docker://20.10.12",
"kernelVersion": "5.13.0-40-generic",
"kubeProxyVersion": "v1.23.1",
"kubeletVersion": "v1.23.1",
"machineID": "8de776e053e140d6a14c2d2def3d6bb8",
"operatingSystem": "linux",
"osImage": "Ubuntu 20.04.2 LTS",
"systemUUID": "da12dc19-10bf-4033-a440-2d9aa33d6fe3"
}
}
}
`
var l v1.Node
_ = json.Unmarshal([]byte(noTaintNode), &l)
assert.False(t, isMasterNodeTaints(l.Spec.Taints))
taintNode :=
`
{
"apiVersion": "v1",
"kind": "Node",
"metadata": {
"annotations": {
"kubeadm.alpha.kubernetes.io/cri-socket": "/var/run/dockershim.sock",
"node.alpha.kubernetes.io/ttl": "0",
"volumes.kubernetes.io/controller-managed-attach-detach": "true"
},
"creationTimestamp": "2022-05-16T10:52:32Z",
"labels": {
"beta.kubernetes.io/arch": "amd64",
"beta.kubernetes.io/os": "linux",
"kubernetes.io/arch": "amd64",
"kubernetes.io/hostname": "danielg-minikube",
"kubernetes.io/os": "linux",
"minikube.k8s.io/commit": "3e64b11ed75e56e4898ea85f96b2e4af0301f43d",
"minikube.k8s.io/name": "danielg-minikube",
"minikube.k8s.io/updated_at": "2022_05_16T13_52_35_0700",
"minikube.k8s.io/version": "v1.25.1",
"node-role.kubernetes.io/control-plane": "",
"node-role.kubernetes.io/master": "",
"node.kubernetes.io/exclude-from-external-load-balancers": ""
},
"name": "danielg-minikube",
"resourceVersion": "9871",
"uid": "fc4afcb6-4ca4-4038-ba54-5e16065a614a"
},
"spec": {
"podCIDR": "10.244.0.0/24",
"podCIDRs": [
"10.244.0.0/24"
],
"taints": [
{
"effect": "NoSchedule",
"key": "key1",
"value": ""
}
]
},
"status": {
"addresses": [
{
"address": "192.168.49.2",
"type": "InternalIP"
},
{
"address": "danielg-minikube",
"type": "Hostname"
}
],
"allocatable": {
"cpu": "4",
"ephemeral-storage": "94850516Ki",
"hugepages-2Mi": "0",
"memory": "10432976Ki",
"pods": "110"
},
"capacity": {
"cpu": "4",
"ephemeral-storage": "94850516Ki",
"hugepages-2Mi": "0",
"memory": "10432976Ki",
"pods": "110"
},
"conditions": [
{
"lastHeartbeatTime": "2022-05-16T14:24:45Z",
"lastTransitionTime": "2022-05-16T10:52:29Z",
"message": "kubelet has sufficient memory available",
"reason": "KubeletHasSufficientMemory",
"status": "False",
"type": "MemoryPressure"
},
{
"lastHeartbeatTime": "2022-05-16T14:24:45Z",
"lastTransitionTime": "2022-05-16T10:52:29Z",
"message": "kubelet has no disk pressure",
"reason": "KubeletHasNoDiskPressure",
"status": "False",
"type": "DiskPressure"
},
{
"lastHeartbeatTime": "2022-05-16T14:24:45Z",
"lastTransitionTime": "2022-05-16T10:52:29Z",
"message": "kubelet has sufficient PID available",
"reason": "KubeletHasSufficientPID",
"status": "False",
"type": "PIDPressure"
},
{
"lastHeartbeatTime": "2022-05-16T14:24:45Z",
"lastTransitionTime": "2022-05-16T10:52:45Z",
"message": "kubelet is posting ready status",
"reason": "KubeletReady",
"status": "True",
"type": "Ready"
}
],
"daemonEndpoints": {
"kubeletEndpoint": {
"Port": 10250
}
},
"images": [
{
"names": [
"requarks/wiki@sha256:dd83fff15e77843ff934b25c28c865ac000edf7653e5d11adad1dd51df87439d"
],
"sizeBytes": 441083858
},
{
"names": [
"mariadb@sha256:821d0411208eaa88f9e1f0daccd1d534f88d19baf724eb9a2777cbedb10b6c66"
],
"sizeBytes": 400782682
},
{
"names": [
"k8s.gcr.io/etcd@sha256:64b9ea357325d5db9f8a723dcf503b5a449177b17ac87d69481e126bb724c263",
"k8s.gcr.io/etcd:3.5.1-0"
],
"sizeBytes": 292558922
},
{
"names": [
"kubernetesui/dashboard@sha256:ec27f462cf1946220f5a9ace416a84a57c18f98c777876a8054405d1428cc92e",
"kubernetesui/dashboard:v2.3.1"
],
"sizeBytes": 220033604
},
{
"names": [
"k8s.gcr.io/kube-apiserver@sha256:f54681a71cce62cbc1b13ebb3dbf1d880f849112789811f98b6aebd2caa2f255",
"k8s.gcr.io/kube-apiserver:v1.23.1"
],
"sizeBytes": 135162256
},
{
"names": [
"k8s.gcr.io/kube-controller-manager@sha256:a7ed87380108a2d811f0d392a3fe87546c85bc366e0d1e024dfa74eb14468604",
"k8s.gcr.io/kube-controller-manager:v1.23.1"
],
"sizeBytes": 124971684
},
{
"names": [
"k8s.gcr.io/kube-proxy@sha256:e40f3a28721588affcf187f3f246d1e078157dabe274003eaa2957a83f7170c8",
"k8s.gcr.io/kube-proxy:v1.23.1"
],
"sizeBytes": 112327826
},
{
"names": [
"quay.io/armosec/kubescape@sha256:6196f766be50d94b45d903a911f5ee95ac99bc392a1324c3e063bec41efd98ba",
"quay.io/armosec/kubescape:v2.0.153"
],
"sizeBytes": 110345054
},
{
"names": [
"nginx@sha256:f7988fb6c02e0ce69257d9bd9cf37ae20a60f1df7563c3a2a6abe24160306b8d"
],
"sizeBytes": 109129446
},
{
"names": [
"quay.io/armosec/action-trigger@sha256:b93707d10ff86aac8dfa42ad37192d6bcf9aceeb4321b21756e438389c26e07c",
"quay.io/armosec/action-trigger:v0.0.5"
],
"sizeBytes": 65127067
},
{
"names": [
"quay.io/armosec/images-vulnerabilities-scan@sha256:a5f9ddc04a7fdce6d52ef85a21f0de567d8e04d418c2bc5bf5d72b151c997625",
"quay.io/armosec/images-vulnerabilities-scan:v0.0.7"
],
"sizeBytes": 61446712
},
{
"names": [
"quay.io/armosec/images-vulnerabilities-scan@sha256:2f879858da89f6542e3223fb18d6d793810cc2ad6e398b66776475e4218b6af5",
"quay.io/armosec/images-vulnerabilities-scan:v0.0.8"
],
"sizeBytes": 61446528
},
{
"names": [
"quay.io/armosec/cluster-collector@sha256:2c4f733d09f7f4090ace04585230bdfacbbc29a3ade38a2e1233d2c0f730d9b6",
"quay.io/armosec/cluster-collector:v0.0.9"
],
"sizeBytes": 53699576
},
{
"names": [
"k8s.gcr.io/kube-scheduler@sha256:8be4eb1593cf9ff2d91b44596633b7815a3753696031a1eb4273d1b39427fa8c",
"k8s.gcr.io/kube-scheduler:v1.23.1"
],
"sizeBytes": 53488305
},
{
"names": [
"k8s.gcr.io/coredns/coredns@sha256:5b6ec0d6de9baaf3e92d0f66cd96a25b9edbce8716f5f15dcd1a616b3abd590e",
"k8s.gcr.io/coredns/coredns:v1.8.6"
],
"sizeBytes": 46829283
},
{
"names": [
"kubernetesui/metrics-scraper@sha256:36d5b3f60e1a144cc5ada820910535074bdf5cf73fb70d1ff1681537eef4e172",
"kubernetesui/metrics-scraper:v1.0.7"
],
"sizeBytes": 34446077
},
{
"names": [
"gcr.io/k8s-minikube/storage-provisioner@sha256:18eb69d1418e854ad5a19e399310e52808a8321e4c441c1dddad8977a0d7a944",
"gcr.io/k8s-minikube/storage-provisioner:v5"
],
"sizeBytes": 31465472
},
{
"names": [
"quay.io/armosec/notification-server@sha256:b6e9b296cd53bd3b2b42c516d8ab43db998acff1124a57aff8d66b3dd7881979",
"quay.io/armosec/notification-server:v0.0.3"
],
"sizeBytes": 20209940
},
{
"names": [
"quay.io/armosec/kube-host-sensor@sha256:82139d2561039726be060df2878ef023c59df7c536fbd7f6d766af5a99569fee",
"quay.io/armosec/kube-host-sensor:latest"
],
"sizeBytes": 11796788
},
{
"names": [
"k8s.gcr.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db",
"k8s.gcr.io/pause:3.6"
],
"sizeBytes": 682696
}
],
"nodeInfo": {
"architecture": "amd64",
"bootID": "828cbe73-120b-43cf-aae0-9e2d15b8c873",
"containerRuntimeVersion": "docker://20.10.12",
"kernelVersion": "5.13.0-40-generic",
"kubeProxyVersion": "v1.23.1",
"kubeletVersion": "v1.23.1",
"machineID": "8de776e053e140d6a14c2d2def3d6bb8",
"operatingSystem": "linux",
"osImage": "Ubuntu 20.04.2 LTS",
"systemUUID": "da12dc19-10bf-4033-a440-2d9aa33d6fe3"
}
}
}
`
_ = json.Unmarshal([]byte(taintNode), &l)
assert.True(t, isMasterNodeTaints(l.Spec.Taints))
}


@@ -4,7 +4,6 @@ import (
"strings"
"github.com/armosec/kubescape/v2/core/cautils"
"github.com/armosec/kubescape/v2/core/pkg/hostsensorutils"
"github.com/armosec/opa-utils/reporthandling"
"k8s.io/utils/strings/slices"
@@ -12,12 +11,47 @@ import (
)
var (
ClusterDescribe = "ClusterDescribe"
ClusterDescribe = "ClusterDescribe"
KubeletConfiguration = "KubeletConfiguration"
OsReleaseFile = "OsReleaseFile"
KernelVersion = "KernelVersion"
LinuxSecurityHardeningStatus = "LinuxSecurityHardeningStatus"
OpenPortsList = "OpenPortsList"
LinuxKernelVariables = "LinuxKernelVariables"
KubeletCommandLine = "KubeletCommandLine"
ImageVulnerabilities = "ImageVulnerabilities"
KubeletInfo = "KubeletInfo"
KubeProxyInfo = "KubeProxyInfo"
MapResourceToApiGroup = map[string]string{
KubeletConfiguration: "hostdata.kubescape.cloud/v1beta0",
OsReleaseFile: "hostdata.kubescape.cloud/v1beta0",
KubeletCommandLine: "hostdata.kubescape.cloud/v1beta0",
KernelVersion: "hostdata.kubescape.cloud/v1beta0",
LinuxSecurityHardeningStatus: "hostdata.kubescape.cloud/v1beta0",
OpenPortsList: "hostdata.kubescape.cloud/v1beta0",
LinuxKernelVariables: "hostdata.kubescape.cloud/v1beta0",
KubeletInfo: "hostdata.kubescape.cloud/v1beta0",
KubeProxyInfo: "hostdata.kubescape.cloud/v1beta0",
}
MapResourceToApiGroupVuln = map[string][]string{
ImageVulnerabilities: {"armo.vuln.images/v1", "image.vulnscan.com/v1"}}
MapResourceToApiGroupCloud = map[string][]string{
ClusterDescribe: {"container.googleapis.com/v1", "eks.amazonaws.com/v1", "management.azure.com/v1"}}
)
func isEmptyImgVulns(armoResourcesMap cautils.ArmoResources) bool {
imgVulnResources := cautils.MapImageVulnResources(&armoResourcesMap)
for _, resource := range imgVulnResources {
if val, ok := armoResourcesMap[resource]; ok {
if len(val) > 0 {
return false
}
}
}
return true
}
func setK8sResourceMap(frameworks []reporthandling.Framework) *cautils.K8SResources {
k8sResources := make(cautils.K8SResources)
complexMap := setComplexK8sResourceMap(frameworks)
@@ -80,10 +114,16 @@ func setComplexArmoResourceMap(frameworks []reporthandling.Framework, resourceTo
}
func mapArmoResourceToApiGroup(resource string) []string {
if val, ok := hostsensorutils.MapResourceToApiGroup[resource]; ok {
if val, ok := MapResourceToApiGroup[resource]; ok {
return []string{val}
}
return MapResourceToApiGroupCloud[resource]
if val, ok := MapResourceToApiGroupCloud[resource]; ok {
return val
}
if val, ok := MapResourceToApiGroupVuln[resource]; ok {
return val
}
return []string{}
}
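Illustrative sketch (editor's addition, not from the diff) of the lookup fallthrough above, using the map values defined earlier in this file:
func exampleMapArmoResourceToApiGroup() {
	_ = mapArmoResourceToApiGroup(KubeletConfiguration)  // -> ["hostdata.kubescape.cloud/v1beta0"]
	_ = mapArmoResourceToApiGroup(ClusterDescribe)       // -> ["container.googleapis.com/v1", "eks.amazonaws.com/v1", "management.azure.com/v1"]
	_ = mapArmoResourceToApiGroup(ImageVulnerabilities)  // -> ["armo.vuln.images/v1", "image.vulnscan.com/v1"]
	_ = mapArmoResourceToApiGroup("no-such-resource")    // -> []
}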
func insertControls(resource string, resourceToControl map[string][]string, control reporthandling.Control) {


@@ -2,7 +2,9 @@ package resourcehandler
import (
"github.com/armosec/k8s-interface/k8sinterface"
"github.com/armosec/kubescape/v2/core/cautils"
"github.com/armosec/opa-utils/reporthandling"
"github.com/stretchr/testify/assert"
"testing"
)
@@ -24,6 +26,18 @@ func TestSetResourceMap(t *testing.T) {
}
}
func TestSsEmptyImgVulns(t *testing.T) {
armoResourcesMap := make(cautils.ArmoResources, 0)
armoResourcesMap["container.googleapis.com/v1"] = []string{"fsdfds"}
assert.Equal(t, true, isEmptyImgVulns(armoResourcesMap))
armoResourcesMap["armo.vuln.images/v1/ImageVulnerabilities"] = []string{"dada"}
assert.Equal(t, false, isEmptyImgVulns(armoResourcesMap))
armoResourcesMap["armo.vuln.images/v1/ImageVulnerabilities"] = []string{}
armoResourcesMap["bla"] = []string{"blu"}
assert.Equal(t, true, isEmptyImgVulns(armoResourcesMap))
}
func TestInsertK8sResources(t *testing.T) {
// insertK8sResources


@@ -1,14 +1,15 @@
package resourcehandler
import (
"fmt"
"github.com/armosec/k8s-interface/k8sinterface"
"github.com/armosec/k8s-interface/workloadinterface"
"github.com/armosec/kubescape/v2/core/cautils"
"github.com/armosec/kubescape/v2/core/cautils/getter"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
armosecadaptorv1 "github.com/armosec/kubescape/v2/core/pkg/registryadaptors/armosec/v1"
"github.com/armosec/kubescape/v2/core/pkg/registryadaptors/registryvulnerabilities"
logger "github.com/dwertent/go-logger"
"github.com/armosec/opa-utils/shared"
)
@@ -45,8 +46,9 @@ func (registryAdaptors *RegistryAdaptors) collectImagesVulnerabilities(k8sResour
for i := range registryAdaptors.adaptors { // login and get vulnerabilities
if err := registryAdaptors.adaptors[i].Login(); err != nil {
logger.L().Error("failed to login", helpers.Error(err))
continue
if err != nil {
return fmt.Errorf("failed to login, adaptor: '%s', reason: '%s'", registryAdaptors.adaptors[i].DescribeAdaptor(), err.Error())
}
}
vulnerabilities, err := registryAdaptors.adaptors[i].GetImagesVulnerabilities(imagesIdentifiers)
if err != nil {
@@ -60,6 +62,10 @@ func (registryAdaptors *RegistryAdaptors) collectImagesVulnerabilities(k8sResour
// convert result to IMetadata object
metaObjs := vulnerabilitiesToIMetadata(imagesVulnerability)
if len(metaObjs) == 0 {
return fmt.Errorf("no vulnerabilities found for any of the images")
}
// save in resources map
for i := range metaObjs {
allResources[metaObjs[i].GetID()] = metaObjs[i]


@@ -0,0 +1,36 @@
package resourcehandler
import (
"fmt"
"os"
giturl "github.com/armosec/go-git-url"
"github.com/go-git/go-git/v5"
"github.com/go-git/go-git/v5/plumbing"
)
// cloneRepo clones a repository to a local temporary directory and returns the directory
func cloneRepo(gitURL giturl.IGitURL) (string, error) {
// Create temp directory
tmpDir, err := os.MkdirTemp("", "")
if err != nil {
return "", fmt.Errorf("failed to create temporary directory: %w", err)
}
// Clone option
cloneURL := gitURL.GetHttpCloneURL()
cloneOpts := git.CloneOptions{URL: cloneURL}
if gitURL.GetBranchName() != "" {
cloneOpts.ReferenceName = plumbing.NewBranchReferenceName(gitURL.GetBranchName())
cloneOpts.SingleBranch = true
}
// Actual clone
_, err = git.PlainClone(tmpDir, false, &cloneOpts)
if err != nil {
return "", fmt.Errorf("failed to clone %s. %w", gitURL.GetRepoName(), err)
}
return tmpDir, nil
}
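Usage sketch (editor's addition, not from the diff), mirroring how cloneGitRepo drives this helper; the repository URL is only an example:
func exampleCloneRepo() error {
	gitURL, err := giturl.NewGitURL("https://github.com/armosec/kubescape") // example URL
	if err != nil {
		return err
	}
	tmpDir, err := cloneRepo(gitURL)
	if err != nil {
		return err
	}
	defer os.RemoveAll(tmpDir) // remove the temporary clone when done
	// ... walk tmpDir and load manifests here ...
	return nil
}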


@@ -4,15 +4,15 @@ import (
giturl "github.com/armosec/go-git-url"
"github.com/armosec/k8s-interface/workloadinterface"
"github.com/armosec/kubescape/v2/core/cautils"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
logger "github.com/dwertent/go-logger"
"github.com/dwertent/go-logger/helpers"
)
func loadResourcesFromUrl(inputPatterns []string) (map[string][]workloadinterface.IMetadata, error) {
if len(inputPatterns) == 0 {
return nil, nil
}
g, err := giturl.NewGitURL(inputPatterns[0])
g, err := giturl.NewGitAPI(inputPatterns[0])
if err != nil {
return nil, nil
}
@@ -33,7 +33,7 @@ func loadResourcesFromUrl(inputPatterns []string) (map[string][]workloadinterfac
for i, j := range files {
w, e := cautils.ReadFile(j, cautils.GetFileFormat(i))
if len(e) != 0 || len(w) == 0 {
if e != nil || len(w) == 0 {
continue
}
if _, ok := workloads[i]; !ok {


@@ -6,7 +6,7 @@ import (
"path/filepath"
"github.com/armosec/kubescape/v2/core/cautils"
"github.com/armosec/kubescape/v2/core/cautils/logger"
logger "github.com/dwertent/go-logger"
)
var INDENT = " "
@@ -17,6 +17,7 @@ const (
JunitResultFormat string = "junit"
PrometheusFormat string = "prometheus"
PdfFormat string = "pdf"
HtmlFormat string = "html"
)
type IPrinter interface {


@@ -6,8 +6,8 @@ import (
"os"
"github.com/armosec/kubescape/v2/core/cautils"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/pkg/resultshandling/printer"
logger "github.com/dwertent/go-logger"
)
type JsonPrinter struct {


@@ -6,9 +6,9 @@ import (
"github.com/armosec/k8s-interface/workloadinterface"
"github.com/armosec/kubescape/v2/core/cautils"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/pkg/resultshandling/printer"
"github.com/armosec/opa-utils/reporthandling"
logger "github.com/dwertent/go-logger"
)
type PrometheusPrinter struct {


@@ -0,0 +1,154 @@
<!DOCTYPE html>
<html lang="en-US">
<head>
<meta charset="UTF-8">
<title>Kubescape Scan Report</title>
</head>
<style>
:root {
--cell-padding-vertical: 0.25em;
--cell-padding-horizontal: 0.25em;
--font-family-sans: system-ui, -apple-system, sans-serif;
}
body {
max-width: 60em;
margin: auto;
font-family: var(--font-family-sans);
}
table {
width: 100%;
border-top: 0.1em solid black;
border-bottom: 0.1em solid black;
border-collapse: collapse;
table-layout: fixed;
}
th {
text-align: left;
}
td, th {
padding-top: var(--cell-padding-vertical);
padding-bottom: var(--cell-padding-vertical);
padding-right: var(--cell-padding-horizontal);
vertical-align: top;
}
td > p {
margin: 0;
word-break: break-all;
hyphens: auto;
}
thead {
border-bottom: 0.01em solid black;
}
.numericCell {
text-align: right;
}
.controlSeverityCell {
width: 10%;
}
.controlNameCell {
width: 50%;
}
.controlRiskCell {
width: 10%;
}
.resourceSeverityCell {
width: 10%;
}
.resourceNameCell {
width: 30%;
}
.resourceURLCell {
width: 10%;
}
.resourceRemediationCell {
width: 50%;
}
.logo {
width: 25%;
float: right;
}
</style>
<body>
<img class="logo" src="https://raw.githubusercontent.com/armosec/kubescape/master/core/pkg/resultshandling/printer/v2/pdf/logo.png">
<h1>Kubescape Scan Report</h1>
{{ with .OPASessionObj.Report.SummaryDetails }}
<h2>By Controls</h2>
<h3>Summary</h3>
<table>
<thead>
<tr>
<th>All</th>
<th>Failed</th>
<th>Excluded</th>
<th>Skipped</th>
</tr>
</thead>
<tbody>
<tr>
<td>{{ .NumberOfControls.All }}</td>
<td>{{ .NumberOfControls.Failed }}</td>
<td>{{ .NumberOfControls.Excluded }}</td>
<td>{{ .NumberOfControls.Skipped }}</td>
</tr>
</tbody>
</table>
<h3>Details</h3>
<table>
<thead>
<tr>
<th class="controlSeverityCell">Severity</th>
<th class="controlNameCell">Control Name</th>
<th class="controlRiskCell">Failed Resources</th>
<th class="controlRiskCell">Excluded Resources</th>
<th class="controlRiskCell">All Resources</th>
<th class="controlRiskCell">Risk Score, %</th>
</tr>
</thead>
<tbody>
{{ $sorted := sortBySeverityName .Controls }}
{{ range $control := $sorted }}
<tr>
<td class="controlSeverityCell">{{ controlSeverityToString $control.ScoreFactor }}</td>
<td class="controlNameCell">{{ $control.Name }}</td>
<td class="controlRiskCell numericCell">{{ $control.ResourceCounters.FailedResources }}</td>
<td class="controlRiskCell numericCell">{{ $control.ResourceCounters.ExcludedResources }}</td>
<td class="controlRiskCell numericCell">{{ sum $control.ResourceCounters.ExcludedResources $control.ResourceCounters.FailedResources $control.ResourceCounters.PassedResources }}</td>
<td class="controlRiskCell numericCell">{{ float32ToInt $control.Score }}</td>
</tr>
{{ end }}
<tbody>
</table>
{{ end }}
<h2>By Resource</h2>
{{ $sortedResourceTableView := sortByNamespace .ResourceTableView }}
{{ range $sortedResourceTableView }}
<h3>Name: {{ .Resource.GetName }}</h3>
<p>ApiVersion: {{ .Resource.GetApiVersion }}</p>
<p>Kind: {{ .Resource.GetKind }}</p>
<p>Name: {{ .Resource.GetName }}</p>
<p>Namespace: {{ .Resource.GetNamespace }}</p>
<table>
<thead>
<tr>
<th class="resourceSeverityCell">Severity</th>
<th class="resourceNameCell">Name</th>
<th class="resourceURLCell">Docs</th>
<th class="resourceRemediationCell">Assistant Remediation</th>
</tr>
</thead>
<tbody>
{{ range .ControlsResult }}
<tr>
<td class="resourceSeverityCell">{{ .Severity }}</td>
<td class="resourceNameCell">{{ .Name }}</td>
<td class="resourceURLCell"><a href="https://hub.armosec.io/docs/{{ lower .URL }}">{{ .URL }}</a></td>
<td class="resourceRemediationCell">{{ range .FailedPaths }} <p>{{ . }}</p> {{ end }}</td>
</tr>
{{ end }}
</tbody>
</table>
</div>
{{ end }}
</body>
</html>


@@ -0,0 +1,151 @@
package v2
import (
_ "embed"
"html/template"
"os"
"path/filepath"
"sort"
"strings"
"github.com/armosec/kubescape/v2/core/cautils"
"github.com/armosec/kubescape/v2/core/pkg/resultshandling/printer"
"github.com/armosec/opa-utils/reporthandling/apis"
"github.com/armosec/opa-utils/reporthandling/results/v1/reportsummary"
"github.com/armosec/opa-utils/reporthandling/results/v1/resourcesresults"
logger "github.com/dwertent/go-logger"
"github.com/dwertent/go-logger/helpers"
)
const (
htmlOutputFile = "report"
htmlOutputExt = ".html"
)
//go:embed html/report.gohtml
var reportTemplate string
type HTMLReportingCtx struct {
OPASessionObj *cautils.OPASessionObj
ResourceTableView ResourceTableView
}
type HtmlPrinter struct {
writer *os.File
}
func NewHtmlPrinter() *HtmlPrinter {
return &HtmlPrinter{}
}
func (htmlPrinter *HtmlPrinter) SetWriter(outputFile string) {
if outputFile == "" {
outputFile = htmlOutputFile
}
if filepath.Ext(strings.TrimSpace(outputFile)) != htmlOutputExt {
outputFile = outputFile + htmlOutputExt
}
htmlPrinter.writer = printer.GetWriter(outputFile)
}
func (htmlPrinter *HtmlPrinter) ActionPrint(opaSessionObj *cautils.OPASessionObj) {
tplFuncMap := template.FuncMap{
"sum": func(nums ...int) int {
total := 0
for _, n := range nums {
total += n
}
return total
},
"float32ToInt": cautils.Float32ToInt,
"lower": strings.ToLower,
"sortByNamespace": func(resourceTableView ResourceTableView) ResourceTableView {
sortedResourceTableView := make(ResourceTableView, len(resourceTableView))
copy(sortedResourceTableView, resourceTableView)
sort.SliceStable(
sortedResourceTableView,
func(i, j int) bool {
return sortedResourceTableView[i].Resource.GetNamespace() < sortedResourceTableView[j].Resource.GetNamespace()
},
)
return sortedResourceTableView
},
"controlSeverityToString": apis.ControlSeverityToString,
"sortBySeverityName": func(controlSummaries map[string]reportsummary.ControlSummary) []reportsummary.ControlSummary {
sortedSlice := make([]reportsummary.ControlSummary, 0, len(controlSummaries))
for _, val := range controlSummaries {
sortedSlice = append(sortedSlice, val)
}
sort.SliceStable(
sortedSlice,
func(i, j int) bool {
//First sort by Severity descending
iSeverity := apis.ControlSeverityToInt(sortedSlice[i].GetScoreFactor())
jSeverity := apis.ControlSeverityToInt(sortedSlice[j].GetScoreFactor())
if iSeverity > jSeverity {
return true
}
if iSeverity < jSeverity {
return false
}
//And then by Name ascending
return sortedSlice[i].GetName() < sortedSlice[j].GetName()
},
)
return sortedSlice
},
}
tpl := template.Must(
template.New("htmlReport").Funcs(tplFuncMap).Parse(reportTemplate),
)
resourceTableView := buildResourceTableView(opaSessionObj)
reportingCtx := HTMLReportingCtx{opaSessionObj, resourceTableView}
err := tpl.Execute(htmlPrinter.writer, reportingCtx)
if err != nil {
logger.L().Error("failed to render template", helpers.Error(err))
}
}
func (htmlPrinter *HtmlPrinter) Score(score float32) {
return
}
func buildResourceTableView(opaSessionObj *cautils.OPASessionObj) ResourceTableView {
resourceTableView := make(ResourceTableView, 0)
for resourceID, result := range opaSessionObj.ResourcesResult {
if result.GetStatus(nil).IsFailed() {
resource := opaSessionObj.AllResources[resourceID]
ctlResults := buildResourceControlResultTable(result.AssociatedControls, &opaSessionObj.Report.SummaryDetails)
resourceTableView = append(resourceTableView, ResourceResult{resource, ctlResults})
}
}
return resourceTableView
}
func buildResourceControlResult(resourceControl resourcesresults.ResourceAssociatedControl, control reportsummary.IControlSummary) ResourceControlResult {
ctlSeverity := apis.ControlSeverityToString(control.GetScoreFactor())
ctlName := resourceControl.GetName()
ctlURL := resourceControl.GetID()
failedPaths := failedPathsToString(&resourceControl)
return ResourceControlResult{ctlSeverity, ctlName, ctlURL, failedPaths}
}
func buildResourceControlResultTable(resourceControls []resourcesresults.ResourceAssociatedControl, summaryDetails *reportsummary.SummaryDetails) []ResourceControlResult {
var ctlResults []ResourceControlResult
for _, resourceControl := range resourceControls {
if resourceControl.GetStatus(nil).IsFailed() {
control := summaryDetails.Controls.GetControl(reportsummary.EControlCriteriaName, resourceControl.GetName())
ctlResult := buildResourceControlResult(resourceControl, control)
ctlResults = append(ctlResults, ctlResult)
}
}
return ctlResults
}


@@ -6,9 +6,9 @@ import (
"os"
"github.com/armosec/kubescape/v2/core/cautils"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
"github.com/armosec/kubescape/v2/core/pkg/resultshandling/printer"
logger "github.com/dwertent/go-logger"
"github.com/dwertent/go-logger/helpers"
)
type JsonPrinter struct {
@@ -28,7 +28,7 @@ func (jsonPrinter *JsonPrinter) Score(score float32) {
}
func (jsonPrinter *JsonPrinter) ActionPrint(opaSessionObj *cautils.OPASessionObj) {
r, err := json.Marshal(DataToJson(opaSessionObj))
r, err := json.Marshal(FinalizeResults(opaSessionObj))
if err != nil {
logger.L().Fatal("failed to Marshal posture report object")
}


@@ -9,11 +9,11 @@ import (
"github.com/armosec/k8s-interface/workloadinterface"
"github.com/armosec/kubescape/v2/core/cautils"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
"github.com/armosec/kubescape/v2/core/pkg/resultshandling/printer"
"github.com/armosec/opa-utils/reporthandling/results/v1/reportsummary"
"github.com/armosec/opa-utils/shared"
logger "github.com/dwertent/go-logger"
"github.com/dwertent/go-logger/helpers"
)
/*
@@ -49,7 +49,7 @@ type JUnitTestSuite struct {
XMLName xml.Name `xml:"testsuite"`
Name string `xml:"name,attr"` // Full (class) name of the test for non-aggregated testsuite documents. Class name without the package for aggregated testsuites documents. Required
Disabled int `xml:"disabled,attr"` // The total number of disabled tests in the suite. optional. not supported by maven surefire.
Errors int `xml:"errors,attr"` // The total number of tests in the suite that errored
Errors int `xml:"errors,attr"` // The total number of tests in the suite that errors
Failures int `xml:"failures,attr"` // The total number of tests in the suite that failed
Hostname string `xml:"hostname,attr"` // Host on which the tests were executed ? cluster name ?
ID int `xml:"id,attr"` // Starts at 0 for the first testsuite and is incremented by 1 for each following testsuite
@@ -181,7 +181,7 @@ func testsCases(results *cautils.OPASessionObj, controls reportsummary.IControls
testCase.Failure = &testCaseFailure
} else if control.GetStatus().IsSkipped() {
testCase.SkipMessage = &JUnitSkipMessage{
Message: "", // TODO - fill after statusInfo is supportred
Message: "", // TODO - fill after statusInfo is supported
}
}


@@ -10,10 +10,11 @@ import (
"time"
"github.com/armosec/kubescape/v2/core/cautils"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
"github.com/armosec/kubescape/v2/core/pkg/resultshandling/printer"
"github.com/armosec/opa-utils/reporthandling/results/v1/reportsummary"
logger "github.com/dwertent/go-logger"
"github.com/dwertent/go-logger/helpers"
"github.com/johnfercher/maroto/pkg/color"
"github.com/johnfercher/maroto/pkg/consts"
"github.com/johnfercher/maroto/pkg/pdf"


@@ -33,7 +33,7 @@ func NewPrettyPrinter(verboseMode bool, formatVersion string, viewType cautils.V
}
func (prettyPrinter *PrettyPrinter) ActionPrint(opaSessionObj *cautils.OPASessionObj) {
fmt.Fprintf(prettyPrinter.writer, "\n"+getSperator("^")+"\n")
fmt.Fprintf(prettyPrinter.writer, "\n"+getSeparator("^")+"\n")
sortedControlNames := getSortedControlsNames(opaSessionObj.Report.SummaryDetails.Controls) // ListControls().All())
@@ -188,6 +188,10 @@ func generateFooter(summaryDetails *reportsummary.SummaryDetails) []string {
}
func (prettyPrinter *PrettyPrinter) printSummaryTable(summaryDetails *reportsummary.SummaryDetails, sortedControlNames [][]string) {
if summaryDetails.NumberOfControls().All() == 0 {
fmt.Fprintf(prettyPrinter.writer, "\nKubescape did not scan any of the resources, make sure you are scanning valid kubernetes manifests (Deployments, Pods, etc.)\n")
return
}
cautils.InfoTextDisplay(prettyPrinter.writer, "\n"+controlCountersForSummary(summaryDetails.NumberOfControls())+"\n\n")
summaryTable := tablewriter.NewWriter(prettyPrinter.writer)
@@ -196,10 +200,16 @@ func (prettyPrinter *PrettyPrinter) printSummaryTable(summaryDetails *reportsumm
summaryTable.SetHeaderLine(true)
summaryTable.SetColumnAlignment(getColumnsAlignments())
printAll := prettyPrinter.verboseMode
if summaryDetails.NumberOfResources().Failed() == 0 {
// if there are no failed controls, print the resource table and detailed information
printAll = true
}
infoToPrintInfo := mapInfoToPrintInfo(summaryDetails.Controls)
for i := len(sortedControlNames) - 1; i >= 0; i-- {
for _, c := range sortedControlNames[i] {
row := generateRow(summaryDetails.Controls.GetControl(reportsummary.EControlCriteriaName, c), infoToPrintInfo, prettyPrinter.verboseMode)
row := generateRow(summaryDetails.Controls.GetControl(reportsummary.EControlCriteriaName, c), infoToPrintInfo, printAll)
if len(row) > 0 {
summaryTable.Append(row)
}
@@ -243,7 +253,7 @@ func frameworksScoresToString(frameworks []reportsummary.IFrameworkSummary) stri
}
func getControlLink(controlID string) string {
return fmt.Sprintf("https://hub.armo.cloud/docs/%s", strings.ToLower(controlID))
return fmt.Sprintf("https://hub.armosec.io/docs/%s", strings.ToLower(controlID))
}
func controlCountersForSummary(counters reportsummary.ICounters) string {
@@ -253,7 +263,7 @@ func controlCountersForSummary(counters reportsummary.ICounters) string {
func controlCountersForResource(l *helpersv1.AllLists) string {
return fmt.Sprintf("Controls: %d (Failed: %d, Excluded: %d)", len(l.All()), len(l.Failed()), len(l.Excluded()))
}
func getSperator(sep string) string {
func getSeparator(sep string) string {
s := ""
for i := 0; i < 80; i++ {
s += sep


@@ -6,11 +6,11 @@ import (
"github.com/armosec/k8s-interface/workloadinterface"
"github.com/armosec/kubescape/v2/core/cautils"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
"github.com/armosec/kubescape/v2/core/pkg/resultshandling/printer"
"github.com/armosec/opa-utils/reporthandling/results/v1/reportsummary"
"github.com/armosec/opa-utils/reporthandling/results/v1/resourcesresults"
logger "github.com/dwertent/go-logger"
"github.com/dwertent/go-logger/helpers"
)
type PrometheusPrinter struct {


@@ -0,0 +1,19 @@
package v2
import (
"github.com/armosec/k8s-interface/workloadinterface"
)
type ResourceTableView []ResourceResult
type ResourceResult struct {
Resource workloadinterface.IMetadata
ControlsResult []ResourceControlResult
}
type ResourceControlResult struct {
Severity string
Name string
URL string
FailedPaths []string
}


@@ -29,10 +29,10 @@ func (prettyPrinter *PrettyPrinter) resourceTable(opaSessionObj *cautils.OPASess
if !ok {
continue
}
fmt.Fprintf(prettyPrinter.writer, "\n"+getSperator("#")+"\n\n")
fmt.Fprintf(prettyPrinter.writer, "\n%s\n", getSeparator("#"))
if source, ok := opaSessionObj.ResourceSource[resourceID]; ok {
fmt.Fprintf(prettyPrinter.writer, "Source: %s\n", source)
fmt.Fprintf(prettyPrinter.writer, "Source: %s\n", source.RelativePath)
}
fmt.Fprintf(prettyPrinter.writer, "ApiVersion: %s\n", resource.GetApiVersion())
fmt.Fprintf(prettyPrinter.writer, "Kind: %s\n", resource.GetKind())
@@ -73,7 +73,7 @@ func generateResourceRows(controls []resourcesresults.ResourceAssociatedControl,
continue
}
row[resourceColumnURL] = fmt.Sprintf("https://hub.armo.cloud/docs/%s", strings.ToLower(controls[i].GetID()))
row[resourceColumnURL] = fmt.Sprintf("https://hub.armosec.io/docs/%s", strings.ToLower(controls[i].GetID()))
row[resourceColumnPath] = strings.Join(failedPathsToString(&controls[i]), "\n")
row[resourceColumnName] = controls[i].GetName()


@@ -3,16 +3,16 @@ package v2
import (
"github.com/armosec/k8s-interface/workloadinterface"
"github.com/armosec/kubescape/v2/core/cautils"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
"github.com/armosec/opa-utils/reporthandling"
"github.com/armosec/opa-utils/reporthandling/results/v1/reportsummary"
"github.com/armosec/opa-utils/reporthandling/results/v1/resourcesresults"
reporthandlingv2 "github.com/armosec/opa-utils/reporthandling/v2"
logger "github.com/dwertent/go-logger"
"github.com/dwertent/go-logger/helpers"
)
// finalizeV2Report finalize the results objects by copying data from map to lists
func DataToJson(data *cautils.OPASessionObj) *reporthandlingv2.PostureReport {
func FinalizeResults(data *cautils.OPASessionObj) *reporthandlingv2.PostureReport {
report := reporthandlingv2.PostureReport{
SummaryDetails: data.Report.SummaryDetails,
ClusterAPIServerInfo: data.Report.ClusterAPIServerInfo,
@@ -62,13 +62,13 @@ func mapInfoToPrintInfo(controls reportsummary.ControlSummaries) []infoStars {
return infoToPrintInfo
}
func finalizeResources(results []resourcesresults.Result, allResources map[string]workloadinterface.IMetadata, resourcesSource map[string]string) []reporthandling.Resource {
func finalizeResources(results []resourcesresults.Result, allResources map[string]workloadinterface.IMetadata, resourcesSource map[string]reporthandling.Source) []reporthandling.Resource {
resources := make([]reporthandling.Resource, 0)
for i := range results {
if obj, ok := allResources[results[i].ResourceID]; ok {
resource := *reporthandling.NewResourceIMetadata(obj)
if r, ok := resourcesSource[results[i].ResourceID]; ok {
resource.SetSource(&reporthandling.Source{Path: r})
resource.SetSource(&r)
}
resources = append(resources, resource)
}


@@ -10,9 +10,11 @@ import (
"github.com/armosec/k8s-interface/workloadinterface"
"github.com/armosec/kubescape/v2/core/cautils"
"github.com/armosec/kubescape/v2/core/cautils/getter"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
v2 "github.com/armosec/kubescape/v2/core/pkg/resultshandling/reporter/v2"
"github.com/armosec/opa-utils/reporthandling"
logger "github.com/dwertent/go-logger"
"github.com/dwertent/go-logger/helpers"
"github.com/google/uuid"
)
@@ -140,8 +142,8 @@ func (report *ReportEventReceiver) generateMessage() {
message := "You can see the results in a user-friendly UI, choose your preferred compliance framework, check risk results history and trends, manage exceptions, get remediation recommendations and much more by registering here:"
u := url.URL{}
u.Scheme = "https"
u.Host = getter.GetArmoAPIConnector().GetFrontendURL()
v2.ParseHost(&u)
if report.customerAdminEMail != "" {
logger.L().Debug("", helpers.String("account ID", report.customerGUID))

Some files were not shown because too many files have changed in this diff.