Compare commits

...

114 Commits

Author SHA1 Message Date
David Wertenteil
298f8346e9 validate downloaded framework 2022-12-15 17:13:14 +02:00
Rotem Refael
dce563d2f5 Merge pull request #973 from kooomix/dev
Excluding controlPlaneInfo from error message in case no data received.
2022-12-14 11:02:55 +02:00
kooomix
8d556a5b84 minor 2022-12-14 10:48:01 +02:00
kooomix
a61063e5b8 revert opa-utils version 2022-12-14 10:45:24 +02:00
kooomix
94973867db Merge branch 'kubescape:dev' into dev 2022-12-14 10:23:11 +02:00
kooomix
214c2dcae8 patch to filter out "controlPlaneInfo" from error messages in case no data 2022-12-14 10:19:24 +02:00
David Wertenteil
72b36bf012 Merge pull request #968 from fredbi/chore/package-name
chore(style): renamed versioned packages to stick to idiomatic conventions
2022-12-13 16:52:57 +02:00
Frederic BIDON
4335e6ceac chore(style): renamed versioned packages to stick to idiomatic conventions
* fixes: #967

Signed-off-by: Frederic BIDON <fredbi@yahoo.com>
2022-12-13 14:27:21 +01:00
kooomix
b5f92a7d54 go mod tidy 2022-12-13 11:32:23 +02:00
kooomix
41ec75d264 update opa-utils v0.0.209 2022-12-13 11:20:17 +02:00
kooomix
6d6ad1f487 Merge pull request #963 from kooomix/outputs_to_get_controls_only_by_ids
All prints and outputs to get data only by control ID
2022-12-13 08:32:01 +02:00
kooomix
3ac33d21ac All prints and outputs to get data by control ID 2022-12-12 15:20:48 +02:00
Moshe Rappaport
04ea0fe524 change linux runner to 20.04 instead of ubuntu-latest (#960)
Co-authored-by: Amir Malka <amirm@armosec.io>
2022-12-11 14:20:28 +02:00
David Wertenteil
30c43bff10 Merge pull request #958 from Moshe-Rappaport-CA/dev
Fix Junit format
2022-12-08 19:41:31 +02:00
Moshe-Rappaport-CA
e009244566 Fix Junit format 2022-12-08 17:56:16 +02:00
David Wertenteil
3d3cd2c2d8 Added Kubescape flow.drawio 2022-12-06 15:44:34 +02:00
David Wertenteil
f5498371ec Merge pull request #942 from kooomix/eran-dev
new host-scanner endpoint - cloudProviderInfo
2022-12-06 15:20:24 +02:00
David Wertenteil
c3b95bed8c Merge branch 'dev' into eran-dev 2022-12-06 14:17:49 +02:00
David Wertenteil
8ce7d6c0f6 Merge pull request #930 from JusteenR/issue929
Issue929
2022-12-06 14:15:35 +02:00
David Wertenteil
e875f429a9 Merge pull request #948 from YiscahLevySilas1/dev
Print host scanner version
2022-12-06 14:13:47 +02:00
David Wertenteil
b6beff0488 Merge pull request #946 from suhasgumma/dev
Fixed: CIS control link not working for html output format
2022-12-06 14:13:06 +02:00
David Wertenteil
60c69ac3f0 Merge pull request #950 from fredbi/fix-789
fix(giturlparse): fixes panic on unexpected gitlab remote URL
2022-12-06 14:12:25 +02:00
David Wertenteil
1fb9320421 Merge pull request #941 from dwertent/master
Updating examples
2022-12-06 14:11:07 +02:00
David Wertenteil
9a176f6667 remove tag latest 2022-12-06 11:42:34 +02:00
David Wertenteil
96ea9a9e42 fixed scanning example 2022-12-06 11:41:12 +02:00
David Wertenteil
e39fca0c11 do not build dev images 2022-12-06 11:05:21 +02:00
David Wertenteil
2ec035005d fixed echo command 2022-12-04 15:45:23 +02:00
Frederic BIDON
b734b3aef0 go mod tidy ancillary modules manifest
Signed-off-by: Frederic BIDON <fredbi@yahoo.com>
2022-12-04 12:39:34 +01:00
yiscah
0f5635f42d move parsing of version to GetVersion 2022-12-04 12:17:04 +02:00
Frederic BIDON
8557075b7c fix(giturlparse): fixes panic on unexpected gitlab remote URL
* replaced dependencies to github.com/armosec/go-git-url by
github.com/kubescape/go-git-url
* fixes #789

NOTE: this requires kubescape/go-git-url#2 to be merged and a new release of
that repo to be cut in order to finalize the dependency update.

Signed-off-by: Frederic BIDON <fredbi@yahoo.com>
2022-12-02 16:09:25 +01:00
David Wertenteil
bc0f0e7087 Merge branch 'master' of github.com:dwertent/kubescape 2022-12-02 02:31:14 +02:00
David Wertenteil
8ce5f9aea3 fixed typo 2022-12-02 02:30:35 +02:00
David Wertenteil
050f9d3a4e Update cmd/scan/framework.go
Co-authored-by: craigbox <craig.box@gmail.com>
2022-12-02 02:16:43 +02:00
David Wertenteil
a81bf0deb4 deprecate set-output 2022-12-02 01:43:45 +02:00
David Wertenteil
2059324c27 testing release 2022-12-02 01:35:57 +02:00
David Wertenteil
a09a0a1bca Merge pull request #9 from dwertent/fix-dev-image
run build only if secret is set
2022-12-02 01:32:26 +02:00
David Wertenteil
83712bb9f5 run build only if secret is set 2022-12-02 01:30:24 +02:00
David Wertenteil
728ae47b9a Merge pull request #8 from dwertent/fix-dev-image
Fix dev image
2022-12-02 00:56:12 +02:00
David Wertenteil
2a9b272a14 tagging only main image as latest 2022-12-02 00:54:03 +02:00
David Wertenteil
8662deac43 update repository scanning URL 2022-12-02 00:42:28 +02:00
yiscah
e42644bbd8 update hostscanner version 2022-12-01 08:57:58 +02:00
YiscahLevySilas1
07d30b6272 Merge branch 'kubescape:dev' into dev 2022-11-30 20:52:48 +02:00
yiscah
2a4f8543cc added logs of host scanner version 2022-11-30 20:51:45 +02:00
suhasgumma
186b293cce fix link for cis controls in html output 2022-11-30 01:23:45 +05:30
David Wertenteil
2bfe72f39d Merge pull request #944 from kooomix/dev
opa-utils adjustments + dataControlInputs support
2022-11-29 19:11:49 +02:00
kooomix
f99f955223 go mod tidy 2022-11-29 15:26:30 +02:00
kooomix
ec56e69a3c minor fix 2022-11-29 14:55:30 +02:00
kooomix
3942583b1d Merge pull request #1 from kooomix/dataControlInputs
update opa-utils functions
2022-11-29 14:35:08 +02:00
kooomix
a10b15ba4b update opa-utils functions 2022-11-29 14:29:33 +02:00
David Wertenteil
5003cbd7a8 Merge pull request #943 from suhasgumma/invalidformat
Handle Invalid Formats
2022-11-28 17:39:14 +02:00
kooomix
481a137c23 Update host-scanner image version to v1.0.38 2022-11-28 16:46:32 +02:00
suhasgumma
c3f7f0938d Handle Invalid Formats 2022-11-28 19:56:27 +05:30
kooomix
b1925fa38d Support in new host-scanner endpoint - cloudProviderInfo 2022-11-28 09:18:43 +02:00
David Wertenteil
d9f8a7a46f Merge pull request #918 from suhasgumma/dev1
Store Git Repo's root path as localRootPath
2022-11-27 16:25:24 +02:00
David Wertenteil
846a072bf9 Merge pull request #917 from suhasgumma/dev
Fixed: Wrong Relative Path When scanning Local Directory
2022-11-27 16:24:19 +02:00
kooomix
5dd7bbd8a7 Merge pull request #938 from kooomix/eran-dev
Added cloudProvider to postureControlInputs
2022-11-27 09:06:56 +02:00
kooomix
e1773acf24 Getting cloud provider from gitversion of discovered API version 2022-11-25 09:27:27 +02:00
kooomix
03a0f97669 Getting cluster name from context 2022-11-24 16:09:05 +02:00
David Wertenteil
917a3f41e8 Merge pull request #925 from amirmalka/dev
Omit raw resources flag in json output
2022-11-24 14:47:14 +02:00
David Wertenteil
3c8da1b299 supporting client type from env 2022-11-24 11:09:30 +02:00
David Wertenteil
c61c7edbd0 update examples 2022-11-24 11:06:37 +02:00
kooomix
53402d9a1c Added "CloudProvider" to postureControlInputs 2022-11-23 11:57:36 +02:00
David Wertenteil
de9278b388 Merge pull request #935 from mkilchhofer/bugfix/use_correct_directory
fix: filepath.Dir requires trailing slash
2022-11-23 10:49:16 +02:00
Marco Kilchhofer
4fef6200f8 fix: filepath.Dir requires trailing slash
Signed-off-by: Marco Kilchhofer <mkilchhofer@users.noreply.github.com>
2022-11-22 21:26:37 +01:00
JusteenR
81771b7bd7 Adding frameworks column to control command 2022-11-20 15:42:13 -08:00
Moshe Rappaport
2fee77c42c Merge pull request #928 from Moshe-Rappaport-CA/PER-633-support-loading-exceptions-from-cache-kubescape
PER-633 support loading exceptions from cache
2022-11-20 14:09:30 +02:00
Moshe-Rappaport-CA
968ecdb31d PER-633 support loading exceptions from cache 2022-11-20 12:22:15 +02:00
David Wertenteil
af7b36a88b Merge pull request #927 from Moshe-Rappaport-CA/PER-550-support-loading-attack-tracks-from-cache-kubescape
Per 550 support loading attack tracks from cache kubescape
2022-11-20 11:24:38 +02:00
Moshe-Rappaport-CA
6ad58d38e2 PER-550 Support loading attack tracks from cache 2022-11-17 16:31:51 +02:00
Moshe-Rappaport-CA
681b4ce155 stash 2022-11-17 10:49:36 +02:00
Moshe Rappaport
9d21ac1b16 Merge pull request #924 from Moshe-Rappaport-CA/dev
revert change in Junit format
2022-11-16 15:36:48 +02:00
Amir Malka
2b3fcca7e8 omit raw resources flag in json output 2022-11-16 12:15:17 +02:00
David Wertenteil
af8e786ab5 Merge pull request #914 from kubescape/dev
Closing issues
2022-11-16 10:59:59 +02:00
Moshe-Rappaport-CA
c8df1b8f1f Merge remote-tracking branch 'armo/dev' into dev 2022-11-15 17:34:45 +02:00
Moshe-Rappaport-CA
4f921ddf6f Revert PR #802 to the old junit format 2022-11-15 16:59:37 +02:00
David Wertenteil
4f5839870b Merge pull request #920 from amirmalka/dev
Fixed docker build to support ARM #919
2022-11-15 14:53:20 +02:00
Amir Malka
c0d7f51d6c test build flow 2022-11-15 13:29:35 +02:00
Amir Malka
a81d770360 fixed docker build to support arm 2022-11-15 10:57:29 +02:00
suhasgumma
f64d5eab50 Fix RootDir Info 2022-11-15 12:38:57 +05:30
suhasgumma
d773397fe9 replace src with RelSrc 2022-11-15 10:34:36 +05:30
suhasgumma
2e30995bfc Relative Path When scanning Local Repos 2022-11-15 10:22:04 +05:30
David Wertenteil
17a2547f18 Merge pull request #915 from kubescape/change-test-control-name
replace control 0006 by 0048
2022-11-14 14:56:34 +02:00
David Wertenteil
87a5cd66c8 replace control 0006 by 0048 2022-11-14 14:36:37 +02:00
David Wertenteil
9436ace64f continue when resource not found 2022-11-14 13:52:46 +02:00
David Wertenteil
fde00f6bd8 Merge pull request #909 from suhasgumma/dev
pretty-print Controls format  made Pretty
2022-11-13 17:04:46 +02:00
David Wertenteil
04a72a069a Merge pull request #913 from dwertent/ignore-missing-resource
Do not exit on error
2022-11-13 16:04:15 +02:00
David Wertenteil
e2dcb5bc15 Merge pull request #912 from dwertent/dep-rbac-submit
Deprecate rbac submit
2022-11-13 16:03:51 +02:00
suhasgumma
c7040a257c Pretty Print frameworks and exceptions 2022-11-13 19:29:26 +05:30
suhasgumma
602dc00c65 Shift GetControlLink to cautils 2022-11-13 19:09:30 +05:30
David Wertenteil
0339691571 Merge pull request #911 from dwertent/adding-remidiation
Adding remediation
2022-11-13 15:12:21 +02:00
David Wertenteil
9e1f3ec131 remove from smoke test 2022-11-13 15:10:05 +02:00
David Wertenteil
b8589819dc Do not exit on error 2022-11-13 15:06:32 +02:00
David Wertenteil
a3e87f4c01 Updating json v1 deprecation message 2022-11-13 15:03:22 +02:00
David Wertenteil
21ab5a602e Deprecate rbac submit 2022-11-13 15:01:32 +02:00
David Wertenteil
5d97d7b4b2 adding Remediation to message 2022-11-13 14:55:52 +02:00
suhasgumma
d8d7d0b372 Updated and Used GetControlLink 2022-11-13 17:56:39 +05:30
suhasgumma
b8323d41fc Modified Link Convention for CIS Controls 2022-11-13 17:22:37 +05:30
suhasgumma
d0b5314201 Improve Code Quality 2022-11-13 15:39:04 +05:30
suhasgumma
547e36e73f Pretty Print Controls made Pretty 2022-11-13 14:29:30 +05:30
David Wertenteil
e593a772cb Merge pull request #908 from Moshe-Rappaport-CA/update-k8s-interface-version
Update k8s-interface version and rbac-utils
2022-11-13 09:31:00 +02:00
Moshe-Rappaport-CA
4da09529b6 Update rbac-utils tag 2022-11-10 18:56:28 +02:00
Moshe-Rappaport-CA
de375992e8 Fix go.mod in httphandler 2022-11-10 17:54:44 +02:00
Moshe-Rappaport-CA
0bc4a29881 Update k8s-interface version 2022-11-10 17:38:32 +02:00
David Wertenteil
9575c92713 Merge pull request #906 from suhasgumma/dev
Fixed: Empty Lines before printing Controls and Added Invalid Format Error
2022-11-10 11:27:22 +02:00
David Wertenteil
cf277874eb Merge pull request #907 from matthyx/ioutil
remove deprecated ioutil package
2022-11-10 11:23:10 +02:00
Matthias Bertschy
746e060402 remove deprecated ioutil package 2022-11-10 09:58:07 +01:00
suhasgumma
dd3a7c816e Invalid Format Error 2022-11-10 11:57:57 +05:30
suhasgumma
814bc3ab2c Solved: Empty Lines before printing Controls 2022-11-10 11:17:48 +05:30
David Wertenteil
dbaf6761df Merge pull request #905 from matthyx/900
900
2022-11-10 06:52:34 +02:00
Matthias Bertschy
580e45827d add IDs to controls list, deprecate id flag 2022-11-09 22:08:04 +01:00
David Wertenteil
f3b8de9d1f fixing readme (#899) 2022-11-08 12:02:52 +02:00
David Wertenteil
6e9a2f55fd Merge pull request #894 from kubescape/dev
Enhancing CLI capabilities and SARIF output
2022-11-06 15:40:00 +02:00
David Wertenteil
dd7a8fd0c1 Merge pull request #883 from kubescape/dev
Minor changes
2022-10-26 13:31:04 +03:00
David Wertenteil
3373b728b7 Merge pull request #877 from kubescape/dev
Enhance configuration usage
2022-10-24 12:00:27 +03:00
97 changed files with 963 additions and 594 deletions

View File

@@ -26,14 +26,24 @@ on:
type: boolean
description: 'support amd64/arm64'
secrets:
QUAYIO_REGISTRY_USERNAME:
required: true
QUAYIO_REGISTRY_PASSWORD:
required: true
jobs:
check-secret:
name: check if QUAYIO_REGISTRY_USERNAME & QUAYIO_REGISTRY_PASSWORD is set in github secrets
runs-on: ubuntu-latest
outputs:
is-secret-set: ${{ steps.check-secret-set.outputs.is-secret-set }}
steps:
- name: Check whether unity activation requests should be done
id: check-secret-set
env:
QUAYIO_REGISTRY_USERNAME: ${{ secrets.QUAYIO_REGISTRY_USERNAME }}
QUAYIO_REGISTRY_PASSWORD: ${{ secrets.QUAYIO_REGISTRY_PASSWORD }}
run: |
echo "is-secret-set=${{ env.QUAYIO_REGISTRY_USERNAME != '' && env.QUAYIO_REGISTRY_PASSWORD != '' }}" >> $GITHUB_OUTPUT
build-image:
needs: [check-secret]
if: needs.check-secret.outputs.is-secret-set == 'true'
name: Build image and upload to registry
runs-on: ubuntu-latest
permissions:
@@ -61,10 +71,10 @@ jobs:
- name: Build and push image
if: ${{ inputs.support_platforms }}
run: docker buildx build . --file build/Dockerfile --tag ${{ inputs.image_name }}:${{ inputs.image_tag }} --tag ${{ inputs.image_name }}:latest --build-arg image_version=${{ inputs.image_tag }} --build-arg client=${{ inputs.client }} --push --platform linux/amd64,linux/arm64
- name: Build and push image without amd64/arm64 support
if: ${{ !inputs.support_platforms }}
run: docker buildx build . --file build/Dockerfile --tag ${{ inputs.image_name }}:${{ inputs.image_tag }} --tag ${{ inputs.image_name }}:latest --build-arg image_version=${{ inputs.image_tag }} --build-arg client=${{ inputs.client }} --push
- name: Install cosign
uses: sigstore/cosign-installer@main
@@ -75,6 +85,5 @@ jobs:
env:
COSIGN_EXPERIMENTAL: "true"
run: |
cosign sign --force ${{ inputs.image_name }}:latest
cosign sign --force ${{ inputs.image_name }}:${{ inputs.image_tag }}
cosign sign --force ${{ inputs.image_name }}

View File

@@ -4,7 +4,6 @@ on:
push:
branches: [ master ]
paths-ignore:
# Do not run the pipeline if only Markdown files changed
- '**.md'
jobs:
test:
@@ -29,7 +28,7 @@ jobs:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
strategy:
matrix:
os: [ubuntu-latest, macos-latest, windows-latest]
os: [ubuntu-20.04, macos-latest, windows-latest]
steps:
- uses: actions/checkout@v3
with:
@@ -56,8 +55,8 @@ jobs:
CGO_ENABLED: 1
run: python3 --version && python3 build.py
- name: Upload release binaries
id: upload-release-asset
- name: Upload release binaries (Windows / MacOS)
id: upload-release-asset-win-macos
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@@ -66,9 +65,22 @@ jobs:
asset_path: build/${{ matrix.os }}/kubescape
asset_name: kubescape-${{ matrix.os }}
asset_content_type: application/octet-stream
if: matrix.os != 'ubuntu-20.04'
- name: Upload release hash
id: upload-release-hash
- name: Upload release binaries (Linux)
id: upload-release-asset-linux
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ needs.create-release.outputs.upload_url }}
asset_path: build/ubuntu-latest/kubescape
asset_name: kubescape-ubuntu-latest
asset_content_type: application/octet-stream
if: matrix.os == 'ubuntu-20.04'
- name: Upload release hash (Windows / MacOS)
id: upload-release-hash-win-macos
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@@ -77,15 +89,27 @@ jobs:
asset_path: build/${{ matrix.os }}/kubescape.sha256
asset_name: kubescape-${{ matrix.os }}-sha256
asset_content_type: application/octet-stream
if: matrix.os != 'ubuntu-20.04'
- name: Upload release hash (Linux)
id: upload-release-hash-linux
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ needs.create-release.outputs.upload_url }}
asset_path: build/ubuntu-latest/kubescape.sha256
asset_name: kubescape-ubuntu-latest-sha256
asset_content_type: application/octet-stream
if: matrix.os == 'ubuntu-20.04'
publish-image:
if: ${{ github.repository == 'kubescape/kubescape' }} # TODO
uses: ./.github/workflows/build-image.yaml
needs: create-release
with:
client: "image-release"
image_name: "quay.io/${{ github.repository_owner }}/kubescape"
image_tag: "v2.0.${{ github.run_number }}"
support_platforms: false
support_platforms: true
cosign: true
secrets: inherit

View File

@@ -13,14 +13,13 @@ jobs:
release: "v2.0.${{ github.run_number }}"
client: test
publish-dev-image:
if: ${{ github.repository == 'kubescape/kubescape' }} # TODO
uses: ./.github/workflows/build-image.yaml
needs: test
with:
client: "image-dev"
image_name: "quay.io/${{ github.repository_owner }}/kubescape"
image_tag: "dev-v2.0.${{ github.run_number }}"
support_platforms: false
cosign: true
secrets: inherit
# publish-dev-image:
# uses: ./.github/workflows/build-image.yaml
# needs: test
# with:
# client: "image-dev"
# image_name: "quay.io/${{ github.repository_owner }}/kubescape"
# image_tag: "dev-v2.0.${{ github.run_number }}"
# support_platforms: true
# cosign: true
# secrets: inherit

View File

@@ -19,14 +19,14 @@ jobs:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
strategy:
matrix:
os: [ubuntu-latest, macos-latest, windows-latest]
os: [ubuntu-20.04, macos-latest, windows-latest]
steps:
- uses: actions/checkout@v3
with:
submodules: recursive
- name: Cache Go modules (Linux)
if: matrix.os == 'ubuntu-latest'
if: matrix.os == 'ubuntu-20.04'
uses: actions/cache@v3
with:
path: |
@@ -85,9 +85,16 @@ jobs:
CGO_ENABLED: 1
run: python3 --version && python3 build.py
- name: Smoke Testing
- name: Smoke Testing (Windows / MacOS)
env:
RELEASE: ${{ inputs.release }}
KUBESCAPE_SKIP_UPDATE_CHECK: "true"
run: python3 smoke_testing/init.py ${PWD}/build/${{ matrix.os }}/kubescape
if: matrix.os != 'ubuntu-20.04'
- name: Smoke Testing (Linux)
env:
RELEASE: ${{ inputs.release }}
KUBESCAPE_SKIP_UPDATE_CHECK: "true"
run: python3 smoke_testing/init.py ${PWD}/build/ubuntu-latest/kubescape
if: matrix.os == 'ubuntu-20.04'

View File

@@ -52,6 +52,9 @@ kubescape scan --enable-host-scan --verbose
</br>
## Architecture in short
[Component architecture](docs/architecture.drawio.svg)
### [CLI](#kubescape-cli)
<div align="center">
<img src="docs/ks-cli-arch.png" width="300" alt="cli-diagram">
@@ -220,6 +223,8 @@ kubescape scan *.yaml
```
#### Scan Kubernetes manifest files from a git repository
```
kubescape scan https://github.com/kubescape/kubescape
```

View File

@@ -57,7 +57,7 @@ def main():
if client_name:
ldflags += " -X {}={}".format(client_var, client_name)
build_command = ["go", "build", "-tags=static", "-o", ks_file, "-ldflags" ,ldflags]
build_command = ["go", "build", "-buildmode=pie", "-tags=static", "-o", ks_file, "-ldflags" ,ldflags]
print("Building kubescape and saving here: {}".format(ks_file))
print("Build command: {}".format(" ".join(build_command)))

View File

@@ -12,7 +12,7 @@ ENV CGO_ENABLED=1
# Install required python/pip
ENV PYTHONUNBUFFERED=1
RUN apk add --update --no-cache python3 git openssl-dev musl-dev gcc make cmake pkgconfig && ln -sf python3 /usr/bin/python
RUN apk add --update --no-cache python3 gcc make git libc-dev binutils-gold cmake pkgconfig && ln -sf python3 /usr/bin/python
RUN python3 -m ensurepip
RUN pip3 install --no-cache --upgrade pip setuptools

View File

@@ -24,8 +24,8 @@ var (
# Download the NSA framework. Run 'kubescape list frameworks' for all frameworks names
kubescape download framework nsa
# Download the "Allowed hostPath" control. Run 'kubescape list controls' for all controls names
kubescape download control "Allowed hostPath"
# Download the "HostPath mount" control. Run 'kubescape list controls' for all controls names
kubescape download control "HostPath mount"
# Download the "C-0001" control. Run 'kubescape list controls --id' for all controls ids
kubescape download control C-0001
@@ -36,6 +36,8 @@ var (
# Download the configured controls-inputs
kubescape download controls-inputs
# Download the attack tracks
kubescape download attack-tracks
`
)

View File

@@ -20,11 +20,8 @@ var (
# List all supported frameworks names
kubescape list frameworks --account <account id>
# List all supported controls names
# List all supported controls names with ids
kubescape list controls
# List all supported controls ids
kubescape list controls --id
Control documentation:
https://hub.armosec.io/docs/controls
@@ -67,8 +64,8 @@ func GetListCmd(ks meta.IKubescape) *cobra.Command {
listCmd.PersistentFlags().StringVarP(&listPolicies.Credentials.Account, "account", "", "", "Kubescape SaaS account ID. Default will load account ID from cache")
listCmd.PersistentFlags().StringVarP(&listPolicies.Credentials.ClientID, "client-id", "", "", "Kubescape SaaS client ID. Default will load client ID from cache, read more - https://hub.armosec.io/docs/authentication")
listCmd.PersistentFlags().StringVarP(&listPolicies.Credentials.SecretKey, "secret-key", "", "", "Kubescape SaaS secret key. Default will load secret key from cache, read more - https://hub.armosec.io/docs/authentication")
listCmd.PersistentFlags().StringVar(&listPolicies.Format, "format", "pretty-print", "output format. supported: 'pretty-printer'/'json'")
listCmd.PersistentFlags().BoolVarP(&listPolicies.ListIDs, "id", "", false, "List control ID's instead of controls names")
listCmd.PersistentFlags().StringVar(&listPolicies.Format, "format", "pretty-print", "output format. supported: 'pretty-print'/'json'")
listCmd.PersistentFlags().MarkDeprecated("id", "Control ID's are included in list outputs")
return listCmd
}

View File

@@ -27,7 +27,7 @@ var rootInfo cautils.RootInfo
var ksExamples = `
# Scan command
kubescape scan --submit
kubescape scan
# List supported frameworks
kubescape list frameworks

View File

@@ -23,7 +23,7 @@ var (
kubescape scan control "privileged container"
# Scan list of controls separated with a comma
kubescape scan control "privileged container","allowed hostpath"
kubescape scan control "privileged container","HostPath mount"
# Scan list of controls using the control ID separated with a comma
kubescape scan control C-0058,C-0057
@@ -61,7 +61,7 @@ func getControlCmd(ks meta.IKubescape, scanInfo *cautils.ScanInfo) *cobra.Comman
if err := validateFrameworkScanInfo(scanInfo); err != nil {
return err
}
// flagValidationControl(scanInfo)
scanInfo.PolicyIdentifier = []cautils.PolicyIdentifier{}
@@ -109,7 +109,7 @@ func getControlCmd(ks meta.IKubescape, scanInfo *cautils.ScanInfo) *cobra.Comman
if results.GetRiskScore() > float32(scanInfo.FailThreshold) {
logger.L().Fatal("scan risk-score is above permitted threshold", helpers.String("risk-score", fmt.Sprintf("%.2f", results.GetRiskScore())), helpers.String("fail-threshold", fmt.Sprintf("%.2f", scanInfo.FailThreshold)))
}
enforceSeverityThresholds(&results.GetResults().SummaryDetails.SeverityCounters, scanInfo, terminateOnExceedingSeverity)
enforceSeverityThresholds(results.GetResults().SummaryDetails.GetResourcesSeverityCounters(), scanInfo, terminateOnExceedingSeverity)
return nil
},
@@ -120,6 +120,10 @@ func getControlCmd(ks meta.IKubescape, scanInfo *cautils.ScanInfo) *cobra.Comman
func validateControlScanInfo(scanInfo *cautils.ScanInfo) error {
severity := scanInfo.FailThresholdSeverity
if scanInfo.Submit && scanInfo.OmitRawResources {
return fmt.Errorf("you can use `omit-raw-resources` or `submit`, but not both")
}
if err := validateSeverity(severity); severity != "" && err != nil {
return err
}

View File

@@ -22,8 +22,8 @@ import (
var (
frameworkExample = `
# Scan all frameworks and submit the results
kubescape scan framework all --submit
# Scan all frameworks
kubescape scan framework all
# Scan the NSA framework
kubescape scan framework nsa
@@ -35,7 +35,7 @@ var (
kubescape scan framework all
# Scan kubernetes YAML manifest files (single file or glob)
kubescape scan framework nsa *.yaml
kubescape scan framework nsa .
Run 'kubescape list frameworks' for the list of supported frameworks
`
@@ -119,7 +119,7 @@ func getFrameworkCmd(ks meta.IKubescape, scanInfo *cautils.ScanInfo) *cobra.Comm
logger.L().Fatal("scan risk-score is above permitted threshold", helpers.String("risk-score", fmt.Sprintf("%.2f", results.GetRiskScore())), helpers.String("fail-threshold", fmt.Sprintf("%.2f", scanInfo.FailThreshold)))
}
enforceSeverityThresholds(&results.GetData().Report.SummaryDetails.SeverityCounters, scanInfo, terminateOnExceedingSeverity)
enforceSeverityThresholds(results.GetData().Report.SummaryDetails.GetResourcesSeverityCounters(), scanInfo, terminateOnExceedingSeverity)
return nil
},
}
@@ -136,10 +136,10 @@ func countersExceedSeverityThreshold(severityCounters reportsummary.ISeverityCou
SeverityName string
GetFailedResources func() int
}{
{reporthandlingapis.SeverityLowString, severityCounters.NumberOfResourcesWithLowSeverity},
{reporthandlingapis.SeverityMediumString, severityCounters.NumberOfResourcesWithMediumSeverity},
{reporthandlingapis.SeverityHighString, severityCounters.NumberOfResourcesWithHighSeverity},
{reporthandlingapis.SeverityCriticalString, severityCounters.NumberOfResourcesWithCriticalSeverity},
{reporthandlingapis.SeverityLowString, severityCounters.NumberOfLowSeverity},
{reporthandlingapis.SeverityMediumString, severityCounters.NumberOfMediumSeverity},
{reporthandlingapis.SeverityHighString, severityCounters.NumberOfHighSeverity},
{reporthandlingapis.SeverityCriticalString, severityCounters.NumberOfCriticalSeverity},
}
targetSeverityIdx := 0
@@ -201,7 +201,9 @@ func validateFrameworkScanInfo(scanInfo *cautils.ScanInfo) error {
if 100 < scanInfo.FailThreshold || 0 > scanInfo.FailThreshold {
return fmt.Errorf("bad argument: out of range threshold")
}
if scanInfo.Submit && scanInfo.OmitRawResources {
return fmt.Errorf("you can use `omit-raw-resources` or `submit`, but not both")
}
severity := scanInfo.FailThresholdSeverity
if err := validateSeverity(severity); severity != "" && err != nil {
return err
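
For context on how the renamed severity counters above are consumed, here is a hedged, self-contained sketch of the threshold rule that the tests further below exercise: a scan fails when any resource at or above the configured fail-threshold severity has failed. The counter type and field names are illustrative stand-ins, not the reportsummary API.

package main

import "fmt"

// severityCounters is an illustrative stand-in for the reportsummary
// counters used above; the field names are not the real API.
type severityCounters struct {
	Low, Medium, High, Critical int
}

// exceedsThreshold reports whether any failed resource sits at or above
// the given threshold severity (critical > high > medium > low).
func exceedsThreshold(c severityCounters, threshold string) bool {
	switch threshold {
	case "low":
		return c.Low+c.Medium+c.High+c.Critical > 0
	case "medium":
		return c.Medium+c.High+c.Critical > 0
	case "high":
		return c.High+c.Critical > 0
	case "critical":
		return c.Critical > 0
	}
	return false
}

func main() {
	fmt.Println(exceedsThreshold(severityCounters{Critical: 1}, "high")) // true
	fmt.Println(exceedsThreshold(severityCounters{Medium: 1}, "high"))   // false
}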

View File

@@ -17,10 +17,10 @@ var scanCmdExamples = `
kubescape scan --enable-host-scan --verbose
# Scan kubernetes YAML manifest files
kubescape scan *.yaml
kubescape scan .
# Scan and save the results in the JSON format
kubescape scan --format json --output results.json
kubescape scan --format json --output results.json --format-version=v2
# Display all resources
kubescape scan --verbose
@@ -88,11 +88,13 @@ func GetScanCommand(ks meta.IKubescape) *cobra.Command {
scanCmd.PersistentFlags().StringVar(&scanInfo.FormatVersion, "format-version", "v1", "Output object can be different between versions, this is for maintaining backward and forward compatibility. Supported:'v1'/'v2'")
scanCmd.PersistentFlags().StringVar(&scanInfo.CustomClusterName, "cluster-name", "", "Set the custom name of the cluster. Not same as the kube-context flag")
scanCmd.PersistentFlags().BoolVarP(&scanInfo.Submit, "submit", "", false, "Submit the scan results to Kubescape SaaS where you can see the results in a user-friendly UI, choose your preferred compliance framework, check risk results history and trends, manage exceptions, get remediation recommendations and much more. By default the results are not submitted")
scanCmd.PersistentFlags().BoolVarP(&scanInfo.OmitRawResources, "omit-raw-resources", "", false, "Omit raw resources from the output. By default the raw resources are included in the output")
scanCmd.PersistentFlags().MarkDeprecated("silent", "use '--logger' flag instead. Flag will be removed at 1.May.2022")
// hidden flags
scanCmd.PersistentFlags().MarkHidden("host-scan-yaml") // this flag should be used very cautiously. We prefer users will not use it at all unless the DaemonSet can not run pods on the nodes
scanCmd.PersistentFlags().MarkHidden("omit-raw-resources")
// Retrieve --kubeconfig flag from https://github.com/kubernetes/kubectl/blob/master/pkg/cmd/cmd.go
scanCmd.PersistentFlags().AddGoFlag(flag.Lookup("kubeconfig"))

View File

@@ -24,91 +24,91 @@ func TestExceedsSeverity(t *testing.T) {
{
Description: "Critical failed resource should exceed Critical threshold",
ScanInfo: &cautils.ScanInfo{FailThresholdSeverity: "critical"},
SeverityCounters: &reportsummary.SeverityCounters{ResourcesWithCriticalSeverityCounter: 1},
SeverityCounters: &reportsummary.SeverityCounters{CriticalSeverityCounter: 1},
Want: true,
},
{
Description: "Critical failed resource should exceed Critical threshold set as constant",
ScanInfo: &cautils.ScanInfo{FailThresholdSeverity: apis.SeverityCriticalString},
SeverityCounters: &reportsummary.SeverityCounters{ResourcesWithCriticalSeverityCounter: 1},
SeverityCounters: &reportsummary.SeverityCounters{CriticalSeverityCounter: 1},
Want: true,
},
{
Description: "High failed resource should not exceed Critical threshold",
ScanInfo: &cautils.ScanInfo{FailThresholdSeverity: "critical"},
SeverityCounters: &reportsummary.SeverityCounters{ResourcesWithHighSeverityCounter: 1},
SeverityCounters: &reportsummary.SeverityCounters{HighSeverityCounter: 1},
Want: false,
},
{
Description: "Critical failed resource exceeds High threshold",
ScanInfo: &cautils.ScanInfo{FailThresholdSeverity: "high"},
SeverityCounters: &reportsummary.SeverityCounters{ResourcesWithCriticalSeverityCounter: 1},
SeverityCounters: &reportsummary.SeverityCounters{CriticalSeverityCounter: 1},
Want: true,
},
{
Description: "High failed resource exceeds High threshold",
ScanInfo: &cautils.ScanInfo{FailThresholdSeverity: "high"},
SeverityCounters: &reportsummary.SeverityCounters{ResourcesWithHighSeverityCounter: 1},
SeverityCounters: &reportsummary.SeverityCounters{HighSeverityCounter: 1},
Want: true,
},
{
Description: "Medium failed resource does not exceed High threshold",
ScanInfo: &cautils.ScanInfo{FailThresholdSeverity: "high"},
SeverityCounters: &reportsummary.SeverityCounters{ResourcesWithMediumSeverityCounter: 1},
SeverityCounters: &reportsummary.SeverityCounters{MediumSeverityCounter: 1},
Want: false,
},
{
Description: "Critical failed resource exceeds Medium threshold",
ScanInfo: &cautils.ScanInfo{FailThresholdSeverity: "medium"},
SeverityCounters: &reportsummary.SeverityCounters{ResourcesWithCriticalSeverityCounter: 1},
SeverityCounters: &reportsummary.SeverityCounters{CriticalSeverityCounter: 1},
Want: true,
},
{
Description: "High failed resource exceeds Medium threshold",
ScanInfo: &cautils.ScanInfo{FailThresholdSeverity: "medium"},
SeverityCounters: &reportsummary.SeverityCounters{ResourcesWithHighSeverityCounter: 1},
SeverityCounters: &reportsummary.SeverityCounters{HighSeverityCounter: 1},
Want: true,
},
{
Description: "Medium failed resource exceeds Medium threshold",
ScanInfo: &cautils.ScanInfo{FailThresholdSeverity: "medium"},
SeverityCounters: &reportsummary.SeverityCounters{ResourcesWithMediumSeverityCounter: 1},
SeverityCounters: &reportsummary.SeverityCounters{MediumSeverityCounter: 1},
Want: true,
},
{
Description: "Low failed resource does not exceed Medium threshold",
ScanInfo: &cautils.ScanInfo{FailThresholdSeverity: "medium"},
SeverityCounters: &reportsummary.SeverityCounters{ResourcesWithLowSeverityCounter: 1},
SeverityCounters: &reportsummary.SeverityCounters{LowSeverityCounter: 1},
Want: false,
},
{
Description: "Critical failed resource exceeds Low threshold",
ScanInfo: &cautils.ScanInfo{FailThresholdSeverity: "low"},
SeverityCounters: &reportsummary.SeverityCounters{ResourcesWithCriticalSeverityCounter: 1},
SeverityCounters: &reportsummary.SeverityCounters{CriticalSeverityCounter: 1},
Want: true,
},
{
Description: "High failed resource exceeds Low threshold",
ScanInfo: &cautils.ScanInfo{FailThresholdSeverity: "low"},
SeverityCounters: &reportsummary.SeverityCounters{ResourcesWithHighSeverityCounter: 1},
SeverityCounters: &reportsummary.SeverityCounters{HighSeverityCounter: 1},
Want: true,
},
{
Description: "Medium failed resource exceeds Low threshold",
ScanInfo: &cautils.ScanInfo{FailThresholdSeverity: "low"},
SeverityCounters: &reportsummary.SeverityCounters{ResourcesWithMediumSeverityCounter: 1},
SeverityCounters: &reportsummary.SeverityCounters{MediumSeverityCounter: 1},
Want: true,
},
{
Description: "Low failed resource exceeds Low threshold",
ScanInfo: &cautils.ScanInfo{FailThresholdSeverity: "low"},
SeverityCounters: &reportsummary.SeverityCounters{ResourcesWithLowSeverityCounter: 1},
SeverityCounters: &reportsummary.SeverityCounters{LowSeverityCounter: 1},
Want: true,
},
{
Description: "Unknown severity returns an error",
ScanInfo: &cautils.ScanInfo{FailThresholdSeverity: "unknown"},
SeverityCounters: &reportsummary.SeverityCounters{ResourcesWithLowSeverityCounter: 1},
SeverityCounters: &reportsummary.SeverityCounters{LowSeverityCounter: 1},
Want: false,
Error: ErrUnknownSeverity,
},
@@ -139,7 +139,7 @@ func Test_enforceSeverityThresholds(t *testing.T) {
}{
{
"Exceeding Critical severity counter should call the terminating function",
&reportsummary.SeverityCounters{ResourcesWithCriticalSeverityCounter: 1},
&reportsummary.SeverityCounters{CriticalSeverityCounter: 1},
&cautils.ScanInfo{FailThresholdSeverity: apis.SeverityCriticalString},
true,
},

View File

@@ -31,10 +31,11 @@ var (
// getRBACCmd represents the RBAC command
func getRBACCmd(ks meta.IKubescape, submitInfo *v1.Submit) *cobra.Command {
return &cobra.Command{
Use: "rbac",
Example: rbacExamples,
Short: "Submit cluster's Role-Based Access Control(RBAC)",
Long: ``,
Use: "rbac",
Deprecated: "This command is deprecated and will not be supported after 1/Jan/2023. Please use the 'scan' command instead.",
Example: rbacExamples,
Short: "Submit cluster's Role-Based Access Control(RBAC)",
Long: ``,
RunE: func(cmd *cobra.Command, args []string) error {
if err := flagValidationSubmit(submitInfo); err != nil {

View File

@@ -7,16 +7,21 @@ import (
)
var submitCmdExamples = `
# Submit Kubescape scan results file
kubescape submit results
# Submit exceptions file to Kubescape SaaS
kubescape submit exceptions
`
func GetSubmitCmd(ks meta.IKubescape) *cobra.Command {
var submitInfo metav1.Submit
submitCmd := &cobra.Command{
Use: "submit <command>",
Short: "Submit an object to the Kubescape SaaS version",
Long: ``,
Use: "submit <command>",
Short: "Submit an object to the Kubescape SaaS version",
Long: ``,
Example: submitCmdExamples,
Run: func(cmd *cobra.Command, args []string) {
},
}

View File

@@ -0,0 +1,12 @@
package cautils
import (
"fmt"
"strings"
)
func GetControlLink(controlID string) string {
// For CIS Controls, cis-1.1.3 will be transformed to cis-1-1-3 in documentation link.
docLinkID := strings.ReplaceAll(controlID, ".", "-")
return fmt.Sprintf("https://hub.armosec.io/docs/%s", strings.ToLower(docLinkID))
}
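
A minimal usage sketch of the GetControlLink helper added above, showing the dot-to-dash rewrite applied to CIS control IDs; the standalone main function and sample IDs are for illustration only.

package main

import (
	"fmt"
	"strings"
)

// Same logic as the helper above: lowercase the control ID and replace
// dots with dashes so CIS IDs map onto their documentation slugs.
func GetControlLink(controlID string) string {
	docLinkID := strings.ReplaceAll(controlID, ".", "-")
	return fmt.Sprintf("https://hub.armosec.io/docs/%s", strings.ToLower(docLinkID))
}

func main() {
	fmt.Println(GetControlLink("C-0048"))    // https://hub.armosec.io/docs/c-0048
	fmt.Println(GetControlLink("cis-1.1.3")) // https://hub.armosec.io/docs/cis-1-1-3
}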

View File

@@ -18,18 +18,19 @@ type OPASessionObj struct {
K8SResources *K8SResources // input k8s objects
ArmoResource *KSResources // input ARMO objects
AllPolicies *Policies // list of all frameworks
Policies []reporthandling.Framework // list of frameworks to scan
AllResources map[string]workloadinterface.IMetadata // all scanned resources, map[<resource ID>]<resource>
ResourcesResult map[string]resourcesresults.Result // resources scan results, map[<resource ID>]<resource result>
ResourceSource map[string]reporthandling.Source // resources sources, map[<resource ID>]<resource result>
ResourcesPrioritized map[string]prioritization.PrioritizedResource // resources prioritization information, map[<resource ID>]<prioritized resource>
Report *reporthandlingv2.PostureReport // scan results v2 - Remove
Exceptions []armotypes.PostureExceptionPolicy // list of exceptions to apply on scan results
RegoInputData RegoInputData // input passed to rego for scanning. map[<control name>][<input arguments>]
Metadata *reporthandlingv2.Metadata
InfoMap map[string]apis.StatusInfo // Map errors of resources to StatusInfo
ResourceToControlsMap map[string][]string // map[<apigroup/apiversion/resource>] = [<control_IDs>]
SessionID string // SessionID
InfoMap map[string]apis.StatusInfo // Map errors of resources to StatusInfo
ResourceToControlsMap map[string][]string // map[<apigroup/apiversion/resource>] = [<control_IDs>]
SessionID string // SessionID
Policies []reporthandling.Framework // list of frameworks to scan
Exceptions []armotypes.PostureExceptionPolicy // list of exceptions to apply on scan results
OmitRawResources bool // omit raw resources from output
}
func NewOPASessionObj(frameworks []reporthandling.Framework, k8sResources *K8SResources, scanInfo *ScanInfo) *OPASessionObj {
@@ -45,6 +46,7 @@ func NewOPASessionObj(frameworks []reporthandling.Framework, k8sResources *K8SRe
ResourceSource: make(map[string]reporthandling.Source),
SessionID: scanInfo.ScanID,
Metadata: scanInfoToScanMetadata(scanInfo),
OmitRawResources: scanInfo.OmitRawResources,
}
}
@@ -94,6 +96,7 @@ type Exception struct {
type RegoInputData struct {
PostureControlInputs map[string][]string `json:"postureControlInputs"`
DataControlInputs map[string]string `json:"dataControlInputs"`
// ClusterName string `json:"clusterName"`
// K8sConfig RegoK8sConfig `json:"k8sconfig"`
}

View File

@@ -1,6 +1,7 @@
package getter
import (
"fmt"
"strings"
"github.com/armosec/armoapi-go/armotypes"
@@ -55,13 +56,29 @@ func (drp *DownloadReleasedPolicy) ListFrameworks() ([]string, error) {
return drp.gs.GetOPAFrameworksNamesList()
}
func (drp *DownloadReleasedPolicy) ListControls(listType ListType) ([]string, error) {
switch listType {
case ListID:
return drp.gs.GetOPAControlsIDsList()
default:
return drp.gs.GetOPAControlsNamesList()
func (drp *DownloadReleasedPolicy) ListControls() ([]string, error) {
controlsIDsList, err := drp.gs.GetOPAControlsIDsList()
if err != nil {
return []string{}, err
}
controlsNamesList, err := drp.gs.GetOPAControlsNamesList()
if err != nil {
return []string{}, err
}
controls, err := drp.gs.GetOPAControls()
if err != nil {
return []string{}, err
}
var controlsFrameworksList [][]string
for _, control := range controls {
controlsFrameworksList = append(controlsFrameworksList, control.FrameworkNames)
}
controlsNamesWithIDsandFrameworksList := make([]string, len(controlsIDsList))
// by design all slices have the same length
for i := range controlsIDsList {
controlsNamesWithIDsandFrameworksList[i] = fmt.Sprintf("%v|%v|%v", controlsIDsList[i], controlsNamesList[i], strings.Join(controlsFrameworksList[i], ","))
}
return controlsNamesWithIDsandFrameworksList, nil
}
func (drp *DownloadReleasedPolicy) GetControlsInputs(clusterName string) (map[string][]string, error) {
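
The refactored ListControls above now returns one pipe-joined string per control ("<ID>|<name>|<frameworks>"), which the list command later splits into table rows. A hedged, self-contained sketch of that round trip (the sample values are made up):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Joining, as DownloadReleasedPolicy.ListControls does: ID, name and a
	// comma-separated framework list, separated by '|'.
	entry := fmt.Sprintf("%v|%v|%v", "C-0048", "HostPath mount", strings.Join([]string{"NSA", "MITRE"}, ","))

	// Splitting, as the list command does when it builds table rows.
	parts := strings.Split(entry, "|")
	id, name, frameworks := parts[0], parts[1], parts[2]
	fmt.Println(id, name, frameworks) // C-0048 HostPath mount NSA,MITRE
}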

View File

@@ -6,19 +6,13 @@ import (
"github.com/kubescape/opa-utils/reporthandling/attacktrack/v1alpha1"
)
// supported listing
type ListType string
const ListID ListType = "id"
const ListName ListType = "name"
type IPolicyGetter interface {
GetFramework(name string) (*reporthandling.Framework, error)
GetFrameworks() ([]reporthandling.Framework, error)
GetControl(name string) (*reporthandling.Control, error)
ListFrameworks() ([]string, error)
ListControls(ListType) ([]string, error)
ListControls() ([]string, error)
}
type IExceptionsGetter interface {

View File

@@ -4,7 +4,7 @@ import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"io"
"net/http"
"strings"
"time"
@@ -306,7 +306,7 @@ func (api *KSCloudAPI) ListFrameworks() ([]string, error) {
return frameworkList, nil
}
func (api *KSCloudAPI) ListControls(l ListType) ([]string, error) {
func (api *KSCloudAPI) ListControls() ([]string, error) {
return nil, fmt.Errorf("control api is not public")
}
@@ -358,7 +358,7 @@ func (api *KSCloudAPI) Login() error {
return fmt.Errorf("error authenticating: %d", resp.StatusCode)
}
responseBody, err := ioutil.ReadAll(resp.Body)
responseBody, err := io.ReadAll(resp.Body)
if err != nil {
return err
}

View File

@@ -9,6 +9,7 @@ import (
"github.com/armosec/armoapi-go/armotypes"
"github.com/kubescape/opa-utils/reporthandling"
"github.com/kubescape/opa-utils/reporthandling/attacktrack/v1alpha1"
)
// =======================================================================================================================
@@ -109,7 +110,7 @@ func (lp *LoadPolicy) ListFrameworks() ([]string, error) {
return fwNames, nil
}
func (lp *LoadPolicy) ListControls(listType ListType) ([]string, error) {
func (lp *LoadPolicy) ListControls() ([]string, error) {
// TODO - Support
return []string{}, fmt.Errorf("loading controls list from file is not supported")
}
@@ -152,3 +153,18 @@ func (lp *LoadPolicy) filePath() string {
}
return ""
}
func (lp *LoadPolicy) GetAttackTracks() ([]v1alpha1.AttackTrack, error) {
attackTracks := []v1alpha1.AttackTrack{}
f, err := os.ReadFile(lp.filePath())
if err != nil {
return nil, err
}
if err := json.Unmarshal(f, &attackTracks); err != nil {
return nil, err
}
return attackTracks, nil
}
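
The new LoadPolicy.GetAttackTracks above reads the cached attack-tracks file and unmarshals it into typed objects. A hedged sketch of that load path with a trimmed stand-in type (the struct, its json tag, and the file path are assumptions, not the v1alpha1 API):

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// attackTrack is a trimmed stand-in for v1alpha1.AttackTrack, just enough
// to show the load-from-file path added above.
type attackTrack struct {
	Name string `json:"name"`
}

func main() {
	f, err := os.ReadFile("attack-tracks.json") // path is an assumption
	if err != nil {
		fmt.Println(err)
		return
	}
	tracks := []attackTrack{}
	if err := json.Unmarshal(f, &tracks); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(len(tracks), "attack tracks loaded")
}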

View File

@@ -0,0 +1,16 @@
package cautils
import (
"testing"
giturl "github.com/kubescape/go-git-url"
"github.com/stretchr/testify/require"
)
func TestEnsureRemoteParsed(t *testing.T) {
const remote = "git@gitlab.com:foobar/gitlab-tests/sample-project.git"
require.NotPanics(t, func() {
_, _ = giturl.NewGitURL(remote)
})
}
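
A hedged sketch of the same parsing path outside the test harness, using the kubescape/go-git-url module referenced above. It only relies on NewGitURL returning a value and an error, and the remote string comes straight from the regression test.

package main

import (
	"fmt"

	giturl "github.com/kubescape/go-git-url"
)

func main() {
	// The GitLab SSH remote from the regression test above; after the fix
	// this must return an error (or a parsed URL) instead of panicking.
	const remote = "git@gitlab.com:foobar/gitlab-tests/sample-project.git"
	if _, err := giturl.NewGitURL(remote); err != nil {
		fmt.Println("could not parse remote:", err)
		return
	}
	fmt.Println("remote parsed without panicking")
}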

View File

@@ -3,7 +3,6 @@ package cautils
import (
_ "embed"
"encoding/json"
"io/ioutil"
"os"
"path/filepath"
"strings"
@@ -39,7 +38,7 @@ func (s *HelmChartTestSuite) SetupSuite() {
}
var obj interface{}
file, _ := ioutil.ReadFile(filepath.Join("testdata", "helm_expected_default_values.json"))
file, _ := os.ReadFile(filepath.Join("testdata", "helm_expected_default_values.json"))
_ = json.Unmarshal([]byte(file), &obj)
s.expectedDefaultValues = obj.(map[string]interface{})
}

View File

@@ -6,10 +6,10 @@ import (
"strings"
"time"
"github.com/armosec/go-git-url/apis"
gitv5 "github.com/go-git/go-git/v5"
configv5 "github.com/go-git/go-git/v5/config"
plumbingv5 "github.com/go-git/go-git/v5/plumbing"
"github.com/kubescape/go-git-url/apis"
git2go "github.com/libgit2/git2go/v33"
)

View File

@@ -3,7 +3,6 @@ package cautils
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
@@ -11,7 +10,7 @@ import (
"github.com/armosec/armoapi-go/armotypes"
apisv1 "github.com/kubescape/opa-utils/httpserver/apis/v1"
giturl "github.com/armosec/go-git-url"
giturl "github.com/kubescape/go-git-url"
logger "github.com/kubescape/go-logger"
"github.com/kubescape/go-logger/helpers"
"github.com/kubescape/k8s-interface/k8sinterface"
@@ -40,7 +39,8 @@ const (
// ScanCluster string = "cluster"
// ScanLocalFiles string = "yaml"
localControlInputsFilename string = "controls-inputs.json"
localExceptionsFilename string = "exceptions.json"
LocalExceptionsFilename string = "exceptions.json"
LocalAttackTracksFilename string = "attack-tracks.json"
)
type BoolPtrFlag struct {
@@ -128,6 +128,7 @@ type ScanInfo struct {
KubeContext string // context name
FrameworkScan bool // false if scanning control
ScanAll bool // true if scan all frameworks
OmitRawResources bool // true if omit raw resources from the output
}
type Getters struct {
@@ -159,7 +160,7 @@ func (scanInfo *ScanInfo) setUseArtifactsFrom() {
scanInfo.UseArtifactsFrom = dir
}
// set frameworks files
files, err := ioutil.ReadDir(scanInfo.UseArtifactsFrom)
files, err := os.ReadDir(scanInfo.UseArtifactsFrom)
if err != nil {
logger.L().Fatal("failed to read files from directory", helpers.String("dir", scanInfo.UseArtifactsFrom), helpers.Error(err))
}
@@ -176,7 +177,7 @@ func (scanInfo *ScanInfo) setUseArtifactsFrom() {
// set config-inputs file
scanInfo.ControlsInputs = filepath.Join(scanInfo.UseArtifactsFrom, localControlInputsFilename)
// set exceptions
scanInfo.UseExceptions = filepath.Join(scanInfo.UseArtifactsFrom, localExceptionsFilename)
scanInfo.UseExceptions = filepath.Join(scanInfo.UseArtifactsFrom, LocalExceptionsFilename)
}
func (scanInfo *ScanInfo) setUseFrom() {
@@ -419,7 +420,7 @@ func metadataGitLocal(input string) (*reporthandlingv2.RepoContextMetadata, erro
Date: commit.Committer.Date,
CommitterName: commit.Committer.Name,
}
context.LocalRootPath = getAbsPath(input)
context.LocalRootPath, _ = gitParser.GetRootDir()
return context, nil
}

View File

@@ -14,8 +14,9 @@ import (
"golang.org/x/mod/semver"
)
const SKIP_VERSION_CHECK_DEPRECATED = "KUBESCAPE_SKIP_UPDATE_CHECK"
const SKIP_VERSION_CHECK = "KS_SKIP_UPDATE_CHECK"
const SKIP_VERSION_CHECK_DEPRECATED_ENV = "KUBESCAPE_SKIP_UPDATE_CHECK"
const SKIP_VERSION_CHECK_ENV = "KS_SKIP_UPDATE_CHECK"
const CLIENT_ENV = "KS_CLIENT"
var BuildNumber string
var Client string
@@ -31,9 +32,14 @@ func NewIVersionCheckHandler() IVersionCheckHandler {
if BuildNumber == "" {
logger.L().Warning("unknown build number, this might affect your scan results. Please make sure you are updated to latest version")
}
if v, ok := os.LookupEnv(SKIP_VERSION_CHECK); ok && boolutils.StringToBool(v) {
if v, ok := os.LookupEnv(CLIENT_ENV); ok && v != "" {
Client = v
}
if v, ok := os.LookupEnv(SKIP_VERSION_CHECK_ENV); ok && boolutils.StringToBool(v) {
return NewVersionCheckHandlerMock()
} else if v, ok := os.LookupEnv(SKIP_VERSION_CHECK_DEPRECATED); ok && boolutils.StringToBool(v) {
} else if v, ok := os.LookupEnv(SKIP_VERSION_CHECK_DEPRECATED_ENV); ok && boolutils.StringToBool(v) {
return NewVersionCheckHandlerMock()
}
return NewVersionCheckHandler()
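
A hedged sketch of how the environment switches renamed above could be read by a caller; the env variable names come from the diff, while the boolean parsing here uses the standard library rather than the project's boolutils helper.

package main

import (
	"fmt"
	"os"
	"strconv"
)

func main() {
	// Env switches referenced above: KS_SKIP_UPDATE_CHECK (new name),
	// KUBESCAPE_SKIP_UPDATE_CHECK (deprecated) and KS_CLIENT.
	skip := false
	for _, name := range []string{"KS_SKIP_UPDATE_CHECK", "KUBESCAPE_SKIP_UPDATE_CHECK"} {
		if v, ok := os.LookupEnv(name); ok {
			if b, err := strconv.ParseBool(v); err == nil && b {
				skip = true
			}
		}
	}
	client := os.Getenv("KS_CLIENT") // optional client-type override
	fmt.Println("skip version check:", skip, "client:", client)
}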

View File

@@ -19,6 +19,7 @@ var (
"KubeletInfo",
"KubeProxyInfo",
"ControlPlaneInfo",
"CloudProviderInfo",
}
CloudResources = []string{
"ClusterDescribe",

View File

@@ -19,6 +19,7 @@ var downloadFunc = map[string]func(*metav1.DownloadInfo) error{
"control": downloadControl,
"framework": downloadFramework,
"artifacts": downloadArtifacts,
"attack-tracks": downloadAttackTracks,
}
func DownloadSupportCommands() []string {
@@ -70,6 +71,7 @@ func downloadArtifacts(downloadInfo *metav1.DownloadInfo) error {
"controls-inputs": downloadConfigInputs,
"exceptions": downloadExceptions,
"framework": downloadFramework,
"attack-tracks": downloadAttackTracks,
}
for artifact := range artifacts {
if err := downloadArtifact(&metav1.DownloadInfo{Target: artifact, Path: downloadInfo.Path, FileName: fmt.Sprintf("%s.json", artifact)}, artifacts); err != nil {
@@ -108,12 +110,12 @@ func downloadExceptions(downloadInfo *metav1.DownloadInfo) error {
exceptionsGetter := getExceptionsGetter("", tenant.GetAccountID(), nil)
exceptions := []armotypes.PostureExceptionPolicy{}
if tenant.GetAccountID() != "" {
exceptions, err = exceptionsGetter.GetExceptions(tenant.GetContextName())
if err != nil {
return err
}
exceptions, err = exceptionsGetter.GetExceptions(tenant.GetContextName())
if err != nil {
return err
}
if downloadInfo.FileName == "" {
downloadInfo.FileName = fmt.Sprintf("%s.json", downloadInfo.Target)
}
@@ -126,6 +128,30 @@ func downloadExceptions(downloadInfo *metav1.DownloadInfo) error {
return nil
}
func downloadAttackTracks(downloadInfo *metav1.DownloadInfo) error {
var err error
tenant := getTenantConfig(&downloadInfo.Credentials, "", "", getKubernetesApi())
attackTracksGetter := getAttackTracksGetter(tenant.GetAccountID(), nil)
attackTracks, err := attackTracksGetter.GetAttackTracks()
if err != nil {
return err
}
if downloadInfo.FileName == "" {
downloadInfo.FileName = fmt.Sprintf("%s.json", downloadInfo.Target)
}
// save in file
err = getter.SaveInFile(attackTracks, filepath.Join(downloadInfo.Path, downloadInfo.FileName))
if err != nil {
return err
}
logger.L().Success("Downloaded", helpers.String("attack tracks", downloadInfo.Target), helpers.String("path", filepath.Join(downloadInfo.Path, downloadInfo.FileName)))
return nil
}
func downloadFramework(downloadInfo *metav1.DownloadInfo) error {
tenant := getTenantConfig(&downloadInfo.Credentials, "", "", getKubernetesApi())

View File

@@ -45,8 +45,9 @@ func getExceptionsGetter(useExceptions string, accountID string, downloadRelease
if downloadReleasedPolicy == nil {
downloadReleasedPolicy = getter.NewDownloadReleasedPolicy()
}
if err := downloadReleasedPolicy.SetRegoObjects(); err != nil {
logger.L().Warning("failed to get exceptions from github release, this may affect the scanning results", helpers.Error(err))
if err := downloadReleasedPolicy.SetRegoObjects(); err != nil { // if failed to pull attack tracks, fallback to cache
logger.L().Warning("failed to get exceptions from github release, loading attack tracks from cache", helpers.Error(err))
return getter.NewLoadPolicy([]string{getter.GetDefaultPath(cautils.LocalExceptionsFilename)})
}
return downloadReleasedPolicy
@@ -247,8 +248,9 @@ func getAttackTracksGetter(accountID string, downloadReleasedPolicy *getter.Down
if downloadReleasedPolicy == nil {
downloadReleasedPolicy = getter.NewDownloadReleasedPolicy()
}
if err := downloadReleasedPolicy.SetRegoObjects(); err != nil {
logger.L().Warning("failed to get attack tracks from github release, this may affect the scanning results", helpers.Error(err))
if err := downloadReleasedPolicy.SetRegoObjects(); err != nil { // if failed to pull attack tracks, fallback to cache
logger.L().Warning("failed to get attack tracks from github release, loading attack tracks from cache", helpers.Error(err))
return getter.NewLoadPolicy([]string{getter.GetDefaultPath(cautils.LocalAttackTracksFilename)})
}
return downloadReleasedPolicy
}
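
Both getters above follow the same pattern: try to refresh policy objects from the GitHub release and, on failure, fall back to the locally cached JSON artifact. A minimal, hedged sketch of that pattern (the helper names and cache path are illustrative, not the actual kubescape functions):

package main

import (
	"errors"
	"fmt"
	"os"
	"path/filepath"
)

// fetchFromRelease stands in for pulling released policies from GitHub;
// here it always fails so the fallback path is exercised.
func fetchFromRelease() ([]byte, error) {
	return nil, errors.New("network unavailable")
}

// loadFromCache reads a locally cached artifact such as attack-tracks.json;
// the cache directory used here is an assumption for the sketch.
func loadFromCache(name string) ([]byte, error) {
	return os.ReadFile(filepath.Join(os.Getenv("HOME"), ".kubescape", name))
}

func main() {
	data, err := fetchFromRelease()
	if err != nil {
		fmt.Println("falling back to cache:", err)
		data, err = loadFromCache("attack-tracks.json")
	}
	fmt.Println(len(data), err)
}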

View File

@@ -6,8 +6,11 @@ import (
"sort"
"strings"
"github.com/kubescape/kubescape/v2/core/cautils/getter"
"github.com/kubescape/kubescape/v2/core/cautils"
metav1 "github.com/kubescape/kubescape/v2/core/meta/datastructures/v1"
"github.com/kubescape/kubescape/v2/core/pkg/resultshandling/printer"
v2 "github.com/kubescape/kubescape/v2/core/pkg/resultshandling/printer/v2"
"github.com/olekukonko/tablewriter"
)
var listFunc = map[string]func(*metav1.ListPolicies) ([]string, error){
@@ -16,7 +19,7 @@ var listFunc = map[string]func(*metav1.ListPolicies) ([]string, error){
"exceptions": listExceptions,
}
var listFormatFunc = map[string]func(*metav1.ListPolicies, []string){
var listFormatFunc = map[string]func(string, []string){
"pretty-print": prettyPrintListFormat,
"json": jsonListFormat,
}
@@ -29,14 +32,18 @@ func ListSupportActions() []string {
return commands
}
func (ks *Kubescape) List(listPolicies *metav1.ListPolicies) error {
if f, ok := listFunc[listPolicies.Target]; ok {
policies, err := f(listPolicies)
if policyListerFunc, ok := listFunc[listPolicies.Target]; ok {
policies, err := policyListerFunc(listPolicies)
if err != nil {
return err
}
sort.Strings(policies)
listFormatFunc[listPolicies.Format](listPolicies, policies)
if listFormatFunction, ok := listFormatFunc[listPolicies.Format]; ok {
listFormatFunction(listPolicies.Target, policies)
} else {
return fmt.Errorf("Invalid format \"%s\", Supported formats: 'pretty-print'/'json' ", listPolicies.Format)
}
return nil
}
@@ -45,20 +52,16 @@ func (ks *Kubescape) List(listPolicies *metav1.ListPolicies) error {
func listFrameworks(listPolicies *metav1.ListPolicies) ([]string, error) {
tenant := getTenantConfig(&listPolicies.Credentials, "", "", getKubernetesApi()) // change k8sinterface
g := getPolicyGetter(nil, tenant.GetTenantEmail(), true, nil)
policyGetter := getPolicyGetter(nil, tenant.GetTenantEmail(), true, nil)
return listFrameworksNames(g), nil
return listFrameworksNames(policyGetter), nil
}
func listControls(listPolicies *metav1.ListPolicies) ([]string, error) {
tenant := getTenantConfig(&listPolicies.Credentials, "", "", getKubernetesApi()) // change k8sinterface
g := getPolicyGetter(nil, tenant.GetTenantEmail(), false, nil)
l := getter.ListName
if listPolicies.ListIDs {
l = getter.ListID
}
return g.ListControls(l)
policyGetter := getPolicyGetter(nil, tenant.GetTenantEmail(), false, nil)
return policyGetter.ListControls()
}
func listExceptions(listPolicies *metav1.ListPolicies) ([]string, error) {
@@ -77,12 +80,73 @@ func listExceptions(listPolicies *metav1.ListPolicies) ([]string, error) {
return exceptionsNames, nil
}
func prettyPrintListFormat(listPolicies *metav1.ListPolicies, policies []string) {
sep := "\n * "
fmt.Printf("Supported %s:%s%s\n", listPolicies.Target, sep, strings.Join(policies, sep))
func prettyPrintListFormat(targetPolicy string, policies []string) {
if targetPolicy == "controls" {
prettyPrintControls(policies)
return
}
header := fmt.Sprintf("Supported %s", targetPolicy)
policyTable := tablewriter.NewWriter(printer.GetWriter(""))
policyTable.SetAutoWrapText(true)
policyTable.SetHeader([]string{header})
policyTable.SetHeaderLine(true)
policyTable.SetRowLine(true)
data := v2.Matrix{}
controlRows := generatePolicyRows(policies)
data = append(data, controlRows...)
policyTable.SetAlignment(tablewriter.ALIGN_CENTER)
policyTable.AppendBulk(data)
policyTable.Render()
}
func jsonListFormat(listPolicies *metav1.ListPolicies, policies []string) {
func jsonListFormat(targetPolicy string, policies []string) {
j, _ := json.MarshalIndent(policies, "", " ")
fmt.Printf("%s\n", j)
}
func prettyPrintControls(policies []string) {
controlsTable := tablewriter.NewWriter(printer.GetWriter(""))
controlsTable.SetAutoWrapText(true)
controlsTable.SetHeader([]string{"Control ID", "Control Name", "Docs", "Frameworks"})
controlsTable.SetHeaderLine(true)
controlsTable.SetRowLine(true)
data := v2.Matrix{}
controlRows := generateControlRows(policies)
data = append(data, controlRows...)
controlsTable.AppendBulk(data)
controlsTable.Render()
}
func generateControlRows(policies []string) [][]string {
rows := [][]string{}
for _, control := range policies {
idAndControlAndFrameworks := strings.Split(control, "|")
id, control, framework := idAndControlAndFrameworks[0], idAndControlAndFrameworks[1], idAndControlAndFrameworks[2]
docs := cautils.GetControlLink(id)
currentRow := []string{id, control, docs, framework}
rows = append(rows, currentRow)
}
return rows
}
func generatePolicyRows(policies []string) [][]string {
rows := [][]string{}
for _, policy := range policies {
currentRow := []string{policy}
rows = append(rows, currentRow)
}
return rows
}
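
For reference, a minimal, self-contained sketch of the table rendering used above with github.com/olekukonko/tablewriter; the rows are sample data in the shape generateControlRows produces, and the printer writer is replaced with os.Stdout.

package main

import (
	"os"

	"github.com/olekukonko/tablewriter"
)

func main() {
	table := tablewriter.NewWriter(os.Stdout)
	table.SetHeader([]string{"Control ID", "Control Name", "Docs", "Frameworks"})
	table.SetHeaderLine(true)
	table.SetRowLine(true)
	table.SetAutoWrapText(true)

	// Sample row; real rows are built from the "ID|name|frameworks" strings.
	table.AppendBulk([][]string{
		{"C-0048", "HostPath mount", "https://hub.armosec.io/docs/c-0048", "NSA"},
	})
	table.Render()
}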

View File

@@ -54,6 +54,10 @@ func getInterfaces(scanInfo *cautils.ScanInfo) componentInterfaces {
if err := tenantConfig.SetTenant(); err != nil {
logger.L().Error(err.Error())
}
if scanInfo.OmitRawResources {
logger.L().Warning("omit-raw-resources flag will be ignored in submit mode")
}
}
// ================== version testing ======================================

View File

@@ -4,7 +4,6 @@ import "github.com/kubescape/kubescape/v2/core/cautils"
type ListPolicies struct {
Target string
ListIDs bool
Format string
Credentials cautils.Credentials
}

View File

@@ -8,7 +8,7 @@ import (
"github.com/kubescape/opa-utils/reporthandling"
)
var mockControl_0006 = `{"guid":"","name":"Allowed hostPath","attributes":{"armoBuiltin":true},"id":"C-0006","controlID":"C-0006","creationTime":"","description":"Mounting host directory to the container can be abused to get access to sensitive data and gain persistence on the host machine.","remediation":"Refrain from using host path mount.","rules":[{"guid":"","name":"alert-rw-hostpath","attributes":{"armoBuiltin":true,"m$K8sThreatMatrix":"Persistence::Writable hostPath mount, Lateral Movement::Writable volume mounts on the host"},"creationTime":"","rule":"package armo_builtins\n\n# input: pod\n# apiversion: v1\n# does: returns hostPath volumes\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volumes := pod.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := pod.spec.containers[i]\n\tvolumeMount := container.volumeMounts[k]\n\tvolumeMount.name == volume.name\n\tbegginingOfPath := \"spec.\"\n\tresult := isRWMount(volumeMount, begginingOfPath, i, k)\n\n podname := pod.metadata.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod: %v has: %v as hostPath volume\", [podname, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n#handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volumes := wl.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := wl.spec.template.spec.containers[i]\n\tvolumeMount := container.volumeMounts[k]\n\tvolumeMount.name == volume.name\n\tbegginingOfPath := \"spec.template.spec.\"\n\tresult := isRWMount(volumeMount, begginingOfPath, i, k)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t\n\t}\n}\n\n#handles CronJobs\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n volumes := wl.spec.jobTemplate.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tvolumeMount := container.volumeMounts[k]\n\tvolumeMount.name == volume.name\n\tbegginingOfPath := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := isRWMount(volumeMount, begginingOfPath, i, k)\n\n\tmsga := {\n\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\"packagename\": \"armo_builtins\",\n\t\"alertScore\": 7,\n\t\"failedPaths\": [result],\n\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nisRWMount(mount, begginingOfPath, i, k) = path {\n not mount.readOnly == true\n not mount.readOnly == false\n path = \"\"\n}\nisRWMount(mount, begginingOfPath, i, k) = path {\n mount.readOnly == false\n path = sprintf(\"%vcontainers[%v].volumeMounts[%v].readOnly\", [begginingOfPath, format_int(i, 10), format_int(k, 10)])\n} ","resourceEnumerator":"","ruleLanguage":"Rego","match":[{"apiGroups":["*"],"apiVersions":["*"],"resources":["Deployment","ReplicaSet","DaemonSet","StatefulSet","Job","CronJob","Pod"]}],"ruleDependencies":[{"packageName":"cautils"},{"packageName":"kubernetes.api.client"}],"configInputs":null,"controlConfigInputs":null,"description":"determines if 
any workload contains a hostPath volume with rw permissions","remediation":"Set the readOnly field of the mount to true","ruleQuery":""}],"rulesIDs":[""],"baseScore":6}`
var mockControl_0006 = `{"guid":"","name":"HostPath mount","attributes":{"armoBuiltin":true},"id":"C-0048","controlID":"C-0048","creationTime":"","description":"Mounting host directory to the container can be abused to get access to sensitive data and gain persistence on the host machine.","remediation":"Refrain from using host path mount.","rules":[{"guid":"","name":"alert-rw-hostpath","attributes":{"armoBuiltin":true,"m$K8sThreatMatrix":"Persistence::Writable hostPath mount, Lateral Movement::Writable volume mounts on the host"},"creationTime":"","rule":"package armo_builtins\n\n# input: pod\n# apiversion: v1\n# does: returns hostPath volumes\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volumes := pod.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := pod.spec.containers[i]\n\tvolumeMount := container.volumeMounts[k]\n\tvolumeMount.name == volume.name\n\tbegginingOfPath := \"spec.\"\n\tresult := isRWMount(volumeMount, begginingOfPath, i, k)\n\n podname := pod.metadata.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod: %v has: %v as hostPath volume\", [podname, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n#handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volumes := wl.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := wl.spec.template.spec.containers[i]\n\tvolumeMount := container.volumeMounts[k]\n\tvolumeMount.name == volume.name\n\tbegginingOfPath := \"spec.template.spec.\"\n\tresult := isRWMount(volumeMount, begginingOfPath, i, k)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t\n\t}\n}\n\n#handles CronJobs\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n volumes := wl.spec.jobTemplate.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tvolumeMount := container.volumeMounts[k]\n\tvolumeMount.name == volume.name\n\tbegginingOfPath := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := isRWMount(volumeMount, begginingOfPath, i, k)\n\n\tmsga := {\n\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\"packagename\": \"armo_builtins\",\n\t\"alertScore\": 7,\n\t\"failedPaths\": [result],\n\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nisRWMount(mount, begginingOfPath, i, k) = path {\n not mount.readOnly == true\n not mount.readOnly == false\n path = \"\"\n}\nisRWMount(mount, begginingOfPath, i, k) = path {\n mount.readOnly == false\n path = sprintf(\"%vcontainers[%v].volumeMounts[%v].readOnly\", [begginingOfPath, format_int(i, 10), format_int(k, 10)])\n} ","resourceEnumerator":"","ruleLanguage":"Rego","match":[{"apiGroups":["*"],"apiVersions":["*"],"resources":["Deployment","ReplicaSet","DaemonSet","StatefulSet","Job","CronJob","Pod"]}],"ruleDependencies":[{"packageName":"cautils"},{"packageName":"kubernetes.api.client"}],"configInputs":null,"controlConfigInputs":null,"description":"determines if 
any workload contains a hostPath volume with rw permissions","remediation":"Set the readOnly field of the mount to true","ruleQuery":""}],"rulesIDs":[""],"baseScore":6}`
var mockControl_0044 = `{"guid":"","name":"Container hostPort","attributes":{"armoBuiltin":true},"id":"C-0044","controlID":"C-0044","creationTime":"","description":"Configuring hostPort limits you to a particular port, and if any two workloads that specify the same HostPort they cannot be deployed to the same node. Therefore, if the number of replica of such workload is higher than the number of nodes, the deployment will fail.","remediation":"Avoid usage of hostPort unless it is absolutely necessary. Use NodePort / ClusterIP instead.","rules":[{"guid":"","name":"container-hostPort","attributes":{"armoBuiltin":true},"creationTime":"","rule":"package armo_builtins\n\n\n# Fails if pod has container with hostPort\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tbegginingOfPath := \"spec.\"\n\tpath := isHostPort(container, i, begginingOfPath)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v has Host-port\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with hostPort\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbegginingOfPath := \"spec.template.spec.\"\n path := isHostPort(container, i, begginingOfPath)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has container with hostPort\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbegginingOfPath := \"spec.jobTemplate.spec.template.spec.\"\n path := isHostPort(container, i, begginingOfPath)\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nisHostPort(container, i, begginingOfPath) = path {\n\tpath = [sprintf(\"%vcontainers[%v].ports[%v].hostPort\", [begginingOfPath, format_int(i, 10), format_int(j, 10)]) | port = container.ports[j]; port.hostPort]\n\tcount(path) > 0\n}\n","resourceEnumerator":"","ruleLanguage":"Rego","match":[{"apiGroups":["*"],"apiVersions":["*"],"resources":["Deployment","ReplicaSet","DaemonSet","StatefulSet","Job","Pod","CronJob"]}],"ruleDependencies":[],"configInputs":null,"controlConfigInputs":null,"description":"fails if container has hostPort","remediation":"Make sure you do not configure hostPort for the container, if necessary use NodePort / ClusterIP","ruleQuery":"armo_builtins"}],"rulesIDs":[""],"baseScore":4}`
@@ -31,7 +31,7 @@ func MockFramework_0013() *reporthandling.Framework {
return fw
}
// MockFramework_0006_0013 mock control 0013 and control 0006 - "Non-root containers" and "Allowed hostPath"
// MockFramework_0006_0013 mock control 0013 and control 0006 - "Non-root containers" and "HostPath mount"
func MockFramework_0006_0013() *reporthandling.Framework {
fw := &reporthandling.Framework{
PortalBase: armotypes.PortalBase{

View File

@@ -36,7 +36,7 @@ spec:
effect: NoSchedule
containers:
- name: host-sensor
image: quay.io/kubescape/host-scanner:v1.0.32
image: quay.io/kubescape/host-scanner:v1.0.39
securityContext:
privileged: true
readOnlyRootFilesystem: true

View File

@@ -3,6 +3,7 @@ package hostsensorutils
import (
"encoding/json"
"fmt"
"strings"
"sync"
logger "github.com/kubescape/go-logger"
@@ -99,6 +100,30 @@ func (hsh *HostSensorHandler) sendAllPodsHTTPGETRequest(path, requestKind string
return res, nil
}
// return host-scanner version
func (hsh *HostSensorHandler) GetVersion() (string, error) {
// loop over pods and port-forward it to each of them
podList, err := hsh.getPodList()
if err != nil {
return "", fmt.Errorf("failed to sendAllPodsHTTPGETRequest: %v", err)
}
// initialization of the channels
hsh.workerPool.init(len(podList))
hsh.workerPool.hostSensorApplyJobs(podList, "/version", "version")
for job := range hsh.workerPool.jobs {
resBytes, err := hsh.HTTPGetToPod(job.podName, job.path)
if err != nil {
return "", err
} else {
version := strings.ReplaceAll(string(resBytes), "\"", "")
version = strings.ReplaceAll(version, "\n", "")
return version, nil
}
}
return "", nil
}
// return list of LinuxKernelVariables
func (hsh *HostSensorHandler) GetKernelVariables() ([]hostsensor.HostSensorDataEnvelope, error) {
// loop over pods and port-forward it to each of them
@@ -135,6 +160,12 @@ func (hsh *HostSensorHandler) GetControlPlaneInfo() ([]hostsensor.HostSensorData
return hsh.sendAllPodsHTTPGETRequest("/controlPlaneInfo", ControlPlaneInfo)
}
// return list of CloudProviderInfo
func (hsh *HostSensorHandler) GetCloudProviderInfo() ([]hostsensor.HostSensorDataEnvelope, error) {
// loop over pods and port-forward it to each of them
return hsh.sendAllPodsHTTPGETRequest("/cloudProviderInfo", CloudProviderInfo)
}
// return list of KubeletCommandLine
func (hsh *HostSensorHandler) GetKubeletCommandLine() ([]hostsensor.HostSensorDataEnvelope, error) {
// loop over pods and port-forward it to each of them
@@ -192,6 +223,16 @@ func (hsh *HostSensorHandler) CollectResources() ([]hostsensor.HostSensorDataEnv
var kcData []hostsensor.HostSensorDataEnvelope
var err error
logger.L().Debug("Accessing host scanner")
version, err := hsh.GetVersion()
if err != nil {
logger.L().Warning(err.Error())
}
if len(version) > 0 {
logger.L().Info("Host scanner version : " + version)
} else {
logger.L().Info("Unknown host scanner version")
}
//
kcData, err = hsh.GetKubeletConfigurations()
if err != nil {
addInfoToMap(KubeletConfiguration, infoMap, err)
@@ -285,6 +326,16 @@ func (hsh *HostSensorHandler) CollectResources() ([]hostsensor.HostSensorDataEnv
res = append(res, kcData...)
}
// GetCloudProviderInfo
kcData, err = hsh.GetCloudProviderInfo()
if err != nil {
addInfoToMap(CloudProviderInfo, infoMap, err)
logger.L().Warning(err.Error())
}
if len(kcData) > 0 {
res = append(res, kcData...)
}
logger.L().Debug("Done reading information from host scanner")
return res, infoMap, nil
}
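
A minimal, self-contained sketch (not the kubescape handler itself) of the version-string normalization GetVersion performs above, assuming the host-scanner /version endpoint answers with a JSON-quoted string such as `"v1.0.39"` followed by a newline:

```go
package main

import (
	"fmt"
	"strings"
)

// normalizeVersion strips the JSON quotes and the trailing newline from the
// raw /version response body, mirroring the two ReplaceAll calls above.
func normalizeVersion(raw []byte) string {
	version := strings.ReplaceAll(string(raw), "\"", "")
	return strings.ReplaceAll(version, "\n", "")
}

func main() {
	fmt.Println(normalizeVersion([]byte("\"v1.0.39\"\n"))) // prints: v1.0.39
}
```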

View File

@@ -47,7 +47,10 @@ func (wp *workerPool) hostSensorWorker(hsh *HostSensorHandler, wg *sync.WaitGrou
for job := range wp.jobs {
hostSensorDataEnvelope, err := hsh.getResourcesFromPod(job.podName, job.nodeName, job.requestKind, job.path)
if err != nil {
logger.L().Error("failed to get data", helpers.String("path", job.path), helpers.String("podName", job.podName), helpers.Error(err))
// TODO: Also add the cloud provider to this condition (the major cloud providers expose no access to the control plane)
if job.path != "/controlPlaneInfo" {
logger.L().Error("failed to get data", helpers.String("path", job.path), helpers.String("podName", job.podName), helpers.Error(err))
}
} else {
wp.results <- hostSensorDataEnvelope
}

View File

@@ -16,6 +16,7 @@ var (
KubeletInfo = "KubeletInfo"
KubeProxyInfo = "KubeProxyInfo"
ControlPlaneInfo = "ControlPlaneInfo"
CloudProviderInfo = "CloudProviderInfo"
MapHostSensorResourceToApiGroup = map[string]string{
KubeletConfiguration: "hostdata.kubescape.cloud/v1beta0",
@@ -28,6 +29,7 @@ var (
KubeletInfo: "hostdata.kubescape.cloud/v1beta0",
KubeProxyInfo: "hostdata.kubescape.cloud/v1beta0",
ControlPlaneInfo: "hostdata.kubescape.cloud/v1beta0",
CloudProviderInfo: "hostdata.kubescape.cloud/v1beta0",
}
)

View File

@@ -35,6 +35,7 @@ type OPAProcessor struct {
func NewOPAProcessor(sessionObj *cautils.OPASessionObj, regoDependenciesData *resources.RegoDependenciesData) *OPAProcessor {
if regoDependenciesData != nil && sessionObj != nil {
regoDependenciesData.PostureControlInputs = sessionObj.RegoInputData.PostureControlInputs
regoDependenciesData.DataControlInputs = sessionObj.RegoInputData.DataControlInputs
}
return &OPAProcessor{
OPASessionObj: sessionObj,
@@ -153,12 +154,16 @@ func (opap *OPAProcessor) processControl(control *reporthandling.Control) (map[s
func (opap *OPAProcessor) processRule(rule *reporthandling.PolicyRule, fixedControlInputs map[string][]string) (map[string]*resourcesresults.ResourceAssociatedRule, error) {
postureControlInputs := opap.regoDependenciesData.GetFilteredPostureControlInputs(rule.ConfigInputs) // get store
dataControlInputs := map[string]string{"cloudProvider": opap.OPASessionObj.Report.ClusterCloudProvider}
// Merge configurable control input and fixed control input
for k, v := range fixedControlInputs {
postureControlInputs[k] = v
}
RuleRegoDependenciesData := resources.RegoDependenciesData{DataControlInputs: dataControlInputs,
PostureControlInputs: postureControlInputs}
inputResources, err := reporthandling.RegoResourcesAggregator(rule, getAllSupportedObjects(opap.K8SResources, opap.ArmoResource, opap.AllResources, rule))
if err != nil {
return nil, fmt.Errorf("error getting aggregated k8sObjects: %s", err.Error())
@@ -185,7 +190,7 @@ func (opap *OPAProcessor) processRule(rule *reporthandling.PolicyRule, fixedCont
opap.AllResources[inputResources[i].GetID()] = inputResources[i]
}
ruleResponses, err := opap.runOPAOnSingleRule(rule, inputRawResources, ruleData, postureControlInputs)
ruleResponses, err := opap.runOPAOnSingleRule(rule, inputRawResources, ruleData, RuleRegoDependenciesData)
if err != nil {
// TODO - Handle error
logger.L().Error(err.Error())
@@ -217,16 +222,16 @@ func (opap *OPAProcessor) processRule(rule *reporthandling.PolicyRule, fixedCont
return resources, err
}
func (opap *OPAProcessor) runOPAOnSingleRule(rule *reporthandling.PolicyRule, k8sObjects []map[string]interface{}, getRuleData func(*reporthandling.PolicyRule) string, postureControlInputs map[string][]string) ([]reporthandling.RuleResponse, error) {
func (opap *OPAProcessor) runOPAOnSingleRule(rule *reporthandling.PolicyRule, k8sObjects []map[string]interface{}, getRuleData func(*reporthandling.PolicyRule) string, ruleRegoDependenciesData resources.RegoDependenciesData) ([]reporthandling.RuleResponse, error) {
switch rule.RuleLanguage {
case reporthandling.RegoLanguage, reporthandling.RegoLanguage2:
return opap.runRegoOnK8s(rule, k8sObjects, getRuleData, postureControlInputs)
return opap.runRegoOnK8s(rule, k8sObjects, getRuleData, ruleRegoDependenciesData)
default:
return nil, fmt.Errorf("rule: '%s', language '%v' not supported", rule.Name, rule.RuleLanguage)
}
}
func (opap *OPAProcessor) runRegoOnK8s(rule *reporthandling.PolicyRule, k8sObjects []map[string]interface{}, getRuleData func(*reporthandling.PolicyRule) string, postureControlInputs map[string][]string) ([]reporthandling.RuleResponse, error) {
func (opap *OPAProcessor) runRegoOnK8s(rule *reporthandling.PolicyRule, k8sObjects []map[string]interface{}, getRuleData func(*reporthandling.PolicyRule) string, ruleRegoDependenciesData resources.RegoDependenciesData) ([]reporthandling.RuleResponse, error) {
// compile modules
modules, err := getRuleDependencies()
@@ -239,7 +244,7 @@ func (opap *OPAProcessor) runRegoOnK8s(rule *reporthandling.PolicyRule, k8sObjec
return nil, fmt.Errorf("in 'runRegoOnSingleRule', failed to compile rule, name: %s, reason: %s", rule.Name, err.Error())
}
store, err := resources.TOStorage(postureControlInputs)
store, err := ruleRegoDependenciesData.TOStorage()
if err != nil {
return nil, err
}
@@ -282,8 +287,12 @@ func (opap *OPAProcessor) enumerateData(rule *reporthandling.PolicyRule, k8sObje
return k8sObjects, nil
}
postureControlInputs := opap.regoDependenciesData.GetFilteredPostureControlInputs(rule.ConfigInputs)
dataControlInputs := map[string]string{"cloudProvider": opap.OPASessionObj.Report.ClusterCloudProvider}
ruleResponse, err := opap.runOPAOnSingleRule(rule, k8sObjects, ruleEnumeratorData, postureControlInputs)
RuleRegoDependenciesData := resources.RegoDependenciesData{DataControlInputs: dataControlInputs,
PostureControlInputs: postureControlInputs}
ruleResponse, err := opap.runOPAOnSingleRule(rule, k8sObjects, ruleEnumeratorData, RuleRegoDependenciesData)
if err != nil {
return nil, err
}
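
A simplified stand-in for how processRule merges the fixed control inputs into the configurable posture control inputs and bundles them, together with the detected cloud provider, into the per-rule Rego dependency data. The field names mirror the diff; the real type lives in the opa-utils resources package, and the input keys below are hypothetical examples:

```go
package main

import "fmt"

// regoDependenciesData is a trimmed-down stand-in for resources.RegoDependenciesData.
type regoDependenciesData struct {
	PostureControlInputs map[string][]string
	DataControlInputs    map[string]string
}

// buildRuleData merges fixed inputs over the configurable ones and attaches
// the cloud provider, as processRule does before calling runOPAOnSingleRule.
func buildRuleData(postureControlInputs, fixedControlInputs map[string][]string, cloudProvider string) regoDependenciesData {
	for k, v := range fixedControlInputs {
		postureControlInputs[k] = v
	}
	return regoDependenciesData{
		PostureControlInputs: postureControlInputs,
		DataControlInputs:    map[string]string{"cloudProvider": cloudProvider},
	}
}

func main() {
	// Hypothetical example keys, not real control inputs.
	data := buildRuleData(
		map[string][]string{"trustedRegistries": {"quay.io"}},
		map[string][]string{"cpuLimit": {"100m"}},
		"eks",
	)
	fmt.Printf("%+v\n", data)
}
```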

View File

@@ -4,6 +4,8 @@ import (
"fmt"
"github.com/armosec/armoapi-go/armotypes"
"github.com/kubescape/k8s-interface/cloudsupport"
"github.com/kubescape/k8s-interface/k8sinterface"
"github.com/kubescape/kubescape/v2/core/cautils"
"github.com/kubescape/kubescape/v2/core/pkg/resourcehandler"
)
@@ -49,6 +51,17 @@ func (policyHandler *PolicyHandler) CollectResources(policyIdentifier []cautils.
func (policyHandler *PolicyHandler) getResources(policyIdentifier []cautils.PolicyIdentifier, opaSessionObj *cautils.OPASessionObj, scanInfo *cautils.ScanInfo) error {
opaSessionObj.Report.ClusterAPIServerInfo = policyHandler.resourceHandler.GetClusterAPIServerInfo()
// attempting to get cloud provider from API server git version
if opaSessionObj.Report.ClusterAPIServerInfo != nil {
opaSessionObj.Report.ClusterCloudProvider = cloudsupport.GetCloudProvider(opaSessionObj.Report.ClusterAPIServerInfo.GitVersion)
}
// if the cloud provider could not be determined from the API server git version, try the context name.
if opaSessionObj.Report.ClusterCloudProvider == "" {
clusterName := k8sinterface.GetContextName()
opaSessionObj.Report.ClusterCloudProvider = cloudsupport.GetCloudProvider(clusterName)
}
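
A compact restatement of the fallback order used above, with a hypothetical detectProvider helper standing in for cloudsupport.GetCloudProvider: the provider is derived from the API server git version first, and only if that yields nothing, from the kubeconfig context name:

```go
package main

import (
	"fmt"
	"strings"
)

// detectProvider is a hypothetical stand-in for cloudsupport.GetCloudProvider;
// it only looks for provider hints inside the given string.
func detectProvider(s string) string {
	for _, p := range []string{"eks", "gke", "aks"} {
		if strings.Contains(strings.ToLower(s), p) {
			return p
		}
	}
	return ""
}

func resolveCloudProvider(gitVersion, contextName string) string {
	if provider := detectProvider(gitVersion); provider != "" {
		return provider // e.g. "v1.23.13-eks-fb459a0" -> "eks"
	}
	return detectProvider(contextName) // fallback: the context name may hint at the provider
}

func main() {
	fmt.Println(resolveCloudProvider("v1.23.13-eks-fb459a0", "my-cluster")) // prints: eks
}
```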
resourcesMap, allResources, ksResources, err := policyHandler.resourceHandler.GetResources(opaSessionObj, &policyIdentifier[0].Designators)
if err != nil {
return err

View File

@@ -60,9 +60,11 @@ func (policyHandler *PolicyHandler) getScanPolicies(policyIdentifier []cautils.P
if err != nil {
return frameworks, policyDownloadError(err)
}
if err := validateFramework(receivedFramework); err != nil {
return frameworks, err
}
if receivedFramework != nil {
frameworks = append(frameworks, *receivedFramework)
cache := getter.GetDefaultPath(rule.Name + ".json")
if err := getter.SaveInFile(receivedFramework, cache); err != nil {
logger.L().Warning("failed to cache file", helpers.String("file", cache), helpers.Error(err))

View File

@@ -1,23 +0,0 @@
package policyhandler
// func TestGetPoliciesFromBackend(t *testing.T) {
// notification := reporthandling.PolicyNotification{
// Rules: []reporthandling.PolicyIdentifier{
// {
// Kind: reporthandling.KindFramework,
// Name: "mitretest",
// },
// },
// }
// // os.Setenv(cacli., "")
// ph := PolicyHandler{
// cacli: &cacli.Cacli{},
// }
// f, err := ph.GetPoliciesFromBackend(&notification)
// if err != nil {
// t.Error(err)
// }
// if len(f) == 0 {
// t.Errorf("empty")
// }
// }

View File

@@ -5,6 +5,7 @@ import (
"strings"
apisv1 "github.com/kubescape/opa-utils/httpserver/apis/v1"
"github.com/kubescape/opa-utils/reporthandling"
"github.com/kubescape/kubescape/v2/core/cautils"
)
@@ -21,3 +22,16 @@ func policyDownloadError(err error) error {
}
return err
}
// validate the framework
func validateFramework(framework *reporthandling.Framework) error {
if framework == nil {
return fmt.Errorf("received empty framework")
}
// validate the controls are not empty
if len(framework.Controls) == 0 {
return fmt.Errorf("failed to load controls for framework: %s: empty list of controls", framework.Name)
}
return nil
}

View File

@@ -0,0 +1,48 @@
package policyhandler
import (
"testing"
"github.com/kubescape/opa-utils/reporthandling"
)
func Test_validateFramework(t *testing.T) {
type args struct {
framework *reporthandling.Framework
}
tests := []struct {
name string
args args
wantErr bool
}{
{
name: "empty framework",
args: args{
framework: &reporthandling.Framework{
Controls: []reporthandling.Control{},
},
},
wantErr: true,
},
{
name: "none empty framework",
args: args{
framework: &reporthandling.Framework{
Controls: []reporthandling.Control{
{
ControlID: "c-0001",
},
},
},
},
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := validateFramework(tt.args.framework); (err != nil) != tt.wantErr {
t.Errorf("validateControls() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}

View File

@@ -76,9 +76,10 @@ func (fileHandler *FileResourceHandler) GetResources(sessionObj *cautils.OPASess
}
if err := fileHandler.registryAdaptors.collectImagesVulnerabilities(k8sResources, allResources, ksResources); err != nil {
logger.L().Warning("failed to collect images vulnerabilities", helpers.Error(err))
}
// Should Kubescape scan image related controls when scanning local files?
// if err := fileHandler.registryAdaptors.collectImagesVulnerabilities(k8sResources, allResources, ksResources); err != nil {
// logger.L().Warning("failed to collect images vulnerabilities", helpers.Error(err))
// }
cautils.StopSpinner()
logger.L().Success("Done accessing local objects")
@@ -103,6 +104,8 @@ func getResourcesFromPath(path string) (map[string]reporthandling.Source, []work
gitRepo, err := cautils.NewLocalGitRepository(path)
if err == nil && gitRepo != nil {
repoRoot, _ = gitRepo.GetRootDir()
} else {
repoRoot, _ = filepath.Abs(path)
}
// load resource from local file system
@@ -141,7 +144,7 @@ func getResourcesFromPath(path string) (map[string]reporthandling.Source, []work
}
workloadSource := reporthandling.Source{
RelativePath: source,
RelativePath: relSource,
FileType: filetype,
LastCommit: lastCommit,
}

View File

@@ -4,7 +4,7 @@ import (
"fmt"
"path/filepath"
giturl "github.com/armosec/go-git-url"
giturl "github.com/kubescape/go-git-url"
logger "github.com/kubescape/go-logger"
"github.com/kubescape/go-logger/helpers"
"github.com/kubescape/k8s-interface/k8sinterface"

View File

@@ -88,7 +88,7 @@ func (k8sHandler *K8sResourceHandler) GetResources(sessionObj *cautils.OPASessio
logger.L().Info("Requesting images vulnerabilities results")
cautils.StartSpinner()
if err := k8sHandler.registryAdaptors.collectImagesVulnerabilities(k8sResourcesMap, allResources, ksResourceMap); err != nil {
logger.L().Warning("failed to collect image vulnerabilities", helpers.Error(err))
logger.L().Warning("failed to collect image vulnerabilities", helpers.Error(err), helpers.String("Read more here", "https://hub.armosec.io/docs/configuration-of-image-vulnerabilities"))
cautils.SetInfoMapForResources(fmt.Sprintf("failed to pull image scanning data: %s. for more information: https://hub.armosec.io/docs/configuration-of-image-vulnerabilities", err.Error()), imgVulnResources, sessionObj.InfoMap)
} else {
if isEmptyImgVulns(*ksResourceMap) {

View File

@@ -23,6 +23,7 @@ var (
KubeletInfo = "KubeletInfo"
KubeProxyInfo = "KubeProxyInfo"
ControlPlaneInfo = "ControlPlaneInfo"
CloudProviderInfo = "CloudProviderInfo"
MapResourceToApiGroup = map[string]string{
KubeletConfiguration: "hostdata.kubescape.cloud/v1beta0",
@@ -35,6 +36,7 @@ var (
KubeletInfo: "hostdata.kubescape.cloud/v1beta0",
KubeProxyInfo: "hostdata.kubescape.cloud/v1beta0",
ControlPlaneInfo: "hostdata.kubescape.cloud/v1beta0",
CloudProviderInfo: "hostdata.kubescape.cloud/v1beta0",
}
MapResourceToApiGroupVuln = map[string][]string{
ImageVulnerabilities: {"armo.vuln.images/v1", "image.vulnscan.com/v1"}}

View File

@@ -8,8 +8,8 @@ import (
"github.com/kubescape/k8s-interface/workloadinterface"
"github.com/kubescape/kubescape/v2/core/cautils"
"github.com/kubescape/kubescape/v2/core/cautils/getter"
gcpadaptorv1 "github.com/kubescape/kubescape/v2/core/pkg/registryadaptors/gcp/v1"
armosecadaptorv1 "github.com/kubescape/kubescape/v2/core/pkg/registryadaptors/armosec/v1"
gcpadaptorv1 "github.com/kubescape/kubescape/v2/core/pkg/registryadaptors/gcp/v1"
"github.com/kubescape/kubescape/v2/core/pkg/registryadaptors/registryvulnerabilities"
"github.com/kubescape/opa-utils/shared"

View File

@@ -6,11 +6,11 @@ import (
nethttp "net/http"
"os"
giturl "github.com/armosec/go-git-url"
"github.com/go-git/go-git/v5"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/transport"
"github.com/go-git/go-git/v5/plumbing/transport/http"
giturl "github.com/kubescape/go-git-url"
)
// To check if the given repository is public (no authentication needed), send an HTTP GET request to the URL
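
A hedged sketch of that check (the helper name and URL are illustrative, not the kubescape implementation): issue a plain GET against the repository URL and treat an unauthenticated HTTP 200 as "public":

```go
package main

import (
	"fmt"
	"net/http"
)

// isPublicRepo returns true when the repository URL answers an unauthenticated
// GET request with HTTP 200, which is taken to mean no credentials are needed.
func isPublicRepo(repoURL string) (bool, error) {
	resp, err := http.Get(repoURL)
	if err != nil {
		return false, err
	}
	defer resp.Body.Close()
	return resp.StatusCode == http.StatusOK, nil
}

func main() {
	public, err := isPublicRepo("https://github.com/kubescape/kubescape")
	fmt.Println(public, err)
}
```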

View File

@@ -1,7 +1,7 @@
package resourcehandler
import (
giturl "github.com/armosec/go-git-url"
giturl "github.com/kubescape/go-git-url"
logger "github.com/kubescape/go-logger"
"github.com/kubescape/go-logger/helpers"
"github.com/kubescape/k8s-interface/workloadinterface"

View File

@@ -3,6 +3,8 @@ package resourcesprioritization
import (
"fmt"
logger "github.com/kubescape/go-logger"
"github.com/kubescape/go-logger/helpers"
"github.com/kubescape/k8s-interface/workloadinterface"
"github.com/kubescape/kubescape/v2/core/cautils"
"github.com/kubescape/kubescape/v2/core/cautils/getter"
@@ -19,7 +21,8 @@ func NewResourcesPrioritizationHandler(attackTracksGetter getter.IAttackTracksGe
attackTracks: make([]v1alpha1.IAttackTrack, 0),
}
if tracks, err := attackTracksGetter.GetAttackTracks(); err != nil {
tracks, err := attackTracksGetter.GetAttackTracks()
if err != nil {
return nil, err
} else {
for _, attackTrack := range tracks {
@@ -36,6 +39,12 @@ func NewResourcesPrioritizationHandler(attackTracksGetter getter.IAttackTracksGe
return nil, fmt.Errorf("expected to find at least one attack track")
}
// Store attack tracks in cache
cache := getter.GetDefaultPath(cautils.LocalAttackTracksFilename)
if err := getter.SaveInFile(tracks, cache); err != nil {
logger.L().Warning("failed to cache file", helpers.String("file", cache), helpers.Error(err))
}
return handler, nil
}
@@ -55,7 +64,8 @@ func (handler *ResourcesPrioritizationHandler) PrioritizeResources(sessionObj *c
resourcePriorityVector := []prioritization.ControlsVector{}
resource, exist := sessionObj.AllResources[resourceId]
if !exist {
return fmt.Errorf("expected to find resource id '%s' in scanned resources map", resourceId)
logger.L().Error("resource not found in resources map", helpers.String("resource ID", resourceId))
continue
}
workload := workloadinterface.NewWorkloadObj(resource.GetObject())

View File

@@ -1,4 +1,4 @@
package v1
package printer
import (
"encoding/json"

View File

@@ -1,4 +1,4 @@
package v1
package printer
import (
"fmt"

View File

@@ -1,4 +1,4 @@
package v2
package printer
import (
"fmt"
@@ -78,6 +78,19 @@ func getColor(controlSeverity int) color.Attribute {
}
}
func getSortedControlsIDs(controls reportsummary.ControlSummaries) [][]string {
controlIDs := make([][]string, 5)
for k := range controls {
c := controls[k]
i := apis.ControlSeverityToInt(c.GetScoreFactor())
controlIDs[i] = append(controlIDs[i], c.GetID())
}
for i := range controlIDs {
sort.Strings(controlIDs[i])
}
return controlIDs
}
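
A stand-alone illustration of the bucketing getSortedControlsIDs performs: control IDs are grouped into five buckets by severity index (0: Unknown, 1: Low, 2: Medium, 3: High, 4: Critical, per the severityCounter comment further down) and each bucket is sorted alphabetically. The severity values below are made up for the example:

```go
package main

import (
	"fmt"
	"sort"
)

// bucketBySeverity mimics getSortedControlsIDs with a plain map instead of
// the reportsummary.ControlSummaries interface.
func bucketBySeverity(severityByID map[string]int) [][]string {
	buckets := make([][]string, 5)
	for id, severity := range severityByID {
		buckets[severity] = append(buckets[severity], id)
	}
	for i := range buckets {
		sort.Strings(buckets[i])
	}
	return buckets
}

func main() {
	fmt.Println(bucketBySeverity(map[string]int{"C-0044": 2, "C-0048": 3, "C-0006": 3}))
	// Output: [[] [] [C-0044] [C-0006 C-0048] []]
}
```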
func getSortedControlsNames(controls reportsummary.ControlSummaries) [][]string {
controlNames := make([][]string, 5)
for k := range controls {

View File

@@ -142,7 +142,7 @@
<tr>
<td class="resourceSeverityCell">{{ .Severity }}</td>
<td class="resourceNameCell">{{ .Name }}</td>
<td class="resourceURLCell"><a href="https://hub.armosec.io/docs/{{ lower .URL }}">{{ .URL }}</a></td>
<td class="resourceURLCell"><a href="{{ lower .URL }}">{{ .ID }}</a></td>
<td class="resourceRemediationCell">{{ range .FailedPaths }} <p>{{ . }}</p> {{ end }}</td>
</tr>
{{ end }}

View File

@@ -1,4 +1,4 @@
package v2
package printer
import (
_ "embed"
@@ -130,17 +130,18 @@ func buildResourceTableView(opaSessionObj *cautils.OPASessionObj) ResourceTableV
func buildResourceControlResult(resourceControl resourcesresults.ResourceAssociatedControl, control reportsummary.IControlSummary) ResourceControlResult {
ctlSeverity := apis.ControlSeverityToString(control.GetScoreFactor())
ctlName := resourceControl.GetName()
ctlURL := resourceControl.GetID()
ctlID := resourceControl.GetID()
ctlURL := cautils.GetControlLink(resourceControl.GetID())
failedPaths := append(failedPathsToString(&resourceControl), fixPathsToString(&resourceControl)...)
return ResourceControlResult{ctlSeverity, ctlName, ctlURL, failedPaths}
return ResourceControlResult{ctlSeverity, ctlName, ctlID, ctlURL, failedPaths}
}
func buildResourceControlResultTable(resourceControls []resourcesresults.ResourceAssociatedControl, summaryDetails *reportsummary.SummaryDetails) []ResourceControlResult {
var ctlResults []ResourceControlResult
for _, resourceControl := range resourceControls {
if resourceControl.GetStatus(nil).IsFailed() {
control := summaryDetails.Controls.GetControl(reportsummary.EControlCriteriaName, resourceControl.GetName())
control := summaryDetails.Controls.GetControl(reportsummary.EControlCriteriaID, resourceControl.GetID())
ctlResult := buildResourceControlResult(resourceControl, control)
ctlResults = append(ctlResults, ctlResult)

View File

@@ -1,4 +1,4 @@
package v2
package printer
import (
"encoding/json"

View File

@@ -1,9 +1,10 @@
package v2
package printer
import (
"encoding/xml"
"fmt"
"os"
"sort"
"strings"
logger "github.com/kubescape/go-logger"
@@ -11,9 +12,8 @@ import (
"github.com/kubescape/k8s-interface/workloadinterface"
"github.com/kubescape/kubescape/v2/core/cautils"
"github.com/kubescape/kubescape/v2/core/pkg/resultshandling/printer"
"github.com/kubescape/opa-utils/reporthandling/apis"
"github.com/kubescape/opa-utils/reporthandling/results/v1/reportsummary"
"github.com/kubescape/opa-utils/reporthandling/results/v1/resourcesresults"
"github.com/kubescape/opa-utils/shared"
)
/*
@@ -36,7 +36,6 @@ type JUnitTestSuites struct {
XMLName xml.Name `xml:"testsuites"`
Suites []JUnitTestSuite `xml:"testsuite"` // list of controls
Errors int `xml:"errors,attr"` // total number of tests with error result from all testsuites
Disabled int `xml:"disabled,attr"` // total number of disabled tests from all testsuites
Failures int `xml:"failures,attr"` // total number of failed tests from all testsuites
Tests int `xml:"tests,attr"` // total number of tests from all testsuites. Some software may expect to only see the number of successful tests from all testsuites though
Time string `xml:"time,attr"` // time in seconds to execute all test suites
@@ -46,8 +45,8 @@ type JUnitTestSuites struct {
// JUnitTestSuite represents a single control
type JUnitTestSuite struct {
XMLName xml.Name `xml:"testsuite"`
Tests int `xml:"tests,attr"` // total number of tests from this testsuite. Some software may expect to only see the number of successful tests though
Name string `xml:"name,attr"` // Full (class) name of the test for non-aggregated testsuite documents. Class name without the package for aggregated testsuites documents. Required
Disabled int `xml:"disabled,attr"` // The total number of disabled tests in the suite. optional. not supported by maven surefire.
Errors int `xml:"errors,attr"` // The total number of tests in the suite that errored
Failures int `xml:"failures,attr"` // The total number of tests in the suite that failed
Hostname string `xml:"hostname,attr"` // Host on which the tests were executed ? cluster name ?
@@ -55,7 +54,6 @@ type JUnitTestSuite struct {
Skipped string `xml:"skipped,attr"` // The total number of skipped tests
Time string `xml:"time,attr"` // Time taken (in seconds) to execute the tests in the suite
Timestamp string `xml:"timestamp,attr"` // when the test was executed in ISO 8601 format (2014-01-21T16:17:18)
File string `xml:"file,attr"` // The file be tested
Properties []JUnitProperty `xml:"properties>property,omitempty"`
TestCases []JUnitTestCase `xml:"testcase"`
}
@@ -64,7 +62,6 @@ type JUnitTestSuite struct {
type JUnitTestCase struct {
XMLName xml.Name `xml:"testcase"`
Classname string `xml:"classname,attr"` // Full class name for the class the test method is in. required
Status string `xml:"status,attr"` // Status
Name string `xml:"name,attr"` // Name of the test method, required
Time string `xml:"time,attr"` // Time taken (in seconds) to execute the test. optional
SkipMessage *JUnitSkipMessage `xml:"skipped,omitempty"`
@@ -89,11 +86,6 @@ type JUnitFailure struct {
Contents string `xml:",chardata"`
}
const (
lineSeparator = "\n===================================================================================================================\n\n"
testCaseTypeResources = "Resources"
)
func NewJunitPrinter(verbose bool) *JunitPrinter {
return &JunitPrinter{
verbose: verbose,
@@ -124,118 +116,104 @@ func (junitPrinter *JunitPrinter) ActionPrint(opaSessionObj *cautils.OPASessionO
func testsSuites(results *cautils.OPASessionObj) *JUnitTestSuites {
return &JUnitTestSuites{
Suites: listTestsSuite(results),
Tests: results.Report.SummaryDetails.NumberOfResources().All(),
Tests: results.Report.SummaryDetails.NumberOfControls().All(),
Name: "Kubescape Scanning",
Failures: results.Report.SummaryDetails.NumberOfResources().Failed(),
Failures: results.Report.SummaryDetails.NumberOfControls().Failed(),
}
}
// aggregate resources source to a list of resources results
func sourceToResourcesResults(results *cautils.OPASessionObj) map[string][]resourcesresults.Result {
resourceResults := make(map[string][]resourcesresults.Result)
for i := range results.ResourceSource {
if r, ok := results.ResourcesResult[i]; ok {
if _, ok := resourceResults[results.ResourceSource[i].RelativePath]; !ok {
resourceResults[results.ResourceSource[i].RelativePath] = []resourcesresults.Result{}
}
resourceResults[results.ResourceSource[i].RelativePath] = append(resourceResults[results.ResourceSource[i].RelativePath], r)
}
}
return resourceResults
}
// listTestsSuite returns a list of testsuites
func listTestsSuite(results *cautils.OPASessionObj) []JUnitTestSuite {
var testSuites []JUnitTestSuite
resourceResults := sourceToResourcesResults(results)
counter := 0
// control scan
for path, resourcesResult := range resourceResults {
if len(results.Report.SummaryDetails.ListFrameworks()) == 0 {
testSuite := JUnitTestSuite{}
testSuite.Tests = results.Report.SummaryDetails.NumberOfControls().All()
testSuite.Failures = results.Report.SummaryDetails.NumberOfControls().Failed()
testSuite.Timestamp = results.Report.ReportGenerationTime.String()
testSuite.ID = counter
counter++
testSuite.File = path
testSuite.TestCases = testsCases(results, resourcesResult)
if len(testSuite.TestCases) > 0 {
testSuites = append(testSuites, testSuite)
}
testSuite.ID = 0
testSuite.Name = "kubescape"
testSuite.Properties = properties(results.Report.SummaryDetails.Score)
testSuite.TestCases = testsCases(results, &results.Report.SummaryDetails.Controls, "Kubescape")
testSuites = append(testSuites, testSuite)
return testSuites
}
for i, f := range results.Report.SummaryDetails.Frameworks {
testSuite := JUnitTestSuite{}
testSuite.Tests = f.NumberOfControls().All()
testSuite.Failures = f.NumberOfControls().Failed()
testSuite.Timestamp = results.Report.ReportGenerationTime.String()
testSuite.ID = i
testSuite.Name = f.Name
testSuite.Properties = properties(f.Score)
testSuite.TestCases = testsCases(results, f.GetControls(), f.GetName())
testSuites = append(testSuites, testSuite)
}
return testSuites
}
func failedControlsToFailureMessage(results *cautils.OPASessionObj, controls []resourcesresults.ResourceAssociatedControl, severityCounter []int) string {
msg := ""
for _, c := range controls {
control := results.Report.SummaryDetails.Controls.GetControl(reportsummary.EControlCriteriaID, c.GetID())
if c.GetStatus(nil).IsFailed() {
msg += fmt.Sprintf("Test: %s\n", control.GetName())
msg += fmt.Sprintf("Severity: %s\n", apis.ControlSeverityToString(control.GetScoreFactor()))
msg += fmt.Sprintf("Remediation: %s\n", control.GetRemediation())
msg += fmt.Sprintf("Link: %s\n", getControlLink(control.GetID()))
if failedPaths := failedPathsToString(&c); len(failedPaths) > 0 {
msg += fmt.Sprintf("Failed paths: \n - %s\n", strings.Join(failedPaths, "\n - "))
}
if fixPaths := fixPathsToString(&c); len(fixPaths) > 0 {
msg += fmt.Sprintf("Available fix: \n - %s\n", strings.Join(fixPaths, "\n - "))
}
msg += "\n"
severityCounter[apis.ControlSeverityToInt(control.GetScoreFactor())] += 1
}
}
return msg
}
// Every testCase includes a file (even if the file contains several resources)
func testsCases(results *cautils.OPASessionObj, resourcesResult []resourcesresults.Result) []JUnitTestCase {
func testsCases(results *cautils.OPASessionObj, controls reportsummary.IControlsSummaries, classname string) []JUnitTestCase {
var testCases []JUnitTestCase
testCase := JUnitTestCase{}
testCaseFailure := JUnitFailure{}
testCaseFailure.Type = testCaseTypeResources
message := ""
// severityCounter represents the severities, 0: Unknown, 1: Low, 2: Medium, 3: High, 4: Critical
severityCounter := make([]int, apis.NumberOfSeverities, apis.NumberOfSeverities)
iter := controls.ListControlsIDs().All()
for iter.HasNext() {
cID := iter.Next()
testCase := JUnitTestCase{}
control := results.Report.SummaryDetails.Controls.GetControl(reportsummary.EControlCriteriaID, cID)
testCase.Name = control.GetName()
testCase.Classname = classname
if control.GetStatus().IsFailed() {
resources := map[string]interface{}{}
resourceIDs := control.ListResourcesIDs().Failed()
for j := range resourceIDs {
resource := results.AllResources[resourceIDs[j]]
sourcePath := ""
if ResourceSourcePath, ok := results.ResourceSource[resourceIDs[j]]; ok {
sourcePath = ResourceSourcePath.RelativePath
}
resources[resourceToString(resource, sourcePath)] = nil
}
resourcesStr := shared.MapStringToSlice(resources)
sort.Strings(resourcesStr)
testCaseFailure := JUnitFailure{}
testCaseFailure.Type = "Control"
// testCaseFailure.Contents =
testCaseFailure.Message = fmt.Sprintf("Remediation: %s\nMore details: %s\n\n%s", control.GetRemediation(), cautils.GetControlLink(control.GetID()), strings.Join(resourcesStr, "\n"))
testCase.Failure = &testCaseFailure
} else if control.GetStatus().IsSkipped() {
testCase.SkipMessage = &JUnitSkipMessage{
Message: "", // TODO - fill after statusInfo is supported
}
for i := range resourcesResult {
if failedControls := failedControlsToFailureMessage(results, resourcesResult[i].ListControls(), severityCounter); failedControls != "" {
message += fmt.Sprintf("%sResource: %s\n\n%s", lineSeparator, resourceNameToString(results.AllResources[resourcesResult[i].GetResourceID()]), failedControls)
}
}
testCaseFailure.Message += fmt.Sprintf("%s\n%s", getSummaryMessage(severityCounter), message)
testCase.Failure = &testCaseFailure
if testCase.Failure.Message != "" {
testCases = append(testCases, testCase)
}
return testCases
}
func getSummaryMessage(severityCounter []int) string {
total := 0
severities := ""
for i, count := range severityCounter {
if apis.SeverityNumberToString(i) == apis.SeverityNumberToString(apis.SeverityUnknown) {
continue
}
severities += fmt.Sprintf("%s: %d, ", apis.SeverityNumberToString(i), count)
total += count
}
if len(severities) == 0 {
return ""
}
return fmt.Sprintf("Total: %d (%s)", total, severities[:len(severities)-2])
}
func resourceNameToString(resource workloadinterface.IMetadata) string {
func resourceToString(resource workloadinterface.IMetadata, sourcePath string) string {
sep := "; "
s := ""
s += fmt.Sprintf("kind=%s/", resource.GetKind())
s += fmt.Sprintf("apiVersion: %s", resource.GetApiVersion()) + sep
s += fmt.Sprintf("kind: %s", resource.GetKind()) + sep
if resource.GetNamespace() != "" {
s += fmt.Sprintf("namespace=%s/", resource.GetNamespace())
s += fmt.Sprintf("namespace: %s", resource.GetNamespace()) + sep
}
s += fmt.Sprintf("name: %s", resource.GetName())
if sourcePath != "" {
s += sep + fmt.Sprintf("sourcePath: %s", sourcePath)
}
s += fmt.Sprintf("name=%s", resource.GetName())
return s
}
func properties(riskScore float32) []JUnitProperty {
return []JUnitProperty{
{
Name: "riskScore",
Value: fmt.Sprintf("%.2f", riskScore),
},
}
}
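
Simplified stand-ins for the JUnit types in this file (trimmed to a few fields; the tags mirror the struct definitions above, the rest is illustrative) showing the testsuites → testsuite → testcase shape that encoding/xml produces:

```go
package main

import (
	"encoding/xml"
	"fmt"
)

type testCase struct {
	XMLName   xml.Name `xml:"testcase"`
	Name      string   `xml:"name,attr"`
	Classname string   `xml:"classname,attr"`
}

type testSuite struct {
	XMLName   xml.Name   `xml:"testsuite"`
	Name      string     `xml:"name,attr"`
	Tests     int        `xml:"tests,attr"`
	Failures  int        `xml:"failures,attr"`
	TestCases []testCase `xml:"testcase"`
}

type testSuites struct {
	XMLName xml.Name    `xml:"testsuites"`
	Name    string      `xml:"name,attr"`
	Suites  []testSuite `xml:"testsuite"`
}

func main() {
	out, _ := xml.MarshalIndent(testSuites{
		Name: "Kubescape Scanning",
		Suites: []testSuite{{
			Name: "NSA", Tests: 2, Failures: 1,
			TestCases: []testCase{{Name: "HostPath mount", Classname: "NSA"}},
		}},
	}, "", "  ")
	fmt.Println(string(out))
}
```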

View File

@@ -1,4 +1,4 @@
package v2
package printer
import (
_ "embed"
@@ -76,13 +76,13 @@ func (pdfPrinter *PdfPrinter) printInfo(m pdf.Maroto, summaryDetails *reportsumm
}
func (pdfPrinter *PdfPrinter) ActionPrint(opaSessionObj *cautils.OPASessionObj) {
sortedControlNames := getSortedControlsNames(opaSessionObj.Report.SummaryDetails.Controls)
sortedControlIDs := getSortedControlsIDs(opaSessionObj.Report.SummaryDetails.Controls)
infoToPrintInfo := mapInfoToPrintInfo(opaSessionObj.Report.SummaryDetails.Controls)
m := pdf.NewMaroto(consts.Portrait, consts.A4)
pdfPrinter.printHeader(m)
pdfPrinter.printFramework(m, opaSessionObj.Report.SummaryDetails.ListFrameworks())
pdfPrinter.printTable(m, &opaSessionObj.Report.SummaryDetails, sortedControlNames)
pdfPrinter.printTable(m, &opaSessionObj.Report.SummaryDetails, sortedControlIDs)
pdfPrinter.printFinalResult(m, &opaSessionObj.Report.SummaryDetails)
pdfPrinter.printInfo(m, &opaSessionObj.Report.SummaryDetails, infoToPrintInfo)
@@ -149,16 +149,16 @@ func (pdfPrinter *PdfPrinter) printFramework(m pdf.Maroto, frameworks []reportsu
}
// Create pdf table
func (pdfPrinter *PdfPrinter) printTable(m pdf.Maroto, summaryDetails *reportsummary.SummaryDetails, sortedControlNames [][]string) {
func (pdfPrinter *PdfPrinter) printTable(m pdf.Maroto, summaryDetails *reportsummary.SummaryDetails, sortedControlIDs [][]string) {
headers := getControlTableHeaders()
infoToPrintInfoMap := mapInfoToPrintInfo(summaryDetails.Controls)
controls := make([][]string, len(sortedControlNames))
controls := make([][]string, len(sortedControlIDs))
for i := range controls {
controls[i] = make([]string, len(headers))
}
for i := len(sortedControlNames) - 1; i >= 0; i-- {
for _, c := range sortedControlNames[i] {
controls[i] = generateRow(summaryDetails.Controls.GetControl(reportsummary.EControlCriteriaName, c), infoToPrintInfoMap, true)
for i := len(sortedControlIDs) - 1; i >= 0; i-- {
for _, c := range sortedControlIDs[i] {
controls[i] = generateRow(summaryDetails.Controls.GetControl(reportsummary.EControlCriteriaID, c), infoToPrintInfoMap, true)
}
}

View File

@@ -1,10 +1,9 @@
package v2
package printer
import (
"fmt"
"os"
"sort"
"strings"
"github.com/enescakir/emoji"
"github.com/kubescape/k8s-interface/workloadinterface"
@@ -35,18 +34,18 @@ func NewPrettyPrinter(verboseMode bool, formatVersion string, viewType cautils.V
func (prettyPrinter *PrettyPrinter) ActionPrint(opaSessionObj *cautils.OPASessionObj) {
fmt.Fprintf(prettyPrinter.writer, "\n"+getSeparator("^")+"\n")
sortedControlNames := getSortedControlsNames(opaSessionObj.Report.SummaryDetails.Controls) // ListControls().All())
sortedControlIDs := getSortedControlsIDs(opaSessionObj.Report.SummaryDetails.Controls) // ListControls().All())
switch prettyPrinter.viewType {
case cautils.ControlViewType:
prettyPrinter.printResults(&opaSessionObj.Report.SummaryDetails.Controls, opaSessionObj.AllResources, sortedControlNames)
prettyPrinter.printResults(&opaSessionObj.Report.SummaryDetails.Controls, opaSessionObj.AllResources, sortedControlIDs)
case cautils.ResourceViewType:
if prettyPrinter.verboseMode {
prettyPrinter.resourceTable(opaSessionObj)
}
}
prettyPrinter.printSummaryTable(&opaSessionObj.Report.SummaryDetails, sortedControlNames)
prettyPrinter.printSummaryTable(&opaSessionObj.Report.SummaryDetails, sortedControlIDs)
}
@@ -57,10 +56,10 @@ func (prettyPrinter *PrettyPrinter) SetWriter(outputFile string) {
func (prettyPrinter *PrettyPrinter) Score(score float32) {
}
func (prettyPrinter *PrettyPrinter) printResults(controls *reportsummary.ControlSummaries, allResources map[string]workloadinterface.IMetadata, sortedControlNames [][]string) {
for i := len(sortedControlNames) - 1; i >= 0; i-- {
for _, c := range sortedControlNames[i] {
controlSummary := controls.GetControl(reportsummary.EControlCriteriaName, c) // summaryDetails.Controls ListControls().All() Controls.GetControl(ca)
func (prettyPrinter *PrettyPrinter) printResults(controls *reportsummary.ControlSummaries, allResources map[string]workloadinterface.IMetadata, sortedControlIDs [][]string) {
for i := len(sortedControlIDs) - 1; i >= 0; i-- {
for _, c := range sortedControlIDs[i] {
controlSummary := controls.GetControl(reportsummary.EControlCriteriaID, c) // summaryDetails.Controls ListControls().All() Controls.GetControl(ca)
prettyPrinter.printTitle(controlSummary)
prettyPrinter.printResources(controlSummary, allResources)
prettyPrinter.printSummary(c, controlSummary)
@@ -84,7 +83,7 @@ func (prettyPrinter *PrettyPrinter) printSummary(controlName string, controlSumm
}
func (prettyPrinter *PrettyPrinter) printTitle(controlSummary reportsummary.IControlSummary) {
cautils.InfoDisplay(prettyPrinter.writer, "[control: %s - %s] ", controlSummary.GetName(), getControlLink(controlSummary.GetID()))
cautils.InfoDisplay(prettyPrinter.writer, "[control: %s - %s] ", controlSummary.GetName(), cautils.GetControlLink(controlSummary.GetID()))
switch controlSummary.GetStatus().Status() {
case apis.StatusSkipped:
cautils.InfoDisplay(prettyPrinter.writer, "skipped %v\n", emoji.ConfusedFace)
@@ -186,14 +185,14 @@ func generateFooter(summaryDetails *reportsummary.SummaryDetails) []string {
return row
}
func (prettyPrinter *PrettyPrinter) printSummaryTable(summaryDetails *reportsummary.SummaryDetails, sortedControlNames [][]string) {
func (prettyPrinter *PrettyPrinter) printSummaryTable(summaryDetails *reportsummary.SummaryDetails, sortedControlIDs [][]string) {
if summaryDetails.NumberOfControls().All() == 0 {
fmt.Fprintf(prettyPrinter.writer, "\nKubescape did not scan any of the resources, make sure you are scanning valid kubernetes manifests (Deployments, Pods, etc.)\n")
return
}
cautils.InfoTextDisplay(prettyPrinter.writer, "\n"+controlCountersForSummary(summaryDetails.NumberOfControls())+"\n")
cautils.InfoTextDisplay(prettyPrinter.writer, renderSeverityCountersSummary(&summaryDetails.SeverityCounters)+"\n\n")
cautils.InfoTextDisplay(prettyPrinter.writer, renderSeverityCountersSummary(summaryDetails.GetResourcesSeverityCounters())+"\n\n")
// cautils.InfoTextDisplay(prettyPrinter.writer, "\n"+"Severities: SOME OTHER"+"\n\n")
@@ -210,9 +209,9 @@ func (prettyPrinter *PrettyPrinter) printSummaryTable(summaryDetails *reportsumm
}
infoToPrintInfo := mapInfoToPrintInfo(summaryDetails.Controls)
for i := len(sortedControlNames) - 1; i >= 0; i-- {
for _, c := range sortedControlNames[i] {
row := generateRow(summaryDetails.Controls.GetControl(reportsummary.EControlCriteriaName, c), infoToPrintInfo, printAll)
for i := len(sortedControlIDs) - 1; i >= 0; i-- {
for _, c := range sortedControlIDs[i] {
row := generateRow(summaryDetails.Controls.GetControl(reportsummary.EControlCriteriaID, c), infoToPrintInfo, printAll)
if len(row) > 0 {
summaryTable.Append(row)
}
@@ -255,16 +254,12 @@ func frameworksScoresToString(frameworks []reportsummary.IFrameworkSummary) stri
return ""
}
func getControlLink(controlID string) string {
return fmt.Sprintf("https://hub.armosec.io/docs/%s", strings.ToLower(controlID))
}
// renderSeverityCountersSummary renders the string that reports severity counters summary
func renderSeverityCountersSummary(counters reportsummary.ISeverityCounters) string {
critical := counters.NumberOfResourcesWithCriticalSeverity()
high := counters.NumberOfResourcesWithHighSeverity()
medium := counters.NumberOfResourcesWithMediumSeverity()
low := counters.NumberOfResourcesWithLowSeverity()
critical := counters.NumberOfCriticalSeverity()
high := counters.NumberOfHighSeverity()
medium := counters.NumberOfMediumSeverity()
low := counters.NumberOfLowSeverity()
return fmt.Sprintf(
"Failed Resources by Severity: Critical — %d, High — %d, Medium — %d, Low — %d",

View File

@@ -1,3 +1,3 @@
package v2
package printer
var INDENT = " "

View File

@@ -1,4 +1,4 @@
package v2
package printer
import (
"fmt"

View File

@@ -1,4 +1,4 @@
package v2
package printer
import (
"fmt"
@@ -289,7 +289,7 @@ func (m *Metrics) setRiskScores(summaryDetails *reportsummary.SummaryDetails) {
controlName: control.GetName(),
controlID: control.GetID(),
riskScore: cautils.Float32ToInt(control.GetScore()),
link: getControlLink(control.GetID()),
link: cautils.GetControlLink(control.GetID()),
severity: apis.ControlSeverityToString(control.GetScoreFactor()),
remediation: control.GetRemediation(),
}

View File

@@ -1,4 +1,4 @@
package v2
package printer
import (
"github.com/kubescape/k8s-interface/workloadinterface"
@@ -14,6 +14,7 @@ type ResourceResult struct {
type ResourceControlResult struct {
Severity string
Name string
ID string
URL string
FailedPaths []string
}

View File

@@ -1,4 +1,4 @@
package v2
package printer
import (
"fmt"
@@ -73,11 +73,11 @@ func generateResourceRows(controls []resourcesresults.ResourceAssociatedControl,
continue
}
row[resourceColumnURL] = fmt.Sprintf("https://hub.armosec.io/docs/%s", strings.ToLower(controls[i].GetID()))
row[resourceColumnURL] = cautils.GetControlLink(controls[i].GetID())
row[resourceColumnPath] = strings.Join(append(failedPathsToString(&controls[i]), fixPathsToString(&controls[i])...), "\n")
row[resourceColumnName] = controls[i].GetName()
if c := summaryDetails.Controls.GetControl(reportsummary.EControlCriteriaName, controls[i].GetName()); c != nil {
if c := summaryDetails.Controls.GetControl(reportsummary.EControlCriteriaID, controls[i].GetID()); c != nil {
row[resourceColumnSeverity] = getSeverityColumn(c)
}

View File

@@ -1,6 +1,7 @@
package v2
package printer
import (
"fmt"
"os"
"path"
"path/filepath"
@@ -84,7 +85,7 @@ func (sp *SARIFPrinter) addRule(scanRun *sarif.Run, control reportsummary.IContr
WithDefaultConfiguration(configuration).
WithShortDescription(sarif.NewMultiformatMessageString(control.GetName())).
WithFullDescription(sarif.NewMultiformatMessageString(control.GetDescription())).
WithHelp(sarif.NewMultiformatMessageString(control.GetRemediation()))
WithHelp(sarif.NewMultiformatMessageString(sp.generateRemediationMessage(control)))
}
// addResult adds a result of checking a rule to the scan run based on the given control summary
@@ -196,3 +197,8 @@ func getBasePathFromMetadata(opaSessionObj cautils.OPASessionObj) string {
return ""
}
// generateRemediationMessage generates a remediation message for the given control summary
func (sp *SARIFPrinter) generateRemediationMessage(control reportsummary.IControlSummary) string {
return fmt.Sprintf("Remediation: %s", control.GetRemediation())
}

View File

@@ -1,4 +1,4 @@
package v2
package printer
import "testing"

View File

@@ -1,4 +1,4 @@
package v2
package printer
import (
"github.com/kubescape/kubescape/v2/core/cautils"

View File

@@ -1,4 +1,4 @@
package v2
package printer
import (
"github.com/kubescape/k8s-interface/k8sinterface"

View File

@@ -1,4 +1,4 @@
package v2
package printer
import (
logger "github.com/kubescape/go-logger"
@@ -27,7 +27,9 @@ func FinalizeResults(data *cautils.OPASessionObj) *reporthandlingv2.PostureRepor
report.Results = make([]resourcesresults.Result, len(data.ResourcesResult))
finalizeResults(report.Results, data.ResourcesResult, data.ResourcesPrioritized)
report.Resources = finalizeResources(report.Results, data.AllResources, data.ResourceSource)
if !data.OmitRawResources {
report.Resources = finalizeResources(report.Results, data.AllResources, data.ResourceSource)
}
return &report
}

View File

@@ -1,4 +1,4 @@
package v2
package reporter
import (
"fmt"

View File

@@ -1,4 +1,4 @@
package v2
package reporter
import (
"encoding/json"
@@ -264,7 +264,7 @@ func (report *ReportEventReceiver) addPathURL(urlObj *url.URL) {
case SubmitContextRBAC:
urlObj.Path = "rbac-visualizer"
case SubmitContextRepository:
urlObj.Path = fmt.Sprintf("repositories-scan/%s", report.reportID)
urlObj.Path = fmt.Sprintf("repository-scanning/%s", report.reportID)
default:
urlObj.Path = "dashboard"
}

View File

@@ -1,4 +1,4 @@
package v2
package reporter
import (
"net/url"
@@ -88,7 +88,7 @@ func TestGetURL(t *testing.T) {
"XXXX",
SubmitContextRepository,
)
assert.Equal(t, "https://cloud.armosec.io/repositories-scan/XXXX?utm_campaign=Submit&utm_medium=CLI&utm_source=GitHub", reporter.GetURL())
assert.Equal(t, "https://cloud.armosec.io/repository-scanning/XXXX?utm_campaign=Submit&utm_medium=CLI&utm_source=GitHub", reporter.GetURL())
}
// Test submit and NOT registered url

View File

@@ -1,4 +1,4 @@
package v2
package reporter
import (
"net/url"

View File

@@ -1,4 +1,4 @@
package v2
package reporter
import (
"net/url"

View File

@@ -1,4 +1,4 @@
package v2
package reporter
import (
"net/url"

View File

@@ -1,4 +1,4 @@
package v2
package reporter
import (
"net/url"

View File

@@ -2,6 +2,7 @@ package resultshandling
import (
"encoding/json"
"fmt"
logger "github.com/kubescape/go-logger"
"github.com/kubescape/go-logger/helpers"
@@ -86,7 +87,7 @@ func NewPrinter(printFormat, formatVersion string, verboseMode bool, viewType ca
case "v2":
return printerv2.NewJsonPrinter()
default:
logger.L().Warning("Deprecated format version", helpers.String("run", "--format-version=v2"))
logger.L().Warning("Deprecated format version", helpers.String("run", "--format-version=v2"), helpers.String("This will not be supported after", "1/Jan/2023"))
return printerv1.NewJsonPrinter()
}
case printer.JunitResultFormat:
@@ -100,6 +101,9 @@ func NewPrinter(printFormat, formatVersion string, verboseMode bool, viewType ca
case printer.SARIFFormat:
return printerv2.NewSARIFPrinter()
default:
if printFormat != printer.PrettyFormat {
logger.L().Error(fmt.Sprintf("Invalid format \"%s\", default format \"pretty-printer\" is applied", printFormat))
}
return printerv2.NewPrettyPrinter(verboseMode, formatVersion, viewType)
}
}

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1 @@
<mxfile host="app.diagrams.net" modified="2022-12-06T13:44:27.567Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36" etag="82Ze98LIXdxZb4qKuLJX" version="20.6.0" type="github"><diagram id="-fs2xgBgB6aFH82wrd4-" name="Page-1">5Vpdc5s4FP01fkzGIAT2o2OndabpxLNuZ9tHGRRQC4gRIrbz61cyAgOisXdqDG0ekkgXIUvnnvvpjMA82n1kKAk+Uw+HI3Ps7UZgMTJNAwJD/JGSfS4BxjQX+Ix4atFRsCavWAnHSpoRD6e1hZzSkJOkLnRpHGOX12SIMbqtL3umYf1TE+RjTbB2UahL/yUeD3LpBI6P8iUmflB8sjFWTyJULFaCNEAe3VZE4H4E5oxSno+i3RyHErwCl/y9D794Wh6M4Zif88Jk+fDt6y57/Ofl5uud/4pm2XR5Y4B8mxcUZurG6rR8X0CwDQjH6wS5cr4Vah6Bu4BHoZgZYqg2wIzj3S+PZpQXFkzBNMKc7cWS4gWgMFIkMaZqvj1CbheMCKpwFy8ipWa/3PuIhBgoMP4HMMXGFRywJ5ihppTxgPo0RuH9UXrHaBZ7WG47FrPjmkdKE4XWD8z5XtEcZZzWsRR4sf039f5h8l1ObmExXeyqDxd7NcvPKg/4tgbEfWjGXPwWIyxlZYj5mJ+kjq5ThkPEyUv9JBfXT3HMCnHnnxeazuoa6Z7JN1aDybbO5EkLkZ2ueGxO3yuP4Z/BY6jxePnly2pwRC5Z2xeRDVsD6lO2wamLEjwybRRJDOJNmpT3r6AncOB1iFBI/FiMXYEHZkIg0SIi7s/Ug4h4Xm4MOCWvaHPYSmKfUBLzw+3g3Qgu5F6C/2luCnLrlDP6E89pSMW+i5jGcpdnEoYN0QW0ZFqw7m4g1LQ0bdFSZ2GzSHbeyieuTWWjSeWxTuVSFtSTua7I7GgwiQT2mfgZEz6HxqkG2gmYUJrk2e8z2UlodX5fAki7AaQ50YB0ruoTJhqMT5sfAoj0UCbIXzjlv43lJQwVwmFBV1xhSIYKmiC1GKrhwGsaaoFKBaaHOMl0Tv21EcY47TzhVSOMXrGuGHVxqvvMv1UnYOrcnraUNnfSnVb0cqx3dwIt57Q7acuOOnQnerb/lPH35E/g1DqDu/ZVuatXFnnaML4RP0+r2RASCOA0/DA4z+K7SyD0FHadbSIyiHQLwobpF6etomW2oGV2hZaeqa6YsEqx2QcigBoAZjbULLOwjL5QK6rbvppXx37V9+qz7ptX4OzmldOu0+s0r4AezjSFpYFs0IiwtQ+J0AsDp2P/Jtfg46YUIPenf9CriJZimzJuqRAGL2MBDRcLWlpeRpuPLRsMlzcAvUqbU0E6GurZ7tXbKo1ugOWcF5G6A0uv1fDOxUl7Q+XqcDWKW8vUW3VtDdXO0h5LL6TyHtRt71hNhsYsvbpBnAunNDikzJ6RKloB7y5iW/a5EXvSZ8S29FrnT47Ypj28kK3XRZ8mayGYrR56dxfNFKd3f2HpddGSytq7+YWeHcp2yIaJkS9HqYviGLPeEQVwaIhONUQfIuRj+ZXISxbG/SNmDCy6Qz3Jnvk+wz7igyjMrQbFOmz9iOnxH9IOzyr/1gfu/wM=</diagram></mxfile>

View File

@@ -0,0 +1 @@
<mxfile host="app.diagrams.net" modified="2022-12-06T07:59:31.961Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36" etag="dboVwkfdjNikGFJag3q6" version="20.6.0" type="device"><diagram id="-fs2xgBgB6aFH82wrd4-" name="Page-1">5Vpdc5s4FP01fkzGIAT2o2OndabpxLNuZ9tHGRRQC4gRIrbz61cyAgOisXdqDG0e4qArkKVzz/00IzCPdh8ZSoLP1MPhyBx7uxFYjEzTcGxL/JOSfS4Blp0LfEY8ddNRsCavWAnHSpoRD6e1GzmlISdJXejSOMYur8kQY3Rbv+2ZhvVvTZCPNcHaRaEu/Zd4PMilEzg+ypeY+EHxzcZYzUSouFkJ0gB5dFsRgfsRmDNKeX4V7eY4lOAVuOTPffjFbLkxhmN+zgOT5cO3r7vs8Z+Xm693/iuaZdPljQHyZV5QmKkTq93yfQHBNiAcrxPkyvFWqHkE7gIehWJkiEu1AGYc7365NaM8sGAKphHmbC9uKR4ACiNFEmOqxtsj5HbBiKAKd/EgUmr2y7WPSIgLBcb/AKZYuIID9gQz1JAyHlCfxii8P0rvGM1iD8tlx2J0vOeR0kSh9QNzvlc0RxmndSwFXmz/TT1/GHyXg1tYDBe76uRir0b5XuUG39aAOA/NmIvfYoQyWI6Yj/lJ6ug6ZThEnLzUd3Jx/RTbrBB3/nmh6ayuke6ZfGM1mGzrTJ60ENnpisfm9L3yGP4ZPIYaj5dfvqwGR+SStX0R2bA1oD5lG5y6KMEj00aRxCDepEl5/gp6AgdehwiFxI/FtSvwwEwIJFpExP2ZmoiI5+XGgFPyijaHpST2CSUxP5wO3o3gQq4l+J/mpiCXTjmjP/GchlSsu4hpLFd5JmHYEF1AS6YF6+4GQk1L0xYtdRY2i2TnrXzi2lQ2mlQe61QuZUE9meuKzI4Gk0hgn4mfMeFzaJxqoJ2ACaVJnv0+k52EVuf3JYC0G0CaEw1I56o+YaLB+LT5IYBID2WC/MAp/20sL2GoEA4LuuIIQzJU0ASpxVANB17TUAtUKjA9xEmmc+qvjTDGaecJrxph9Ip1xaiLU91n/q06AVPn9rSltLmT7rSil2O9uxNoOafdSVt21KE70bP9p4y/J38Cp9YZ3LWvyl29ssjThvGN+HtazYaQQACn4YfBeRbfXQKhp7DrbBORQaRbEDZMv9htFS2zBS2zK7T0THXFhFWKxT4QAdQAMLOhZpmFZfSFWlHd9tW8Ovarvlfnum9egbObV067Tq/TvAJ6ONMUlgayQSPC1j4kQi8MnI79m1yDj5tSgNyf/kGvIlqKZcq4pUIYvIwFNFwsaGl5GW0+tmwwXN4A9CptTgXpaKhnu1dvqzS6AZZzXkTqDiy9VsM7FyftDZWrw9Uobi1Tb9W1NVQ7S3ssvZDKe1C3vWM1GRqz9OoGcS6c0uCQMntGqmgFvLuIXbzWcDpiT/qM2JZe6/zJEdu0hxey9bro02QtBLPVQ+/uopni9O4vLL0uWlJZezd/0LND2Q7ZMHHly6vURXGMWe+IAjg0RKcaog8R8rH8SeQlC+P+ETMGFt2hnmTPfJ9hH/FBFOZWg2Idtn7E8PhC2mGu8lofuP8P</diagram></mxfile>

View File

@@ -64,24 +64,24 @@ But if you wish to exclude all namespaces **OR** any resource with the label `"e
The same applies to the `posturePolicies` list ->
e.g., if you wish to exclude the resources declared in the `resources` list that failed when scanning the `NSA` framework **AND** failed the `Allowed hostPath` control, the `posturePolicies` list should look as follows:
e.g., if you wish to exclude the resources declared in the `resources` list that failed when scanning the `NSA` framework **AND** failed the `HostPath mount` control, the `posturePolicies` list should look as follows:
```
"posturePolicies": [
{
"frameworkName": "NSA",
"controlName": "Allowed hostPath"
"controlName": "HostPath mount"
}
]
```
But if you wish to exclude the resources declared in the `resources` list that failed when scanning the `NSA` framework **OR** failed the `Allowed hostPath` control, the `posturePolicies` list should look as follows:
But if you wish to exclude the resources declared in the `resources` list that failed when scanning the `NSA` framework **OR** failed the `HostPath mount` control, the `posturePolicies` list should look as follows:
```
"posturePolicies": [
{
"frameworkName": "NSA"
},
{
"controlName": "Allowed hostPath"
"controlName": "HostPath mount"
}
]
```
@@ -122,7 +122,7 @@ The resources
]
```
### Exclude deployments in the default namespace that failed the "Allowed hostPath" control
### Exclude deployments in the default namespace that failed the "HostPath mount" control
```
[
{
@@ -142,7 +142,7 @@ The resources
],
"posturePolicies": [
{
"controlName": "Allowed hostPath"
"controlName": "HostPath mount"
}
]
}
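The hunk above elides the `resources` selector of this example. For orientation, a hypothetical complete entry of the same shape is sketched below; everything outside the `posturePolicies` list — the `name`, `policyType`, `actions`, `designatorType`, and attribute values — is an illustrative assumption rather than content of this diff, and the `//` annotations must be stripped for real use since JSON has no comments:
```
[
    {
        "name": "exclude-default-deployments-hostpath",  // illustrative name (assumption)
        "policyType": "postureExceptionPolicy",          // assumed policy type
        "actions": ["alertOnly"],                        // assumed action
        "resources": [
            {
                "designatorType": "Attributes",          // assumed selector type
                "attributes": {
                    "namespace": "default",              // from the heading above
                    "kind": "Deployment"                 // from the heading above
                }
            }
        ],
        "posturePolicies": [
            {
                "controlName": "HostPath mount"          // as shown in the hunk above
            }
        ]
    }
]
```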

View File

@@ -15,7 +15,7 @@
],
"posturePolicies": [
{
"controlName": "Allowed hostPath"
"controlName": "HostPath mount"
}
]
}

View File

@@ -16,7 +16,7 @@
],
"posturePolicies": [
{
"controlName": "Allowed hostPath"
"controlName": "HostPath mount"
}
]
}

View File

@@ -98,56 +98,56 @@ kubescape_object_failed_count{framework="NSA",control="Privileged container",nam
kubescape_object_failed_count{framework="NSA",control="Privileged container",namespace="kubescape",name="armo-kubescape",groupVersionKind="batch/v1/CronJob"} 1
# Failed object from "NSA" control "Privileged container"
kubescape_object_failed_count{framework="NSA",control="Privileged container",namespace="kubescape",name="armo-scan-scheduler",groupVersionKind="batch/v1/CronJob"} 1
# Number of resources found as part of NSA control Allowed hostPath
kubescape_resources_found_count{framework="NSA",control="Allowed hostPath"} 22
# Number of resources excluded as part of NSA control Allowed hostPath
kubescape_resources_excluded_count{framework="NSA",control="Allowed hostPath"} 0
# Number of resources failed as part of NSA control Allowed hostPath
kubescape_resources_failed_count{framework="NSA",control="Allowed hostPath"} 7
# Failed object from "NSA" control "Allowed hostPath"
kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="kube-system",name="kube-proxy",groupVersionKind="apps/v1/DaemonSet"} 1
# Failed object from "NSA" control "Allowed hostPath"
kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="kube-system",name="etcd-david-virtualbox",groupVersionKind="v1/Pod"} 1
# Failed object from "NSA" control "Allowed hostPath"
kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="kube-system",name="kube-controller-manager-david-virtualbox",groupVersionKind="v1/Pod"} 1
# Failed object from "NSA" control "Allowed hostPath"
kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="kube-system",name="storage-provisioner",groupVersionKind="v1/Pod"} 1
# Failed object from "NSA" control "Allowed hostPath"
kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-webhook",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "Allowed hostPath"
kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-websocket",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "Allowed hostPath"
kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="kubescape",name="armo-web-socket",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "Allowed hostPath"
kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="kubescape",name="armo-vuln-scan",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "Allowed hostPath"
kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-oracle",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "Allowed hostPath"
kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-posture",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "Allowed hostPath"
kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-rbac",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "Allowed hostPath"
kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-vuln-scan",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "Allowed hostPath"
kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-audit",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "Allowed hostPath"
kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-dashboard-aggregator",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "Allowed hostPath"
kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-notification-server",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "Allowed hostPath"
kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-ocimage",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "Allowed hostPath"
kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="kube-system",name="coredns",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "Allowed hostPath"
kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="nginx-ingress",name="nginx-ingress",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "Allowed hostPath"
kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="kubescape",name="armo-kubescape",groupVersionKind="batch/v1/CronJob"} 1
# Failed object from "NSA" control "Allowed hostPath"
kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="kubescape",name="armo-scan-scheduler",groupVersionKind="batch/v1/CronJob"} 1
# Failed object from "NSA" control "Allowed hostPath"
kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="kube-system",name="kube-apiserver-david-virtualbox",groupVersionKind="v1/Pod"} 1
# Failed object from "NSA" control "Allowed hostPath"
kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="kube-system",name="kube-scheduler-david-virtualbox",groupVersionKind="v1/Pod"} 1
# Number of resources found as part of NSA control HostPath mount
kubescape_resources_found_count{framework="NSA",control="HostPath mount"} 22
# Number of resources excluded as part of NSA control HostPath mount
kubescape_resources_excluded_count{framework="NSA",control="HostPath mount"} 0
# Number of resources failed as part of NSA control HostPath mount
kubescape_resources_failed_count{framework="NSA",control="HostPath mount"} 7
# Failed object from "NSA" control "HostPath mount"
kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="kube-system",name="kube-proxy",groupVersionKind="apps/v1/DaemonSet"} 1
# Failed object from "NSA" control "HostPath mount"
kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="kube-system",name="etcd-david-virtualbox",groupVersionKind="v1/Pod"} 1
# Failed object from "NSA" control "HostPath mount"
kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="kube-system",name="kube-controller-manager-david-virtualbox",groupVersionKind="v1/Pod"} 1
# Failed object from "NSA" control "HostPath mount"
kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="kube-system",name="storage-provisioner",groupVersionKind="v1/Pod"} 1
# Failed object from "NSA" control "HostPath mount"
kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="cyberarmor-system",name="ca-webhook",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "HostPath mount"
kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="cyberarmor-system",name="ca-websocket",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "HostPath mount"
kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="kubescape",name="armo-web-socket",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "HostPath mount"
kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="kubescape",name="armo-vuln-scan",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "HostPath mount"
kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="cyberarmor-system",name="ca-oracle",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "HostPath mount"
kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="cyberarmor-system",name="ca-posture",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "HostPath mount"
kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="cyberarmor-system",name="ca-rbac",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "HostPath mount"
kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="cyberarmor-system",name="ca-vuln-scan",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "HostPath mount"
kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="cyberarmor-system",name="ca-audit",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "HostPath mount"
kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="cyberarmor-system",name="ca-dashboard-aggregator",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "HostPath mount"
kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="cyberarmor-system",name="ca-notification-server",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "HostPath mount"
kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="cyberarmor-system",name="ca-ocimage",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "HostPath mount"
kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="kube-system",name="coredns",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "HostPath mount"
kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="nginx-ingress",name="nginx-ingress",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "HostPath mount"
kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="kubescape",name="armo-kubescape",groupVersionKind="batch/v1/CronJob"} 1
# Failed object from "NSA" control "HostPath mount"
kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="kubescape",name="armo-scan-scheduler",groupVersionKind="batch/v1/CronJob"} 1
# Failed object from "NSA" control "HostPath mount"
kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="kube-system",name="kube-apiserver-david-virtualbox",groupVersionKind="v1/Pod"} 1
# Failed object from "NSA" control "HostPath mount"
kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="kube-system",name="kube-scheduler-david-virtualbox",groupVersionKind="v1/Pod"} 1
# Number of resources found as part of NSA control Automatic mapping of service account
kubescape_resources_found_count{framework="NSA",control="Automatic mapping of service account"} 47
# Number of resources excluded as part of NSA control Automatic mapping of service account
@@ -2668,56 +2668,56 @@ kubescape_object_failed_count{framework="ArmoBest",control="Privileged container
kubescape_object_failed_count{framework="ArmoBest",control="Privileged container",namespace="kubescape",name="armo-kubescape",groupVersionKind="batch/v1/CronJob"} 1
# Failed object from "ArmoBest" control "Privileged container"
kubescape_object_failed_count{framework="ArmoBest",control="Privileged container",namespace="kubescape",name="armo-scan-scheduler",groupVersionKind="batch/v1/CronJob"} 1
# Number of resources found as part of ArmoBest control Allowed hostPath
kubescape_resources_found_count{framework="ArmoBest",control="Allowed hostPath"} 22
# Number of resources excluded as part of ArmoBest control Allowed hostPath
kubescape_resources_excluded_count{framework="ArmoBest",control="Allowed hostPath"} 0
# Number of resources failed as part of ArmoBest control Allowed hostPath
kubescape_resources_failed_count{framework="ArmoBest",control="Allowed hostPath"} 7
# Failed object from "ArmoBest" control "Allowed hostPath"
kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="kube-system",name="kube-proxy",groupVersionKind="apps/v1/DaemonSet"} 1
# Failed object from "ArmoBest" control "Allowed hostPath"
kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="kube-system",name="kube-controller-manager-david-virtualbox",groupVersionKind="v1/Pod"} 1
# Failed object from "ArmoBest" control "Allowed hostPath"
kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="kube-system",name="storage-provisioner",groupVersionKind="v1/Pod"} 1
# Failed object from "ArmoBest" control "Allowed hostPath"
kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="kube-system",name="etcd-david-virtualbox",groupVersionKind="v1/Pod"} 1
# Failed object from "ArmoBest" control "Allowed hostPath"
kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="kubescape",name="armo-web-socket",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "Allowed hostPath"
kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-webhook",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "Allowed hostPath"
kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-websocket",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "Allowed hostPath"
kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="kube-system",name="coredns",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "Allowed hostPath"
kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="nginx-ingress",name="nginx-ingress",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "Allowed hostPath"
kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="kubescape",name="armo-vuln-scan",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "Allowed hostPath"
kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-oracle",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "Allowed hostPath"
kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-posture",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "Allowed hostPath"
kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-rbac",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "Allowed hostPath"
kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-vuln-scan",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "Allowed hostPath"
kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-audit",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "Allowed hostPath"
kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-dashboard-aggregator",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "Allowed hostPath"
kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-notification-server",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "Allowed hostPath"
kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-ocimage",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "Allowed hostPath"
kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="kubescape",name="armo-kubescape",groupVersionKind="batch/v1/CronJob"} 1
# Failed object from "ArmoBest" control "Allowed hostPath"
kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="kubescape",name="armo-scan-scheduler",groupVersionKind="batch/v1/CronJob"} 1
# Failed object from "ArmoBest" control "Allowed hostPath"
kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="kube-system",name="kube-apiserver-david-virtualbox",groupVersionKind="v1/Pod"} 1
# Failed object from "ArmoBest" control "Allowed hostPath"
kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="kube-system",name="kube-scheduler-david-virtualbox",groupVersionKind="v1/Pod"} 1
# Number of resources found as part of ArmoBest control HostPath mount
kubescape_resources_found_count{framework="ArmoBest",control="HostPath mount"} 22
# Number of resources excluded as part of ArmoBest control HostPath mount
kubescape_resources_excluded_count{framework="ArmoBest",control="HostPath mount"} 0
# Number of resources failed as part of ArmoBest control HostPath mount
kubescape_resources_failed_count{framework="ArmoBest",control="HostPath mount"} 7
# Failed object from "ArmoBest" control "HostPath mount"
kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="kube-system",name="kube-proxy",groupVersionKind="apps/v1/DaemonSet"} 1
# Failed object from "ArmoBest" control "HostPath mount"
kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="kube-system",name="kube-controller-manager-david-virtualbox",groupVersionKind="v1/Pod"} 1
# Failed object from "ArmoBest" control "HostPath mount"
kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="kube-system",name="storage-provisioner",groupVersionKind="v1/Pod"} 1
# Failed object from "ArmoBest" control "HostPath mount"
kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="kube-system",name="etcd-david-virtualbox",groupVersionKind="v1/Pod"} 1
# Failed object from "ArmoBest" control "HostPath mount"
kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="kubescape",name="armo-web-socket",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "HostPath mount"
kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="cyberarmor-system",name="ca-webhook",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "HostPath mount"
kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="cyberarmor-system",name="ca-websocket",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "HostPath mount"
kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="kube-system",name="coredns",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "HostPath mount"
kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="nginx-ingress",name="nginx-ingress",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "HostPath mount"
kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="kubescape",name="armo-vuln-scan",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "HostPath mount"
kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="cyberarmor-system",name="ca-oracle",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "HostPath mount"
kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="cyberarmor-system",name="ca-posture",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "HostPath mount"
kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="cyberarmor-system",name="ca-rbac",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "HostPath mount"
kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="cyberarmor-system",name="ca-vuln-scan",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "HostPath mount"
kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="cyberarmor-system",name="ca-audit",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "HostPath mount"
kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="cyberarmor-system",name="ca-dashboard-aggregator",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "HostPath mount"
kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="cyberarmor-system",name="ca-notification-server",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "HostPath mount"
kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="cyberarmor-system",name="ca-ocimage",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "HostPath mount"
kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="kubescape",name="armo-kubescape",groupVersionKind="batch/v1/CronJob"} 1
# Failed object from "ArmoBest" control "HostPath mount"
kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="kubescape",name="armo-scan-scheduler",groupVersionKind="batch/v1/CronJob"} 1
# Failed object from "ArmoBest" control "HostPath mount"
kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="kube-system",name="kube-apiserver-david-virtualbox",groupVersionKind="v1/Pod"} 1
# Failed object from "ArmoBest" control "HostPath mount"
kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="kube-system",name="kube-scheduler-david-virtualbox",groupVersionKind="v1/Pod"} 1
# Number of resources found as part of ArmoBest control Automatic mapping of service account
kubescape_resources_found_count{framework="ArmoBest",control="Automatic mapping of service account"} 47
# Number of resources excluded as part of ArmoBest control Automatic mapping of service account

View File

@@ -54,26 +54,26 @@ kubescape_resources_excluded_count{framework="NSA",control="Privileged container
kubescape_resources_failed_count{framework="NSA",control="Privileged container"} 1
# Failed object from "NSA" control "Privileged container"
kubescape_object_failed_count{framework="NSA",control="Privileged container",namespace="kube-system",name="kube-proxy",groupVersionKind="apps/v1/DaemonSet"} 1
# Number of resources found as part of NSA control Allowed hostPath
kubescape_resources_found_count{framework="NSA",control="Allowed hostPath"} 22
# Number of resources excluded as part of NSA control Allowed hostPath
kubescape_resources_excluded_count{framework="NSA",control="Allowed hostPath"} 0
# Number of resources failed as part of NSA control Allowed hostPath
kubescape_resources_failed_count{framework="NSA",control="Allowed hostPath"} 7
# Failed object from "NSA" control "Allowed hostPath"
kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="kube-system",name="etcd-david-virtualbox",groupVersionKind="v1/Pod"} 1
# Failed object from "NSA" control "Allowed hostPath"
kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="kube-system",name="kube-controller-manager-david-virtualbox",groupVersionKind="v1/Pod"} 1
# Failed object from "NSA" control "Allowed hostPath"
kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="kube-system",name="storage-provisioner",groupVersionKind="v1/Pod"} 1
# Failed object from "NSA" control "Allowed hostPath"
kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="kubescape",name="armo-web-socket",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "Allowed hostPath"
kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-websocket",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "Allowed hostPath"
kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-webhook",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "Allowed hostPath"
kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="kube-system",name="kube-proxy",groupVersionKind="apps/v1/DaemonSet"} 1
# Number of resources found as part of NSA control HostPath mount
kubescape_resources_found_count{framework="NSA",control="HostPath mount"} 22
# Number of resources excluded as part of NSA control HostPath mount
kubescape_resources_excluded_count{framework="NSA",control="HostPath mount"} 0
# Number of resources failed as part of NSA control HostPath mount
kubescape_resources_failed_count{framework="NSA",control="HostPath mount"} 7
# Failed object from "NSA" control "HostPath mount"
kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="kube-system",name="etcd-david-virtualbox",groupVersionKind="v1/Pod"} 1
# Failed object from "NSA" control "HostPath mount"
kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="kube-system",name="kube-controller-manager-david-virtualbox",groupVersionKind="v1/Pod"} 1
# Failed object from "NSA" control "HostPath mount"
kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="kube-system",name="storage-provisioner",groupVersionKind="v1/Pod"} 1
# Failed object from "NSA" control "HostPath mount"
kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="kubescape",name="armo-web-socket",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "HostPath mount"
kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="cyberarmor-system",name="ca-websocket",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "HostPath mount"
kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="cyberarmor-system",name="ca-webhook",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "HostPath mount"
kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="kube-system",name="kube-proxy",groupVersionKind="apps/v1/DaemonSet"} 1
# Number of resources found as part of NSA control Automatic mapping of service account
kubescape_resources_found_count{framework="NSA",control="Automatic mapping of service account"} 47
# Number of resources excluded as part of NSA control Automatic mapping of service account
@@ -872,26 +872,26 @@ kubescape_resources_excluded_count{framework="ArmoBest",control="Privileged cont
kubescape_resources_failed_count{framework="ArmoBest",control="Privileged container"} 1
# Failed object from "ArmoBest" control "Privileged container"
kubescape_object_failed_count{framework="ArmoBest",control="Privileged container",namespace="kube-system",name="kube-proxy",groupVersionKind="apps/v1/DaemonSet"} 1
# Number of resources found as part of ArmoBest control Allowed hostPath
kubescape_resources_found_count{framework="ArmoBest",control="Allowed hostPath"} 22
# Number of resources excluded as part of ArmoBest control Allowed hostPath
kubescape_resources_excluded_count{framework="ArmoBest",control="Allowed hostPath"} 0
# Number of resources failed as part of ArmoBest control Allowed hostPath
kubescape_resources_failed_count{framework="ArmoBest",control="Allowed hostPath"} 7
# Failed object from "ArmoBest" control "Allowed hostPath"
kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="kube-system",name="kube-proxy",groupVersionKind="apps/v1/DaemonSet"} 1
# Failed object from "ArmoBest" control "Allowed hostPath"
kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="kube-system",name="etcd-david-virtualbox",groupVersionKind="v1/Pod"} 1
# Failed object from "ArmoBest" control "Allowed hostPath"
kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="kube-system",name="kube-controller-manager-david-virtualbox",groupVersionKind="v1/Pod"} 1
# Failed object from "ArmoBest" control "Allowed hostPath"
kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="kube-system",name="storage-provisioner",groupVersionKind="v1/Pod"} 1
# Failed object from "ArmoBest" control "Allowed hostPath"
kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="kubescape",name="armo-web-socket",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "Allowed hostPath"
kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-webhook",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "Allowed hostPath"
kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-websocket",groupVersionKind="apps/v1/Deployment"} 1
# Number of resources found as part of ArmoBest control HostPath mount
kubescape_resources_found_count{framework="ArmoBest",control="HostPath mount"} 22
# Number of resources excluded as part of ArmoBest control HostPath mount
kubescape_resources_excluded_count{framework="ArmoBest",control="HostPath mount"} 0
# Number of resources failed as part of ArmoBest control HostPath mount
kubescape_resources_failed_count{framework="ArmoBest",control="HostPath mount"} 7
# Failed object from "ArmoBest" control "HostPath mount"
kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="kube-system",name="kube-proxy",groupVersionKind="apps/v1/DaemonSet"} 1
# Failed object from "ArmoBest" control "HostPath mount"
kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="kube-system",name="etcd-david-virtualbox",groupVersionKind="v1/Pod"} 1
# Failed object from "ArmoBest" control "HostPath mount"
kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="kube-system",name="kube-controller-manager-david-virtualbox",groupVersionKind="v1/Pod"} 1
# Failed object from "ArmoBest" control "HostPath mount"
kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="kube-system",name="storage-provisioner",groupVersionKind="v1/Pod"} 1
# Failed object from "ArmoBest" control "HostPath mount"
kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="kubescape",name="armo-web-socket",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "HostPath mount"
kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="cyberarmor-system",name="ca-webhook",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "HostPath mount"
kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="cyberarmor-system",name="ca-websocket",groupVersionKind="apps/v1/Deployment"} 1
# Number of resources found as part of ArmoBest control Automatic mapping of service account
kubescape_resources_found_count{framework="ArmoBest",control="Automatic mapping of service account"} 47
# Number of resources excluded as part of ArmoBest control Automatic mapping of service account

go.mod (30 changed lines)
View File

@@ -5,7 +5,6 @@ go 1.18
require (
cloud.google.com/go/containeranalysis v0.4.0
github.com/armosec/armoapi-go v0.0.119
github.com/armosec/go-git-url v0.0.15
github.com/armosec/utils-go v0.0.12
github.com/armosec/utils-k8s-go v0.0.12
github.com/briandowns/spinner v1.18.1
@@ -15,10 +14,11 @@ require (
github.com/go-git/go-git/v5 v5.4.2
github.com/google/uuid v1.3.0
github.com/johnfercher/maroto v0.37.0
github.com/kubescape/go-git-url v0.0.17
github.com/kubescape/go-logger v0.0.6
github.com/kubescape/k8s-interface v0.0.84
github.com/kubescape/opa-utils v0.0.200
github.com/kubescape/rbac-utils v0.0.17
github.com/kubescape/k8s-interface v0.0.89
github.com/kubescape/opa-utils v0.0.204
github.com/kubescape/rbac-utils v0.0.19
github.com/libgit2/git2go/v33 v33.0.9
github.com/mattn/go-isatty v0.0.14
github.com/mikefarah/yq/v4 v4.29.1
@@ -35,10 +35,10 @@ require (
gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473
gopkg.in/yaml.v3 v3.0.1
helm.sh/helm/v3 v3.9.0
k8s.io/api v0.24.3
k8s.io/apimachinery v0.24.3
k8s.io/client-go v0.24.3
k8s.io/utils v0.0.0-20220706174534-f6158b442e7c
k8s.io/api v0.25.3
k8s.io/apimachinery v0.25.3
k8s.io/client-go v0.25.3
k8s.io/utils v0.0.0-20221128185143-99ec85e7a448
sigs.k8s.io/kustomize/api v0.11.4
sigs.k8s.io/kustomize/kyaml v0.13.6
sigs.k8s.io/yaml v1.3.0
@@ -52,8 +52,8 @@ require (
cloud.google.com/go/grafeas v0.2.0 // indirect
github.com/Azure/azure-sdk-for-go v66.0.0+incompatible // indirect
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
github.com/Azure/go-autorest/autorest v0.11.24 // indirect
github.com/Azure/go-autorest/autorest/adal v0.9.18 // indirect
github.com/Azure/go-autorest/autorest v0.11.27 // indirect
github.com/Azure/go-autorest/autorest/adal v0.9.20 // indirect
github.com/Azure/go-autorest/autorest/azure/auth v0.5.11 // indirect
github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 // indirect
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
@@ -96,7 +96,7 @@ require (
github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-units v0.4.0 // indirect
github.com/elliotchance/orderedmap v1.5.0 // indirect
github.com/emicklei/go-restful v2.9.5+incompatible // indirect
github.com/emicklei/go-restful/v3 v3.8.0 // indirect
github.com/emirpasic/gods v1.12.0 // indirect
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
github.com/ghodss/yaml v1.0.0 // indirect
@@ -181,11 +181,11 @@ require (
gopkg.in/warnings.v0 v0.1.2 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
k8s.io/apiextensions-apiserver v0.24.2 // indirect
k8s.io/klog/v2 v2.60.1 // indirect
k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 // indirect
k8s.io/klog/v2 v2.80.1 // indirect
k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 // indirect
sigs.k8s.io/controller-runtime v0.12.3 // indirect
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
)
replace github.com/libgit2/git2go/v33 => ./git2go

go.sum (59 changed lines)
View File

@@ -78,19 +78,22 @@ github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg6
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
github.com/Azure/go-autorest/autorest v0.11.24 h1:1fIGgHKqVm54KIPT+q8Zmd1QlVsmHqeUGso5qm2BqqE=
github.com/Azure/go-autorest/autorest v0.11.24/go.mod h1:G6kyRlFnTuSbEYkQGawPfsCswgme4iYf6rfSKUDzbCc=
github.com/Azure/go-autorest/autorest v0.11.27 h1:F3R3q42aWytozkV8ihzcgMO4OA4cuqr3bNlsEuF6//A=
github.com/Azure/go-autorest/autorest v0.11.27/go.mod h1:7l8ybrIdUmGqZMTD0sRtAr8NvbHjfofbf8RSP2q7w7U=
github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
github.com/Azure/go-autorest/autorest/adal v0.9.18 h1:kLnPsRjzZZUF3K5REu/Kc+qMQrvuza2bwSnNdhmzLfQ=
github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
github.com/Azure/go-autorest/autorest/adal v0.9.20 h1:gJ3E98kMpFB1MFqQCvA1yFab8vthOeD4VlFRQULxahg=
github.com/Azure/go-autorest/autorest/adal v0.9.20/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
github.com/Azure/go-autorest/autorest/azure/auth v0.5.11 h1:P6bYXFoao05z5uhOQzbC3Qd8JqF3jUoocoTeIxkp2cA=
github.com/Azure/go-autorest/autorest/azure/auth v0.5.11/go.mod h1:84w/uV8E37feW2NCJ08uT9VBfjfUHpgLVnG2InYD6cg=
github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 h1:0W/yGmFdTIT77fvdlGZ0LMISoLHFJ7Tx4U0yeB+uFs4=
github.com/Azure/go-autorest/autorest/azure/cli v0.4.5/go.mod h1:ADQAXrkgm7acgWVUNamOgh8YNrv4p27l3Wc55oVfpzg=
github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk=
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/autorest/mocks v0.4.2 h1:PGN4EDXnuQbojHbU0UWoNvmu9AGVwYHG9/fkDYhtAfw=
github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU=
github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk=
github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE=
github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac=
@@ -154,8 +157,6 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPd
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/armosec/armoapi-go v0.0.119 h1:7XbvBbOKp26Bpp72LQ8Spw4FBpbXu3+qZFQyPEwTPFk=
github.com/armosec/armoapi-go v0.0.119/go.mod h1:2zoNzb3Fy9ZByeczJZ47ftDRLRzTykVdTISS3GTc/JU=
github.com/armosec/go-git-url v0.0.15 h1:sDtu0WNvAhrDJ2begTyWP8T4tE1j1K6D0ZJ6t3Cx8k4=
github.com/armosec/go-git-url v0.0.15/go.mod h1:GzfssG3IW9KiURSpK7c/bySBRTlghpObQ7NQ1O4hcMI=
github.com/armosec/utils-go v0.0.12 h1:NXkG/BhbSVAmTVXr0qqsK02CmxEiXuJyPmdTRcZ4jAo=
github.com/armosec/utils-go v0.0.12/go.mod h1:F/K1mI/qcj7fNuJl7xktoCeHM83azOF0Zq6eC2WuPyU=
github.com/armosec/utils-k8s-go v0.0.12 h1:u7kHSUp4PpvPP3hEaRXMbM0Vw23IyLhAzzE+2TW6Jkk=
@@ -271,8 +272,9 @@ github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkg
github.com/elliotchance/orderedmap v1.5.0 h1:1IsExUsjv5XNBD3ZdC7jkAAqLWOOKdbPTmkHx63OsBg=
github.com/elliotchance/orderedmap v1.5.0/go.mod h1:wsDwEaX5jEoyhbs7x93zk2H/qv0zwuhg4inXhDkYqys=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk=
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful/v3 v3.8.0 h1:eCZ8ulSerjdAiaNpF7GxXIE7ZCMo1moN1qX+S609eVw=
github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg=
github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o=
github.com/enescakir/emoji v1.0.0 h1:W+HsNql8swfCQFtioDGDHCHri8nudlK1n5p2rHCJoog=
@@ -581,14 +583,16 @@ github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kubescape/go-git-url v0.0.17 h1:yBPmQzxIVa3vbFVjwTlrnxjGf1kTAWDeMh+kvMd/RZA=
github.com/kubescape/go-git-url v0.0.17/go.mod h1:a1rDC6M1VBMwTaDfrSwbVq84Zu71U+1qKqQmI1cA0lE=
github.com/kubescape/go-logger v0.0.6 h1:ynhAmwrz0O7Jtqq1CdmCZUrKveji25hVP+B/FAb3QrA=
github.com/kubescape/go-logger v0.0.6/go.mod h1:DnVWEvC90LFY1nNMaNo6nBVOcqkLMK3S0qzXP1fzRvI=
github.com/kubescape/k8s-interface v0.0.84 h1:k7YzpQ3SaN+bJCtpXzMj60WWIK9RkQQrU8dFQutr3LA=
github.com/kubescape/k8s-interface v0.0.84/go.mod h1:ihX96yqar+xogHl45mFE8zT9DLI06iy7XQPAP+j5KJE=
github.com/kubescape/opa-utils v0.0.200 h1:7EhE9FTabzkUxicvxdchXuaTWW0J2mFj04vK4jTrxN0=
github.com/kubescape/opa-utils v0.0.200/go.mod h1:rDC3PANuk8gU5lSDO/WPFTluypBQ+/6qiuZLye+slYg=
github.com/kubescape/rbac-utils v0.0.17 h1:B78kjlTKqjYK/PXwmi4GPysHsFxIwVz1KFb4+IGT29w=
github.com/kubescape/rbac-utils v0.0.17/go.mod h1:pBwjpcrVeuH/no+DiCZWvlhYtCDzd3U0o/hEZKi+eM8=
github.com/kubescape/k8s-interface v0.0.89 h1:OtlvZosHpjlbHfsilfQk2wRbuBnxwF0e+WZX6GbkfLU=
github.com/kubescape/k8s-interface v0.0.89/go.mod h1:pgFRs20mHiavf6+fFWY7h/f8HuKlwuZwirvjxiKJlu0=
github.com/kubescape/opa-utils v0.0.204 h1:9O9drjyzjOhI7Xi2S4Px0WKa66U5GFPQqeOLvhDqHnw=
github.com/kubescape/opa-utils v0.0.204/go.mod h1:rDC3PANuk8gU5lSDO/WPFTluypBQ+/6qiuZLye+slYg=
github.com/kubescape/rbac-utils v0.0.19 h1:7iydgVxlMLW15MgHORfMBMqNj9jHtFGACd744fdtrFs=
github.com/kubescape/rbac-utils v0.0.19/go.mod h1:t57AhSrjuNGQ+mpZWQM/hBzrCOeKBDHegFoVo4tbikQ=
github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
@@ -672,10 +676,11 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo/v2 v2.1.6 h1:Fx2POJZfKRQcM1pH49qSZiYeu319wji004qX+GDovrU=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE=
github.com/onsi/gomega v1.20.1 h1:PA/3qinGoukvymdIDV8pii6tiZgC8kbmJO6Z5+b002Q=
github.com/open-policy-agent/opa v0.45.0 h1:P5nuhVRtR+e58fk3CMMbiqr6ZFyWQPNOC3otsorGsFs=
github.com/open-policy-agent/opa v0.45.0/go.mod h1:/OnsYljNEWJ6DXeFOOnoGn8CvwZGMUS4iRqzYdJvmBI=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
@@ -1528,17 +1533,17 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
k8s.io/api v0.24.2/go.mod h1:AHqbSkTm6YrQ0ObxjO3Pmp/ubFF/KuM7jU+3khoBsOg=
k8s.io/api v0.24.3 h1:tt55QEmKd6L2k5DP6G/ZzdMQKvG5ro4H4teClqm0sTY=
k8s.io/api v0.24.3/go.mod h1:elGR/XSZrS7z7cSZPzVWaycpJuGIw57j9b95/1PdJNI=
k8s.io/api v0.25.3 h1:Q1v5UFfYe87vi5H7NU0p4RXC26PPMT8KOpr1TLQbCMQ=
k8s.io/api v0.25.3/go.mod h1:o42gKscFrEVjHdQnyRenACrMtbuJsVdP+WVjqejfzmI=
k8s.io/apiextensions-apiserver v0.24.2 h1:/4NEQHKlEz1MlaK/wHT5KMKC9UKYz6NZz6JE6ov4G6k=
k8s.io/apiextensions-apiserver v0.24.2/go.mod h1:e5t2GMFVngUEHUd0wuCJzw8YDwZoqZfJiGOW6mm2hLQ=
k8s.io/apimachinery v0.24.2/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM=
k8s.io/apimachinery v0.24.3 h1:hrFiNSA2cBZqllakVYyH/VyEh4B581bQRmqATJSeQTg=
k8s.io/apimachinery v0.24.3/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM=
k8s.io/apimachinery v0.25.3 h1:7o9ium4uyUOM76t6aunP0nZuex7gDf8VGwkR5RcJnQc=
k8s.io/apimachinery v0.25.3/go.mod h1:jaF9C/iPNM1FuLl7Zuy5b9v+n35HGSh6AQ4HYRkCqwo=
k8s.io/apiserver v0.24.2/go.mod h1:pSuKzr3zV+L+MWqsEo0kHHYwCo77AT5qXbFXP2jbvFI=
k8s.io/client-go v0.24.2/go.mod h1:zg4Xaoo+umDsfCWr4fCnmLEtQXyCNXCvJuSsglNcV30=
k8s.io/client-go v0.24.3 h1:Nl1840+6p4JqkFWEW2LnMKU667BUxw03REfLAVhuKQY=
k8s.io/client-go v0.24.3/go.mod h1:AAovolf5Z9bY1wIg2FZ8LPQlEdKHjLI7ZD4rw920BJw=
k8s.io/client-go v0.25.3 h1:oB4Dyl8d6UbfDHD8Bv8evKylzs3BXzzufLiO27xuPs0=
k8s.io/client-go v0.25.3/go.mod h1:t39LPczAIMwycjcXkVc+CB+PZV69jQuNx4um5ORDjQA=
k8s.io/code-generator v0.24.2/go.mod h1:dpVhs00hTuTdTY6jvVxvTFCk6gSMrtfRydbhZwHI15w=
k8s.io/component-base v0.24.2/go.mod h1:ucHwW76dajvQ9B7+zecZAP3BVqvrHoOxm8olHEg0nmM=
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
@@ -1546,15 +1551,17 @@ k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAE
k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.60.1 h1:VW25q3bZx9uE3vvdL6M8ezOX79vA2Aq1nEWLqNQclHc=
k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4=
k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 h1:Gii5eqf+GmIEwGNKQYQClCayuJCe2/4fZUvF7VG99sU=
k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42/go.mod h1:Z/45zLw8lUo4wdiUkI+v/ImEGAvu3WatcZl3lPMR4Rk=
k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 h1:MQ8BAZPZlWk3S9K4a9NCkIFQtZShWqoha7snGixVgEA=
k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1/go.mod h1:C/N6wCaBHeBHkHUesQOQy2/MZqGgMAFPqGsGQLdbZBU=
k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20220706174534-f6158b442e7c h1:hFZO68mv/0xe8+V0gRT9BAq3/31cKjjeVv4nScriuBk=
k8s.io/utils v0.0.0-20220706174534-f6158b442e7c/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 h1:KTgPnR10d5zhztWptI952TNtt/4u5h3IzDXkdIMuo2Y=
k8s.io/utils v0.0.0-20221128185143-99ec85e7a448/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
@@ -1562,15 +1569,17 @@ rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30/go.mod h1:fEO7lRTdivWO2qYVCVG7dEADOMo/MLDCVr8So2g88Uw=
sigs.k8s.io/controller-runtime v0.12.3 h1:FCM8xeY/FI8hoAfh/V4XbbYMY20gElh9yh+A98usMio=
sigs.k8s.io/controller-runtime v0.12.3/go.mod h1:qKsk4WE6zW2Hfj0G4v10EnNB2jMG1C+NTb8h+DwCoU0=
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 h1:kDi4JBNAsJWfz1aEXhO8Jg87JJaPNLh5tIzYHgStQ9Y=
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY=
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k=
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/kustomize/api v0.11.4 h1:/0Mr3kfBBNcNPOW5Qwk/3eb8zkswCwnqQxxKtmrTkRo=
sigs.k8s.io/kustomize/api v0.11.4/go.mod h1:k+8RsqYbgpkIrJ4p9jcdPqe8DprLxFUUO0yNOq8C+xI=
sigs.k8s.io/kustomize/kyaml v0.13.6 h1:eF+wsn4J7GOAXlvajv6OknSunxpcOBQQqsnPxObtkGs=
sigs.k8s.io/kustomize/kyaml v0.13.6/go.mod h1:yHP031rn1QX1lr/Xd934Ri/xdVNG8BE2ECa78Ht/kEg=
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y=
sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE=
sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=

View File

@@ -56,7 +56,7 @@ def main():
if client_name:
ldflags += " -X {}={}".format(client_var, client_name)
build_command = ["go", "build", "-tags=static", "-o", ks_file, "-ldflags" ,ldflags]
build_command = ["go", "build", "-buildmode=pie", "-tags=static", "-o", ks_file, "-ldflags" ,ldflags]
print("Building kubescape and saving here: {}".format(ks_file))
print("Build command: {}".format(" ".join(build_command)))

View File

@@ -75,7 +75,6 @@ definitions:
description: |-
Submit results to Kubescape Cloud.
Same as `kubescape scan --submit`.
type: boolean
x-go-name: Submit
targetNames:
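For context, this hunk describes `submit` as a boolean field of the scan request body. A hypothetical body sketch showing where it sits relative to `targetNames` follows; the sibling field names and all values are assumptions, not taken from this diff, and the `//` annotations must be stripped since JSON has no comments:
```
{
    "targetType": "framework",   // assumed sibling field
    "targetNames": ["nsa"],      // property declared right after `submit` in this definition
    "submit": true               // same effect as `kubescape scan --submit`
}
```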

View File

@@ -12,9 +12,9 @@ require (
github.com/gorilla/schema v1.2.0
github.com/kubescape/go-logger v0.0.6
github.com/kubescape/kubescape/v2 v2.0.0-00010101000000-000000000000
github.com/kubescape/opa-utils v0.0.200
github.com/kubescape/opa-utils v0.0.204
github.com/stretchr/testify v1.8.0
k8s.io/utils v0.0.0-20220706174534-f6158b442e7c
k8s.io/utils v0.0.0-20221128185143-99ec85e7a448
)
require (
@@ -24,8 +24,8 @@ require (
cloud.google.com/go/grafeas v0.2.0 // indirect
github.com/Azure/azure-sdk-for-go v66.0.0+incompatible // indirect
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
github.com/Azure/go-autorest/autorest v0.11.24 // indirect
github.com/Azure/go-autorest/autorest/adal v0.9.18 // indirect
github.com/Azure/go-autorest/autorest v0.11.27 // indirect
github.com/Azure/go-autorest/autorest/adal v0.9.20 // indirect
github.com/Azure/go-autorest/autorest/azure/auth v0.5.11 // indirect
github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 // indirect
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
@@ -47,7 +47,6 @@ require (
github.com/agnivade/levenshtein v1.1.1 // indirect
github.com/alecthomas/participle/v2 v2.0.0-beta.5 // indirect
github.com/armosec/armoapi-go v0.0.119 // indirect
github.com/armosec/go-git-url v0.0.15 // indirect
github.com/armosec/utils-k8s-go v0.0.12 // indirect
github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d // indirect
github.com/aws/aws-sdk-go v1.44.51 // indirect
@@ -73,7 +72,7 @@ require (
github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-units v0.4.0 // indirect
github.com/elliotchance/orderedmap v1.5.0 // indirect
github.com/emicklei/go-restful v2.9.5+incompatible // indirect
github.com/emicklei/go-restful/v3 v3.8.0 // indirect
github.com/emirpasic/gods v1.12.0 // indirect
github.com/enescakir/emoji v1.0.0 // indirect
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
@@ -119,8 +118,9 @@ require (
github.com/json-iterator/go v1.1.12 // indirect
github.com/jung-kurt/gofpdf v1.16.2 // indirect
github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351 // indirect
github.com/kubescape/k8s-interface v0.0.84 // indirect
github.com/kubescape/rbac-utils v0.0.17 // indirect
github.com/kubescape/go-git-url v0.0.17 // indirect
github.com/kubescape/k8s-interface v0.0.89 // indirect
github.com/kubescape/rbac-utils v0.0.19 // indirect
github.com/libgit2/git2go/v33 v33.0.9 // indirect
github.com/magiconair/properties v1.8.6 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
@@ -187,17 +187,17 @@ require (
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
helm.sh/helm/v3 v3.9.0 // indirect
k8s.io/api v0.24.3 // indirect
k8s.io/api v0.25.3 // indirect
k8s.io/apiextensions-apiserver v0.24.2 // indirect
k8s.io/apimachinery v0.24.3 // indirect
k8s.io/client-go v0.24.3 // indirect
k8s.io/klog/v2 v2.60.1 // indirect
k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 // indirect
k8s.io/apimachinery v0.25.3 // indirect
k8s.io/client-go v0.25.3 // indirect
k8s.io/klog/v2 v2.80.1 // indirect
k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 // indirect
sigs.k8s.io/controller-runtime v0.12.3 // indirect
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
sigs.k8s.io/kustomize/api v0.11.4 // indirect
sigs.k8s.io/kustomize/kyaml v0.13.6 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
sigs.k8s.io/yaml v1.3.0 // indirect
)

View File

@@ -78,19 +78,22 @@ github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg6
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
github.com/Azure/go-autorest/autorest v0.11.24 h1:1fIGgHKqVm54KIPT+q8Zmd1QlVsmHqeUGso5qm2BqqE=
github.com/Azure/go-autorest/autorest v0.11.24/go.mod h1:G6kyRlFnTuSbEYkQGawPfsCswgme4iYf6rfSKUDzbCc=
github.com/Azure/go-autorest/autorest v0.11.27 h1:F3R3q42aWytozkV8ihzcgMO4OA4cuqr3bNlsEuF6//A=
github.com/Azure/go-autorest/autorest v0.11.27/go.mod h1:7l8ybrIdUmGqZMTD0sRtAr8NvbHjfofbf8RSP2q7w7U=
github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
github.com/Azure/go-autorest/autorest/adal v0.9.18 h1:kLnPsRjzZZUF3K5REu/Kc+qMQrvuza2bwSnNdhmzLfQ=
github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
github.com/Azure/go-autorest/autorest/adal v0.9.20 h1:gJ3E98kMpFB1MFqQCvA1yFab8vthOeD4VlFRQULxahg=
github.com/Azure/go-autorest/autorest/adal v0.9.20/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
github.com/Azure/go-autorest/autorest/azure/auth v0.5.11 h1:P6bYXFoao05z5uhOQzbC3Qd8JqF3jUoocoTeIxkp2cA=
github.com/Azure/go-autorest/autorest/azure/auth v0.5.11/go.mod h1:84w/uV8E37feW2NCJ08uT9VBfjfUHpgLVnG2InYD6cg=
github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 h1:0W/yGmFdTIT77fvdlGZ0LMISoLHFJ7Tx4U0yeB+uFs4=
github.com/Azure/go-autorest/autorest/azure/cli v0.4.5/go.mod h1:ADQAXrkgm7acgWVUNamOgh8YNrv4p27l3Wc55oVfpzg=
github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk=
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/autorest/mocks v0.4.2 h1:PGN4EDXnuQbojHbU0UWoNvmu9AGVwYHG9/fkDYhtAfw=
github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU=
github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk=
github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE=
github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac=
@@ -154,8 +157,6 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPd
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/armosec/armoapi-go v0.0.119 h1:7XbvBbOKp26Bpp72LQ8Spw4FBpbXu3+qZFQyPEwTPFk=
github.com/armosec/armoapi-go v0.0.119/go.mod h1:2zoNzb3Fy9ZByeczJZ47ftDRLRzTykVdTISS3GTc/JU=
github.com/armosec/go-git-url v0.0.15 h1:sDtu0WNvAhrDJ2begTyWP8T4tE1j1K6D0ZJ6t3Cx8k4=
github.com/armosec/go-git-url v0.0.15/go.mod h1:GzfssG3IW9KiURSpK7c/bySBRTlghpObQ7NQ1O4hcMI=
github.com/armosec/utils-go v0.0.12 h1:NXkG/BhbSVAmTVXr0qqsK02CmxEiXuJyPmdTRcZ4jAo=
github.com/armosec/utils-go v0.0.12/go.mod h1:F/K1mI/qcj7fNuJl7xktoCeHM83azOF0Zq6eC2WuPyU=
github.com/armosec/utils-k8s-go v0.0.12 h1:u7kHSUp4PpvPP3hEaRXMbM0Vw23IyLhAzzE+2TW6Jkk=
@@ -273,8 +274,9 @@ github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkg
github.com/elliotchance/orderedmap v1.5.0 h1:1IsExUsjv5XNBD3ZdC7jkAAqLWOOKdbPTmkHx63OsBg=
github.com/elliotchance/orderedmap v1.5.0/go.mod h1:wsDwEaX5jEoyhbs7x93zk2H/qv0zwuhg4inXhDkYqys=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk=
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful/v3 v3.8.0 h1:eCZ8ulSerjdAiaNpF7GxXIE7ZCMo1moN1qX+S609eVw=
github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg=
github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o=
github.com/enescakir/emoji v1.0.0 h1:W+HsNql8swfCQFtioDGDHCHri8nudlK1n5p2rHCJoog=
@@ -637,14 +639,16 @@ github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kubescape/go-git-url v0.0.17 h1:yBPmQzxIVa3vbFVjwTlrnxjGf1kTAWDeMh+kvMd/RZA=
github.com/kubescape/go-git-url v0.0.17/go.mod h1:a1rDC6M1VBMwTaDfrSwbVq84Zu71U+1qKqQmI1cA0lE=
github.com/kubescape/go-logger v0.0.6 h1:ynhAmwrz0O7Jtqq1CdmCZUrKveji25hVP+B/FAb3QrA=
github.com/kubescape/go-logger v0.0.6/go.mod h1:DnVWEvC90LFY1nNMaNo6nBVOcqkLMK3S0qzXP1fzRvI=
github.com/kubescape/k8s-interface v0.0.84 h1:k7YzpQ3SaN+bJCtpXzMj60WWIK9RkQQrU8dFQutr3LA=
github.com/kubescape/k8s-interface v0.0.84/go.mod h1:ihX96yqar+xogHl45mFE8zT9DLI06iy7XQPAP+j5KJE=
github.com/kubescape/opa-utils v0.0.200 h1:7EhE9FTabzkUxicvxdchXuaTWW0J2mFj04vK4jTrxN0=
github.com/kubescape/opa-utils v0.0.200/go.mod h1:rDC3PANuk8gU5lSDO/WPFTluypBQ+/6qiuZLye+slYg=
github.com/kubescape/rbac-utils v0.0.17 h1:B78kjlTKqjYK/PXwmi4GPysHsFxIwVz1KFb4+IGT29w=
github.com/kubescape/rbac-utils v0.0.17/go.mod h1:pBwjpcrVeuH/no+DiCZWvlhYtCDzd3U0o/hEZKi+eM8=
github.com/kubescape/k8s-interface v0.0.89 h1:OtlvZosHpjlbHfsilfQk2wRbuBnxwF0e+WZX6GbkfLU=
github.com/kubescape/k8s-interface v0.0.89/go.mod h1:pgFRs20mHiavf6+fFWY7h/f8HuKlwuZwirvjxiKJlu0=
github.com/kubescape/opa-utils v0.0.204 h1:9O9drjyzjOhI7Xi2S4Px0WKa66U5GFPQqeOLvhDqHnw=
github.com/kubescape/opa-utils v0.0.204/go.mod h1:rDC3PANuk8gU5lSDO/WPFTluypBQ+/6qiuZLye+slYg=
github.com/kubescape/rbac-utils v0.0.19 h1:7iydgVxlMLW15MgHORfMBMqNj9jHtFGACd744fdtrFs=
github.com/kubescape/rbac-utils v0.0.19/go.mod h1:t57AhSrjuNGQ+mpZWQM/hBzrCOeKBDHegFoVo4tbikQ=
github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
@@ -735,10 +739,11 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo/v2 v2.1.6 h1:Fx2POJZfKRQcM1pH49qSZiYeu319wji004qX+GDovrU=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE=
github.com/onsi/gomega v1.20.1 h1:PA/3qinGoukvymdIDV8pii6tiZgC8kbmJO6Z5+b002Q=
github.com/open-policy-agent/opa v0.45.0 h1:P5nuhVRtR+e58fk3CMMbiqr6ZFyWQPNOC3otsorGsFs=
github.com/open-policy-agent/opa v0.45.0/go.mod h1:/OnsYljNEWJ6DXeFOOnoGn8CvwZGMUS4iRqzYdJvmBI=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
@@ -1619,17 +1624,17 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
k8s.io/api v0.24.2/go.mod h1:AHqbSkTm6YrQ0ObxjO3Pmp/ubFF/KuM7jU+3khoBsOg=
k8s.io/api v0.24.3 h1:tt55QEmKd6L2k5DP6G/ZzdMQKvG5ro4H4teClqm0sTY=
k8s.io/api v0.24.3/go.mod h1:elGR/XSZrS7z7cSZPzVWaycpJuGIw57j9b95/1PdJNI=
k8s.io/api v0.25.3 h1:Q1v5UFfYe87vi5H7NU0p4RXC26PPMT8KOpr1TLQbCMQ=
k8s.io/api v0.25.3/go.mod h1:o42gKscFrEVjHdQnyRenACrMtbuJsVdP+WVjqejfzmI=
k8s.io/apiextensions-apiserver v0.24.2 h1:/4NEQHKlEz1MlaK/wHT5KMKC9UKYz6NZz6JE6ov4G6k=
k8s.io/apiextensions-apiserver v0.24.2/go.mod h1:e5t2GMFVngUEHUd0wuCJzw8YDwZoqZfJiGOW6mm2hLQ=
k8s.io/apimachinery v0.24.2/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM=
k8s.io/apimachinery v0.24.3 h1:hrFiNSA2cBZqllakVYyH/VyEh4B581bQRmqATJSeQTg=
k8s.io/apimachinery v0.24.3/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM=
k8s.io/apimachinery v0.25.3 h1:7o9ium4uyUOM76t6aunP0nZuex7gDf8VGwkR5RcJnQc=
k8s.io/apimachinery v0.25.3/go.mod h1:jaF9C/iPNM1FuLl7Zuy5b9v+n35HGSh6AQ4HYRkCqwo=
k8s.io/apiserver v0.24.2/go.mod h1:pSuKzr3zV+L+MWqsEo0kHHYwCo77AT5qXbFXP2jbvFI=
k8s.io/client-go v0.24.2/go.mod h1:zg4Xaoo+umDsfCWr4fCnmLEtQXyCNXCvJuSsglNcV30=
k8s.io/client-go v0.24.3 h1:Nl1840+6p4JqkFWEW2LnMKU667BUxw03REfLAVhuKQY=
k8s.io/client-go v0.24.3/go.mod h1:AAovolf5Z9bY1wIg2FZ8LPQlEdKHjLI7ZD4rw920BJw=
k8s.io/client-go v0.25.3 h1:oB4Dyl8d6UbfDHD8Bv8evKylzs3BXzzufLiO27xuPs0=
k8s.io/client-go v0.25.3/go.mod h1:t39LPczAIMwycjcXkVc+CB+PZV69jQuNx4um5ORDjQA=
k8s.io/code-generator v0.24.2/go.mod h1:dpVhs00hTuTdTY6jvVxvTFCk6gSMrtfRydbhZwHI15w=
k8s.io/component-base v0.24.2/go.mod h1:ucHwW76dajvQ9B7+zecZAP3BVqvrHoOxm8olHEg0nmM=
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
@@ -1637,15 +1642,17 @@ k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAE
k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.60.1 h1:VW25q3bZx9uE3vvdL6M8ezOX79vA2Aq1nEWLqNQclHc=
k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4=
k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 h1:Gii5eqf+GmIEwGNKQYQClCayuJCe2/4fZUvF7VG99sU=
k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42/go.mod h1:Z/45zLw8lUo4wdiUkI+v/ImEGAvu3WatcZl3lPMR4Rk=
k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 h1:MQ8BAZPZlWk3S9K4a9NCkIFQtZShWqoha7snGixVgEA=
k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1/go.mod h1:C/N6wCaBHeBHkHUesQOQy2/MZqGgMAFPqGsGQLdbZBU=
k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20220706174534-f6158b442e7c h1:hFZO68mv/0xe8+V0gRT9BAq3/31cKjjeVv4nScriuBk=
k8s.io/utils v0.0.0-20220706174534-f6158b442e7c/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 h1:KTgPnR10d5zhztWptI952TNtt/4u5h3IzDXkdIMuo2Y=
k8s.io/utils v0.0.0-20221128185143-99ec85e7a448/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
@@ -1653,15 +1660,17 @@ rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30/go.mod h1:fEO7lRTdivWO2qYVCVG7dEADOMo/MLDCVr8So2g88Uw=
sigs.k8s.io/controller-runtime v0.12.3 h1:FCM8xeY/FI8hoAfh/V4XbbYMY20gElh9yh+A98usMio=
sigs.k8s.io/controller-runtime v0.12.3/go.mod h1:qKsk4WE6zW2Hfj0G4v10EnNB2jMG1C+NTb8h+DwCoU0=
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 h1:kDi4JBNAsJWfz1aEXhO8Jg87JJaPNLh5tIzYHgStQ9Y=
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY=
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k=
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/kustomize/api v0.11.4 h1:/0Mr3kfBBNcNPOW5Qwk/3eb8zkswCwnqQxxKtmrTkRo=
sigs.k8s.io/kustomize/api v0.11.4/go.mod h1:k+8RsqYbgpkIrJ4p9jcdPqe8DprLxFUUO0yNOq8C+xI=
sigs.k8s.io/kustomize/kyaml v0.13.6 h1:eF+wsn4J7GOAXlvajv6OknSunxpcOBQQqsnPxObtkGs=
sigs.k8s.io/kustomize/kyaml v0.13.6/go.mod h1:yHP031rn1QX1lr/Xd934Ri/xdVNG8BE2ECa78Ht/kEg=
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y=
sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE=
sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=


@@ -3,7 +3,7 @@ package v1
import (
"encoding/json"
"fmt"
"io/ioutil"
"io"
"net/http"
"sync"
@@ -124,7 +124,7 @@ func getScanParamsFromRequest(r *http.Request, scanID string) (*scanRequestParam
return scanRequestParams, fmt.Errorf("failed to parse query params, reason: %s", err.Error())
}
readBuffer, err := ioutil.ReadAll(r.Body)
readBuffer, err := io.ReadAll(r.Body)
if err != nil {
// handler.writeError(w, fmt.Errorf("failed to read request body, reason: %s", err.Error()), scanID)
return scanRequestParams, fmt.Errorf("failed to read request body, reason: %s", err.Error())


@@ -16,8 +16,8 @@ import (
"github.com/google/uuid"
)
var OutputDir = "./results"
var FailedOutputDir = "./failed"
var OutputDir = "./results/"
var FailedOutputDir = "./failed/"
// A Scan Response object
//
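
The trailing slash added to OutputDir and FailedOutputDir only matters if a file name is later appended by plain string concatenation rather than filepath.Join; that concatenation is an assumption about the surrounding code, not something shown in this diff. A minimal sketch of the difference:

package main

import (
	"fmt"
	"path/filepath"
)

var OutputDir = "./results/"

func main() {
	// With plain concatenation the separator must already be in the prefix:
	// "./results" + "scan.json" would produce "./resultsscan.json".
	fmt.Println(OutputDir + "scan.json") // ./results/scan.json

	// filepath.Join inserts the separator itself and cleans the result,
	// so it is indifferent to a trailing slash on the directory.
	fmt.Println(filepath.Join("./results", "scan.json")) // results/scan.json
}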


@@ -22,7 +22,6 @@ def run(kubescape_exec:str):
test_command(command=[kubescape_exec, "scan", "framework"])
test_command(command=[kubescape_exec, "scan", "control"])
test_command(command=[kubescape_exec, "submit", "results"])
test_command(command=[kubescape_exec, "submit", "rbac"])
print("Done testing commands")


@@ -13,15 +13,15 @@ def scan_all(kubescape_exec: str):
def scan_control_name(kubescape_exec: str):
return smoke_utils.run_command(command=[kubescape_exec, "scan", "control", 'Allowed hostPath', all_files, "--enable-host-scan=false"])
return smoke_utils.run_command(command=[kubescape_exec, "scan", "control", 'HostPath mount', all_files, "--enable-host-scan=false"])
def scan_control_id(kubescape_exec: str):
return smoke_utils.run_command(command=[kubescape_exec, "scan", "control", 'C-0006', all_files, "--enable-host-scan=false"])
return smoke_utils.run_command(command=[kubescape_exec, "scan", "control", 'C-0048', all_files, "--enable-host-scan=false"])
def scan_controls(kubescape_exec: str):
return smoke_utils.run_command(command=[kubescape_exec, "scan", "control", 'Allowed hostPath,Allow privilege escalation', all_files, "--enable-host-scan=false"])
return smoke_utils.run_command(command=[kubescape_exec, "scan", "control", 'HostPath mount,Allow privilege escalation', all_files, "--enable-host-scan=false"])
def scan_framework(kubescape_exec: str):