mirror of
https://github.com/kubescape/kubescape.git
synced 2026-02-16 02:49:52 +00:00
Compare commits
225 Commits
v2.2.6
...
github-act
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
e09bb2e310 | ||
|
|
f7b3cdcf35 | ||
|
|
d6a47a82d2 | ||
|
|
936cb26c06 | ||
|
|
9265a5d6d0 | ||
|
|
e6f5c7e0dd | ||
|
|
4e48148d40 | ||
|
|
3648ef286d | ||
|
|
d946662e57 | ||
|
|
51b37d5cbf | ||
|
|
9afae713ba | ||
|
|
1d64522607 | ||
|
|
225a923006 | ||
|
|
6c1a3fb89b | ||
|
|
df5f7db51d | ||
|
|
35c593a624 | ||
|
|
869f0ea109 | ||
|
|
cf08daf7fb | ||
|
|
266029eb23 | ||
|
|
4c9fec8ef4 | ||
|
|
6f07e63d3f | ||
|
|
addd66bf72 | ||
|
|
e2f96200e0 | ||
|
|
f799b63684 | ||
|
|
a088219954 | ||
|
|
1a2e16b895 | ||
|
|
7444acae11 | ||
|
|
8294694e09 | ||
|
|
12d7f18b79 | ||
|
|
83279484bd | ||
|
|
ba134ebc32 | ||
|
|
b44f0a76c9 | ||
|
|
226b4772a2 | ||
|
|
5379b9b0a6 | ||
|
|
98f68d8097 | ||
|
|
f8057b5c79 | ||
|
|
f36d8c31b0 | ||
|
|
3abf18acb7 | ||
|
|
28200b2744 | ||
|
|
678f21e33c | ||
|
|
467a84ddac | ||
|
|
925145724e | ||
|
|
e3677fc45c | ||
|
|
704de5bfc1 | ||
|
|
2494c1971c | ||
|
|
3b8bd7735e | ||
|
|
602591e7f2 | ||
|
|
e276e54d2b | ||
|
|
0c019819ff | ||
|
|
d9e946cf6d | ||
|
|
e3a8ebfe05 | ||
|
|
fd3703b21b | ||
|
|
6bcdda7d56 | ||
|
|
981430d65f | ||
|
|
e91ec69832 | ||
|
|
bbfa5d356a | ||
|
|
d2af7f47db | ||
|
|
d28afcb00c | ||
|
|
ca6bdb0bef | ||
|
|
e424bfa81b | ||
|
|
9f1ff4c090 | ||
|
|
1a2dda700b | ||
|
|
c4e5611c7f | ||
|
|
d8e913fb9f | ||
|
|
a37b1f7319 | ||
|
|
b730ef5154 | ||
|
|
3280173e95 | ||
|
|
d0ae4f1c1a | ||
|
|
e4faad8284 | ||
|
|
bc131efd91 | ||
|
|
4763f0d69d | ||
|
|
22c412ce7f | ||
|
|
1503e984f8 | ||
|
|
a4478ba899 | ||
|
|
fcbcb53995 | ||
|
|
17c43fd366 | ||
|
|
d44746cb85 | ||
|
|
912035662b | ||
|
|
61dac76369 | ||
|
|
bacf15eeb8 | ||
|
|
0a5af235e3 | ||
|
|
6fec02caff | ||
|
|
067655d003 | ||
|
|
d55a74c6b2 | ||
|
|
e470fce6ed | ||
|
|
ea3172eda6 | ||
|
|
f060d02fbc | ||
|
|
43975ddafe | ||
|
|
abe0477249 | ||
|
|
5f197eb27c | ||
|
|
84b43d2b03 | ||
|
|
b149e00d1a | ||
|
|
f98b394ec2 | ||
|
|
492b08c995 | ||
|
|
8fa15688fb | ||
|
|
1a3e140e56 | ||
|
|
72f6988bb4 | ||
|
|
780be45392 | ||
|
|
676771e8b3 | ||
|
|
06f5c24b7d | ||
|
|
c17415d6e9 | ||
|
|
b5bed7bfbb | ||
|
|
3c38021f7c | ||
|
|
8989cc1679 | ||
|
|
0ab9c32715 | ||
|
|
868db91801 | ||
|
|
aa0fe21a2e | ||
|
|
1b181a47ef | ||
|
|
30487dcd0e | ||
|
|
46ad069fe5 | ||
|
|
05d5de17d5 | ||
|
|
6bc79458b0 | ||
|
|
ab85ca2b28 | ||
|
|
99938ecbee | ||
|
|
e2f8e273ad | ||
|
|
be63e1ef7c | ||
|
|
5e5b9d564c | ||
|
|
8ee72895b9 | ||
|
|
6cefada215 | ||
|
|
211ee487b3 | ||
|
|
bbe46c9fab | ||
|
|
ce7fde582c | ||
|
|
1c31e1f015 | ||
|
|
9e2fe607d8 | ||
|
|
5a5ec9b641 | ||
|
|
24c608e204 | ||
|
|
ac43036b4a | ||
|
|
03b89047f8 | ||
|
|
07a5c6488b | ||
|
|
c486b4fed7 | ||
|
|
00c48d756d | ||
|
|
b49563ae8c | ||
|
|
7840ecb5da | ||
|
|
e151c5bf81 | ||
|
|
225545476c | ||
|
|
987f97102d | ||
|
|
7bffed2afe | ||
|
|
3357713903 | ||
|
|
efd48eab08 | ||
|
|
231d9c231a | ||
|
|
91e705a3eb | ||
|
|
a92d573cb8 | ||
|
|
e8c72b9883 | ||
|
|
d380b2cb00 | ||
|
|
50b3d0f313 | ||
|
|
474b6d07ed | ||
|
|
2cddc4b395 | ||
|
|
b5fb355a22 | ||
|
|
d1bc6d0190 | ||
|
|
0a0ef10d50 | ||
|
|
4523dc8456 | ||
|
|
b26f83d0bd | ||
|
|
9cc3053d74 | ||
|
|
84842a6a91 | ||
|
|
aff8cc480e | ||
|
|
7feea43421 | ||
|
|
04ec32c9f4 | ||
|
|
b805f22038 | ||
|
|
092f37a636 | ||
|
|
9a2eb46f65 | ||
|
|
c637c1a589 | ||
|
|
7609a4aa5d | ||
|
|
75d31c22d9 | ||
|
|
b93a97a8c8 | ||
|
|
88696ca233 | ||
|
|
87d94d16ff | ||
|
|
1843bcdaf8 | ||
|
|
cdaff7ddbe | ||
|
|
ec7bc26f64 | ||
|
|
75b64d58f3 | ||
|
|
dce1d762c6 | ||
|
|
f3225855d0 | ||
|
|
5ae421dbc2 | ||
|
|
d4b75dcb0c | ||
|
|
b7935276e3 | ||
|
|
d6edd818b8 | ||
|
|
a73081c816 | ||
|
|
dd961b9e55 | ||
|
|
76ced13a26 | ||
|
|
95e88f8581 | ||
|
|
5955247f01 | ||
|
|
c0530b4f88 | ||
|
|
c23d6a17cc | ||
|
|
d448de131f | ||
|
|
b48c04da63 | ||
|
|
ecf770c756 | ||
|
|
6e33f37aee | ||
|
|
03f792e968 | ||
|
|
b017d77b86 | ||
|
|
2cde591180 | ||
|
|
f25d573f32 | ||
|
|
ebf3e49f53 | ||
|
|
acaf6e78da | ||
|
|
344e9188f6 | ||
|
|
3f69f06df1 | ||
|
|
e0b296c124 | ||
|
|
108bbd8bc4 | ||
|
|
5c1a41e920 | ||
|
|
0b8d207615 | ||
|
|
539b6c51b9 | ||
|
|
19ca590e2f | ||
|
|
4de50f82c0 | ||
|
|
ab41d5dbf4 | ||
|
|
fa6de6dc3f | ||
|
|
96e959c3b7 | ||
|
|
28fdee0dd2 | ||
|
|
9ce25c45fe | ||
|
|
d44b9f7a31 | ||
|
|
c7af6266fd | ||
|
|
91c13381b2 | ||
|
|
30ad3adbb6 | ||
|
|
64e3b08641 | ||
|
|
6d7a89bb74 | ||
|
|
e8d92ffd43 | ||
|
|
48a15e1a8d | ||
|
|
d02f15ef6f | ||
|
|
9327f70e1a | ||
|
|
c498026208 | ||
|
|
3238555df3 | ||
|
|
0c77d89bfc | ||
|
|
875deb7ec3 | ||
|
|
eae234136b | ||
|
|
93a35fffbd | ||
|
|
9a3767ef72 | ||
|
|
9420fd5e79 |
2
.dockerignore
Normal file
2
.dockerignore
Normal file
@@ -0,0 +1,2 @@
|
||||
git2go
|
||||
kubescape
|
||||
9
.github/actions/tag-action/action.yaml
vendored
9
.github/actions/tag-action/action.yaml
vendored
@@ -17,7 +17,14 @@ runs:
|
||||
using: "composite"
|
||||
steps:
|
||||
- run: |
|
||||
SUB='-rc'
|
||||
if [[ -z "${{ inputs.ORIGINAL_TAG }}" ]]; then
|
||||
echo "The value of ORIGINAL_TAG is ${{ inputs.ORIGINAL_TAG }}"
|
||||
echo "Setting the value of ORIGINAL_TAG to ${{ github.ref_name }}"
|
||||
echo ORIGINAL_TAG="${{ github.ref_name }}" >> $GITHUB_ENV
|
||||
fi
|
||||
shell: bash
|
||||
|
||||
- run: |
|
||||
if [[ "${{ inputs.ORIGINAL_TAG }}" == *"${{ inputs.SUB_STRING }}"* ]]; then
|
||||
echo "Release candidate tag found."
|
||||
else
|
||||
|
||||
18
.github/workflows/00-pr-scanner.yaml
vendored
18
.github/workflows/00-pr-scanner.yaml
vendored
@@ -2,12 +2,9 @@ name: 00-pr_scanner
|
||||
on:
|
||||
pull_request:
|
||||
types: [opened, reopened, synchronize, ready_for_review]
|
||||
branches:
|
||||
- 'master'
|
||||
- 'main'
|
||||
- 'dev'
|
||||
paths-ignore:
|
||||
- '**.yaml'
|
||||
- '**.yml'
|
||||
- '**.md'
|
||||
- '**.sh'
|
||||
- 'website/*'
|
||||
@@ -29,3 +26,16 @@ jobs:
|
||||
RELEASE: ""
|
||||
CLIENT: test
|
||||
secrets: inherit
|
||||
|
||||
binary-build:
|
||||
uses: ./.github/workflows/b-binary-build-and-e2e-tests.yaml
|
||||
with:
|
||||
COMPONENT_NAME: kubescape
|
||||
CGO_ENABLED: 1
|
||||
GO111MODULE: ""
|
||||
GO_VERSION: "1.20"
|
||||
RELEASE: ""
|
||||
CLIENT: test
|
||||
ARCH_MATRIX: '[ "" ]'
|
||||
OS_MATRIX: '[ "ubuntu-20.04" ]'
|
||||
secrets: inherit
|
||||
|
||||
34
.github/workflows/01-pr-merged.yaml
vendored
34
.github/workflows/01-pr-merged.yaml
vendored
@@ -1,34 +0,0 @@
|
||||
name: 01-pr-merged
|
||||
|
||||
on:
|
||||
pull_request_target:
|
||||
types: [closed]
|
||||
branches:
|
||||
- 'master'
|
||||
- 'main'
|
||||
paths-ignore:
|
||||
- '**.yaml'
|
||||
- '**.md'
|
||||
- '**.sh'
|
||||
- 'website/*'
|
||||
- 'examples/*'
|
||||
- 'docs/*'
|
||||
- 'build/*'
|
||||
- '.github/*'
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
binary-build:
|
||||
if: ${{ github.event.pull_request.merged == true && contains( github.event.pull_request.labels.*.name, 'trigger-integration-test') && github.event.pull_request.base.ref == 'master' }} ## run only if labeled as "trigger-integration-test" and base branch is master
|
||||
uses: ./.github/workflows/b-binary-build-and-e2e-tests.yaml
|
||||
with:
|
||||
COMPONENT_NAME: kubescape
|
||||
CGO_ENABLED: 1
|
||||
GO111MODULE: ""
|
||||
GO_VERSION: "1.19"
|
||||
RELEASE: ""
|
||||
CLIENT: test
|
||||
secrets: inherit
|
||||
2
.github/workflows/02-release.yaml
vendored
2
.github/workflows/02-release.yaml
vendored
@@ -21,7 +21,7 @@ jobs:
|
||||
COMPONENT_NAME: kubescape
|
||||
CGO_ENABLED: 1
|
||||
GO111MODULE: ""
|
||||
GO_VERSION: "1.19"
|
||||
GO_VERSION: "1.20"
|
||||
RELEASE: ${{ needs.retag.outputs.NEW_TAG }}
|
||||
CLIENT: release
|
||||
secrets: inherit
|
||||
|
||||
30
.github/workflows/03-post-release.yaml
vendored
30
.github/workflows/03-post-release.yaml
vendored
@@ -1,4 +1,4 @@
|
||||
name: 03-create_release_digests
|
||||
name: 03-post_release
|
||||
on:
|
||||
release:
|
||||
types: [published]
|
||||
@@ -6,8 +6,8 @@ on:
|
||||
- 'master'
|
||||
- 'main'
|
||||
jobs:
|
||||
create_release_digests:
|
||||
name: Creating digests
|
||||
post_release:
|
||||
name: Post release jobs
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Digest
|
||||
@@ -15,3 +15,27 @@ jobs:
|
||||
with:
|
||||
hash-type: sha1
|
||||
file-name: kubescape-release-digests
|
||||
- name: Invoke workflow to update packaging
|
||||
uses: benc-uk/workflow-dispatch@v1
|
||||
if: github.repository_owner == 'kubescape'
|
||||
with:
|
||||
workflow: release.yml
|
||||
repo: kubescape/packaging
|
||||
ref: refs/heads/main
|
||||
token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
|
||||
- name: Invoke workflow to update homebrew tap
|
||||
uses: benc-uk/workflow-dispatch@v1
|
||||
if: github.repository_owner == 'kubescape'
|
||||
with:
|
||||
workflow: release.yml
|
||||
repo: kubescape/homebrew-tap
|
||||
ref: refs/heads/main
|
||||
token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
|
||||
- name: Invoke workflow to update github action
|
||||
uses: benc-uk/workflow-dispatch@v1
|
||||
if: github.repository_owner == 'kubescape'
|
||||
with:
|
||||
workflow: release.yaml
|
||||
repo: kubescape/github-action
|
||||
ref: refs/heads/main
|
||||
token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
|
||||
|
||||
110
.github/workflows/a-pr-scanner.yaml
vendored
110
.github/workflows/a-pr-scanner.yaml
vendored
@@ -10,6 +10,10 @@ on:
|
||||
description: 'Client name'
|
||||
required: true
|
||||
type: string
|
||||
UNIT_TESTS_PATH:
|
||||
required: false
|
||||
type: string
|
||||
default: "./..."
|
||||
jobs:
|
||||
scanners:
|
||||
env:
|
||||
@@ -25,7 +29,7 @@ jobs:
|
||||
- uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # Install go because go-licenses use it ratchet:actions/setup-go@v3
|
||||
name: Installing go
|
||||
with:
|
||||
go-version: '1.19'
|
||||
go-version: '1.20'
|
||||
cache: true
|
||||
- name: Scanning - Forbidden Licenses (go-licenses)
|
||||
id: licenses-scan
|
||||
@@ -57,6 +61,21 @@ jobs:
|
||||
command: test --all-projects
|
||||
env:
|
||||
SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }}
|
||||
|
||||
- name: Test coverage
|
||||
id: unit-test
|
||||
run: go test -v ${{ inputs.UNIT_TESTS_PATH }} -covermode=count -coverprofile=coverage.out
|
||||
|
||||
- name: Convert coverage count to lcov format
|
||||
uses: jandelgado/gcov2lcov-action@v1
|
||||
|
||||
- name: Submit coverage tests to Coveralls
|
||||
continue-on-error: true
|
||||
uses: coverallsapp/github-action@v1
|
||||
with:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
path-to-lcov: coverage.lcov
|
||||
|
||||
- name: Comment results to PR
|
||||
continue-on-error: true # Warning: This might break opening PRs from forks
|
||||
uses: peter-evans/create-or-update-comment@5adcb0bb0f9fb3f95ef05400558bdb3f329ee808 # ratchet:peter-evans/create-or-update-comment@v2.1.0
|
||||
@@ -68,92 +87,3 @@ jobs:
|
||||
- Credentials scan: ${{ steps.credentials-scan.outcome }}
|
||||
- Vulnerabilities scan: ${{ steps.vulnerabilities-scan.outcome }}
|
||||
reactions: 'eyes'
|
||||
basic-tests:
|
||||
needs: scanners
|
||||
name: Create cross-platform build
|
||||
runs-on: ${{ matrix.os }}
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
RELEASE: ${{ inputs.RELEASE }}
|
||||
CLIENT: ${{ inputs.CLIENT }}
|
||||
strategy:
|
||||
matrix:
|
||||
os: [ubuntu-20.04, macos-latest, windows-latest]
|
||||
steps:
|
||||
- uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # ratchet:actions/checkout@v3
|
||||
with:
|
||||
submodules: recursive
|
||||
- name: Cache Go modules (Linux)
|
||||
if: matrix.os == 'ubuntu-latest'
|
||||
uses: actions/cache@69d9d449aced6a2ede0bc19182fadc3a0a42d2b0 # ratchet:actions/cache@v3
|
||||
with:
|
||||
path: |
|
||||
~/.cache/go-build
|
||||
~/go/pkg/mod
|
||||
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-go-
|
||||
- name: Cache Go modules (macOS)
|
||||
if: matrix.os == 'macos-latest'
|
||||
uses: actions/cache@69d9d449aced6a2ede0bc19182fadc3a0a42d2b0 # ratchet:actions/cache@v3
|
||||
with:
|
||||
path: |
|
||||
~/Library/Caches/go-build
|
||||
~/go/pkg/mod
|
||||
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-go-
|
||||
- name: Cache Go modules (Windows)
|
||||
if: matrix.os == 'windows-latest'
|
||||
uses: actions/cache@69d9d449aced6a2ede0bc19182fadc3a0a42d2b0 # ratchet:actions/cache@v3
|
||||
with:
|
||||
path: |
|
||||
~\AppData\Local\go-build
|
||||
~\go\pkg\mod
|
||||
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-go-
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # ratchet:actions/setup-go@v3
|
||||
with:
|
||||
go-version: 1.19
|
||||
- name: Install MSYS2 & libgit2 (Windows)
|
||||
shell: cmd
|
||||
run: .\build.bat all
|
||||
if: matrix.os == 'windows-latest'
|
||||
- name: Install pkg-config (macOS)
|
||||
run: brew install pkg-config
|
||||
if: matrix.os == 'macos-latest'
|
||||
- name: Install libgit2 (Linux/macOS)
|
||||
run: make libgit2
|
||||
if: matrix.os != 'windows-latest'
|
||||
- name: Test core pkg
|
||||
run: go test "-tags=static,gitenabled" -v ./...
|
||||
- name: Test httphandler pkg
|
||||
run: cd httphandler && go test "-tags=static,gitenabled" -v ./...
|
||||
- name: Build
|
||||
env:
|
||||
RELEASE: ${{ inputs.RELEASE }}
|
||||
CLIENT: ${{ inputs.CLIENT }}
|
||||
CGO_ENABLED: 1
|
||||
run: python3 --version && python3 build.py
|
||||
- name: Smoke Testing (Windows / MacOS)
|
||||
env:
|
||||
RELEASE: ${{ inputs.RELEASE }}
|
||||
KUBESCAPE_SKIP_UPDATE_CHECK: "true"
|
||||
run: python3 smoke_testing/init.py ${PWD}/build/kubescape-${{ matrix.os }}
|
||||
if: matrix.os != 'ubuntu-20.04'
|
||||
- name: Smoke Testing (Linux)
|
||||
env:
|
||||
RELEASE: ${{ inputs.RELEASE }}
|
||||
KUBESCAPE_SKIP_UPDATE_CHECK: "true"
|
||||
run: python3 smoke_testing/init.py ${PWD}/build/kubescape-ubuntu-latest
|
||||
if: matrix.os == 'ubuntu-20.04'
|
||||
- name: golangci-lint
|
||||
if: matrix.os == 'ubuntu-20.04'
|
||||
continue-on-error: true
|
||||
uses: golangci/golangci-lint-action@08e2f20817b15149a52b5b3ebe7de50aff2ba8c5 # ratchet:golangci/golangci-lint-action@v3
|
||||
with:
|
||||
version: latest
|
||||
args: --timeout 10m --build-tags=static
|
||||
only-new-issues: true
|
||||
|
||||
135
.github/workflows/b-binary-build-and-e2e-tests.yaml
vendored
135
.github/workflows/b-binary-build-and-e2e-tests.yaml
vendored
@@ -1,5 +1,45 @@
|
||||
name: b-binary-build-and-e2e-tests
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
COMPONENT_NAME:
|
||||
required: false
|
||||
type: string
|
||||
default: "kubescape"
|
||||
RELEASE:
|
||||
required: false
|
||||
type: string
|
||||
default: ""
|
||||
CLIENT:
|
||||
required: false
|
||||
type: string
|
||||
default: "test"
|
||||
GO_VERSION:
|
||||
required: false
|
||||
type: string
|
||||
default: "1.20"
|
||||
GO111MODULE:
|
||||
required: false
|
||||
type: string
|
||||
default: ""
|
||||
CGO_ENABLED:
|
||||
type: number
|
||||
default: 1
|
||||
required: false
|
||||
OS_MATRIX:
|
||||
type: string
|
||||
required: false
|
||||
default: '[ "ubuntu-20.04", "macos-latest", "windows-latest"]'
|
||||
ARCH_MATRIX:
|
||||
type: string
|
||||
required: false
|
||||
default: '[ "", "arm64"]'
|
||||
BINARY_TESTS:
|
||||
type: string
|
||||
required: false
|
||||
default: '[ "scan_nsa", "scan_mitre", "scan_with_exceptions", "scan_repository", "scan_local_file", "scan_local_glob_files", "scan_local_list_of_files", "scan_nsa_and_submit_to_backend", "scan_mitre_and_submit_to_backend", "scan_local_repository_and_submit_to_backend", "scan_repository_from_url_and_submit_to_backend", "scan_with_exception_to_backend", "scan_with_custom_framework", "scan_customer_configuration", "host_scanner", "scan_compliance_score" ]'
|
||||
|
||||
workflow_call:
|
||||
inputs:
|
||||
COMPONENT_NAME:
|
||||
@@ -13,7 +53,7 @@ on:
|
||||
type: string
|
||||
GO_VERSION:
|
||||
type: string
|
||||
default: "1.19"
|
||||
default: "1.20"
|
||||
GO111MODULE:
|
||||
required: true
|
||||
type: string
|
||||
@@ -22,20 +62,26 @@ on:
|
||||
default: 1
|
||||
BINARY_TESTS:
|
||||
type: string
|
||||
default: '[ "scan_nsa", "scan_mitre", "scan_with_exceptions", "scan_repository", "scan_local_file", "scan_local_glob_files", "scan_local_list_of_files", "scan_nsa_and_submit_to_backend", "scan_mitre_and_submit_to_backend", "scan_local_repository_and_submit_to_backend", "scan_repository_from_url_and_submit_to_backend", "scan_with_exception_to_backend", "scan_with_custom_framework", "scan_customer_configuration", "host_scanner" ]'
|
||||
CHECKOUT_REPO:
|
||||
required: false
|
||||
default: '[ "scan_nsa", "scan_mitre", "scan_with_exceptions", "scan_repository", "scan_local_file", "scan_local_glob_files", "scan_local_list_of_files", "scan_nsa_and_submit_to_backend", "scan_mitre_and_submit_to_backend", "scan_local_repository_and_submit_to_backend", "scan_repository_from_url_and_submit_to_backend", "scan_with_exception_to_backend", "scan_with_custom_framework", "scan_customer_configuration", "host_scanner", "scan_compliance_score", "scan_custom_framework_scanning_file_scope_testing", "scan_custom_framework_scanning_cluster_scope_testing", "scan_custom_framework_scanning_cluster_and_file_scope_testing", "unified_configuration_config_view", "unified_configuration_config_set", "unified_configuration_config_delete" ]'
|
||||
OS_MATRIX:
|
||||
type: string
|
||||
|
||||
|
||||
|
||||
required: false
|
||||
default: '[ "ubuntu-20.04", "macos-latest", "windows-latest"]'
|
||||
ARCH_MATRIX:
|
||||
type: string
|
||||
required: false
|
||||
default: '[ "", "arm64"]'
|
||||
|
||||
jobs:
|
||||
wf-preparation:
|
||||
name: secret-validator
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
TEST_NAMES: ${{ steps.export_tests_to_env.outputs.TEST_NAMES }}
|
||||
OS_MATRIX: ${{ steps.export_os_to_env.outputs.OS_MATRIX }}
|
||||
ARCH_MATRIX: ${{ steps.export_arch_to_env.outputs.ARCH_MATRIX }}
|
||||
is-secret-set: ${{ steps.check-secret-set.outputs.is-secret-set }}
|
||||
|
||||
steps:
|
||||
- name: check if the necessary secrets are set in github secrets
|
||||
id: check-secret-set
|
||||
@@ -49,27 +95,46 @@ jobs:
|
||||
REGISTRY_PASSWORD: ${{ secrets.REGISTRY_PASSWORD }}
|
||||
run: "echo \"is-secret-set=${{ env.CUSTOMER != '' && \n env.USERNAME != '' &&\n env.PASSWORD != '' &&\n env.CLIENT_ID != '' &&\n env.SECRET_KEY != '' &&\n env.REGISTRY_USERNAME != '' &&\n env.REGISTRY_PASSWORD != ''\n }}\" >> $GITHUB_OUTPUT\n"
|
||||
|
||||
- id: export_os_to_env
|
||||
name: set test name
|
||||
run: |
|
||||
echo "OS_MATRIX=$input" >> $GITHUB_OUTPUT
|
||||
env:
|
||||
input: ${{ inputs.OS_MATRIX }}
|
||||
|
||||
- id: export_tests_to_env
|
||||
name: set test name
|
||||
run: |
|
||||
echo "TEST_NAMES=$input" >> $GITHUB_OUTPUT
|
||||
env:
|
||||
input: ${{ inputs.BINARY_TESTS }}
|
||||
|
||||
|
||||
- id: export_arch_to_env
|
||||
name: set test name
|
||||
run: |
|
||||
echo "ARCH_MATRIX=$input" >> $GITHUB_OUTPUT
|
||||
env:
|
||||
input: ${{ inputs.ARCH_MATRIX }}
|
||||
|
||||
|
||||
binary-build:
|
||||
name: Create cross-platform build
|
||||
needs: wf-preparation
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
GOARCH: ${{ matrix.arch }}
|
||||
runs-on: ${{ matrix.os }}
|
||||
strategy:
|
||||
matrix:
|
||||
os: [ubuntu-20.04, macos-latest, windows-latest]
|
||||
os: ${{ fromJson(needs.wf-preparation.outputs.OS_MATRIX) }}
|
||||
arch: ${{ fromJson(needs.wf-preparation.outputs.ARCH_MATRIX) }}
|
||||
exclude:
|
||||
- os: windows-latest
|
||||
arch: arm64
|
||||
steps:
|
||||
|
||||
- uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # ratchet:actions/checkout@v3
|
||||
with:
|
||||
repository: ${{inputs.CHECKOUT_REPO}}
|
||||
fetch-depth: 0
|
||||
submodules: recursive
|
||||
|
||||
@@ -112,9 +177,26 @@ jobs:
|
||||
go-version: ${{ inputs.GO_VERSION }}
|
||||
cache: true
|
||||
|
||||
- name: start ${{ matrix.arch }} environment in container
|
||||
run: |
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y binfmt-support qemu-user-static
|
||||
sudo docker run --platform linux/${{ matrix.arch }} -e RELEASE=${{ inputs.RELEASE }} \
|
||||
-e CLIENT=${{ inputs.CLIENT }} -e CGO_ENABLED=${{ inputs.CGO_ENABLED }} \
|
||||
-e KUBESCAPE_SKIP_UPDATE_CHECK=true -e GOARCH=${{ matrix.arch }} -v ${PWD}:/work \
|
||||
-w /work -v ~/go/pkg/mod:/root/go/pkg/mod -v ~/.cache/go-build:/root/.cache/go-build \
|
||||
-d --name build golang:${{ inputs.GO_VERSION }}-bullseye sleep 21600
|
||||
sudo docker ps
|
||||
DOCKER_CMD="sudo docker exec build"
|
||||
${DOCKER_CMD} apt update
|
||||
${DOCKER_CMD} apt install -y cmake python3
|
||||
${DOCKER_CMD} git config --global --add safe.directory '*'
|
||||
echo "DOCKER_CMD=${DOCKER_CMD}" >> $GITHUB_ENV;
|
||||
if: matrix.os == 'ubuntu-20.04' && matrix.arch != ''
|
||||
|
||||
- name: Install MSYS2 & libgit2 (Windows)
|
||||
shell: cmd
|
||||
run: .\build.bat all
|
||||
shell: pwsh
|
||||
run: .\build.ps1 all
|
||||
if: matrix.os == 'windows-latest'
|
||||
|
||||
- name: Install pkg-config (macOS)
|
||||
@@ -122,35 +204,44 @@ jobs:
|
||||
if: matrix.os == 'macos-latest'
|
||||
|
||||
- name: Install libgit2 (Linux/macOS)
|
||||
run: make libgit2
|
||||
run: ${{ env.DOCKER_CMD }} make libgit2${{ matrix.arch }}
|
||||
if: matrix.os != 'windows-latest'
|
||||
|
||||
- name: Test core pkg
|
||||
run: go test "-tags=static,gitenabled" -v ./...
|
||||
run: ${{ env.DOCKER_CMD }} go test "-tags=static,gitenabled" -v ./...
|
||||
if: "!startsWith(github.ref, 'refs/tags') && matrix.os == 'ubuntu-20.04' && matrix.arch == '' || startsWith(github.ref, 'refs/tags') && (matrix.os != 'macos-latest' || matrix.arch != 'arm64')"
|
||||
|
||||
- name: Test httphandler pkg
|
||||
run: cd httphandler && go test "-tags=static,gitenabled" -v ./...
|
||||
run: ${{ env.DOCKER_CMD }} sh -c 'cd httphandler && go test "-tags=static,gitenabled" -v ./...'
|
||||
if: "!startsWith(github.ref, 'refs/tags') && matrix.os == 'ubuntu-20.04' && matrix.arch == '' || startsWith(github.ref, 'refs/tags') && (matrix.os != 'macos-latest' || matrix.arch != 'arm64')"
|
||||
|
||||
- name: Build
|
||||
env:
|
||||
RELEASE: ${{ inputs.RELEASE }}
|
||||
CLIENT: ${{ inputs.CLIENT }}
|
||||
CGO_ENABLED: ${{ inputs.CGO_ENABLED }}
|
||||
run: python3 --version && python3 build.py
|
||||
run: ${{ env.DOCKER_CMD }} python3 --version && ${{ env.DOCKER_CMD }} python3 build.py
|
||||
|
||||
- name: Smoke Testing (Windows / MacOS)
|
||||
env:
|
||||
RELEASE: ${{ inputs.RELEASE }}
|
||||
KUBESCAPE_SKIP_UPDATE_CHECK: "true"
|
||||
run: python3 smoke_testing/init.py ${PWD}/build/kubescape-${{ matrix.os }}
|
||||
if: matrix.os != 'ubuntu-20.04'
|
||||
if: startsWith(github.ref, 'refs/tags') && matrix.os != 'ubuntu-20.04' && matrix.arch == ''
|
||||
|
||||
- name: Smoke Testing (Linux)
|
||||
- name: Smoke Testing (Linux amd64)
|
||||
env:
|
||||
RELEASE: ${{ inputs.RELEASE }}
|
||||
KUBESCAPE_SKIP_UPDATE_CHECK: "true"
|
||||
run: python3 smoke_testing/init.py ${PWD}/build/kubescape-ubuntu-latest
|
||||
if: matrix.os == 'ubuntu-20.04'
|
||||
run: ${{ env.DOCKER_CMD }} python3 smoke_testing/init.py ${PWD}/build/kubescape-ubuntu-latest
|
||||
if: matrix.os == 'ubuntu-20.04' && matrix.arch == ''
|
||||
|
||||
- name: Smoke Testing (Linux ${{ matrix.arch }})
|
||||
env:
|
||||
RELEASE: ${{ inputs.RELEASE }}
|
||||
KUBESCAPE_SKIP_UPDATE_CHECK: "true"
|
||||
run: ${{ env.DOCKER_CMD }} python3 smoke_testing/init.py ./build/kubescape-${{ matrix.arch }}-ubuntu-latest
|
||||
if: startsWith(github.ref, 'refs/tags') && matrix.os == 'ubuntu-20.04' && matrix.arch != ''
|
||||
|
||||
- name: golangci-lint
|
||||
if: matrix.os == 'ubuntu-20.04'
|
||||
@@ -165,7 +256,7 @@ jobs:
|
||||
name: Upload artifact (Linux)
|
||||
if: matrix.os == 'ubuntu-20.04'
|
||||
with:
|
||||
name: kubescape-ubuntu-latest
|
||||
name: kubescape${{ matrix.arch }}-ubuntu-latest
|
||||
path: build/
|
||||
if-no-files-found: error
|
||||
|
||||
@@ -173,7 +264,7 @@ jobs:
|
||||
name: Upload artifact (MacOS, Win)
|
||||
if: matrix.os != 'ubuntu-20.04'
|
||||
with:
|
||||
name: kubescape-${{ matrix.os }}
|
||||
name: kubescape${{ matrix.arch }}-${{ matrix.os }}
|
||||
path: build/
|
||||
if-no-files-found: error
|
||||
|
||||
|
||||
34
.github/workflows/build-image.yaml
vendored
Normal file
34
.github/workflows/build-image.yaml
vendored
Normal file
@@ -0,0 +1,34 @@
|
||||
name: build-image
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
CLIENT:
|
||||
required: false
|
||||
type: string
|
||||
default: "test"
|
||||
IMAGE_TAG:
|
||||
required: true
|
||||
type: string
|
||||
CO_SIGN:
|
||||
type: boolean
|
||||
required: false
|
||||
default: false
|
||||
PLATFORMS:
|
||||
type: boolean
|
||||
required: false
|
||||
default: false
|
||||
jobs:
|
||||
publish-image:
|
||||
permissions:
|
||||
id-token: write
|
||||
packages: write
|
||||
contents: read
|
||||
uses: ./.github/workflows/d-publish-image.yaml
|
||||
with:
|
||||
client: ${{ inputs.CLIENT }}
|
||||
image_name: "quay.io/${{ github.repository_owner }}/kubescape"
|
||||
image_tag: ${{ inputs.IMAGE_TAG }}
|
||||
support_platforms: ${{ inputs.PLATFORMS }}
|
||||
cosign: ${{ inputs.CO_SIGN }}
|
||||
secrets: inherit
|
||||
23
.github/workflows/c-create-release.yaml
vendored
23
.github/workflows/c-create-release.yaml
vendored
@@ -19,6 +19,10 @@ jobs:
|
||||
create-release:
|
||||
name: create-release
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
MAC_OS: macos-latest
|
||||
UBUNTU_OS: ubuntu-latest
|
||||
WINDOWS_OS: windows-latest
|
||||
# permissions:
|
||||
# contents: write
|
||||
steps:
|
||||
@@ -26,6 +30,11 @@ jobs:
|
||||
id: download-artifact
|
||||
with:
|
||||
path: .
|
||||
|
||||
# TODO: kubescape-windows-latest is deprecated and should be removed
|
||||
- name: Get kubescape.exe from kubescape-windows-latest
|
||||
run: cp ./kubescape-${{ env.WINDOWS_OS }}/kubescape-${{ env.WINDOWS_OS }} ./kubescape-${{ env.WINDOWS_OS }}/kubescape.exe
|
||||
|
||||
- name: Set release token
|
||||
run: |
|
||||
if [ "${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}" != "" ]; then
|
||||
@@ -35,10 +44,6 @@ jobs:
|
||||
fi
|
||||
- name: Release
|
||||
uses: softprops/action-gh-release@de2c0eb89ae2a093876385947365aca7b0e5f844 # ratchet:softprops/action-gh-release@v1
|
||||
env:
|
||||
MAC_OS: macos-latest
|
||||
UBUNTU_OS: ubuntu-latest
|
||||
WINDOWS_OS: windows-latest
|
||||
with:
|
||||
token: ${{ env.TOKEN }}
|
||||
name: ${{ inputs.RELEASE_NAME }}
|
||||
@@ -47,13 +52,21 @@ jobs:
|
||||
draft: ${{ inputs.DRAFT }}
|
||||
fail_on_unmatched_files: true
|
||||
prerelease: false
|
||||
# TODO: kubescape-windows-latest is deprecated and should be removed
|
||||
files: |
|
||||
./kubescape-${{ env.WINDOWS_OS }}/kubescape-${{ env.WINDOWS_OS }}
|
||||
./kubescape-${{ env.MAC_OS }}/kubescape-${{ env.MAC_OS }}
|
||||
./kubescape-${{ env.MAC_OS }}/kubescape-${{ env.MAC_OS }}.sha256
|
||||
./kubescape-${{ env.MAC_OS }}/kubescape-${{ env.MAC_OS }}.tar.gz
|
||||
./kubescape-${{ env.UBUNTU_OS }}/kubescape-${{ env.UBUNTU_OS }}
|
||||
./kubescape-${{ env.UBUNTU_OS }}/kubescape-${{ env.UBUNTU_OS }}.sha256
|
||||
./kubescape-${{ env.UBUNTU_OS }}/kubescape-${{ env.UBUNTU_OS }}.tar.gz
|
||||
./kubescape-${{ env.WINDOWS_OS }}/kubescape-${{ env.WINDOWS_OS }}
|
||||
./kubescape-${{ env.WINDOWS_OS }}/kubescape.exe
|
||||
./kubescape-${{ env.WINDOWS_OS }}/kubescape-${{ env.WINDOWS_OS }}.sha256
|
||||
./kubescape-${{ env.WINDOWS_OS }}/kubescape-${{ env.WINDOWS_OS }}.tar.gz
|
||||
./kubescapearm64-${{ env.MAC_OS }}/kubescape-arm64-${{ env.MAC_OS }}
|
||||
./kubescapearm64-${{ env.MAC_OS }}/kubescape-arm64-${{ env.MAC_OS }}.sha256
|
||||
./kubescapearm64-${{ env.MAC_OS }}/kubescape-arm64-${{ env.MAC_OS }}.tar.gz
|
||||
./kubescapearm64-${{ env.UBUNTU_OS }}/kubescape-arm64-${{ env.UBUNTU_OS }}
|
||||
./kubescapearm64-${{ env.UBUNTU_OS }}/kubescape-arm64-${{ env.UBUNTU_OS }}.sha256
|
||||
./kubescapearm64-${{ env.UBUNTU_OS }}/kubescape-arm64-${{ env.UBUNTU_OS }}.tar.gz
|
||||
|
||||
30
.github/workflows/codesee-arch-diagram.yml
vendored
Normal file
30
.github/workflows/codesee-arch-diagram.yml
vendored
Normal file
@@ -0,0 +1,30 @@
|
||||
# This workflow was added by CodeSee. Learn more at https://codesee.io/
|
||||
# This is v2.0 of this workflow file
|
||||
on:
|
||||
pull_request_target:
|
||||
types: [opened, synchronize, reopened]
|
||||
paths-ignore:
|
||||
- '**.yaml'
|
||||
- '**.yml'
|
||||
- '**.md'
|
||||
- '**.sh'
|
||||
- 'website/*'
|
||||
- 'examples/*'
|
||||
- 'docs/*'
|
||||
- 'build/*'
|
||||
- '.github/*'
|
||||
|
||||
name: CodeSee
|
||||
|
||||
permissions: read-all
|
||||
|
||||
jobs:
|
||||
codesee:
|
||||
runs-on: ubuntu-latest
|
||||
continue-on-error: true
|
||||
name: Analyze the repo with CodeSee
|
||||
steps:
|
||||
- uses: Codesee-io/codesee-action@v2
|
||||
with:
|
||||
codesee-token: ${{ secrets.CODESEE_ARCH_DIAG_API_TOKEN }}
|
||||
codesee-url: https://app.codesee.io
|
||||
23
.github/workflows/comments.yaml
vendored
Normal file
23
.github/workflows/comments.yaml
vendored
Normal file
@@ -0,0 +1,23 @@
|
||||
name: pr-agent
|
||||
|
||||
on:
|
||||
issue_comment:
|
||||
|
||||
permissions:
|
||||
issues: write
|
||||
pull-requests: write
|
||||
|
||||
jobs:
|
||||
pr_agent:
|
||||
runs-on: ubuntu-latest
|
||||
name: Run pr agent on every pull request, respond to user comments
|
||||
steps:
|
||||
- name: PR Agent action step
|
||||
continue-on-error: true
|
||||
id: pragent
|
||||
uses: Codium-ai/pr-agent@main
|
||||
env:
|
||||
OPENAI_KEY: ${{ secrets.OPENAI_KEY }}
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
|
||||
12
.krew.yaml
12
.krew.yaml
@@ -16,12 +16,24 @@ spec:
|
||||
arch: amd64
|
||||
{{ addURIAndSha "https://github.com/kubescape/kubescape/releases/download/{{ .TagName }}/kubescape-macos-latest.tar.gz" .TagName }}
|
||||
bin: kubescape
|
||||
- selector:
|
||||
matchLabels:
|
||||
os: darwin
|
||||
arch: arm64
|
||||
{{ addURIAndSha "https://github.com/kubescape/kubescape/releases/download/{{ .TagName }}/kubescape-arm64-macos-latest.tar.gz" .TagName }}
|
||||
bin: kubescape
|
||||
- selector:
|
||||
matchLabels:
|
||||
os: linux
|
||||
arch: amd64
|
||||
{{ addURIAndSha "https://github.com/kubescape/kubescape/releases/download/{{ .TagName }}/kubescape-ubuntu-latest.tar.gz" .TagName }}
|
||||
bin: kubescape
|
||||
- selector:
|
||||
matchLabels:
|
||||
os: linux
|
||||
arch: arm64
|
||||
{{ addURIAndSha "https://github.com/kubescape/kubescape/releases/download/{{ .TagName }}/kubescape-arm64-ubuntu-latest.tar.gz" .TagName }}
|
||||
bin: kubescape
|
||||
- selector:
|
||||
matchLabels:
|
||||
os: windows
|
||||
|
||||
@@ -15,6 +15,10 @@ so the maintainers are able to help guide you and let you know if you are going
|
||||
|
||||
Please follow our [code of conduct](CODE_OF_CONDUCT.md) in all of your interactions within the project.
|
||||
|
||||
## Build and test locally
|
||||
|
||||
Please follow the [instructions here](https://github.com/kubescape/kubescape/wiki/Building).
|
||||
|
||||
## Pull Request Process
|
||||
|
||||
1. Ensure any install or build dependencies are removed before the end of the layer when doing a
|
||||
|
||||
8
Makefile
8
Makefile
@@ -10,6 +10,14 @@ libgit2:
|
||||
-git submodule update --init --recursive
|
||||
cd git2go; make install-static
|
||||
|
||||
# build and install libgit2 for macOS m1
|
||||
libgit2arm64:
|
||||
git submodule update --init --recursive
|
||||
if [ "$(shell uname -s)" = "Darwin" ]; then \
|
||||
sed -i '' 's/cmake -D/cmake -DCMAKE_OSX_ARCHITECTURES="arm64" -D/' git2go/script/build-libgit2.sh; \
|
||||
fi
|
||||
cd git2go; make install-static
|
||||
|
||||
# go build tags
|
||||
TAGS = "gitenabled,static"
|
||||
|
||||
|
||||
@@ -37,11 +37,11 @@ curl -s https://raw.githubusercontent.com/kubescape/kubescape/master/install.sh
|
||||
|
||||
Learn more about:
|
||||
|
||||
* [Installing Kubescape](docs/getting-started.md#install-kubescape)
|
||||
* [Installing Kubescape](docs/installation.md)
|
||||
* [Running your first scan](docs/getting-started.md#run-your-first-scan)
|
||||
* [Usage](docs/getting-started.md#examples)
|
||||
* [Architecture](docs/architecture.md)
|
||||
* [Building Kubescape from source](docs/building.md)
|
||||
* [Building Kubescape from source](https://github.com/kubescape/kubescape/wiki/Building)
|
||||
|
||||
_Did you know you can use Kubescape in all these places?_
|
||||
|
||||
|
||||
51
build.bat
51
build.bat
@@ -1,51 +0,0 @@
|
||||
@ECHO OFF
|
||||
|
||||
IF "%1"=="install" goto Install
|
||||
IF "%1"=="build" goto Build
|
||||
IF "%1"=="all" goto All
|
||||
IF "%1"=="" goto Error ELSE goto Error
|
||||
|
||||
:Install
|
||||
|
||||
if exist C:\MSYS64\ (
|
||||
echo "MSYS2 already installed"
|
||||
) else (
|
||||
mkdir temp_install & cd temp_install
|
||||
|
||||
echo "Downloading MSYS2..."
|
||||
curl -L https://github.com/msys2/msys2-installer/releases/download/2022-06-03/msys2-x86_64-20220603.exe > msys2-x86_64-20220603.exe
|
||||
|
||||
echo "Installing MSYS2..."
|
||||
msys2-x86_64-20220603.exe install --root C:\MSYS64 --confirm-command
|
||||
|
||||
cd .. && rmdir /s /q temp_install
|
||||
)
|
||||
|
||||
|
||||
echo "Adding MSYS2 to path..."
|
||||
SET "PATH=C:\MSYS64\mingw64\bin;C:\MSYS64\usr\bin;%PATH%"
|
||||
echo %PATH%
|
||||
|
||||
echo "Installing MSYS2 packages..."
|
||||
pacman -S --needed --noconfirm make
|
||||
pacman -S --needed --noconfirm mingw-w64-x86_64-cmake
|
||||
pacman -S --needed --noconfirm mingw-w64-x86_64-gcc
|
||||
pacman -S --needed --noconfirm mingw-w64-x86_64-pkg-config
|
||||
pacman -S --needed --noconfirm msys2-w32api-runtime
|
||||
|
||||
IF "%1"=="all" GOTO Build
|
||||
GOTO End
|
||||
|
||||
:Build
|
||||
SET "PATH=C:\MSYS2\mingw64\bin;C:\MSYS2\usr\bin;%PATH%"
|
||||
make libgit2
|
||||
GOTO End
|
||||
|
||||
:All
|
||||
GOTO Install
|
||||
|
||||
:Error
|
||||
echo "Error: Unknown option"
|
||||
GOTO End
|
||||
|
||||
:End
|
||||
78
build.ps1
Normal file
78
build.ps1
Normal file
@@ -0,0 +1,78 @@
|
||||
# Defining input params
|
||||
param (
|
||||
[string]$mode = "error"
|
||||
)
|
||||
|
||||
# Function to install MSYS
|
||||
function Install {
|
||||
Write-Host "Starting install..." -ForegroundColor Cyan
|
||||
|
||||
# Check to see if already installed
|
||||
if (Test-Path "C:\MSYS64\") {
|
||||
Write-Host "MSYS2 already installed" -ForegroundColor Green
|
||||
} else {
|
||||
# Create a temp directory
|
||||
New-Item -Path "$PSScriptRoot\temp_install" -ItemType Directory > $null
|
||||
|
||||
# Download MSYS
|
||||
Write-Host "Downloading MSYS2..." -ForegroundColor Cyan
|
||||
$bitsJobObj = Start-BitsTransfer "https://github.com/msys2/msys2-installer/releases/download/2022-06-03/msys2-x86_64-20220603.exe" -Destination "$PSScriptRoot\temp_install\msys2-x86_64-20220603.exe"
|
||||
switch ($bitsJobObj.JobState) {
|
||||
"Transferred" {
|
||||
Complete-BitsTransfer -BitsJob $bitsJobObj
|
||||
break
|
||||
}
|
||||
"Error" {
|
||||
throw "Error downloading"
|
||||
}
|
||||
}
|
||||
Write-Host "MSYS2 download complete" -ForegroundColor Green
|
||||
|
||||
# Install MSYS
|
||||
Write-Host "Installing MSYS2..." -ForegroundColor Cyan
|
||||
Start-Process -Filepath "$PSScriptRoot\temp_install\msys2-x86_64-20220603.exe" -ArgumentList @("install", "--root", "C:\MSYS64", "--confirm-command") -Wait
|
||||
Write-Host "MSYS2 install complete" -ForegroundColor Green
|
||||
|
||||
# Remove temp directory
|
||||
Remove-Item "$PSScriptRoot\temp_install" -Recurse
|
||||
}
|
||||
|
||||
# Set PATH
|
||||
$env:Path = "C:\MSYS64\mingw64\bin;C:\MSYS64\usr\bin;" + $env:Path
|
||||
|
||||
# Install MSYS packages
|
||||
Write-Host "Installing MSYS2 packages..." -ForegroundColor Cyan
|
||||
Start-Process -Filepath "pacman" -ArgumentList @("-S", "--needed", "--noconfirm", "make") -Wait
|
||||
Start-Process -Filepath "pacman" -ArgumentList @("-S", "--needed", "--noconfirm", "mingw-w64-x86_64-cmake") -Wait
|
||||
Start-Process -Filepath "pacman" -ArgumentList @("-S", "--needed", "--noconfirm", "mingw-w64-x86_64-gcc") -Wait
|
||||
Start-Process -Filepath "pacman" -ArgumentList @("-S", "--needed", "--noconfirm", "mingw-w64-x86_64-pkg-config") -Wait
|
||||
Start-Process -Filepath "pacman" -ArgumentList @("-S", "--needed", "--noconfirm", "msys2-w32api-runtime") -Wait
|
||||
Write-Host "MSYS2 packages install complete" -ForegroundColor Green
|
||||
|
||||
Write-Host "Install complete" -ForegroundColor Green
|
||||
}
|
||||
|
||||
# Function to build libgit2
|
||||
function Build {
|
||||
Write-Host "Starting build..." -ForegroundColor Cyan
|
||||
|
||||
# Set PATH
|
||||
$env:Path = "C:\MSYS64\mingw64\bin;C:\MSYS64\usr\bin;" + $env:Path
|
||||
|
||||
# Build
|
||||
Start-Process -Filepath "make" -ArgumentList @("libgit2") -Wait -NoNewWindow
|
||||
|
||||
Write-Host "Build complete" -ForegroundColor Green
|
||||
}
|
||||
|
||||
# Check user call mode
|
||||
if ($mode -eq "all") {
|
||||
Install
|
||||
Build
|
||||
} elseif ($mode -eq "install") {
|
||||
Install
|
||||
} elseif ($mode -eq "build") {
|
||||
Build
|
||||
} else {
|
||||
Write-Host "Error: -mode should be one of (all|install|build)" -ForegroundColor Red
|
||||
}
|
||||
8
build.py
8
build.py
@@ -27,7 +27,13 @@ def get_build_dir():
|
||||
def get_package_name():
|
||||
if CURRENT_PLATFORM not in platformSuffixes: raise OSError("Platform %s is not supported!" % (CURRENT_PLATFORM))
|
||||
|
||||
return "kubescape-" + platformSuffixes[CURRENT_PLATFORM]
|
||||
# # TODO: kubescape-windows-latest is deprecated and should be removed
|
||||
# if CURRENT_PLATFORM == "Windows": return "kubescape.exe"
|
||||
|
||||
package_name = "kubescape-"
|
||||
if os.getenv("GOARCH"):
|
||||
package_name += os.getenv("GOARCH") + "-"
|
||||
return package_name + platformSuffixes[CURRENT_PLATFORM]
|
||||
|
||||
|
||||
def main():
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
FROM golang:1.19-alpine as builder
|
||||
FROM golang:1.20-alpine as builder
|
||||
|
||||
ARG image_version
|
||||
ARG client
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
"github.com/kubescape/kubescape/v2/core/meta"
|
||||
v1 "github.com/kubescape/kubescape/v2/core/meta/datastructures/v1"
|
||||
"github.com/spf13/cobra"
|
||||
"golang.org/x/exp/slices"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -55,7 +56,7 @@ func GetDownloadCmd(ks meta.IKubescape) *cobra.Command {
|
||||
if len(args) < 1 {
|
||||
return fmt.Errorf("policy type required, supported: %v", supported)
|
||||
}
|
||||
if cautils.StringInSlice(core.DownloadSupportCommands(), args[0]) == cautils.ValueNotFound {
|
||||
if !slices.Contains(core.DownloadSupportCommands(), args[0]) {
|
||||
return fmt.Errorf("invalid parameter '%s'. Supported parameters: %s", args[0], supported)
|
||||
}
|
||||
return nil
|
||||
|
||||
@@ -17,7 +17,7 @@ var fixCmdExamples = fmt.Sprintf(`
|
||||
Use with caution, this command will change your files in-place.
|
||||
|
||||
# Fix kubernetes YAML manifest files based on a scan command output (output.json)
|
||||
1) %[1]s scan --format json --format-version v2 --output output.json
|
||||
1) %[1]s scan . --format json --output output.json
|
||||
2) %[1]s fix output.json
|
||||
|
||||
`, cautils.ExecName())
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
"github.com/kubescape/kubescape/v2/core/meta"
|
||||
v1 "github.com/kubescape/kubescape/v2/core/meta/datastructures/v1"
|
||||
"github.com/spf13/cobra"
|
||||
"golang.org/x/exp/slices"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -43,7 +44,7 @@ func GetListCmd(ks meta.IKubescape) *cobra.Command {
|
||||
if len(args) < 1 {
|
||||
return fmt.Errorf("policy type requeued, supported: %s", supported)
|
||||
}
|
||||
if cautils.StringInSlice(core.ListSupportActions(), args[0]) == cautils.ValueNotFound {
|
||||
if !slices.Contains(core.ListSupportActions(), args[0]) {
|
||||
return fmt.Errorf("invalid parameter '%s'. Supported parameters: %s", args[0], supported)
|
||||
}
|
||||
return nil
|
||||
@@ -63,8 +64,6 @@ func GetListCmd(ks meta.IKubescape) *cobra.Command {
|
||||
},
|
||||
}
|
||||
listCmd.PersistentFlags().StringVarP(&listPolicies.Credentials.Account, "account", "", "", "Kubescape SaaS account ID. Default will load account ID from cache")
|
||||
listCmd.PersistentFlags().StringVarP(&listPolicies.Credentials.ClientID, "client-id", "", "", "Kubescape SaaS client ID. Default will load client ID from cache, read more - https://hub.armosec.io/docs/authentication")
|
||||
listCmd.PersistentFlags().StringVarP(&listPolicies.Credentials.SecretKey, "secret-key", "", "", "Kubescape SaaS secret key. Default will load secret key from cache, read more - https://hub.armosec.io/docs/authentication")
|
||||
listCmd.PersistentFlags().StringVar(&listPolicies.Format, "format", "pretty-print", "output format. supported: 'pretty-print'/'json'")
|
||||
listCmd.PersistentFlags().MarkDeprecated("id", "Control ID's are included in list outputs")
|
||||
|
||||
|
||||
@@ -110,6 +110,9 @@ func getControlCmd(ks meta.IKubescape, scanInfo *cautils.ScanInfo) *cobra.Comman
|
||||
if results.GetRiskScore() > float32(scanInfo.FailThreshold) {
|
||||
logger.L().Fatal("scan risk-score is above permitted threshold", helpers.String("risk-score", fmt.Sprintf("%.2f", results.GetRiskScore())), helpers.String("fail-threshold", fmt.Sprintf("%.2f", scanInfo.FailThreshold)))
|
||||
}
|
||||
if results.GetComplianceScore() < float32(scanInfo.ComplianceThreshold) {
|
||||
logger.L().Fatal("scan compliance-score is below permitted threshold", helpers.String("compliance score", fmt.Sprintf("%.2f", results.GetComplianceScore())), helpers.String("compliance-threshold", fmt.Sprintf("%.2f", scanInfo.ComplianceThreshold)))
|
||||
}
|
||||
enforceSeverityThresholds(results.GetResults().SummaryDetails.GetResourcesSeverityCounters(), scanInfo, terminateOnExceedingSeverity)
|
||||
|
||||
return nil
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
apisv1 "github.com/kubescape/opa-utils/httpserver/apis/v1"
|
||||
reporthandlingapis "github.com/kubescape/opa-utils/reporthandling/apis"
|
||||
"github.com/kubescape/opa-utils/reporthandling/results/v1/reportsummary"
|
||||
"golang.org/x/exp/slices"
|
||||
|
||||
logger "github.com/kubescape/go-logger"
|
||||
"github.com/kubescape/go-logger/helpers"
|
||||
@@ -78,14 +79,15 @@ func getFrameworkCmd(ks meta.IKubescape, scanInfo *cautils.ScanInfo) *cobra.Comm
|
||||
|
||||
var frameworks []string
|
||||
|
||||
if len(args) == 0 { // scan all frameworks
|
||||
if len(args) == 0 {
|
||||
scanInfo.ScanAll = true
|
||||
} else {
|
||||
// Read frameworks from input args
|
||||
frameworks = strings.Split(args[0], ",")
|
||||
if cautils.StringInSlice(frameworks, "all") != cautils.ValueNotFound {
|
||||
if slices.Contains(frameworks, "all") {
|
||||
scanInfo.ScanAll = true
|
||||
frameworks = getter.NativeFrameworks
|
||||
|
||||
}
|
||||
if len(args) > 1 {
|
||||
if len(args[1:]) == 0 || args[1] != "-" {
|
||||
@@ -105,6 +107,7 @@ func getFrameworkCmd(ks meta.IKubescape, scanInfo *cautils.ScanInfo) *cobra.Comm
|
||||
}
|
||||
}
|
||||
}
|
||||
scanInfo.SetScanType(cautils.ScanTypeFramework)
|
||||
scanInfo.FrameworkScan = true
|
||||
|
||||
scanInfo.SetPolicyIdentifiers(frameworks, apisv1.KindFramework)
|
||||
@@ -118,12 +121,16 @@ func getFrameworkCmd(ks meta.IKubescape, scanInfo *cautils.ScanInfo) *cobra.Comm
|
||||
if err = results.HandleResults(ctx); err != nil {
|
||||
logger.L().Fatal(err.Error())
|
||||
}
|
||||
if !scanInfo.VerboseMode {
|
||||
|
||||
if !scanInfo.VerboseMode && scanInfo.ScanType == cautils.ScanTypeFramework {
|
||||
logger.L().Info("Run with '--verbose'/'-v' flag for detailed resources view\n")
|
||||
}
|
||||
if results.GetRiskScore() > float32(scanInfo.FailThreshold) {
|
||||
logger.L().Fatal("scan risk-score is above permitted threshold", helpers.String("risk-score", fmt.Sprintf("%.2f", results.GetRiskScore())), helpers.String("fail-threshold", fmt.Sprintf("%.2f", scanInfo.FailThreshold)))
|
||||
}
|
||||
if results.GetComplianceScore() < float32(scanInfo.ComplianceThreshold) {
|
||||
logger.L().Fatal("scan compliance-score is below permitted threshold", helpers.String("compliance-score", fmt.Sprintf("%.2f", results.GetComplianceScore())), helpers.String("compliance-threshold", fmt.Sprintf("%.2f", scanInfo.ComplianceThreshold)))
|
||||
}
|
||||
|
||||
enforceSeverityThresholds(results.GetData().Report.SummaryDetails.GetResourcesSeverityCounters(), scanInfo, terminateOnExceedingSeverity)
|
||||
return nil
|
||||
@@ -204,6 +211,9 @@ func validateFrameworkScanInfo(scanInfo *cautils.ScanInfo) error {
|
||||
if scanInfo.Submit && scanInfo.Local {
|
||||
return fmt.Errorf("you can use `keep-local` or `submit`, but not both")
|
||||
}
|
||||
if 100 < scanInfo.ComplianceThreshold || 0 > scanInfo.ComplianceThreshold {
|
||||
return fmt.Errorf("bad argument: out of range threshold")
|
||||
}
|
||||
if 100 < scanInfo.FailThreshold || 0 > scanInfo.FailThreshold {
|
||||
return fmt.Errorf("bad argument: out of range threshold")
|
||||
}
|
||||
|
||||
117
cmd/scan/image.go
Normal file
117
cmd/scan/image.go
Normal file
@@ -0,0 +1,117 @@
|
||||
package scan
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
logger "github.com/kubescape/go-logger"
|
||||
"github.com/kubescape/go-logger/iconlogger"
|
||||
"github.com/kubescape/kubescape/v2/core/cautils"
|
||||
"github.com/kubescape/kubescape/v2/core/core"
|
||||
"github.com/kubescape/kubescape/v2/core/meta"
|
||||
"github.com/kubescape/kubescape/v2/core/pkg/resultshandling"
|
||||
"github.com/kubescape/kubescape/v2/pkg/imagescan"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
type imageScanInfo struct {
|
||||
Username string
|
||||
Password string
|
||||
}
|
||||
|
||||
// TODO(vladklokun): document image scanning on the Kubescape Docs Hub?
|
||||
var (
|
||||
imageExample = fmt.Sprintf(`
|
||||
This command is still in BETA. Feel free to contact the kubescape maintainers for more information.
|
||||
|
||||
Scan an image for vulnerabilities.
|
||||
|
||||
# Scan the 'nginx' image
|
||||
%[1]s scan image "nginx"
|
||||
|
||||
# Image scan documentation:
|
||||
# https://hub.armosec.io/docs/images
|
||||
`, cautils.ExecName())
|
||||
)
|
||||
|
||||
// imageCmd represents the image command
|
||||
func getImageCmd(ks meta.IKubescape, scanInfo *cautils.ScanInfo, imgScanInfo *imageScanInfo) *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "image <IMAGE_NAME>",
|
||||
Short: "Scan an image for vulnerabilities",
|
||||
Example: imageExample,
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) != 1 {
|
||||
return fmt.Errorf("the command takes exactly one image name as an argument")
|
||||
}
|
||||
return nil
|
||||
},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
if err := validateImageScanInfo(scanInfo); err != nil {
|
||||
return err
|
||||
}
|
||||
failOnSeverity := imagescan.ParseSeverity(scanInfo.FailThresholdSeverity)
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
logger.InitLogger(iconlogger.LoggerName)
|
||||
|
||||
dbCfg, _ := imagescan.NewDefaultDBConfig()
|
||||
svc := imagescan.NewScanService(dbCfg)
|
||||
|
||||
creds := imagescan.RegistryCredentials{
|
||||
Username: imgScanInfo.Username,
|
||||
Password: imgScanInfo.Password,
|
||||
}
|
||||
|
||||
userInput := args[0]
|
||||
|
||||
logger.L().Start(fmt.Sprintf("Scanning image: %s", userInput))
|
||||
scanResults, err := svc.Scan(ctx, userInput, creds)
|
||||
if err != nil {
|
||||
logger.L().StopError(fmt.Sprintf("Failed to scan image: %s", userInput))
|
||||
return err
|
||||
}
|
||||
logger.L().StopSuccess(fmt.Sprintf("Successfully scanned image: %s", userInput))
|
||||
|
||||
scanInfo.SetScanType(cautils.ScanTypeImage)
|
||||
|
||||
outputPrinters := core.GetOutputPrinters(scanInfo, ctx)
|
||||
|
||||
uiPrinter := core.GetUIPrinter(ctx, scanInfo)
|
||||
|
||||
resultsHandler := resultshandling.NewResultsHandler(nil, outputPrinters, uiPrinter)
|
||||
|
||||
resultsHandler.ImageScanData = []cautils.ImageScanData{
|
||||
{
|
||||
PresenterConfig: scanResults,
|
||||
Image: userInput,
|
||||
},
|
||||
}
|
||||
|
||||
resultsHandler.HandleResults(ctx)
|
||||
|
||||
if imagescan.ExceedsSeverityThreshold(scanResults, failOnSeverity) {
|
||||
terminateOnExceedingSeverity(scanInfo, logger.L())
|
||||
}
|
||||
|
||||
return err
|
||||
},
|
||||
}
|
||||
|
||||
cmd.PersistentFlags().StringVarP(&imgScanInfo.Username, "username", "u", "", "Username for registry login")
|
||||
cmd.PersistentFlags().StringVarP(&imgScanInfo.Password, "password", "p", "", "Password for registry login")
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
// validateImageScanInfo validates the ScanInfo struct for the `image` command
|
||||
func validateImageScanInfo(scanInfo *cautils.ScanInfo) error {
|
||||
severity := scanInfo.FailThresholdSeverity
|
||||
|
||||
if err := validateSeverity(severity); severity != "" && err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -1,6 +1,7 @@
|
||||
package scan
|
||||
|
||||
import (
|
||||
"context"
|
||||
"flag"
|
||||
"fmt"
|
||||
"strings"
|
||||
@@ -9,6 +10,7 @@ import (
|
||||
"github.com/kubescape/kubescape/v2/core/cautils"
|
||||
"github.com/kubescape/kubescape/v2/core/cautils/getter"
|
||||
"github.com/kubescape/kubescape/v2/core/meta"
|
||||
v1 "github.com/kubescape/opa-utils/httpserver/apis/v1"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
@@ -16,7 +18,7 @@ var scanCmdExamples = fmt.Sprintf(`
|
||||
Scan command is for scanning an existing cluster or kubernetes manifest files based on pre-defined frameworks
|
||||
|
||||
# Scan current cluster with all frameworks
|
||||
%[1]s scan --enable-host-scan --verbose
|
||||
%[1]s scan
|
||||
|
||||
# Scan kubernetes YAML manifest files
|
||||
%[1]s scan .
|
||||
@@ -41,7 +43,8 @@ func GetScanCommand(ks meta.IKubescape) *cobra.Command {
|
||||
Long: `The action you want to perform`,
|
||||
Example: scanCmdExamples,
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) > 0 {
|
||||
// setting input patterns for framework scan is only relevancy for non-security view
|
||||
if len(args) > 0 && scanInfo.View != string(cautils.SecurityViewType) {
|
||||
if args[0] != "framework" && args[0] != "control" {
|
||||
return getFrameworkCmd(ks, &scanInfo).RunE(cmd, append([]string{strings.Join(getter.NativeFrameworks, ",")}, args...))
|
||||
}
|
||||
@@ -49,6 +52,11 @@ func GetScanCommand(ks meta.IKubescape) *cobra.Command {
|
||||
return nil
|
||||
},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
if scanInfo.View == string(cautils.SecurityViewType) {
|
||||
setSecurityViewScanInfo(args, &scanInfo)
|
||||
|
||||
return securityScan(scanInfo, ks)
|
||||
}
|
||||
|
||||
if len(args) == 0 {
|
||||
return getFrameworkCmd(ks, &scanInfo).RunE(cmd, []string{strings.Join(getter.NativeFrameworks, ",")})
|
||||
@@ -66,15 +74,14 @@ func GetScanCommand(ks meta.IKubescape) *cobra.Command {
|
||||
|
||||
scanCmd.PersistentFlags().StringVarP(&scanInfo.Credentials.Account, "account", "", "", "Kubescape SaaS account ID. Default will load account ID from cache")
|
||||
scanCmd.PersistentFlags().BoolVar(&scanInfo.CreateAccount, "create-account", false, "Create a Kubescape SaaS account ID account ID is not found in cache. After creating the account, the account ID will be saved in cache. In addition, the scanning results will be uploaded to the Kubescape SaaS")
|
||||
scanCmd.PersistentFlags().StringVarP(&scanInfo.Credentials.ClientID, "client-id", "", "", "Kubescape SaaS client ID. Default will load client ID from cache, read more - https://hub.armosec.io/docs/authentication")
|
||||
scanCmd.PersistentFlags().StringVarP(&scanInfo.Credentials.SecretKey, "secret-key", "", "", "Kubescape SaaS secret key. Default will load secret key from cache, read more - https://hub.armosec.io/docs/authentication")
|
||||
scanCmd.PersistentFlags().StringVarP(&scanInfo.KubeContext, "kube-context", "", "", "Kube context. Default will use the current-context")
|
||||
scanCmd.PersistentFlags().StringVar(&scanInfo.ControlsInputs, "controls-config", "", "Path to an controls-config obj. If not set will download controls-config from ARMO management portal")
|
||||
scanCmd.PersistentFlags().StringVar(&scanInfo.UseExceptions, "exceptions", "", "Path to an exceptions obj. If not set will download exceptions from ARMO management portal")
|
||||
scanCmd.PersistentFlags().StringVar(&scanInfo.UseArtifactsFrom, "use-artifacts-from", "", "Load artifacts from local directory. If not used will download them")
|
||||
scanCmd.PersistentFlags().StringVarP(&scanInfo.ExcludedNamespaces, "exclude-namespaces", "e", "", "Namespaces to exclude from scanning. Notice, when running with `exclude-namespace` kubescape does not scan cluster-scoped objects.")
|
||||
scanCmd.PersistentFlags().StringVarP(&scanInfo.ExcludedNamespaces, "exclude-namespaces", "e", "", "Namespaces to exclude from scanning. e.g: --exclude-namespaces ns-a,ns-b. Notice, when running with `exclude-namespace` kubescape does not scan cluster-scoped objects.")
|
||||
|
||||
scanCmd.PersistentFlags().Float32VarP(&scanInfo.FailThreshold, "fail-threshold", "t", 100, "Failure threshold is the percent above which the command fails and returns exit code 1")
|
||||
scanCmd.PersistentFlags().Float32VarP(&scanInfo.ComplianceThreshold, "compliance-threshold", "", 0, "Compliance threshold is the percent below which the command fails and returns exit code 1")
|
||||
|
||||
scanCmd.PersistentFlags().StringVar(&scanInfo.FailThresholdSeverity, "severity-threshold", "", "Severity threshold is the severity of failed controls at which the command fails and returns exit code 1")
|
||||
scanCmd.PersistentFlags().StringVarP(&scanInfo.Format, "format", "f", "", `Output file format. Supported formats: "pretty-printer", "json", "junit", "prometheus", "pdf", "html", "sarif"`)
|
||||
@@ -82,7 +89,7 @@ func GetScanCommand(ks meta.IKubescape) *cobra.Command {
|
||||
scanCmd.PersistentFlags().BoolVarP(&scanInfo.Local, "keep-local", "", false, "If you do not want your Kubescape results reported to configured backend.")
|
||||
scanCmd.PersistentFlags().StringVarP(&scanInfo.Output, "output", "o", "", "Output file. Print output to file and not stdout")
|
||||
scanCmd.PersistentFlags().BoolVarP(&scanInfo.VerboseMode, "verbose", "v", false, "Display all of the input resources and not only failed resources")
|
||||
scanCmd.PersistentFlags().StringVar(&scanInfo.View, "view", string(cautils.ResourceViewType), fmt.Sprintf("View results based on the %s/%s. default is --view=%s", cautils.ResourceViewType, cautils.ControlViewType, cautils.ResourceViewType))
|
||||
scanCmd.PersistentFlags().StringVar(&scanInfo.View, "view", string(cautils.ResourceViewType), fmt.Sprintf("View results based on the %s/%s/%s. default is --view=%s", cautils.ResourceViewType, cautils.ControlViewType, cautils.SecurityViewType, cautils.ResourceViewType))
|
||||
scanCmd.PersistentFlags().BoolVar(&scanInfo.UseDefault, "use-default", false, "Load local policy object from default path. If not used will download latest")
|
||||
scanCmd.PersistentFlags().StringSliceVar(&scanInfo.UseFrom, "use-from", nil, "Load local policy object from specified path. If not used will download latest")
|
||||
scanCmd.PersistentFlags().StringVar(&scanInfo.HostSensorYamlPath, "host-scan-yaml", "", "Override default host scanner DaemonSet. Use this flag cautiously")
|
||||
@@ -91,11 +98,17 @@ func GetScanCommand(ks meta.IKubescape) *cobra.Command {
|
||||
scanCmd.PersistentFlags().BoolVarP(&scanInfo.Submit, "submit", "", false, "Submit the scan results to Kubescape SaaS where you can see the results in a user-friendly UI, choose your preferred compliance framework, check risk results history and trends, manage exceptions, get remediation recommendations and much more. By default the results are not submitted")
|
||||
scanCmd.PersistentFlags().BoolVarP(&scanInfo.OmitRawResources, "omit-raw-resources", "", false, "Omit raw resources from the output. By default the raw resources are included in the output")
|
||||
scanCmd.PersistentFlags().BoolVarP(&scanInfo.PrintAttackTree, "print-attack-tree", "", false, "Print attack tree")
|
||||
scanCmd.PersistentFlags().BoolVarP(&scanInfo.ScanImages, "scan-images", "", false, "Scan resources images")
|
||||
|
||||
scanCmd.PersistentFlags().MarkDeprecated("silent", "use '--logger' flag instead. Flag will be removed at 1.May.2022")
|
||||
scanCmd.PersistentFlags().MarkDeprecated("fail-threshold", "use '--compliance-threshold' flag instead. Flag will be removed at 1.Dec.2023")
|
||||
|
||||
scanCmd.PersistentFlags().StringVarP(&scanInfo.Credentials.ClientID, "client-id", "", "", "Kubescape SaaS client ID. Default will load client ID from cache, read more - https://hub.armosec.io/docs/authentication")
|
||||
scanCmd.PersistentFlags().StringVarP(&scanInfo.Credentials.SecretKey, "secret-key", "", "", "Kubescape SaaS secret key. Default will load secret key from cache, read more - https://hub.armosec.io/docs/authentication")
|
||||
scanCmd.PersistentFlags().MarkDeprecated("client-id", "login to Kubescape SaaS will be unsupported, please contact the Kubescape maintainers for more information")
|
||||
scanCmd.PersistentFlags().MarkDeprecated("secret-key", "login to Kubescape SaaS will be unsupported, please contact the Kubescape maintainers for more information")
|
||||
|
||||
// hidden flags
|
||||
scanCmd.PersistentFlags().MarkHidden("host-scan-yaml") // this flag should be used very cautiously. We prefer users will not use it at all unless the DaemonSet can not run pods on the nodes
|
||||
scanCmd.PersistentFlags().MarkHidden("omit-raw-resources")
|
||||
scanCmd.PersistentFlags().MarkHidden("print-attack-tree")
|
||||
|
||||
@@ -105,9 +118,46 @@ func GetScanCommand(ks meta.IKubescape) *cobra.Command {
|
||||
hostF := scanCmd.PersistentFlags().VarPF(&scanInfo.HostSensorEnabled, "enable-host-scan", "", "Deploy Kubescape host-sensor daemonset in the scanned cluster. Deleting it right after we collecting the data. Required to collect valuable data from cluster nodes for certain controls. Yaml file: https://github.com/kubescape/kubescape/blob/master/core/pkg/hostsensorutils/hostsensor.yaml")
|
||||
hostF.NoOptDefVal = "true"
|
||||
hostF.DefValue = "false, for no TTY in stdin"
|
||||
scanCmd.PersistentFlags().MarkHidden("enable-host-scan")
|
||||
scanCmd.PersistentFlags().MarkDeprecated("enable-host-scan", "To activate the host scanner capability, proceed with the installation of the kubescape operator chart found here: https://github.com/kubescape/helm-charts/tree/main/charts/kubescape-cloud-operator. The flag will be removed at 1.Dec.2023")
|
||||
|
||||
scanCmd.PersistentFlags().MarkHidden("host-scan-yaml") // this flag should be used very cautiously. We prefer users will not use it at all unless the DaemonSet can not run pods on the nodes
|
||||
scanCmd.PersistentFlags().MarkDeprecated("host-scan-yaml", "To activate the host scanner capability, proceed with the installation of the kubescape operator chart found here: https://github.com/kubescape/helm-charts/tree/main/charts/kubescape-cloud-operator. The flag will be removed at 1.Dec.2023")
|
||||
|
||||
scanCmd.AddCommand(getControlCmd(ks, &scanInfo))
|
||||
scanCmd.AddCommand(getFrameworkCmd(ks, &scanInfo))
|
||||
scanCmd.AddCommand(getWorkloadCmd(ks, &scanInfo))
|
||||
|
||||
isi := &imageScanInfo{}
|
||||
scanCmd.AddCommand(getImageCmd(ks, &scanInfo, isi))
|
||||
|
||||
return scanCmd
|
||||
}
|
||||
|
||||
func setSecurityViewScanInfo(args []string, scanInfo *cautils.ScanInfo) {
|
||||
if len(args) > 0 {
|
||||
scanInfo.SetScanType(cautils.ScanTypeRepo)
|
||||
scanInfo.InputPatterns = args
|
||||
} else {
|
||||
scanInfo.SetScanType(cautils.ScanTypeCluster)
|
||||
}
|
||||
scanInfo.SetPolicyIdentifiers([]string{"clusterscan", "mitre", "nsa"}, v1.KindFramework)
|
||||
}
|
||||
|
||||
func securityScan(scanInfo cautils.ScanInfo, ks meta.IKubescape) error {
|
||||
|
||||
ctx := context.TODO()
|
||||
|
||||
results, err := ks.Scan(ctx, &scanInfo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = results.HandleResults(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
enforceSeverityThresholds(results.GetData().Report.SummaryDetails.GetResourcesSeverityCounters(), &scanInfo, terminateOnExceedingSeverity)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"github.com/kubescape/go-logger/helpers"
|
||||
|
||||
"github.com/kubescape/kubescape/v2/core/cautils"
|
||||
v1 "github.com/kubescape/opa-utils/httpserver/apis/v1"
|
||||
"github.com/kubescape/opa-utils/reporthandling/apis"
|
||||
"github.com/kubescape/opa-utils/reporthandling/results/v1/reportsummary"
|
||||
|
||||
@@ -184,17 +185,20 @@ type spyLogger struct {
|
||||
setItems []spyLogMessage
|
||||
}
|
||||
|
||||
func (l *spyLogger) Error(msg string, details ...helpers.IDetails) {}
|
||||
func (l *spyLogger) Success(msg string, details ...helpers.IDetails) {}
|
||||
func (l *spyLogger) Warning(msg string, details ...helpers.IDetails) {}
|
||||
func (l *spyLogger) Info(msg string, details ...helpers.IDetails) {}
|
||||
func (l *spyLogger) Debug(msg string, details ...helpers.IDetails) {}
|
||||
func (l *spyLogger) SetLevel(level string) error { return nil }
|
||||
func (l *spyLogger) GetLevel() string { return "" }
|
||||
func (l *spyLogger) SetWriter(w *os.File) {}
|
||||
func (l *spyLogger) GetWriter() *os.File { return &os.File{} }
|
||||
func (l *spyLogger) LoggerName() string { return "" }
|
||||
func (l *spyLogger) Ctx(_ context.Context) helpers.ILogger { return l }
|
||||
func (l *spyLogger) Error(msg string, details ...helpers.IDetails) {}
|
||||
func (l *spyLogger) Success(msg string, details ...helpers.IDetails) {}
|
||||
func (l *spyLogger) Warning(msg string, details ...helpers.IDetails) {}
|
||||
func (l *spyLogger) Info(msg string, details ...helpers.IDetails) {}
|
||||
func (l *spyLogger) Debug(msg string, details ...helpers.IDetails) {}
|
||||
func (l *spyLogger) SetLevel(level string) error { return nil }
|
||||
func (l *spyLogger) GetLevel() string { return "" }
|
||||
func (l *spyLogger) SetWriter(w *os.File) {}
|
||||
func (l *spyLogger) GetWriter() *os.File { return &os.File{} }
|
||||
func (l *spyLogger) LoggerName() string { return "" }
|
||||
func (l *spyLogger) Ctx(_ context.Context) helpers.ILogger { return l }
|
||||
func (l *spyLogger) Start(msg string, details ...helpers.IDetails) {}
|
||||
func (l *spyLogger) StopSuccess(msg string, details ...helpers.IDetails) {}
|
||||
func (l *spyLogger) StopError(msg string, details ...helpers.IDetails) {}
|
||||
|
||||
func (l *spyLogger) Fatal(msg string, details ...helpers.IDetails) {
|
||||
firstDetail := details[0]
|
||||
@@ -254,3 +258,106 @@ func Test_terminateOnExceedingSeverity(t *testing.T) {
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetSecurityViewScanInfo(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
args []string
|
||||
want *cautils.ScanInfo
|
||||
}{
|
||||
{
|
||||
name: "no args",
|
||||
args: []string{},
|
||||
want: &cautils.ScanInfo{
|
||||
InputPatterns: []string{},
|
||||
ScanType: cautils.ScanTypeCluster,
|
||||
PolicyIdentifier: []cautils.PolicyIdentifier{
|
||||
{
|
||||
Kind: v1.KindFramework,
|
||||
Identifier: "clusterscan",
|
||||
},
|
||||
{
|
||||
Kind: v1.KindFramework,
|
||||
Identifier: "mitre",
|
||||
},
|
||||
{
|
||||
Kind: v1.KindFramework,
|
||||
Identifier: "nsa",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "with args",
|
||||
args: []string{
|
||||
"file.yaml",
|
||||
"file2.yaml",
|
||||
},
|
||||
want: &cautils.ScanInfo{
|
||||
ScanType: cautils.ScanTypeRepo,
|
||||
InputPatterns: []string{
|
||||
"file.yaml",
|
||||
"file2.yaml",
|
||||
},
|
||||
PolicyIdentifier: []cautils.PolicyIdentifier{
|
||||
{
|
||||
Kind: v1.KindFramework,
|
||||
Identifier: "clusterscan",
|
||||
},
|
||||
{
|
||||
Kind: v1.KindFramework,
|
||||
Identifier: "mitre",
|
||||
},
|
||||
{
|
||||
Kind: v1.KindFramework,
|
||||
Identifier: "nsa",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := &cautils.ScanInfo{
|
||||
View: string(cautils.SecurityViewType),
|
||||
}
|
||||
setSecurityViewScanInfo(tt.args, got)
|
||||
|
||||
if len(tt.want.InputPatterns) != len(got.InputPatterns) {
|
||||
t.Errorf("in test: %s, got: %v, want: %v", tt.name, got.InputPatterns, tt.want.InputPatterns)
|
||||
}
|
||||
|
||||
if tt.want.ScanType != got.ScanType {
|
||||
t.Errorf("in test: %s, got: %v, want: %v", tt.name, got.ScanType, tt.want.ScanType)
|
||||
}
|
||||
|
||||
for i := range tt.want.InputPatterns {
|
||||
found := false
|
||||
for j := range tt.want.InputPatterns[i] {
|
||||
if tt.want.InputPatterns[i][j] == got.InputPatterns[i][j] {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
t.Errorf("in test: %s, got: %v, want: %v", tt.name, got.InputPatterns, tt.want.InputPatterns)
|
||||
}
|
||||
}
|
||||
|
||||
for i := range tt.want.PolicyIdentifier {
|
||||
found := false
|
||||
for j := range got.PolicyIdentifier {
|
||||
if tt.want.PolicyIdentifier[i].Kind == got.PolicyIdentifier[j].Kind && tt.want.PolicyIdentifier[i].Identifier == got.PolicyIdentifier[j].Identifier {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
t.Errorf("in test: %s, got: %v, want: %v", tt.name, got.PolicyIdentifier, tt.want.PolicyIdentifier)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -114,3 +114,27 @@ func Test_validateSeverity(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_validateWorkloadIdentifier(t *testing.T) {
|
||||
testCases := []struct {
|
||||
Description string
|
||||
Input string
|
||||
Want error
|
||||
}{
|
||||
{"valid workload identifier should be valid", "deployment/test", nil},
|
||||
{"invalid workload identifier missing kind", "deployment", ErrInvalidWorkloadIdentifier},
|
||||
{"invalid workload identifier with namespace", "ns/deployment/name", ErrInvalidWorkloadIdentifier},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
t.Run(testCase.Description, func(t *testing.T) {
|
||||
input := testCase.Input
|
||||
want := testCase.Want
|
||||
got := validateWorkloadIdentifier(input)
|
||||
|
||||
if got != want {
|
||||
t.Errorf("got: %v, want: %v", got, want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
126
cmd/scan/workload.go
Normal file
126
cmd/scan/workload.go
Normal file
@@ -0,0 +1,126 @@
|
||||
package scan
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
logger "github.com/kubescape/go-logger"
|
||||
"github.com/kubescape/kubescape/v2/core/cautils"
|
||||
"github.com/kubescape/kubescape/v2/core/meta"
|
||||
v1 "github.com/kubescape/opa-utils/httpserver/apis/v1"
|
||||
"github.com/kubescape/opa-utils/objectsenvelopes"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var (
|
||||
workloadExample = fmt.Sprintf(`
|
||||
This command is still in BETA. Feel free to contact the kubescape maintainers for more information.
|
||||
|
||||
Scan a workload for misconfigurations and image vulnerabilities.
|
||||
|
||||
# Scan an workload
|
||||
%[1]s scan workload <kind>/<name>
|
||||
|
||||
# Scan an workload in a specific namespace
|
||||
%[1]s scan workload <kind>/<name> --namespace <namespace>
|
||||
|
||||
# Scan an workload from a file path
|
||||
%[1]s scan workload <kind>/<name> --file-path <file path>
|
||||
|
||||
# Scan an workload from a helm-chart template
|
||||
%[1]s scan workload <kind>/<name> --chart-path <chart path> --file-path <file path>
|
||||
|
||||
|
||||
`, cautils.ExecName())
|
||||
|
||||
ErrInvalidWorkloadIdentifier = errors.New("invalid workload identifier")
|
||||
)
|
||||
|
||||
// namespace holds the value of the --namespace flag; empty means the workload
// is looked up without a namespace qualifier.
var namespace string

// getWorkloadCmd builds the `scan workload` cobra command, which scans a
// single workload (identified as <kind>/<name>) for misconfigurations and
// image vulnerabilities, either in the live cluster or from a file/chart.
func getWorkloadCmd(ks meta.IKubescape, scanInfo *cautils.ScanInfo) *cobra.Command {
	workloadCmd := &cobra.Command{
		Use:     "workload <kind>/<name> [`<glob pattern>`/`-`] [flags]",
		Short:   "Scan a workload for misconfigurations and image vulnerabilities",
		Example: workloadExample,
		// Args validates the positional argument and the flag combination
		// before RunE executes.
		Args: func(cmd *cobra.Command, args []string) error {
			if len(args) != 1 {
				return fmt.Errorf("usage: <kind>/<name> [`<glob pattern>`/`-`] [flags]")
			}

			// --chart-path is only meaningful together with --file-path
			if scanInfo.ChartPath != "" && scanInfo.FilePath == "" {
				return fmt.Errorf("usage: --chart-path <chart path> --file-path <file path>")
			}

			return validateWorkloadIdentifier(args[0])
		},
		RunE: func(cmd *cobra.Command, args []string) error {

			kind, name, err := parseWorkloadIdentifierString(args[0])
			if err != nil {
				return fmt.Errorf("invalid input: %s", err.Error())
			}

			setWorkloadScanInfo(scanInfo, kind, name)

			// todo: add api version if provided
			ctx := context.TODO()
			results, err := ks.Scan(ctx, scanInfo)
			if err != nil {
				// Fatal exits the process; scan errors are not returned to cobra
				logger.L().Fatal(err.Error())
			}

			if err = results.HandleResults(ctx); err != nil {
				logger.L().Fatal(err.Error())
			}

			return nil
		},
	}
	workloadCmd.PersistentFlags().StringVarP(&namespace, "namespace", "n", "", "Namespace of the workload. Default will be empty.")
	workloadCmd.PersistentFlags().StringVar(&scanInfo.FilePath, "file-path", "", "Path to the workload file.")
	workloadCmd.PersistentFlags().StringVar(&scanInfo.ChartPath, "chart-path", "", "Path to the helm chart the workload is part of. Must be used with --file-path.")

	return workloadCmd
}
|
||||
|
||||
func setWorkloadScanInfo(scanInfo *cautils.ScanInfo, kind string, name string) {
|
||||
scanInfo.SetScanType(cautils.ScanTypeWorkload)
|
||||
scanInfo.ScanImages = true
|
||||
|
||||
scanInfo.ScanObject = &objectsenvelopes.ScanObject{}
|
||||
scanInfo.ScanObject.SetNamespace(namespace)
|
||||
scanInfo.ScanObject.SetKind(kind)
|
||||
scanInfo.ScanObject.SetName(name)
|
||||
|
||||
scanInfo.SetPolicyIdentifiers([]string{"workloadscan"}, v1.KindFramework)
|
||||
|
||||
if scanInfo.FilePath != "" {
|
||||
scanInfo.InputPatterns = []string{scanInfo.FilePath}
|
||||
}
|
||||
}
|
||||
|
||||
func validateWorkloadIdentifier(workloadIdentifier string) error {
|
||||
// workloadIdentifier is in the form of kind/name
|
||||
x := strings.Split(workloadIdentifier, "/")
|
||||
if len(x) != 2 || x[0] == "" || x[1] == "" {
|
||||
return ErrInvalidWorkloadIdentifier
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func parseWorkloadIdentifierString(workloadIdentifier string) (kind, name string, err error) {
|
||||
// workloadIdentifier is in the form of namespace/kind/name
|
||||
// example: default/Deployment/nginx-deployment
|
||||
x := strings.Split(workloadIdentifier, "/")
|
||||
if len(x) != 2 {
|
||||
return "", "", ErrInvalidWorkloadIdentifier
|
||||
}
|
||||
|
||||
return x[0], x[1], nil
|
||||
}
|
||||
69
cmd/scan/workload_test.go
Normal file
69
cmd/scan/workload_test.go
Normal file
@@ -0,0 +1,69 @@
|
||||
package scan
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/kubescape/kubescape/v2/core/cautils"
|
||||
v1 "github.com/kubescape/opa-utils/httpserver/apis/v1"
|
||||
"github.com/kubescape/opa-utils/objectsenvelopes"
|
||||
)
|
||||
|
||||
// TestSetWorkloadScanInfo verifies that setWorkloadScanInfo populates the
// scan type, the target scan object (kind/name) and the single
// "workloadscan" framework identifier on a fresh ScanInfo.
func TestSetWorkloadScanInfo(t *testing.T) {
	test := []struct {
		Description string
		kind        string
		name        string
		want        *cautils.ScanInfo
	}{
		{
			Description: "Set workload scan info",
			kind:        "Deployment",
			name:        "test",
			want: &cautils.ScanInfo{
				PolicyIdentifier: []cautils.PolicyIdentifier{
					{
						Identifier: "workloadscan",
						Kind:       v1.KindFramework,
					},
				},
				ScanType: cautils.ScanTypeWorkload,
				ScanObject: &objectsenvelopes.ScanObject{
					Kind: "Deployment",
					Metadata: objectsenvelopes.ScanObjectMetadata{
						Name: "test",
					},
				},
			},
		},
	}

	for _, tc := range test {
		t.Run(
			tc.Description,
			func(t *testing.T) {
				scanInfo := &cautils.ScanInfo{}
				setWorkloadScanInfo(scanInfo, tc.kind, tc.name)

				// the dedicated workload scan type must be selected
				if scanInfo.ScanType != tc.want.ScanType {
					t.Errorf("got: %v, want: %v", scanInfo.ScanType, tc.want.ScanType)
				}

				// the scan object carries the requested kind ...
				if scanInfo.ScanObject.Kind != tc.want.ScanObject.Kind {
					t.Errorf("got: %v, want: %v", scanInfo.ScanObject.Kind, tc.want.ScanObject.Kind)
				}

				// ... and the requested name
				if scanInfo.ScanObject.Metadata.Name != tc.want.ScanObject.Metadata.Name {
					t.Errorf("got: %v, want: %v", scanInfo.ScanObject.Metadata.Name, tc.want.ScanObject.Metadata.Name)
				}

				// exactly one framework must be selected
				if len(scanInfo.PolicyIdentifier) != 1 {
					t.Errorf("got: %v, want: %v", len(scanInfo.PolicyIdentifier), 1)
				}

				if scanInfo.PolicyIdentifier[0].Identifier != tc.want.PolicyIdentifier[0].Identifier {
					t.Errorf("got: %v, want: %v", scanInfo.PolicyIdentifier[0].Identifier, tc.want.PolicyIdentifier[0].Identifier)
				}
			},
		)
	}
}
|
||||
@@ -6,14 +6,16 @@ package update
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"runtime"
|
||||
|
||||
logger "github.com/kubescape/go-logger"
|
||||
"github.com/kubescape/kubescape/v2/core/cautils"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
const (
|
||||
installationLink string = "https://github.com/kubescape/kubescape/blob/master/docs/installation.md"
|
||||
)
|
||||
|
||||
var updateCmdExamples = fmt.Sprintf(`
|
||||
# Update to the latest kubescape release
|
||||
%[1]s update
|
||||
@@ -31,33 +33,7 @@ func GetUpdateCmd() *cobra.Command {
|
||||
//your version == latest version
|
||||
logger.L().Info(("You are in the latest version"))
|
||||
} else {
|
||||
|
||||
const OSTYPE string = runtime.GOOS
|
||||
var ShellToUse string
|
||||
switch OSTYPE {
|
||||
|
||||
case "windows":
|
||||
cautils.StartSpinner()
|
||||
//run the installation command for windows
|
||||
ShellToUse = "powershell"
|
||||
_, err := exec.Command(ShellToUse, "-c", "iwr -useb https://raw.githubusercontent.com/kubescape/kubescape/master/install.ps1 | iex").Output()
|
||||
|
||||
if err != nil {
|
||||
logger.L().Fatal(err.Error())
|
||||
}
|
||||
cautils.StopSpinner()
|
||||
|
||||
default:
|
||||
ShellToUse = "bash"
|
||||
cautils.StartSpinner()
|
||||
//run the installation command for linux and macOS
|
||||
_, err := exec.Command(ShellToUse, "-c", "curl -s https://raw.githubusercontent.com/kubescape/kubescape/master/install.sh | /bin/bash").Output()
|
||||
if err != nil {
|
||||
logger.L().Fatal(err.Error())
|
||||
}
|
||||
|
||||
cautils.StopSpinner()
|
||||
}
|
||||
fmt.Printf("please refer to our installation docs in the following link: %s", installationLink)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
|
||||
@@ -17,7 +17,11 @@ import (
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
const configFileName = "config"
|
||||
const (
|
||||
configFileName string = "config"
|
||||
kubescapeNamespace string = "kubescape"
|
||||
kubescapeConfigMapName string = "kubescape-config"
|
||||
)
|
||||
|
||||
func ConfigFileFullPath() string { return getter.GetDefaultPath(configFileName + ".json") }
|
||||
|
||||
@@ -29,7 +33,6 @@ type ConfigObj struct {
|
||||
AccountID string `json:"accountID,omitempty"`
|
||||
ClientID string `json:"clientID,omitempty"`
|
||||
SecretKey string `json:"secretKey,omitempty"`
|
||||
CustomerGUID string `json:"customerGUID,omitempty"` // Deprecated
|
||||
Token string `json:"invitationParam,omitempty"`
|
||||
CustomerAdminEMail string `json:"adminMail,omitempty"`
|
||||
ClusterName string `json:"clusterName,omitempty"`
|
||||
@@ -63,6 +66,35 @@ func (co *ConfigObj) Config() []byte {
|
||||
return []byte{}
|
||||
}
|
||||
|
||||
func (co *ConfigObj) updateEmptyFields(inCO *ConfigObj) error {
|
||||
if inCO.AccountID != "" {
|
||||
co.AccountID = inCO.AccountID
|
||||
}
|
||||
if inCO.CloudAPIURL != "" {
|
||||
co.CloudAPIURL = inCO.CloudAPIURL
|
||||
}
|
||||
if inCO.CloudAuthURL != "" {
|
||||
co.CloudAuthURL = inCO.CloudAuthURL
|
||||
}
|
||||
if inCO.CloudReportURL != "" {
|
||||
co.CloudReportURL = inCO.CloudReportURL
|
||||
}
|
||||
if inCO.CloudUIURL != "" {
|
||||
co.CloudUIURL = inCO.CloudUIURL
|
||||
}
|
||||
if inCO.ClusterName != "" {
|
||||
co.ClusterName = inCO.ClusterName
|
||||
}
|
||||
if inCO.CustomerAdminEMail != "" {
|
||||
co.CustomerAdminEMail = inCO.CustomerAdminEMail
|
||||
}
|
||||
if inCO.Token != "" {
|
||||
co.Token = inCO.Token
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ======================================================================================
|
||||
// =============================== interface ============================================
|
||||
// ======================================================================================
|
||||
@@ -242,18 +274,19 @@ func NewClusterConfig(k8s *k8sinterface.KubernetesApi, backendAPI getter.IBacken
|
||||
backendAPI: backendAPI,
|
||||
configObj: &ConfigObj{},
|
||||
configMapName: getConfigMapName(),
|
||||
configMapNamespace: getConfigMapNamespace(),
|
||||
configMapNamespace: GetConfigMapNamespace(),
|
||||
}
|
||||
|
||||
// first, load from configMap
|
||||
if c.existsConfigMap() {
|
||||
c.loadConfigFromConfigMap()
|
||||
}
|
||||
|
||||
// second, load from file
|
||||
// first, load from file
|
||||
if existsConfigFile() { // get from file
|
||||
loadConfigFromFile(c.configObj)
|
||||
}
|
||||
|
||||
// second, load from configMap
|
||||
if c.existsConfigMap() {
|
||||
c.updateConfigEmptyFieldsFromConfigMap()
|
||||
}
|
||||
|
||||
updateCredentials(c.configObj, credentials)
|
||||
updateCloudURLs(c.configObj)
|
||||
|
||||
@@ -359,6 +392,22 @@ func (c *ClusterConfig) ToMapString() map[string]interface{} {
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func (c *ClusterConfig) updateConfigEmptyFieldsFromConfigMap() error {
|
||||
configMap, err := c.k8s.KubernetesClient.CoreV1().ConfigMaps(c.configMapNamespace).Get(context.Background(), c.configMapName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tempCO := ConfigObj{}
|
||||
if jsonConf, ok := configMap.Data["config.json"]; ok {
|
||||
json.Unmarshal([]byte(jsonConf), &tempCO)
|
||||
return c.configObj.updateEmptyFields(&tempCO)
|
||||
}
|
||||
return err
|
||||
|
||||
}
|
||||
|
||||
func (c *ClusterConfig) loadConfigFromConfigMap() error {
|
||||
configMap, err := c.k8s.KubernetesClient.CoreV1().ConfigMaps(c.configMapNamespace).Get(context.Background(), c.configMapName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
@@ -509,10 +558,6 @@ func readConfig(dat []byte, configObj *ConfigObj) error {
|
||||
if err := json.Unmarshal(dat, configObj); err != nil {
|
||||
return err
|
||||
}
|
||||
if configObj.AccountID == "" {
|
||||
configObj.AccountID = configObj.CustomerGUID
|
||||
}
|
||||
configObj.CustomerGUID = ""
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -554,14 +599,15 @@ func getConfigMapName() string {
|
||||
if n := os.Getenv("KS_DEFAULT_CONFIGMAP_NAME"); n != "" {
|
||||
return n
|
||||
}
|
||||
return "kubescape"
|
||||
return kubescapeConfigMapName
|
||||
}
|
||||
|
||||
func getConfigMapNamespace() string {
|
||||
// GetConfigMapNamespace returns the namespace of the cluster config, which is the same for all in-cluster components
|
||||
func GetConfigMapNamespace() string {
|
||||
if n := os.Getenv("KS_DEFAULT_CONFIGMAP_NAMESPACE"); n != "" {
|
||||
return n
|
||||
}
|
||||
return "default"
|
||||
return kubescapeNamespace
|
||||
}
|
||||
|
||||
func getAccountFromEnv(credentials *Credentials) {
|
||||
|
||||
@@ -299,3 +299,159 @@ func Test_initializeCloudAPI(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetConfigMapNamespace(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
env string
|
||||
want string
|
||||
}{
|
||||
{
|
||||
name: "no env",
|
||||
want: kubescapeNamespace,
|
||||
},
|
||||
{
|
||||
name: "default ns",
|
||||
env: kubescapeNamespace,
|
||||
want: kubescapeNamespace,
|
||||
},
|
||||
{
|
||||
name: "custom ns",
|
||||
env: "my-ns",
|
||||
want: "my-ns",
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if tt.env != "" {
|
||||
_ = os.Setenv("KS_DEFAULT_CONFIGMAP_NAMESPACE", tt.env)
|
||||
}
|
||||
assert.Equalf(t, tt.want, GetConfigMapNamespace(), "GetConfigMapNamespace()")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
const (
|
||||
anyString string = "anyString"
|
||||
shouldNotUpdate string = "shouldNotUpdate"
|
||||
shouldUpdate string = "shouldUpdate"
|
||||
)
|
||||
|
||||
func checkIsUpdateCorrectly(t *testing.T, beforeField string, afterField string) {
|
||||
switch beforeField {
|
||||
case anyString:
|
||||
assert.Equal(t, anyString, afterField)
|
||||
case "":
|
||||
assert.Equal(t, shouldUpdate, afterField)
|
||||
}
|
||||
}
|
||||
|
||||
// TestUpdateEmptyFields drives updateEmptyFields with combinations of empty
// ("" - expected to become shouldUpdate) and already-set (anyString -
// expected to keep its value) destination fields.
func TestUpdateEmptyFields(t *testing.T) {

	tests := []struct {
		inCo  *ConfigObj // source config merged into outCo
		outCo *ConfigObj // destination config, mutated in place
	}{
		{
			outCo: &ConfigObj{
				AccountID:          "",
				Token:              "",
				CustomerAdminEMail: "",
				ClusterName:        "",
				CloudReportURL:     "",
				CloudAPIURL:        "",
				CloudUIURL:         "",
				CloudAuthURL:       "",
			},
			inCo: &ConfigObj{
				AccountID:          shouldUpdate,
				Token:              shouldUpdate,
				CustomerAdminEMail: shouldUpdate,
				ClusterName:        shouldUpdate,
				CloudReportURL:     shouldUpdate,
				CloudAPIURL:        shouldUpdate,
				CloudUIURL:         shouldUpdate,
				CloudAuthURL:       shouldUpdate,
			},
		},
		{
			outCo: &ConfigObj{
				AccountID:          anyString,
				Token:              anyString,
				CustomerAdminEMail: "",
				ClusterName:        "",
				CloudReportURL:     "",
				CloudAPIURL:        "",
				CloudUIURL:         "",
				CloudAuthURL:       "",
			},
			inCo: &ConfigObj{
				AccountID:          shouldNotUpdate,
				Token:              shouldNotUpdate,
				CustomerAdminEMail: shouldUpdate,
				ClusterName:        shouldUpdate,
				CloudReportURL:     shouldUpdate,
				CloudAPIURL:        shouldUpdate,
				CloudUIURL:         shouldUpdate,
				CloudAuthURL:       shouldUpdate,
			},
		},
		{
			outCo: &ConfigObj{
				AccountID:          "",
				Token:              "",
				CustomerAdminEMail: anyString,
				ClusterName:        anyString,
				CloudReportURL:     anyString,
				CloudAPIURL:        anyString,
				CloudUIURL:         anyString,
				CloudAuthURL:       anyString,
			},
			inCo: &ConfigObj{
				AccountID:          shouldUpdate,
				Token:              shouldUpdate,
				CustomerAdminEMail: shouldNotUpdate,
				ClusterName:        shouldNotUpdate,
				CloudReportURL:     shouldNotUpdate,
				CloudAPIURL:        shouldNotUpdate,
				CloudUIURL:         shouldNotUpdate,
				CloudAuthURL:       shouldNotUpdate,
			},
		},
		{
			outCo: &ConfigObj{
				AccountID:          anyString,
				Token:              anyString,
				CustomerAdminEMail: "",
				ClusterName:        anyString,
				CloudReportURL:     "",
				CloudAPIURL:        anyString,
				CloudUIURL:         "",
				CloudAuthURL:       anyString,
			},
			inCo: &ConfigObj{
				AccountID:          shouldNotUpdate,
				Token:              shouldNotUpdate,
				CustomerAdminEMail: shouldUpdate,
				ClusterName:        shouldNotUpdate,
				CloudReportURL:     shouldUpdate,
				CloudAPIURL:        shouldNotUpdate,
				CloudUIURL:         shouldUpdate,
				CloudAuthURL:       shouldNotUpdate,
			},
		},
	}

	for i := range tests {
		// NOTE(review): this copies the POINTER, not the struct, so after
		// updateEmptyFields runs, beforeChangesOutCO aliases the mutated
		// struct and every before/after comparison below sees the *updated*
		// value on both sides - the checks are vacuous for fields that
		// changed. It should probably read `beforeChangesOutCO :=
		// *tests[i].outCo` (and checkIsUpdateCorrectly take the copied
		// fields) - confirm intent before fixing.
		beforeChangesOutCO := tests[i].outCo
		tests[i].outCo.updateEmptyFields(tests[i].inCo)
		checkIsUpdateCorrectly(t, beforeChangesOutCO.AccountID, tests[i].outCo.AccountID)
		checkIsUpdateCorrectly(t, beforeChangesOutCO.CloudAPIURL, tests[i].outCo.CloudAPIURL)
		checkIsUpdateCorrectly(t, beforeChangesOutCO.CloudAuthURL, tests[i].outCo.CloudAuthURL)
		checkIsUpdateCorrectly(t, beforeChangesOutCO.CloudReportURL, tests[i].outCo.CloudReportURL)
		checkIsUpdateCorrectly(t, beforeChangesOutCO.CloudUIURL, tests[i].outCo.CloudUIURL)
		checkIsUpdateCorrectly(t, beforeChangesOutCO.ClusterName, tests[i].outCo.ClusterName)
		checkIsUpdateCorrectly(t, beforeChangesOutCO.CustomerAdminEMail, tests[i].outCo.CustomerAdminEMail)
		checkIsUpdateCorrectly(t, beforeChangesOutCO.Token, tests[i].outCo.Token)
	}
}
|
||||
|
||||
@@ -2,7 +2,9 @@ package cautils
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sort"
|
||||
|
||||
"github.com/anchore/grype/grype/presenter/models"
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/kubescape/k8s-interface/workloadinterface"
|
||||
"github.com/kubescape/opa-utils/reporthandling"
|
||||
@@ -15,12 +17,29 @@ import (
|
||||
|
||||
// K8SResources map[<api group>/<api version>/<resource>][]<resourceID>
|
||||
type K8SResources map[string][]string
|
||||
type KSResources map[string][]string
|
||||
type ExternalResources map[string][]string
|
||||
|
||||
type ImageScanData struct {
|
||||
PresenterConfig *models.PresenterConfig
|
||||
Image string
|
||||
}
|
||||
|
||||
type ScanTypes string
|
||||
|
||||
const (
|
||||
TopWorkloadsNumber = 5
|
||||
ScanTypeCluster ScanTypes = "cluster"
|
||||
ScanTypeRepo ScanTypes = "repo"
|
||||
ScanTypeImage ScanTypes = "image"
|
||||
ScanTypeWorkload ScanTypes = "workload"
|
||||
ScanTypeFramework ScanTypes = "framework"
|
||||
)
|
||||
|
||||
type OPASessionObj struct {
|
||||
K8SResources *K8SResources // input k8s objects
|
||||
ArmoResource *KSResources // input ARMO objects
|
||||
K8SResources K8SResources // input k8s objects
|
||||
ExternalResources ExternalResources // input non-k8s objects (external resources)
|
||||
AllPolicies *Policies // list of all frameworks
|
||||
ExcludedRules map[string]bool // rules to exclude map[rule name>]X
|
||||
AllResources map[string]workloadinterface.IMetadata // all scanned resources, map[<resource ID>]<resource>
|
||||
ResourcesResult map[string]resourcesresults.Result // resources scan results, map[<resource ID>]<resource result>
|
||||
ResourceSource map[string]reporthandling.Source // resources sources, map[<resource ID>]<resource result>
|
||||
@@ -36,9 +55,10 @@ type OPASessionObj struct {
|
||||
Policies []reporthandling.Framework // list of frameworks to scan
|
||||
Exceptions []armotypes.PostureExceptionPolicy // list of exceptions to apply on scan results
|
||||
OmitRawResources bool // omit raw resources from output
|
||||
SingleResourceScan workloadinterface.IWorkload // single resource scan
|
||||
}
|
||||
|
||||
func NewOPASessionObj(ctx context.Context, frameworks []reporthandling.Framework, k8sResources *K8SResources, scanInfo *ScanInfo) *OPASessionObj {
|
||||
func NewOPASessionObj(ctx context.Context, frameworks []reporthandling.Framework, k8sResources K8SResources, scanInfo *ScanInfo) *OPASessionObj {
|
||||
return &OPASessionObj{
|
||||
Report: &reporthandlingv2.PostureReport{},
|
||||
Policies: frameworks,
|
||||
@@ -55,6 +75,45 @@ func NewOPASessionObj(ctx context.Context, frameworks []reporthandling.Framework
|
||||
}
|
||||
}
|
||||
|
||||
// SetTopWorkloads sets the top workloads by score
|
||||
func (sessionObj *OPASessionObj) SetTopWorkloads() {
|
||||
count := 0
|
||||
|
||||
topWorkloadsSorted := make([]prioritization.PrioritizedResource, 0)
|
||||
|
||||
// create list in order to sort
|
||||
for _, wl := range sessionObj.ResourcesPrioritized {
|
||||
topWorkloadsSorted = append(topWorkloadsSorted, wl)
|
||||
}
|
||||
|
||||
// sort by score. If scores are equal, sort by resource ID
|
||||
sort.Slice(topWorkloadsSorted, func(i, j int) bool {
|
||||
if topWorkloadsSorted[i].Score == topWorkloadsSorted[j].Score {
|
||||
return topWorkloadsSorted[i].ResourceID < topWorkloadsSorted[j].ResourceID
|
||||
}
|
||||
return topWorkloadsSorted[i].Score > topWorkloadsSorted[j].Score
|
||||
})
|
||||
|
||||
if sessionObj.Report == nil {
|
||||
sessionObj.Report = &reporthandlingv2.PostureReport{}
|
||||
}
|
||||
|
||||
// set top workloads according to number of top workloads
|
||||
for i := 0; i < TopWorkloadsNumber; i++ {
|
||||
if i >= len(topWorkloadsSorted) {
|
||||
break
|
||||
}
|
||||
source := sessionObj.ResourceSource[topWorkloadsSorted[i].ResourceID]
|
||||
wlObj := &reporthandling.Resource{
|
||||
IMetadata: sessionObj.AllResources[topWorkloadsSorted[i].ResourceID],
|
||||
Source: &source,
|
||||
}
|
||||
|
||||
sessionObj.Report.SummaryDetails.TopWorkloadsByScore = append(sessionObj.Report.SummaryDetails.TopWorkloadsByScore, wlObj)
|
||||
count++
|
||||
}
|
||||
}
|
||||
|
||||
func (sessionObj *OPASessionObj) SetMapNamespaceToNumberOfResources(mapNamespaceToNumberOfResources map[string]int) {
|
||||
if sessionObj.Metadata.ContextMetadata.ClusterContextMetadata == nil {
|
||||
sessionObj.Metadata.ContextMetadata.ClusterContextMetadata = &reporthandlingv2.ClusterMetadata{}
|
||||
|
||||
@@ -4,6 +4,8 @@ import (
|
||||
"golang.org/x/mod/semver"
|
||||
|
||||
"github.com/armosec/utils-go/boolutils"
|
||||
cloudsupport "github.com/kubescape/k8s-interface/cloudsupport/v1"
|
||||
"github.com/kubescape/k8s-interface/k8sinterface"
|
||||
"github.com/kubescape/opa-utils/reporthandling"
|
||||
"github.com/kubescape/opa-utils/reporthandling/apis"
|
||||
)
|
||||
@@ -15,15 +17,25 @@ func NewPolicies() *Policies {
|
||||
}
|
||||
}
|
||||
|
||||
func (policies *Policies) Set(frameworks []reporthandling.Framework, version string) {
|
||||
func (policies *Policies) Set(frameworks []reporthandling.Framework, version string, excludedRules map[string]bool, scanningScope reporthandling.ScanningScopeType) {
|
||||
for i := range frameworks {
|
||||
if !isFrameworkFitToScanScope(frameworks[i], scanningScope) {
|
||||
continue
|
||||
}
|
||||
if frameworks[i].Name != "" && len(frameworks[i].Controls) > 0 {
|
||||
policies.Frameworks = append(policies.Frameworks, frameworks[i].Name)
|
||||
}
|
||||
for j := range frameworks[i].Controls {
|
||||
compatibleRules := []reporthandling.PolicyRule{}
|
||||
for r := range frameworks[i].Controls[j].Rules {
|
||||
if !ruleWithKSOpaDependency(frameworks[i].Controls[j].Rules[r].Attributes) && isRuleKubescapeVersionCompatible(frameworks[i].Controls[j].Rules[r].Attributes, version) {
|
||||
if excludedRules != nil {
|
||||
ruleName := frameworks[i].Controls[j].Rules[r].Name
|
||||
if _, exclude := excludedRules[ruleName]; exclude {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if !ruleWithKSOpaDependency(frameworks[i].Controls[j].Rules[r].Attributes) && isRuleKubescapeVersionCompatible(frameworks[i].Controls[j].Rules[r].Attributes, version) && isControlFitToScanScope(frameworks[i].Controls[j], scanningScope) {
|
||||
compatibleRules = append(compatibleRules, frameworks[i].Controls[j].Rules[r])
|
||||
}
|
||||
}
|
||||
@@ -76,3 +88,89 @@ func isRuleKubescapeVersionCompatible(attributes map[string]interface{}, version
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func getCloudType(scanInfo *ScanInfo) (bool, reporthandling.ScanningScopeType) {
|
||||
if cloudsupport.IsAKS() {
|
||||
return true, reporthandling.ScopeCloudAKS
|
||||
}
|
||||
if cloudsupport.IsEKS(k8sinterface.GetConfig()) {
|
||||
return true, reporthandling.ScopeCloudEKS
|
||||
}
|
||||
if cloudsupport.IsGKE(k8sinterface.GetConfig()) {
|
||||
return true, reporthandling.ScopeCloudGKE
|
||||
}
|
||||
return false, ""
|
||||
}
|
||||
|
||||
func GetScanningScope(scanInfo *ScanInfo) reporthandling.ScanningScopeType {
|
||||
var result reporthandling.ScanningScopeType
|
||||
|
||||
switch scanInfo.GetScanningContext() {
|
||||
case ContextCluster:
|
||||
isCloud, cloudType := getCloudType(scanInfo)
|
||||
if isCloud {
|
||||
result = cloudType
|
||||
} else {
|
||||
result = reporthandling.ScopeCluster
|
||||
}
|
||||
default:
|
||||
result = reporthandling.ScopeFile
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
func isScanningScopeMatchToControlScope(scanScope reporthandling.ScanningScopeType, controlScope reporthandling.ScanningScopeType) bool {
|
||||
result := false
|
||||
|
||||
switch controlScope {
|
||||
case reporthandling.ScopeFile:
|
||||
result = (reporthandling.ScopeFile == scanScope)
|
||||
case reporthandling.ScopeCluster:
|
||||
result = (reporthandling.ScopeCluster == scanScope) || (reporthandling.ScopeCloud == scanScope) || (reporthandling.ScopeCloudAKS == scanScope) || (reporthandling.ScopeCloudEKS == scanScope) || (reporthandling.ScopeCloudGKE == scanScope)
|
||||
case reporthandling.ScopeCloud:
|
||||
result = (reporthandling.ScopeCloud == scanScope) || (reporthandling.ScopeCloudAKS == scanScope) || (reporthandling.ScopeCloudEKS == scanScope) || (reporthandling.ScopeCloudGKE == scanScope)
|
||||
case reporthandling.ScopeCloudAKS:
|
||||
result = (reporthandling.ScopeCloudAKS == scanScope)
|
||||
case reporthandling.ScopeCloudEKS:
|
||||
result = (reporthandling.ScopeCloudEKS == scanScope)
|
||||
case reporthandling.ScopeCloudGKE:
|
||||
result = (reporthandling.ScopeCloudGKE == scanScope)
|
||||
default:
|
||||
result = true
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
func isControlFitToScanScope(control reporthandling.Control, scanScopeMatches reporthandling.ScanningScopeType) bool {
|
||||
// for backward compatibility - case: kubescape with scope(new one) and regolibrary without scope(old one)
|
||||
if control.ScanningScope == nil {
|
||||
return true
|
||||
}
|
||||
if len(control.ScanningScope.Matches) == 0 {
|
||||
return true
|
||||
}
|
||||
for i := range control.ScanningScope.Matches {
|
||||
if isScanningScopeMatchToControlScope(scanScopeMatches, control.ScanningScope.Matches[i]) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func isFrameworkFitToScanScope(framework reporthandling.Framework, scanScopeMatches reporthandling.ScanningScopeType) bool {
|
||||
// for backward compatibility - case: kubescape with scope(new one) and regolibrary without scope(old one)
|
||||
if framework.ScanningScope == nil {
|
||||
return true
|
||||
}
|
||||
if len(framework.ScanningScope.Matches) == 0 {
|
||||
return true
|
||||
}
|
||||
for i := range framework.ScanningScope.Matches {
|
||||
if isScanningScopeMatchToControlScope(scanScopeMatches, framework.ScanningScope.Matches[i]) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
104
core/cautils/datastructuresmethods_test.go
Normal file
104
core/cautils/datastructuresmethods_test.go
Normal file
@@ -0,0 +1,104 @@
|
||||
package cautils
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/kubescape/opa-utils/reporthandling"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestIsControlFitToScanScope(t *testing.T) {
|
||||
tests := []struct {
|
||||
scanInfo *ScanInfo
|
||||
Control reporthandling.Control
|
||||
expected_res bool
|
||||
}{
|
||||
{
|
||||
scanInfo: &ScanInfo{
|
||||
InputPatterns: []string{
|
||||
"./testdata/any_file_for_test.json",
|
||||
},
|
||||
},
|
||||
Control: reporthandling.Control{
|
||||
ScanningScope: &reporthandling.ScanningScope{
|
||||
Matches: []reporthandling.ScanningScopeType{
|
||||
reporthandling.ScopeFile,
|
||||
},
|
||||
},
|
||||
},
|
||||
expected_res: true,
|
||||
},
|
||||
{
|
||||
scanInfo: &ScanInfo{
|
||||
InputPatterns: []string{
|
||||
"./testdata/any_file_for_test.json",
|
||||
},
|
||||
},
|
||||
Control: reporthandling.Control{
|
||||
ScanningScope: &reporthandling.ScanningScope{
|
||||
|
||||
Matches: []reporthandling.ScanningScopeType{
|
||||
reporthandling.ScopeCluster,
|
||||
reporthandling.ScopeFile,
|
||||
},
|
||||
},
|
||||
},
|
||||
expected_res: true,
|
||||
},
|
||||
{
|
||||
scanInfo: &ScanInfo{},
|
||||
Control: reporthandling.Control{
|
||||
ScanningScope: &reporthandling.ScanningScope{
|
||||
|
||||
Matches: []reporthandling.ScanningScopeType{
|
||||
reporthandling.ScopeCluster,
|
||||
},
|
||||
},
|
||||
},
|
||||
expected_res: true,
|
||||
},
|
||||
{
|
||||
scanInfo: &ScanInfo{
|
||||
InputPatterns: []string{
|
||||
"./testdata/any_file_for_test.json",
|
||||
},
|
||||
},
|
||||
Control: reporthandling.Control{
|
||||
ScanningScope: &reporthandling.ScanningScope{
|
||||
|
||||
Matches: []reporthandling.ScanningScopeType{
|
||||
reporthandling.ScopeCloudGKE,
|
||||
},
|
||||
},
|
||||
},
|
||||
expected_res: false,
|
||||
},
|
||||
{
|
||||
scanInfo: &ScanInfo{},
|
||||
Control: reporthandling.Control{
|
||||
ScanningScope: &reporthandling.ScanningScope{
|
||||
|
||||
Matches: []reporthandling.ScanningScopeType{
|
||||
reporthandling.ScopeCloudEKS,
|
||||
},
|
||||
},
|
||||
},
|
||||
expected_res: false,
|
||||
},
|
||||
{
|
||||
scanInfo: &ScanInfo{},
|
||||
Control: reporthandling.Control{
|
||||
ScanningScope: &reporthandling.ScanningScope{
|
||||
Matches: []reporthandling.ScanningScopeType{
|
||||
reporthandling.ScopeCloud,
|
||||
},
|
||||
},
|
||||
},
|
||||
expected_res: false,
|
||||
}}
|
||||
for i := range tests {
|
||||
assert.Equal(t, isControlFitToScanScope(tests[i].Control, GetScanningScope(tests[i].scanInfo)), tests[i].expected_res, fmt.Sprintf("tests_true index %d", i))
|
||||
}
|
||||
}
|
||||
@@ -1,25 +1,54 @@
|
||||
package cautils
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
spinnerpkg "github.com/briandowns/spinner"
|
||||
"github.com/fatih/color"
|
||||
"github.com/jwalton/gchalk"
|
||||
logger "github.com/kubescape/go-logger"
|
||||
"github.com/kubescape/go-logger/helpers"
|
||||
"github.com/mattn/go-isatty"
|
||||
"github.com/schollz/progressbar/v3"
|
||||
)
|
||||
|
||||
var FailureDisplay = color.New(color.Bold, color.FgHiRed).FprintfFunc()
|
||||
var WarningDisplay = color.New(color.Bold, color.FgHiYellow).FprintfFunc()
|
||||
var FailureTextDisplay = color.New(color.Faint, color.FgHiRed).FprintfFunc()
|
||||
var InfoDisplay = color.New(color.Bold, color.FgCyan).FprintfFunc()
|
||||
var InfoTextDisplay = color.New(color.Bold, color.FgHiYellow).FprintfFunc()
|
||||
var SimpleDisplay = color.New().FprintfFunc()
|
||||
var SuccessDisplay = color.New(color.Bold, color.FgHiGreen).FprintfFunc()
|
||||
var DescriptionDisplay = color.New(color.Faint, color.FgWhite).FprintfFunc()
|
||||
func FailureDisplay(w io.Writer, format string, a ...interface{}) {
|
||||
fmt.Fprintf(w, gchalk.WithBrightRed().Bold(format), a...)
|
||||
}
|
||||
|
||||
func WarningDisplay(w io.Writer, format string, a ...interface{}) {
|
||||
fmt.Fprintf(w, gchalk.WithBrightYellow().Bold(format), a...)
|
||||
}
|
||||
|
||||
func FailureTextDisplay(w io.Writer, format string, a ...interface{}) {
|
||||
fmt.Fprintf(w, gchalk.WithBrightRed().Dim(format), a...)
|
||||
}
|
||||
|
||||
func InfoDisplay(w io.Writer, format string, a ...interface{}) {
|
||||
fmt.Fprintf(w, gchalk.WithCyan().Bold(format), a...)
|
||||
}
|
||||
|
||||
func InfoTextDisplay(w io.Writer, format string, a ...interface{}) {
|
||||
fmt.Fprintf(w, gchalk.WithBrightYellow().Bold(format), a...)
|
||||
}
|
||||
|
||||
func SimpleDisplay(w io.Writer, format string, a ...interface{}) {
|
||||
fmt.Fprintf(w, gchalk.White(format), a...)
|
||||
}
|
||||
|
||||
func SuccessDisplay(w io.Writer, format string, a ...interface{}) {
|
||||
fmt.Fprintf(w, gchalk.WithBlue().Bold(format), a...)
|
||||
}
|
||||
|
||||
func DescriptionDisplay(w io.Writer, format string, a ...interface{}) {
|
||||
fmt.Fprintf(w, gchalk.WithWhite().Dim(format), a...)
|
||||
}
|
||||
|
||||
func BoldDisplay(w io.Writer, format string, a ...interface{}) {
|
||||
fmt.Fprintf(w, gchalk.Bold(format), a...)
|
||||
}
|
||||
|
||||
var spinner *spinnerpkg.Spinner
|
||||
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
|
||||
"github.com/kubescape/go-logger/helpers"
|
||||
"github.com/kubescape/k8s-interface/workloadinterface"
|
||||
"golang.org/x/exp/slices"
|
||||
|
||||
logger "github.com/kubescape/go-logger"
|
||||
"github.com/kubescape/opa-utils/objectsenvelopes"
|
||||
@@ -31,8 +32,13 @@ const (
|
||||
JSON_FILE_FORMAT FileFormat = "json"
|
||||
)
|
||||
|
||||
type Chart struct {
|
||||
Name string
|
||||
Path string
|
||||
}
|
||||
|
||||
// LoadResourcesFromHelmCharts scans a given path (recursively) for helm charts, renders the templates and returns a map of workloads and a map of chart names
|
||||
func LoadResourcesFromHelmCharts(ctx context.Context, basePath string) (map[string][]workloadinterface.IMetadata, map[string]string) {
|
||||
func LoadResourcesFromHelmCharts(ctx context.Context, basePath string) (map[string][]workloadinterface.IMetadata, map[string]Chart) {
|
||||
directories, _ := listDirs(basePath)
|
||||
helmDirectories := make([]string, 0)
|
||||
for _, dir := range directories {
|
||||
@@ -42,7 +48,7 @@ func LoadResourcesFromHelmCharts(ctx context.Context, basePath string) (map[stri
|
||||
}
|
||||
|
||||
sourceToWorkloads := map[string][]workloadinterface.IMetadata{}
|
||||
sourceToChartName := map[string]string{}
|
||||
sourceToChart := make(map[string]Chart, 0)
|
||||
for _, helmDir := range helmDirectories {
|
||||
chart, err := NewHelmChart(helmDir)
|
||||
if err == nil {
|
||||
@@ -55,11 +61,14 @@ func LoadResourcesFromHelmCharts(ctx context.Context, basePath string) (map[stri
|
||||
chartName := chart.GetName()
|
||||
for k, v := range wls {
|
||||
sourceToWorkloads[k] = v
|
||||
sourceToChartName[k] = chartName
|
||||
sourceToChart[k] = Chart{
|
||||
Name: chartName,
|
||||
Path: helmDir,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return sourceToWorkloads, sourceToChartName
|
||||
return sourceToWorkloads, sourceToChart
|
||||
}
|
||||
|
||||
// If the contents at given path is a Kustomize Directory, LoadResourcesFromKustomizeDirectory will
|
||||
@@ -284,11 +293,11 @@ func convertYamlToJson(i interface{}) interface{} {
|
||||
}
|
||||
|
||||
func IsYaml(filePath string) bool {
|
||||
return StringInSlice(YAML_PREFIX, strings.ReplaceAll(filepath.Ext(filePath), ".", "")) != ValueNotFound
|
||||
return slices.Contains(YAML_PREFIX, strings.ReplaceAll(filepath.Ext(filePath), ".", ""))
|
||||
}
|
||||
|
||||
func IsJson(filePath string) bool {
|
||||
return StringInSlice(JSON_PREFIX, strings.ReplaceAll(filepath.Ext(filePath), ".", "")) != ValueNotFound
|
||||
return slices.Contains(JSON_PREFIX, strings.ReplaceAll(filepath.Ext(filePath), ".", ""))
|
||||
}
|
||||
|
||||
func glob(root, pattern string, onlyDirectories bool) ([]string, error) {
|
||||
|
||||
@@ -53,7 +53,8 @@ func TestLoadResourcesFromHelmCharts(t *testing.T) {
|
||||
|
||||
w := workloads[0]
|
||||
assert.True(t, localworkload.IsTypeLocalWorkload(w.GetObject()), "Expected localworkload as object type")
|
||||
assert.Equal(t, "kubescape", sourceToChartName[file])
|
||||
assert.Equal(t, "kubescape", sourceToChartName[file].Name)
|
||||
assert.Equal(t, helmChartPath(), sourceToChartName[file].Path)
|
||||
|
||||
switch filepath.Base(file) {
|
||||
case "serviceaccount.yaml":
|
||||
|
||||
@@ -78,12 +78,12 @@ func (drp *DownloadReleasedPolicy) ListControls() ([]string, error) {
|
||||
}
|
||||
var controlsFrameworksList [][]string
|
||||
for _, control := range controls {
|
||||
controlsFrameworksList = append(controlsFrameworksList, control.FrameworkNames)
|
||||
controlsFrameworksList = append(controlsFrameworksList, drp.gs.GetOpaFrameworkListByControlID(control.ControlID))
|
||||
}
|
||||
controlsNamesWithIDsandFrameworksList := make([]string, len(controlsIDsList))
|
||||
// by design all slices have the same lengt
|
||||
for i := range controlsIDsList {
|
||||
controlsNamesWithIDsandFrameworksList[i] = fmt.Sprintf("%v|%v|%v", controlsIDsList[i], controlsNamesList[i], strings.Join(controlsFrameworksList[i], ","))
|
||||
controlsNamesWithIDsandFrameworksList[i] = fmt.Sprintf("%v|%v|%v", controlsIDsList[i], controlsNamesList[i], strings.Join(controlsFrameworksList[i], ", "))
|
||||
}
|
||||
return controlsNamesWithIDsandFrameworksList, nil
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/armosec/armoapi-go/identifiers"
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
"github.com/kubescape/kubescape/v2/internal/testutils"
|
||||
"github.com/kubescape/opa-utils/reporthandling"
|
||||
@@ -152,7 +153,7 @@ func mockExceptions() []armotypes.PostureExceptionPolicy {
|
||||
Actions: []armotypes.PostureExceptionPolicyActions{
|
||||
"alertOnly",
|
||||
},
|
||||
Resources: []armotypes.PortalDesignator{
|
||||
Resources: []identifiers.PortalDesignator{
|
||||
{
|
||||
DesignatorType: "Attributes",
|
||||
Attributes: map[string]string{
|
||||
@@ -187,7 +188,7 @@ func mockExceptions() []armotypes.PostureExceptionPolicy {
|
||||
Actions: []armotypes.PostureExceptionPolicyActions{
|
||||
"alertOnly",
|
||||
},
|
||||
Resources: []armotypes.PortalDesignator{
|
||||
Resources: []identifiers.PortalDesignator{
|
||||
{
|
||||
DesignatorType: "Attributes",
|
||||
Attributes: map[string]string{
|
||||
@@ -237,7 +238,7 @@ func mockCustomerConfig(cluster, scope string) func() *armotypes.CustomerConfig
|
||||
Attributes: map[string]interface{}{
|
||||
"label": "value",
|
||||
},
|
||||
Scope: armotypes.PortalDesignator{
|
||||
Scope: identifiers.PortalDesignator{
|
||||
DesignatorType: "Attributes",
|
||||
Attributes: map[string]string{
|
||||
"kind": "Cluster",
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
1
core/cautils/getter/testdata/MITRE.json
vendored
1
core/cautils/getter/testdata/MITRE.json
vendored
@@ -6,6 +6,7 @@
|
||||
},
|
||||
"creationTime": "",
|
||||
"description": "Testing MITRE for Kubernetes as suggested by microsoft in https://www.microsoft.com/security/blog/wp-content/uploads/2020/04/k8s-matrix.png",
|
||||
"typeTags": ["compliance"],
|
||||
"controls": [
|
||||
{
|
||||
"guid": "",
|
||||
|
||||
1
core/cautils/getter/testdata/NSA.json
vendored
1
core/cautils/getter/testdata/NSA.json
vendored
@@ -6,6 +6,7 @@
|
||||
},
|
||||
"creationTime": "",
|
||||
"description": "Implement NSA security advices for K8s ",
|
||||
"typeTags": ["compliance"],
|
||||
"controls": [
|
||||
{
|
||||
"guid": "",
|
||||
|
||||
4
core/cautils/getter/testdata/policy.json
vendored
4
core/cautils/getter/testdata/policy.json
vendored
@@ -25789,7 +25789,7 @@
|
||||
},
|
||||
{
|
||||
"guid": "",
|
||||
"name": "exclude-kubescape-host-scanner-resources",
|
||||
"name": "exclude-host-scanner-resources",
|
||||
"attributes": {
|
||||
"systemException": true
|
||||
},
|
||||
@@ -25804,7 +25804,7 @@
|
||||
"attributes": {
|
||||
"kind": "DaemonSet",
|
||||
"name": "host-scanner",
|
||||
"namespace": "kubescape-host-scanner"
|
||||
"namespace": "kubescape"
|
||||
}
|
||||
}
|
||||
],
|
||||
|
||||
@@ -67,6 +67,12 @@ func errAuth(resp *http.Response) error {
|
||||
}
|
||||
|
||||
func readString(rdr io.Reader, sizeHint int64) (string, error) {
|
||||
|
||||
// if the response is empty, return an empty string
|
||||
if sizeHint < 0 {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
var b strings.Builder
|
||||
|
||||
b.Grow(int(sizeHint))
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package getter
|
||||
|
||||
import (
|
||||
"io"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
@@ -43,3 +44,56 @@ func TestIsNativeFramework(t *testing.T) {
|
||||
require.Truef(t, isNativeFramework("nSa"), "expected nsa to be native (case insensitive)")
|
||||
require.Falsef(t, isNativeFramework("foo"), "expected framework to be custom")
|
||||
}
|
||||
|
||||
func Test_readString(t *testing.T) {
|
||||
type args struct {
|
||||
rdr io.Reader
|
||||
sizeHint int64
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want string
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "should return empty string if sizeHint is negative",
|
||||
args: args{
|
||||
rdr: nil,
|
||||
sizeHint: -1,
|
||||
},
|
||||
want: "",
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "should return empty string if sizeHint is zero",
|
||||
args: args{
|
||||
rdr: &io.LimitedReader{},
|
||||
sizeHint: 0,
|
||||
},
|
||||
want: "",
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "should return empty string if sizeHint is positive",
|
||||
args: args{
|
||||
rdr: &io.LimitedReader{},
|
||||
sizeHint: 1,
|
||||
},
|
||||
want: "",
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got, err := readString(tt.args.rdr, tt.args.sizeHint)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("readString() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if got != tt.want {
|
||||
t.Errorf("readString() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,6 +3,7 @@ package cautils
|
||||
import (
|
||||
"github.com/kubescape/k8s-interface/workloadinterface"
|
||||
"github.com/kubescape/opa-utils/reporthandling"
|
||||
helpersv1 "github.com/kubescape/opa-utils/reporthandling/helpers/v1"
|
||||
"github.com/kubescape/opa-utils/reporthandling/results/v1/reportsummary"
|
||||
)
|
||||
|
||||
@@ -54,10 +55,8 @@ func controlReportV2ToV1(opaSessionObj *OPASessionObj, frameworkName string, con
|
||||
crv1.Remediation = crv2.Remediation
|
||||
|
||||
rulesv1 := map[string]reporthandling.RuleReport{}
|
||||
|
||||
iter := crv2.ListResourcesIDs().All()
|
||||
for iter.HasNext() {
|
||||
resourceID := iter.Next()
|
||||
l := helpersv1.GetAllListsFromPool()
|
||||
for resourceID := range crv2.ListResourcesIDs(l).All() {
|
||||
if result, ok := opaSessionObj.ResourcesResult[resourceID]; ok {
|
||||
for _, rulev2 := range result.ListRulesOfControl(crv2.GetID(), "") {
|
||||
|
||||
@@ -104,6 +103,7 @@ func controlReportV2ToV1(opaSessionObj *OPASessionObj, frameworkName string, con
|
||||
}
|
||||
}
|
||||
}
|
||||
helpersv1.PutAllListsToPool(l)
|
||||
if len(rulesv1) > 0 {
|
||||
for i := range rulesv1 {
|
||||
crv1.RuleReports = append(crv1.RuleReports, rulesv1[i])
|
||||
|
||||
@@ -8,13 +8,13 @@ import (
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
giturl "github.com/kubescape/go-git-url"
|
||||
"github.com/kubescape/go-logger"
|
||||
"github.com/kubescape/go-logger/helpers"
|
||||
"github.com/kubescape/k8s-interface/k8sinterface"
|
||||
"github.com/kubescape/kubescape/v2/core/cautils/getter"
|
||||
apisv1 "github.com/kubescape/opa-utils/httpserver/apis/v1"
|
||||
"github.com/kubescape/opa-utils/objectsenvelopes"
|
||||
"github.com/kubescape/opa-utils/reporthandling"
|
||||
reporthandlingv2 "github.com/kubescape/opa-utils/reporthandling/v2"
|
||||
|
||||
@@ -87,51 +87,59 @@ func (bpf *BoolPtrFlag) Set(val string) error {
|
||||
|
||||
// TODO - UPDATE
|
||||
type ViewTypes string
|
||||
type EnvScopeTypes string
|
||||
type ManageClusterTypes string
|
||||
|
||||
const (
|
||||
ResourceViewType ViewTypes = "resource"
|
||||
SecurityViewType ViewTypes = "security"
|
||||
ControlViewType ViewTypes = "control"
|
||||
)
|
||||
|
||||
type PolicyIdentifier struct {
|
||||
Identifier string // policy Identifier e.g. c-0012 for control, nsa,mitre for frameworks
|
||||
Kind apisv1.NotificationPolicyKind // policy kind e.g. Framework,Control,Rule
|
||||
Designators armotypes.PortalDesignator
|
||||
Identifier string // policy Identifier e.g. c-0012 for control, nsa,mitre for frameworks
|
||||
Kind apisv1.NotificationPolicyKind // policy kind e.g. Framework,Control,Rule
|
||||
}
|
||||
|
||||
type ScanInfo struct {
|
||||
Getters // TODO - remove from object
|
||||
PolicyIdentifier []PolicyIdentifier // TODO - remove from object
|
||||
UseExceptions string // Load file with exceptions configuration
|
||||
ControlsInputs string // Load file with inputs for controls
|
||||
AttackTracks string // Load file with attack tracks
|
||||
UseFrom []string // Load framework from local file (instead of download). Use when running offline
|
||||
UseDefault bool // Load framework from cached file (instead of download). Use when running offline
|
||||
UseArtifactsFrom string // Load artifacts from local path. Use when running offline
|
||||
VerboseMode bool // Display all of the input resources and not only failed resources
|
||||
View string // Display all of the input resources and not only failed resources
|
||||
Format string // Format results (table, json, junit ...)
|
||||
Output string // Store results in an output file, Output file name
|
||||
FormatVersion string // Output object can be different between versions, this is for testing and backward compatibility
|
||||
CustomClusterName string // Set the custom name of the cluster
|
||||
ExcludedNamespaces string // used for host scanner namespace
|
||||
IncludeNamespaces string //
|
||||
InputPatterns []string // Yaml files input patterns
|
||||
Silent bool // Silent mode - Do not print progress logs
|
||||
FailThreshold float32 // Failure score threshold
|
||||
FailThresholdSeverity string // Severity at and above which the command should fail
|
||||
Submit bool // Submit results to Kubescape Cloud BE
|
||||
CreateAccount bool // Create account in Kubescape Cloud BE if no account found in local cache
|
||||
ScanID string // Report id of the current scan
|
||||
HostSensorEnabled BoolPtrFlag // Deploy Kubescape K8s host scanner to collect data from certain controls
|
||||
HostSensorYamlPath string // Path to hostsensor file
|
||||
Local bool // Do not submit results
|
||||
Credentials Credentials // account ID
|
||||
KubeContext string // context name
|
||||
FrameworkScan bool // false if scanning control
|
||||
ScanAll bool // true if scan all frameworks
|
||||
OmitRawResources bool // true if omit raw resources from the output
|
||||
PrintAttackTree bool // true if print attack tree
|
||||
Getters // TODO - remove from object
|
||||
PolicyIdentifier []PolicyIdentifier // TODO - remove from object
|
||||
UseExceptions string // Load file with exceptions configuration
|
||||
ControlsInputs string // Load file with inputs for controls
|
||||
AttackTracks string // Load file with attack tracks
|
||||
UseFrom []string // Load framework from local file (instead of download). Use when running offline
|
||||
UseDefault bool // Load framework from cached file (instead of download). Use when running offline
|
||||
UseArtifactsFrom string // Load artifacts from local path. Use when running offline
|
||||
VerboseMode bool // Display all of the input resources and not only failed resources
|
||||
View string // Display all of the input resources and not only failed resources
|
||||
Format string // Format results (table, json, junit ...)
|
||||
Output string // Store results in an output file, Output file name
|
||||
FormatVersion string // Output object can be different between versions, this is for testing and backward compatibility
|
||||
CustomClusterName string // Set the custom name of the cluster
|
||||
ExcludedNamespaces string // used for host scanner namespace
|
||||
IncludeNamespaces string //
|
||||
InputPatterns []string // Yaml files input patterns
|
||||
Silent bool // Silent mode - Do not print progress logs
|
||||
FailThreshold float32 // DEPRECATED - Failure score threshold
|
||||
ComplianceThreshold float32 // Compliance score threshold
|
||||
FailThresholdSeverity string // Severity at and above which the command should fail
|
||||
Submit bool // Submit results to Kubescape Cloud BE
|
||||
CreateAccount bool // Create account in Kubescape Cloud BE if no account found in local cache
|
||||
ScanID string // Report id of the current scan
|
||||
HostSensorEnabled BoolPtrFlag // Deploy Kubescape K8s host scanner to collect data from certain controls
|
||||
HostSensorYamlPath string // Path to hostsensor file
|
||||
Local bool // Do not submit results
|
||||
Credentials Credentials // account ID
|
||||
KubeContext string // context name
|
||||
FrameworkScan bool // false if scanning control
|
||||
ScanAll bool // true if scan all frameworks
|
||||
OmitRawResources bool // true if omit raw resources from the output
|
||||
PrintAttackTree bool // true if print attack tree
|
||||
ScanObject *objectsenvelopes.ScanObject // identifies a single resource (k8s object) to be scanned
|
||||
ScanType ScanTypes
|
||||
ScanImages bool
|
||||
ChartPath string
|
||||
FilePath string
|
||||
}
|
||||
|
||||
type Getters struct {
|
||||
@@ -203,6 +211,10 @@ func (scanInfo *ScanInfo) Formats() []string {
|
||||
}
|
||||
}
|
||||
|
||||
func (scanInfo *ScanInfo) SetScanType(scanType ScanTypes) {
|
||||
scanInfo.ScanType = scanType
|
||||
}
|
||||
|
||||
func (scanInfo *ScanInfo) SetPolicyIdentifiers(policies []string, kind apisv1.NotificationPolicyKind) {
|
||||
for _, policy := range policies {
|
||||
if !scanInfo.contains(policy) {
|
||||
@@ -250,6 +262,7 @@ func scanInfoToScanMetadata(ctx context.Context, scanInfo *ScanInfo) *reporthand
|
||||
metadata.ScanMetadata.KubescapeVersion = BuildNumber
|
||||
metadata.ScanMetadata.VerboseMode = scanInfo.VerboseMode
|
||||
metadata.ScanMetadata.FailThreshold = scanInfo.FailThreshold
|
||||
metadata.ScanMetadata.ComplianceThreshold = scanInfo.ComplianceThreshold
|
||||
metadata.ScanMetadata.HostScanner = scanInfo.HostSensorEnabled.GetBool()
|
||||
metadata.ScanMetadata.VerboseMode = scanInfo.VerboseMode
|
||||
metadata.ScanMetadata.ControlsInputs = scanInfo.ControlsInputs
|
||||
@@ -339,11 +352,30 @@ func setContextMetadata(ctx context.Context, contextMetadata *reporthandlingv2.C
|
||||
BasePath: getAbsPath(input),
|
||||
HostName: getHostname(),
|
||||
}
|
||||
// add repo context for submitting
|
||||
contextMetadata.RepoContextMetadata = &reporthandlingv2.RepoContextMetadata{
|
||||
Provider: "none",
|
||||
Repo: fmt.Sprintf("path@%s", getAbsPath(input)),
|
||||
Owner: getHostname(),
|
||||
Branch: "none",
|
||||
DefaultBranch: "none",
|
||||
LocalRootPath: getAbsPath(input),
|
||||
}
|
||||
|
||||
case ContextFile:
|
||||
contextMetadata.FileContextMetadata = &reporthandlingv2.FileContextMetadata{
|
||||
FilePath: getAbsPath(input),
|
||||
HostName: getHostname(),
|
||||
}
|
||||
// add repo context for submitting
|
||||
contextMetadata.RepoContextMetadata = &reporthandlingv2.RepoContextMetadata{
|
||||
Provider: "none",
|
||||
Repo: fmt.Sprintf("file@%s", getAbsPath(input)),
|
||||
Owner: getHostname(),
|
||||
Branch: "none",
|
||||
DefaultBranch: "none",
|
||||
LocalRootPath: getAbsPath(input),
|
||||
}
|
||||
case ContextGitLocal:
|
||||
// local
|
||||
context, err := metadataGitLocal(input)
|
||||
|
||||
@@ -2,11 +2,12 @@ package cautils
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const ValueNotFound = -1
|
||||
|
||||
func ConvertLabelsToString(labels map[string]string) string {
|
||||
labelsStr := ""
|
||||
delimiter := ""
|
||||
@@ -34,11 +35,31 @@ func ConvertStringToLabels(labelsStr string) map[string]string {
|
||||
return labels
|
||||
}
|
||||
|
||||
func StringInSlice(strSlice []string, str string) int {
|
||||
for i := range strSlice {
|
||||
if strSlice[i] == str {
|
||||
return i
|
||||
func StringSlicesAreEqual(a, b []string) bool {
|
||||
if len(a) != len(b) {
|
||||
return false
|
||||
}
|
||||
|
||||
sort.Strings(a)
|
||||
sort.Strings(b)
|
||||
for i := range a {
|
||||
if a[i] != b[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return ValueNotFound
|
||||
return true
|
||||
}
|
||||
|
||||
func ParseIntEnvVar(varName string, defaultValue int) (int, error) {
|
||||
varValue, exists := os.LookupEnv(varName)
|
||||
if !exists {
|
||||
return defaultValue, nil
|
||||
}
|
||||
|
||||
intValue, err := strconv.Atoi(varValue)
|
||||
if err != nil {
|
||||
return defaultValue, fmt.Errorf("failed to parse %s env var as int: %w", varName, err)
|
||||
}
|
||||
|
||||
return intValue, nil
|
||||
}
|
||||
|
||||
@@ -2,8 +2,11 @@ package cautils
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestConvertLabelsToString(t *testing.T) {
|
||||
@@ -33,3 +36,102 @@ func TestConvertStringToLabels(t *testing.T) {
|
||||
t.Errorf("%s != %s", fmt.Sprintf("%v", rstrMap), fmt.Sprintf("%v", strMap))
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseIntEnvVar(t *testing.T) {
|
||||
testCases := []struct {
|
||||
expectedErr string
|
||||
name string
|
||||
varName string
|
||||
varValue string
|
||||
defaultValue int
|
||||
expected int
|
||||
}{
|
||||
{
|
||||
name: "Variable does not exist",
|
||||
varName: "DOES_NOT_EXIST",
|
||||
varValue: "",
|
||||
defaultValue: 123,
|
||||
expected: 123,
|
||||
expectedErr: "",
|
||||
},
|
||||
{
|
||||
name: "Variable exists and is a valid integer",
|
||||
varName: "MY_VAR",
|
||||
varValue: "456",
|
||||
defaultValue: 123,
|
||||
expected: 456,
|
||||
expectedErr: "",
|
||||
},
|
||||
{
|
||||
name: "Variable exists but is not a valid integer",
|
||||
varName: "MY_VAR",
|
||||
varValue: "not_an_integer",
|
||||
defaultValue: 123,
|
||||
expected: 123,
|
||||
expectedErr: "failed to parse MY_VAR env var as int",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
if tc.varValue != "" {
|
||||
os.Setenv(tc.varName, tc.varValue)
|
||||
} else {
|
||||
os.Unsetenv(tc.varName)
|
||||
}
|
||||
|
||||
actual, err := ParseIntEnvVar(tc.varName, tc.defaultValue)
|
||||
if tc.expectedErr != "" {
|
||||
assert.NotNil(t, err)
|
||||
assert.ErrorContains(t, err, tc.expectedErr)
|
||||
} else {
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
assert.Equalf(t, tc.expected, actual, "unexpected result")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestStringSlicesAreEqual(t *testing.T) {
|
||||
tt := []struct {
|
||||
name string
|
||||
a []string
|
||||
b []string
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
name: "equal unsorted slices",
|
||||
a: []string{"foo", "bar", "baz"},
|
||||
b: []string{"baz", "foo", "bar"},
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "equal sorted slices",
|
||||
a: []string{"bar", "baz", "foo"},
|
||||
b: []string{"bar", "baz", "foo"},
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "unequal slices",
|
||||
a: []string{"foo", "bar", "baz"},
|
||||
b: []string{"foo", "bar", "qux"},
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "different length slices",
|
||||
a: []string{"foo", "bar", "baz"},
|
||||
b: []string{"foo", "bar"},
|
||||
want: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tt {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
got := StringSlicesAreEqual(tc.a, tc.b)
|
||||
if got != tc.want {
|
||||
t.Errorf("StringSlicesAreEqual(%v, %v) = %v; want %v", tc.a, tc.b, got, tc.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
0
core/cautils/testdata/any_file_for_test.json
vendored
Normal file
0
core/cautils/testdata/any_file_for_test.json
vendored
Normal file
@@ -31,7 +31,7 @@ type IVersionCheckHandler interface {
|
||||
|
||||
func NewIVersionCheckHandler(ctx context.Context) IVersionCheckHandler {
|
||||
if BuildNumber == "" {
|
||||
logger.L().Ctx(ctx).Warning("unknown build number, this might affect your scan results. Please make sure you are updated to latest version")
|
||||
logger.L().Ctx(ctx).Warning("Unknown build number, this might affect your scan results. Please make sure you are updated to latest version")
|
||||
}
|
||||
|
||||
if v, ok := os.LookupEnv(CLIENT_ENV); ok && v != "" {
|
||||
|
||||
@@ -32,9 +32,9 @@ var (
|
||||
}
|
||||
)
|
||||
|
||||
func MapKSResource(ksResourceMap *KSResources, resources []string) []string {
|
||||
func MapExternalResource(externalResourceMap ExternalResources, resources []string) []string {
|
||||
var hostResources []string
|
||||
for k := range *ksResourceMap {
|
||||
for k := range externalResourceMap {
|
||||
for _, resource := range resources {
|
||||
if strings.Contains(k, resource) {
|
||||
hostResources = append(hostResources, k)
|
||||
@@ -44,16 +44,16 @@ func MapKSResource(ksResourceMap *KSResources, resources []string) []string {
|
||||
return hostResources
|
||||
}
|
||||
|
||||
func MapHostResources(ksResourceMap *KSResources) []string {
|
||||
return MapKSResource(ksResourceMap, HostSensorResources)
|
||||
func MapHostResources(externalResourceMap ExternalResources) []string {
|
||||
return MapExternalResource(externalResourceMap, HostSensorResources)
|
||||
}
|
||||
|
||||
func MapImageVulnResources(ksResourceMap *KSResources) []string {
|
||||
return MapKSResource(ksResourceMap, ImageVulnResources)
|
||||
func MapImageVulnResources(externalResourceMap ExternalResources) []string {
|
||||
return MapExternalResource(externalResourceMap, ImageVulnResources)
|
||||
}
|
||||
|
||||
func MapCloudResources(ksResourceMap *KSResources) []string {
|
||||
return MapKSResource(ksResourceMap, CloudResources)
|
||||
func MapCloudResources(externalResourceMap ExternalResources) []string {
|
||||
return MapExternalResource(externalResourceMap, CloudResources)
|
||||
}
|
||||
|
||||
func SetInfoMapForResources(info string, resources []string, errorMap map[string]apis.StatusInfo) {
|
||||
|
||||
@@ -9,7 +9,7 @@ import (
|
||||
|
||||
func (ks *Kubescape) SetCachedConfig(setConfig *metav1.SetConfig) error {
|
||||
|
||||
tenant := getTenantConfig(nil, "", "", getKubernetesApi())
|
||||
tenant := getTenantConfig(nil, "", "", nil)
|
||||
|
||||
if setConfig.Account != "" {
|
||||
tenant.GetConfigObj().AccountID = setConfig.Account
|
||||
@@ -45,6 +45,6 @@ func (ks *Kubescape) ViewCachedConfig(viewConfig *metav1.ViewConfig) error {
|
||||
|
||||
func (ks *Kubescape) DeleteCachedConfig(ctx context.Context, deleteConfig *metav1.DeleteConfig) error {
|
||||
|
||||
tenant := getTenantConfig(nil, "", "", getKubernetesApi()) // change k8sinterface
|
||||
tenant := getTenantConfig(nil, "", "", nil) // change k8sinterface
|
||||
return tenant.DeleteCachedConfig(ctx)
|
||||
}
|
||||
|
||||
@@ -65,13 +65,13 @@ func getRBACHandler(tenantConfig cautils.ITenantConfig, k8s *k8sinterface.Kubern
|
||||
return nil
|
||||
}
|
||||
|
||||
func getReporter(ctx context.Context, tenantConfig cautils.ITenantConfig, reportID string, submit, fwScan bool, scanningContext cautils.ScanningContext) reporter.IReport {
|
||||
func getReporter(ctx context.Context, tenantConfig cautils.ITenantConfig, reportID string, submit, fwScan bool, scanInfo cautils.ScanInfo) reporter.IReport {
|
||||
_, span := otel.Tracer("").Start(ctx, "getReporter")
|
||||
defer span.End()
|
||||
|
||||
if submit {
|
||||
submitData := reporterv2.SubmitContextScan
|
||||
if scanningContext != cautils.ContextCluster {
|
||||
if scanInfo.GetScanningContext() != cautils.ContextCluster {
|
||||
submitData = reporterv2.SubmitContextRepository
|
||||
}
|
||||
return reporterv2.NewReportEventReceiver(tenantConfig.GetConfigObj(), reportID, submitData)
|
||||
@@ -81,7 +81,8 @@ func getReporter(ctx context.Context, tenantConfig cautils.ITenantConfig, report
|
||||
return reporterv2.NewReportMock("", "")
|
||||
}
|
||||
var message string
|
||||
if !fwScan {
|
||||
|
||||
if !fwScan && scanInfo.ScanType != cautils.ScanTypeWorkload {
|
||||
message = "Kubescape does not submit scan results when scanning controls"
|
||||
}
|
||||
|
||||
@@ -94,11 +95,12 @@ func getResourceHandler(ctx context.Context, scanInfo *cautils.ScanInfo, tenantC
|
||||
|
||||
if len(scanInfo.InputPatterns) > 0 || k8s == nil {
|
||||
// scanInfo.HostSensor.SetBool(false)
|
||||
return resourcehandler.NewFileResourceHandler(ctx, scanInfo.InputPatterns, registryAdaptors)
|
||||
return resourcehandler.NewFileResourceHandler()
|
||||
}
|
||||
|
||||
getter.GetKSCloudAPIConnector()
|
||||
rbacObjects := getRBACHandler(tenantConfig, k8s, scanInfo.Submit)
|
||||
return resourcehandler.NewK8sResourceHandler(k8s, getFieldSelector(scanInfo), hostSensorHandler, rbacObjects, registryAdaptors)
|
||||
return resourcehandler.NewK8sResourceHandler(k8s, hostSensorHandler, rbacObjects, registryAdaptors)
|
||||
}
|
||||
|
||||
// getHostSensorHandler yields a IHostSensor that knows how to collect a host's scanned resources.
|
||||
@@ -133,17 +135,6 @@ func getHostSensorHandler(ctx context.Context, scanInfo *cautils.ScanInfo, k8s *
|
||||
}
|
||||
}
|
||||
|
||||
func getFieldSelector(scanInfo *cautils.ScanInfo) resourcehandler.IFieldSelector {
|
||||
if scanInfo.IncludeNamespaces != "" {
|
||||
return resourcehandler.NewIncludeSelector(scanInfo.IncludeNamespaces)
|
||||
}
|
||||
if scanInfo.ExcludedNamespaces != "" {
|
||||
return resourcehandler.NewExcludeSelector(scanInfo.ExcludedNamespaces)
|
||||
}
|
||||
|
||||
return &resourcehandler.EmptySelector{}
|
||||
}
|
||||
|
||||
func policyIdentifierIdentities(pi []cautils.PolicyIdentifier) string {
|
||||
policiesIdentities := ""
|
||||
for i := range pi {
|
||||
@@ -183,13 +174,13 @@ func setSubmitBehavior(scanInfo *cautils.ScanInfo, tenantConfig cautils.ITenantC
|
||||
return
|
||||
}
|
||||
|
||||
scanningContext := scanInfo.GetScanningContext()
|
||||
if scanningContext == cautils.ContextFile || scanningContext == cautils.ContextDir {
|
||||
if scanInfo.Local {
|
||||
scanInfo.Submit = false
|
||||
return
|
||||
}
|
||||
|
||||
if scanInfo.Local {
|
||||
// do not submit single resource scan to BE
|
||||
if scanInfo.ScanObject != nil {
|
||||
scanInfo.Submit = false
|
||||
return
|
||||
}
|
||||
@@ -286,12 +277,12 @@ func getAttackTracksGetter(ctx context.Context, attackTracks, accountID string,
|
||||
}
|
||||
|
||||
// getUIPrinter returns a printer that will be used to print to the program’s UI (terminal)
|
||||
func getUIPrinter(ctx context.Context, verboseMode bool, formatVersion string, attackTree bool, viewType cautils.ViewTypes) printer.IPrinter {
|
||||
func GetUIPrinter(ctx context.Context, scanInfo *cautils.ScanInfo) printer.IPrinter {
|
||||
var p printer.IPrinter
|
||||
if helpers.ToLevel(logger.L().GetLevel()) >= helpers.WarningLevel {
|
||||
p = &printerv2.SilentPrinter{}
|
||||
} else {
|
||||
p = printerv2.NewPrettyPrinter(verboseMode, formatVersion, attackTree, viewType)
|
||||
p = printerv2.NewPrettyPrinter(scanInfo.VerboseMode, scanInfo.FormatVersion, scanInfo.PrintAttackTree, cautils.ViewTypes(scanInfo.View), scanInfo.ScanType, scanInfo.InputPatterns)
|
||||
|
||||
// Since the UI of the program is a CLI (Stdout), it means that it should always print to Stdout
|
||||
p.SetWriter(ctx, os.Stdout.Name())
|
||||
|
||||
@@ -81,7 +81,14 @@ func Test_getUIPrinter(t *testing.T) {
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
logger.L().SetLevel(tt.args.loggerLevel.String())
|
||||
got := getUIPrinter(tt.args.ctx, tt.args.verboseMode, tt.args.formatVersion, tt.args.printAttack, tt.args.viewType)
|
||||
scanInfo := &cautils.ScanInfo{
|
||||
FormatVersion: tt.args.formatVersion,
|
||||
VerboseMode: tt.args.verboseMode,
|
||||
PrintAttackTree: tt.args.printAttack,
|
||||
View: string(tt.args.viewType),
|
||||
}
|
||||
|
||||
got := GetUIPrinter(tt.args.ctx, scanInfo)
|
||||
|
||||
assert.Equal(t, tt.want.structType, reflect.TypeOf(got).String())
|
||||
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
metav1 "github.com/kubescape/kubescape/v2/core/meta/datastructures/v1"
|
||||
"github.com/kubescape/kubescape/v2/core/pkg/resultshandling/printer"
|
||||
v2 "github.com/kubescape/kubescape/v2/core/pkg/resultshandling/printer/v2"
|
||||
"github.com/kubescape/kubescape/v2/core/pkg/resultshandling/printer/v2/prettyprinter/tableprinter/utils"
|
||||
"github.com/olekukonko/tablewriter"
|
||||
)
|
||||
|
||||
@@ -87,19 +88,27 @@ func prettyPrintListFormat(ctx context.Context, targetPolicy string, policies []
|
||||
return
|
||||
}
|
||||
|
||||
header := fmt.Sprintf("Supported %s", targetPolicy)
|
||||
|
||||
policyTable := tablewriter.NewWriter(printer.GetWriter(ctx, ""))
|
||||
|
||||
policyTable.SetAutoWrapText(true)
|
||||
header := fmt.Sprintf("Supported %s", targetPolicy)
|
||||
policyTable.SetHeader([]string{header})
|
||||
policyTable.SetHeaderLine(true)
|
||||
policyTable.SetRowLine(true)
|
||||
policyTable.SetAlignment(tablewriter.ALIGN_CENTER)
|
||||
policyTable.SetUnicodeHV(tablewriter.Regular, tablewriter.Regular)
|
||||
data := v2.Matrix{}
|
||||
|
||||
controlRows := generatePolicyRows(policies)
|
||||
|
||||
var headerColors []tablewriter.Colors
|
||||
for range controlRows[0] {
|
||||
headerColors = append(headerColors, tablewriter.Colors{tablewriter.Bold, tablewriter.FgHiYellowColor})
|
||||
}
|
||||
policyTable.SetHeaderColor(headerColors...)
|
||||
|
||||
data = append(data, controlRows...)
|
||||
|
||||
policyTable.SetAlignment(tablewriter.ALIGN_CENTER)
|
||||
policyTable.AppendBulk(data)
|
||||
policyTable.Render()
|
||||
}
|
||||
@@ -112,13 +121,29 @@ func jsonListFormat(_ context.Context, _ string, policies []string) {
|
||||
|
||||
func prettyPrintControls(ctx context.Context, policies []string) {
|
||||
controlsTable := tablewriter.NewWriter(printer.GetWriter(ctx, ""))
|
||||
controlsTable.SetAutoWrapText(true)
|
||||
controlsTable.SetHeader([]string{"Control ID", "Control Name", "Docs", "Frameworks"})
|
||||
|
||||
controlsTable.SetAutoWrapText(false)
|
||||
controlsTable.SetHeaderLine(true)
|
||||
controlsTable.SetRowLine(true)
|
||||
data := v2.Matrix{}
|
||||
controlsTable.SetUnicodeHV(tablewriter.Regular, tablewriter.Regular)
|
||||
|
||||
controlRows := generateControlRows(policies)
|
||||
|
||||
short := utils.CheckShortTerminalWidth(controlRows, []string{"Control ID", "Control Name", "Docs", "Frameworks"})
|
||||
if short {
|
||||
controlsTable.SetAutoWrapText(false)
|
||||
controlsTable.SetHeader([]string{"Controls"})
|
||||
controlRows = shortFormatControlRows(controlRows)
|
||||
} else {
|
||||
controlsTable.SetHeader([]string{"Control ID", "Control Name", "Docs", "Frameworks"})
|
||||
}
|
||||
var headerColors []tablewriter.Colors
|
||||
for range controlRows[0] {
|
||||
headerColors = append(headerColors, tablewriter.Colors{tablewriter.Bold, tablewriter.FgHiYellowColor})
|
||||
}
|
||||
controlsTable.SetHeaderColor(headerColors...)
|
||||
|
||||
data := v2.Matrix{}
|
||||
data = append(data, controlRows...)
|
||||
|
||||
controlsTable.AppendBulk(data)
|
||||
@@ -134,7 +159,7 @@ func generateControlRows(policies []string) [][]string {
|
||||
|
||||
docs := cautils.GetControlLink(id)
|
||||
|
||||
currentRow := []string{id, control, docs, framework}
|
||||
currentRow := []string{id, control, docs, strings.Replace(framework, " ", "\n", -1)}
|
||||
|
||||
rows = append(rows, currentRow)
|
||||
}
|
||||
@@ -151,3 +176,11 @@ func generatePolicyRows(policies []string) [][]string {
|
||||
}
|
||||
return rows
|
||||
}
|
||||
|
||||
func shortFormatControlRows(controlRows [][]string) [][]string {
|
||||
rows := [][]string{}
|
||||
for _, controlRow := range controlRows {
|
||||
rows = append(rows, []string{fmt.Sprintf("Control ID"+strings.Repeat(" ", 3)+": %+v\nControl Name"+strings.Repeat(" ", 1)+": %+v\nDocs"+strings.Repeat(" ", 9)+": %+v\nFrameworks"+strings.Repeat(" ", 3)+": %+v", controlRow[0], controlRow[1], controlRow[2], strings.Replace(controlRow[3], "\n", " ", -1))})
|
||||
}
|
||||
return rows
|
||||
}
|
||||
|
||||
@@ -6,7 +6,9 @@ import (
|
||||
|
||||
"github.com/kubescape/go-logger"
|
||||
"github.com/kubescape/go-logger/helpers"
|
||||
"github.com/kubescape/go-logger/iconlogger"
|
||||
"github.com/kubescape/k8s-interface/k8sinterface"
|
||||
"github.com/kubescape/k8s-interface/workloadinterface"
|
||||
"github.com/kubescape/kubescape/v2/core/cautils"
|
||||
"github.com/kubescape/kubescape/v2/core/cautils/getter"
|
||||
"github.com/kubescape/kubescape/v2/core/pkg/hostsensorutils"
|
||||
@@ -17,8 +19,10 @@ import (
|
||||
"github.com/kubescape/kubescape/v2/core/pkg/resultshandling"
|
||||
"github.com/kubescape/kubescape/v2/core/pkg/resultshandling/printer"
|
||||
"github.com/kubescape/kubescape/v2/core/pkg/resultshandling/reporter"
|
||||
"github.com/kubescape/kubescape/v2/pkg/imagescan"
|
||||
apisv1 "github.com/kubescape/opa-utils/httpserver/apis/v1"
|
||||
"go.opentelemetry.io/otel"
|
||||
"golang.org/x/exp/slices"
|
||||
|
||||
"github.com/kubescape/opa-utils/resources"
|
||||
)
|
||||
@@ -27,9 +31,9 @@ type componentInterfaces struct {
|
||||
tenantConfig cautils.ITenantConfig
|
||||
resourceHandler resourcehandler.IResourceHandler
|
||||
report reporter.IReport
|
||||
outputPrinters []printer.IPrinter
|
||||
uiPrinter printer.IPrinter
|
||||
hostSensorHandler hostsensorutils.IHostSensor
|
||||
outputPrinters []printer.IPrinter
|
||||
}
|
||||
|
||||
func getInterfaces(ctx context.Context, scanInfo *cautils.ScanInfo) componentInterfaces {
|
||||
@@ -80,10 +84,7 @@ func getInterfaces(ctx context.Context, scanInfo *cautils.ScanInfo) componentInt
|
||||
|
||||
// ================== setup registry adaptors ======================================
|
||||
|
||||
registryAdaptors, err := resourcehandler.NewRegistryAdaptors()
|
||||
if err != nil {
|
||||
logger.L().Ctx(ctx).Error("failed to initialize registry adaptors", helpers.Error(err))
|
||||
}
|
||||
registryAdaptors, _ := resourcehandler.NewRegistryAdaptors()
|
||||
|
||||
// ================== setup resource collector object ======================================
|
||||
|
||||
@@ -92,19 +93,12 @@ func getInterfaces(ctx context.Context, scanInfo *cautils.ScanInfo) componentInt
|
||||
// ================== setup reporter & printer objects ======================================
|
||||
|
||||
// reporting behavior - setup reporter
|
||||
reportHandler := getReporter(ctx, tenantConfig, scanInfo.ScanID, scanInfo.Submit, scanInfo.FrameworkScan, scanInfo.GetScanningContext())
|
||||
reportHandler := getReporter(ctx, tenantConfig, scanInfo.ScanID, scanInfo.Submit, scanInfo.FrameworkScan, *scanInfo)
|
||||
|
||||
// setup printers
|
||||
formats := scanInfo.Formats()
|
||||
outputPrinters := GetOutputPrinters(scanInfo, ctx)
|
||||
|
||||
outputPrinters := make([]printer.IPrinter, 0)
|
||||
for _, format := range formats {
|
||||
printerHandler := resultshandling.NewPrinter(ctx, format, scanInfo.FormatVersion, scanInfo.PrintAttackTree, scanInfo.VerboseMode, cautils.ViewTypes(scanInfo.View))
|
||||
printerHandler.SetWriter(ctx, scanInfo.Output)
|
||||
outputPrinters = append(outputPrinters, printerHandler)
|
||||
}
|
||||
|
||||
uiPrinter := getUIPrinter(ctx, scanInfo.VerboseMode, scanInfo.FormatVersion, scanInfo.PrintAttackTree, cautils.ViewTypes(scanInfo.View))
|
||||
uiPrinter := GetUIPrinter(ctx, scanInfo)
|
||||
|
||||
// ================== return interface ======================================
|
||||
|
||||
@@ -118,10 +112,22 @@ func getInterfaces(ctx context.Context, scanInfo *cautils.ScanInfo) componentInt
|
||||
}
|
||||
}
|
||||
|
||||
func GetOutputPrinters(scanInfo *cautils.ScanInfo, ctx context.Context) []printer.IPrinter {
|
||||
formats := scanInfo.Formats()
|
||||
|
||||
outputPrinters := make([]printer.IPrinter, 0)
|
||||
for _, format := range formats {
|
||||
printerHandler := resultshandling.NewPrinter(ctx, format, scanInfo.FormatVersion, scanInfo.PrintAttackTree, scanInfo.VerboseMode, cautils.ViewTypes(scanInfo.View))
|
||||
printerHandler.SetWriter(ctx, scanInfo.Output)
|
||||
outputPrinters = append(outputPrinters, printerHandler)
|
||||
}
|
||||
return outputPrinters
|
||||
}
|
||||
|
||||
func (ks *Kubescape) Scan(ctx context.Context, scanInfo *cautils.ScanInfo) (*resultshandling.ResultsHandler, error) {
|
||||
ctxInit, spanInit := otel.Tracer("").Start(ctx, "initialization")
|
||||
|
||||
logger.L().Info("Kubescape scanner starting")
|
||||
logger.InitLogger(iconlogger.LoggerName)
|
||||
logger.L().Start("Kubescape scanner initializing")
|
||||
|
||||
// ===================== Initialization =====================
|
||||
scanInfo.Init(ctxInit) // initialize scan info
|
||||
@@ -149,21 +155,32 @@ func (ks *Kubescape) Scan(ctx context.Context, scanInfo *cautils.ScanInfo) (*res
|
||||
// remove host scanner components
|
||||
defer func() {
|
||||
if err := interfaces.hostSensorHandler.TearDown(); err != nil {
|
||||
logger.L().Ctx(ctx).Error("failed to tear down host scanner", helpers.Error(err))
|
||||
logger.L().Ctx(ctx).StopError("Failed to tear down host scanner", helpers.Error(err))
|
||||
}
|
||||
}()
|
||||
|
||||
logger.L().StopSuccess("Initialized scanner")
|
||||
|
||||
resultsHandling := resultshandling.NewResultsHandler(interfaces.report, interfaces.outputPrinters, interfaces.uiPrinter)
|
||||
|
||||
// ===================== policies & resources =====================
|
||||
ctxPolicies, spanPolicies := otel.Tracer("").Start(ctxInit, "policies & resources")
|
||||
policyHandler := policyhandler.NewPolicyHandler(interfaces.resourceHandler)
|
||||
scanData, err := policyHandler.CollectResources(ctxPolicies, scanInfo.PolicyIdentifier, scanInfo, cautils.NewProgressHandler(""))
|
||||
// ===================== policies =====================
|
||||
ctxPolicies, spanPolicies := otel.Tracer("").Start(ctxInit, "policies")
|
||||
policyHandler := policyhandler.NewPolicyHandler()
|
||||
scanData, err := policyHandler.CollectPolicies(ctxPolicies, scanInfo.PolicyIdentifier, scanInfo)
|
||||
if err != nil {
|
||||
spanInit.End()
|
||||
return resultsHandling, err
|
||||
}
|
||||
spanPolicies.End()
|
||||
|
||||
// ===================== resources =====================
|
||||
ctxResources, spanResources := otel.Tracer("").Start(ctxInit, "resources")
|
||||
err = resourcehandler.CollectResources(ctxResources, interfaces.resourceHandler, scanInfo.PolicyIdentifier, scanData, cautils.NewProgressHandler(""), scanInfo)
|
||||
if err != nil {
|
||||
spanInit.End()
|
||||
return resultsHandling, err
|
||||
}
|
||||
spanResources.End()
|
||||
spanInit.End()
|
||||
|
||||
// ========================= opa testing =====================
|
||||
@@ -172,20 +189,28 @@ func (ks *Kubescape) Scan(ctx context.Context, scanInfo *cautils.ScanInfo) (*res
|
||||
|
||||
deps := resources.NewRegoDependenciesData(k8sinterface.GetK8sConfig(), interfaces.tenantConfig.GetContextName())
|
||||
reportResults := opaprocessor.NewOPAProcessor(scanData, deps)
|
||||
if err := reportResults.ProcessRulesListener(ctxOpa, cautils.NewProgressHandler("")); err != nil {
|
||||
if err := reportResults.ProcessRulesListener(ctxOpa, cautils.NewProgressHandler(""), scanInfo); err != nil {
|
||||
// TODO - do something
|
||||
return resultsHandling, fmt.Errorf("%w", err)
|
||||
}
|
||||
|
||||
// ======================== prioritization ===================
|
||||
_, spanPrioritization := otel.Tracer("").Start(ctxOpa, "prioritization")
|
||||
if priotizationHandler, err := resourcesprioritization.NewResourcesPrioritizationHandler(ctxOpa, scanInfo.Getters.AttackTracksGetter, scanInfo.PrintAttackTree); err != nil {
|
||||
logger.L().Ctx(ctx).Warning("failed to get attack tracks, this may affect the scanning results", helpers.Error(err))
|
||||
} else if err := priotizationHandler.PrioritizeResources(scanData); err != nil {
|
||||
return resultsHandling, fmt.Errorf("%w", err)
|
||||
if scanInfo.PrintAttackTree || isPrioritizationScanType(scanInfo.ScanType) {
|
||||
_, spanPrioritization := otel.Tracer("").Start(ctxOpa, "prioritization")
|
||||
if priotizationHandler, err := resourcesprioritization.NewResourcesPrioritizationHandler(ctxOpa, scanInfo.Getters.AttackTracksGetter, scanInfo.PrintAttackTree); err != nil {
|
||||
logger.L().Ctx(ctx).Warning("failed to get attack tracks, this may affect the scanning results", helpers.Error(err))
|
||||
} else if err := priotizationHandler.PrioritizeResources(scanData); err != nil {
|
||||
return resultsHandling, fmt.Errorf("%w", err)
|
||||
}
|
||||
if err == nil && isPrioritizationScanType(scanInfo.ScanType) {
|
||||
scanData.SetTopWorkloads()
|
||||
}
|
||||
spanPrioritization.End()
|
||||
}
|
||||
spanPrioritization.End()
|
||||
|
||||
if scanInfo.ScanImages {
|
||||
scanImages(scanInfo.ScanType, scanData, ctx, resultsHandling)
|
||||
}
|
||||
// ========================= results handling =====================
|
||||
resultsHandling.SetData(scanData)
|
||||
|
||||
@@ -195,3 +220,62 @@ func (ks *Kubescape) Scan(ctx context.Context, scanInfo *cautils.ScanInfo) (*res
|
||||
|
||||
return resultsHandling, nil
|
||||
}
|
||||
|
||||
func scanImages(scanType cautils.ScanTypes, scanData *cautils.OPASessionObj, ctx context.Context, resultsHandling *resultshandling.ResultsHandler) {
|
||||
imagesToScan := []string{}
|
||||
|
||||
if scanType == cautils.ScanTypeWorkload {
|
||||
containers, err := workloadinterface.NewWorkloadObj(scanData.SingleResourceScan.GetObject()).GetContainers()
|
||||
if err != nil {
|
||||
logger.L().Error("failed to get containers", helpers.Error(err))
|
||||
return
|
||||
}
|
||||
for _, container := range containers {
|
||||
if !slices.Contains(imagesToScan, container.Image) {
|
||||
imagesToScan = append(imagesToScan, container.Image)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for _, workload := range scanData.AllResources {
|
||||
containers, err := workloadinterface.NewWorkloadObj(workload.GetObject()).GetContainers()
|
||||
if err != nil {
|
||||
logger.L().Error(fmt.Sprintf("failed to get containers for kind: %s, name: %s, namespace: %s", workload.GetKind(), workload.GetName(), workload.GetNamespace()), helpers.Error(err))
|
||||
continue
|
||||
}
|
||||
for _, container := range containers {
|
||||
if !slices.Contains(imagesToScan, container.Image) {
|
||||
imagesToScan = append(imagesToScan, container.Image)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
dbCfg, _ := imagescan.NewDefaultDBConfig()
|
||||
svc := imagescan.NewScanService(dbCfg)
|
||||
|
||||
for _, img := range imagesToScan {
|
||||
logger.L().Start("Scanning", helpers.String("image", img))
|
||||
if err := scanSingleImage(ctx, img, svc, resultsHandling); err != nil {
|
||||
logger.L().StopError("failed to scan", helpers.String("image", img), helpers.Error(err))
|
||||
}
|
||||
logger.L().StopSuccess("Scanned successfully", helpers.String("image", img))
|
||||
}
|
||||
}
|
||||
|
||||
func scanSingleImage(ctx context.Context, img string, svc imagescan.Service, resultsHandling *resultshandling.ResultsHandler) error {
|
||||
|
||||
scanResults, err := svc.Scan(ctx, img, imagescan.RegistryCredentials{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
resultsHandling.ImageScanData = append(resultsHandling.ImageScanData, cautils.ImageScanData{
|
||||
Image: img,
|
||||
PresenterConfig: scanResults,
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
func isPrioritizationScanType(scanType cautils.ScanTypes) bool {
|
||||
return scanType == cautils.ScanTypeCluster || scanType == cautils.ScanTypeRepo
|
||||
}
|
||||
|
||||
59
core/metrics/metrics.go
Normal file
59
core/metrics/metrics.go
Normal file
@@ -0,0 +1,59 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/kubescape/go-logger"
|
||||
"github.com/kubescape/go-logger/helpers"
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/metric"
|
||||
)
|
||||
|
||||
const (
|
||||
METER_NAME = "github.com/kubescape/kubescape/v2"
|
||||
METRIC_NAME_PREFIX = "kubescape"
|
||||
)
|
||||
|
||||
var initOnce sync.Once
|
||||
|
||||
// Metrics are defined here
|
||||
var (
|
||||
kubernetesResourcesCount metric.Int64UpDownCounter
|
||||
workerNodesCount metric.Int64UpDownCounter
|
||||
)
|
||||
|
||||
// Init initializes the metrics
|
||||
func Init() {
|
||||
initOnce.Do(func() {
|
||||
var err error
|
||||
meterProvider := otel.GetMeterProvider()
|
||||
meter := meterProvider.Meter(METER_NAME)
|
||||
metricName := func(name string) string {
|
||||
return strings.Join([]string{METRIC_NAME_PREFIX, name}, "_")
|
||||
}
|
||||
|
||||
if kubernetesResourcesCount, err = meter.Int64UpDownCounter(metricName("kubernetes_resources_count")); err != nil {
|
||||
logger.L().Error("failed to register instrument", helpers.Error(err))
|
||||
}
|
||||
|
||||
if workerNodesCount, err = meter.Int64UpDownCounter(metricName("worker_nodes_count")); err != nil {
|
||||
logger.L().Error("failed to register instrument", helpers.Error(err))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateKubernetesResourcesCount updates the kubernetes resources count metric
|
||||
func UpdateKubernetesResourcesCount(ctx context.Context, value int64) {
|
||||
if kubernetesResourcesCount != nil {
|
||||
kubernetesResourcesCount.Add(ctx, value)
|
||||
}
|
||||
}
|
||||
|
||||
// UpdateWorkerNodesCount updates the worker nodes count metric
|
||||
func UpdateWorkerNodesCount(ctx context.Context, value int64) {
|
||||
if workerNodesCount != nil {
|
||||
workerNodesCount.Add(ctx, value)
|
||||
}
|
||||
}
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"encoding/json"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/armosec/armoapi-go/identifiers"
|
||||
"github.com/kubescape/k8s-interface/workloadinterface"
|
||||
"github.com/kubescape/opa-utils/reporthandling"
|
||||
)
|
||||
@@ -38,9 +39,17 @@ func MockFramework_0006_0013() *reporthandling.Framework {
|
||||
Name: "framework-0006-0013",
|
||||
},
|
||||
}
|
||||
c06 := &reporthandling.Control{}
|
||||
c06 := &reporthandling.Control{ScanningScope: &reporthandling.ScanningScope{
|
||||
Matches: []reporthandling.ScanningScopeType{
|
||||
reporthandling.ScopeCluster,
|
||||
},
|
||||
}}
|
||||
json.Unmarshal([]byte(mockControl_0006), c06)
|
||||
c13 := &reporthandling.Control{}
|
||||
c13 := &reporthandling.Control{ScanningScope: &reporthandling.ScanningScope{
|
||||
Matches: []reporthandling.ScanningScopeType{
|
||||
reporthandling.ScopeCluster,
|
||||
},
|
||||
}}
|
||||
json.Unmarshal([]byte(mockControl_0013), c13)
|
||||
fw.Controls = []reporthandling.Control{*c06, *c13}
|
||||
return fw
|
||||
@@ -53,7 +62,11 @@ func MockFramework_0044() *reporthandling.Framework {
|
||||
Name: "framework-0044",
|
||||
},
|
||||
}
|
||||
c44 := &reporthandling.Control{}
|
||||
c44 := &reporthandling.Control{ScanningScope: &reporthandling.ScanningScope{
|
||||
Matches: []reporthandling.ScanningScopeType{
|
||||
reporthandling.ScopeCluster,
|
||||
},
|
||||
}}
|
||||
json.Unmarshal([]byte(mockControl_0044), c44)
|
||||
|
||||
fw.Controls = []reporthandling.Control{*c44}
|
||||
@@ -73,11 +86,11 @@ func MockExceptionAllKinds(policy *armotypes.PosturePolicy) *armotypes.PostureEx
|
||||
return &armotypes.PostureExceptionPolicy{
|
||||
PosturePolicies: []armotypes.PosturePolicy{*policy},
|
||||
Actions: []armotypes.PostureExceptionPolicyActions{armotypes.AlertOnly},
|
||||
Resources: []armotypes.PortalDesignator{
|
||||
Resources: []identifiers.PortalDesignator{
|
||||
{
|
||||
DesignatorType: armotypes.DesignatorAttributes,
|
||||
DesignatorType: identifiers.DesignatorAttributes,
|
||||
Attributes: map[string]string{
|
||||
armotypes.AttributeKind: ".*",
|
||||
identifiers.AttributeKind: ".*",
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
@@ -3,7 +3,7 @@ package containerscan
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/armosec/armoapi-go/identifiers"
|
||||
)
|
||||
|
||||
func (layer *ScanResultLayer) GetFilesByPackage(pkgname string) (files *PkgFiles) {
|
||||
@@ -24,11 +24,11 @@ func (layer *ScanResultLayer) GetPackagesNames() []string {
|
||||
return pkgsNames
|
||||
}
|
||||
|
||||
func (scanresult *ScanResultReport) GetDesignatorsNContext() (*armotypes.PortalDesignator, []armotypes.ArmoContext) {
|
||||
designatorsObj := armotypes.AttributesDesignatorsFromWLID(scanresult.WLID)
|
||||
func (scanresult *ScanResultReport) GetDesignatorsNContext() (*identifiers.PortalDesignator, []identifiers.ArmoContext) {
|
||||
designatorsObj := identifiers.AttributesDesignatorsFromWLID(scanresult.WLID)
|
||||
designatorsObj.Attributes["containerName"] = scanresult.ContainerName
|
||||
designatorsObj.Attributes["customerGUID"] = scanresult.CustomerGUID
|
||||
contextObj := armotypes.DesignatorToArmoContext(designatorsObj, "designators")
|
||||
contextObj := identifiers.DesignatorToArmoContext(designatorsObj, "designators")
|
||||
return designatorsObj, contextObj
|
||||
}
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
package containerscan
|
||||
|
||||
import (
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/armosec/armoapi-go/identifiers"
|
||||
cautils "github.com/armosec/utils-k8s-go/armometadata"
|
||||
)
|
||||
|
||||
@@ -69,8 +69,8 @@ func (scanresult *ScanResultReport) Summarize() *ElasticContainerScanSummaryResu
|
||||
ListOfDangerousArtifcats: scanresult.ListOfDangerousArtifcats,
|
||||
}
|
||||
|
||||
summary.Cluster = designatorsObj.Attributes[armotypes.AttributeCluster]
|
||||
summary.Namespace = designatorsObj.Attributes[armotypes.AttributeNamespace]
|
||||
summary.Cluster = designatorsObj.Attributes[identifiers.AttributeCluster]
|
||||
summary.Namespace = designatorsObj.Attributes[identifiers.AttributeNamespace]
|
||||
|
||||
imageInfo, e2 := cautils.ImageTagToImageInfo(scanresult.ImgTag)
|
||||
if e2 == nil {
|
||||
|
||||
@@ -1,10 +1,12 @@
|
||||
package containerscan
|
||||
|
||||
import "github.com/armosec/armoapi-go/armotypes"
|
||||
import (
|
||||
"github.com/armosec/armoapi-go/identifiers"
|
||||
)
|
||||
|
||||
type ElasticContainerVulnerabilityResult struct {
|
||||
Designators armotypes.PortalDesignator `json:"designators"`
|
||||
Context []armotypes.ArmoContext `json:"context"`
|
||||
Designators identifiers.PortalDesignator `json:"designators"`
|
||||
Context []identifiers.ArmoContext `json:"context"`
|
||||
|
||||
WLID string `json:"wlid"`
|
||||
ContainerScanID string `json:"containersScanID"`
|
||||
@@ -35,8 +37,8 @@ type SeverityStats struct {
|
||||
}
|
||||
|
||||
type ElasticContainerScanSeveritySummary struct {
|
||||
Designators armotypes.PortalDesignator `json:"designators"`
|
||||
Context []armotypes.ArmoContext `json:"context"`
|
||||
Designators identifiers.PortalDesignator `json:"designators"`
|
||||
Context []identifiers.ArmoContext `json:"context"`
|
||||
|
||||
SeverityStats
|
||||
CustomerGUID string `json:"customerGUID"`
|
||||
@@ -57,8 +59,8 @@ type ElasticContainerScanSeveritySummary struct {
|
||||
|
||||
type ElasticContainerScanSummaryResult struct {
|
||||
SeverityStats
|
||||
Designators armotypes.PortalDesignator `json:"designators"`
|
||||
Context []armotypes.ArmoContext `json:"context"`
|
||||
Designators identifiers.PortalDesignator `json:"designators"`
|
||||
Context []identifiers.ArmoContext `json:"context"`
|
||||
|
||||
CustomerGUID string `json:"customerGUID"`
|
||||
ContainerScanID string `json:"containersScanID"`
|
||||
|
||||
@@ -20,7 +20,7 @@ type FixHandler struct {
|
||||
|
||||
// ResourceFixInfo is a struct that holds the information about the resource that needs to be fixed
|
||||
type ResourceFixInfo struct {
|
||||
YamlExpressions map[string]*armotypes.FixPath
|
||||
YamlExpressions map[string]armotypes.FixPath
|
||||
Resource *reporthandling.Resource
|
||||
FilePath string
|
||||
DocumentIndex int
|
||||
|
||||
@@ -73,19 +73,17 @@ func isSupportedScanningTarget(report *reporthandlingv2.PostureReport) error {
|
||||
}
|
||||
|
||||
func getLocalPath(report *reporthandlingv2.PostureReport) string {
|
||||
if report.Metadata.ScanMetadata.ScanningTarget == reporthandlingv2.GitLocal {
|
||||
|
||||
switch report.Metadata.ScanMetadata.ScanningTarget {
|
||||
case reporthandlingv2.GitLocal:
|
||||
return report.Metadata.ContextMetadata.RepoContextMetadata.LocalRootPath
|
||||
}
|
||||
|
||||
if report.Metadata.ScanMetadata.ScanningTarget == reporthandlingv2.Directory {
|
||||
case reporthandlingv2.Directory:
|
||||
return report.Metadata.ContextMetadata.DirectoryContextMetadata.BasePath
|
||||
}
|
||||
|
||||
if report.Metadata.ScanMetadata.ScanningTarget == reporthandlingv2.File {
|
||||
case reporthandlingv2.File:
|
||||
return filepath.Dir(report.Metadata.ContextMetadata.FileContextMetadata.FilePath)
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
func (h *FixHandler) buildResourcesMap() map[string]*reporthandling.Resource {
|
||||
@@ -155,7 +153,7 @@ func (h *FixHandler) PrepareResourcesToFix(ctx context.Context) []ResourceFixInf
|
||||
rfi := ResourceFixInfo{
|
||||
FilePath: absolutePath,
|
||||
Resource: resourceObj,
|
||||
YamlExpressions: make(map[string]*armotypes.FixPath, 0),
|
||||
YamlExpressions: make(map[string]armotypes.FixPath, 0),
|
||||
DocumentIndex: documentIndex,
|
||||
}
|
||||
|
||||
@@ -185,7 +183,7 @@ func (h *FixHandler) PrintExpectedChanges(resourcesToFix []ResourceFixInfo) {
|
||||
|
||||
i := 1
|
||||
for _, fixPath := range resourceFixInfo.YamlExpressions {
|
||||
sb.WriteString(fmt.Sprintf("\t%d) %s = %s\n", i, (*fixPath).Path, (*fixPath).Value))
|
||||
sb.WriteString(fmt.Sprintf("\t%d) %s = %s\n", i, fixPath.Path, fixPath.Value))
|
||||
i++
|
||||
}
|
||||
sb.WriteString("\n------\n")
|
||||
@@ -201,14 +199,14 @@ func (h *FixHandler) ApplyChanges(ctx context.Context, resourcesToFix []Resource
|
||||
fileYamlExpressions := h.getFileYamlExpressions(resourcesToFix)
|
||||
|
||||
for filepath, yamlExpression := range fileYamlExpressions {
|
||||
fileAsString, err := getFileString(filepath)
|
||||
fileAsString, err := GetFileString(filepath)
|
||||
|
||||
if err != nil {
|
||||
errors = append(errors, err)
|
||||
continue
|
||||
}
|
||||
|
||||
fixedYamlString, err := h.ApplyFixToContent(ctx, fileAsString, yamlExpression)
|
||||
fixedYamlString, err := ApplyFixToContent(ctx, fileAsString, yamlExpression)
|
||||
|
||||
if err != nil {
|
||||
errors = append(errors, fmt.Errorf("Failed to fix file %s: %w ", filepath, err))
|
||||
@@ -242,7 +240,8 @@ func (h *FixHandler) getFilePathAndIndex(filePathWithIndex string) (filePath str
|
||||
}
|
||||
}
|
||||
|
||||
func (h *FixHandler) ApplyFixToContent(ctx context.Context, yamlAsString, yamlExpression string) (fixedString string, err error) {
|
||||
func ApplyFixToContent(ctx context.Context, yamlAsString, yamlExpression string) (fixedString string, err error) {
|
||||
yamlAsString = sanitizeYaml(yamlAsString)
|
||||
newline := determineNewlineSeparator(yamlAsString)
|
||||
|
||||
yamlLines := strings.Split(yamlAsString, newline)
|
||||
@@ -264,6 +263,7 @@ func (h *FixHandler) ApplyFixToContent(ctx context.Context, yamlAsString, yamlEx
|
||||
fixedYamlLines := getFixedYamlLines(yamlLines, fixInfo, newline)
|
||||
|
||||
fixedString = getStringFromSlice(fixedYamlLines, newline)
|
||||
fixedString = revertSanitizeYaml(fixedString)
|
||||
|
||||
return fixedString, nil
|
||||
}
|
||||
@@ -301,8 +301,8 @@ func (rfi *ResourceFixInfo) addYamlExpressionsFromResourceAssociatedControl(docu
|
||||
continue
|
||||
}
|
||||
|
||||
yamlExpression := fixPathToValidYamlExpression(rulePaths.FixPath.Path, rulePaths.FixPath.Value, documentIndex)
|
||||
rfi.YamlExpressions[yamlExpression] = &rulePaths.FixPath
|
||||
yamlExpression := FixPathToValidYamlExpression(rulePaths.FixPath.Path, rulePaths.FixPath.Value, documentIndex)
|
||||
rfi.YamlExpressions[yamlExpression] = rulePaths.FixPath
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -317,7 +317,7 @@ func reduceYamlExpressions(resource *ResourceFixInfo) string {
|
||||
return strings.Join(expressions, " | ")
|
||||
}
|
||||
|
||||
func fixPathToValidYamlExpression(fixPath, value string, documentIndexInYaml int) string {
|
||||
func FixPathToValidYamlExpression(fixPath, value string, documentIndexInYaml int) string {
|
||||
isStringValue := true
|
||||
if _, err := strconv.ParseBool(value); err == nil {
|
||||
isStringValue = false
|
||||
@@ -340,7 +340,7 @@ func joinStrings(inputStrings ...string) string {
|
||||
return strings.Join(inputStrings, "")
|
||||
}
|
||||
|
||||
func getFileString(filepath string) (string, error) {
|
||||
func GetFileString(filepath string) (string, error) {
|
||||
bytes, err := os.ReadFile(filepath)
|
||||
|
||||
if err != nil {
|
||||
@@ -368,3 +368,28 @@ func determineNewlineSeparator(contents string) string {
|
||||
return unixNewline
|
||||
}
|
||||
}
|
||||
|
||||
// sanitizeYaml receives a YAML file as a string, sanitizes it and returns the result
|
||||
//
|
||||
// Callers should remember to call the corresponding revertSanitizeYaml function.
|
||||
//
|
||||
// It applies the following sanitization:
|
||||
//
|
||||
// - Since `yaml/v3` fails to serialize documents starting with a document
|
||||
// separator, we comment it out to be compatible.
|
||||
func sanitizeYaml(fileAsString string) string {
|
||||
if fileAsString[:3] == "---" {
|
||||
fileAsString = "# " + fileAsString
|
||||
}
|
||||
return fileAsString
|
||||
}
|
||||
|
||||
// revertSanitizeYaml receives a sanitized YAML file as a string and reverts the applied sanitization
|
||||
//
|
||||
// For sanitization details, refer to the sanitizeYaml() function.
|
||||
func revertSanitizeYaml(fixedYamlString string) string {
|
||||
if fixedYamlString[:5] == "# ---" {
|
||||
fixedYamlString = fixedYamlString[2:]
|
||||
}
|
||||
return fixedYamlString
|
||||
}
|
||||
|
||||
@@ -101,6 +101,13 @@ func getTestCases() []indentationTestCase {
|
||||
"inserts/tc-11-01-expected.yaml",
|
||||
},
|
||||
|
||||
// Starts with ---
|
||||
{
|
||||
"inserts/tc-12-00-begin-with-document-separator.yaml",
|
||||
"select(di==0).spec.containers[0].securityContext.allowPrivilegeEscalation |= false",
|
||||
"inserts/tc-12-01-expected.yaml",
|
||||
},
|
||||
|
||||
// Removal Scenarios
|
||||
{
|
||||
"removals/tc-01-00-input.yaml",
|
||||
@@ -118,10 +125,10 @@ func getTestCases() []indentationTestCase {
|
||||
"removals/tc-03-01-expected.yaml",
|
||||
},
|
||||
{
|
||||
"removes/tc-04-00-input.yaml",
|
||||
"removals/tc-04-00-input.yaml",
|
||||
`del(select(di==0).spec.containers[0].securityContext) |
|
||||
del(select(di==1).spec.containers[1])`,
|
||||
"removes/tc-04-01-expected.yaml",
|
||||
"removals/tc-04-01-expected.yaml",
|
||||
},
|
||||
|
||||
// Replace Scenarios
|
||||
@@ -162,6 +169,12 @@ func getTestCases() []indentationTestCase {
|
||||
select(di==0).spec.securityContext.runAsRoot |= false`,
|
||||
"hybrids/tc-04-01-expected.yaml",
|
||||
},
|
||||
{
|
||||
"hybrids/tc-05-00-input-leading-doc-separator.yaml",
|
||||
`del(select(di==0).spec.containers[0].securityContext) |
|
||||
select(di==0).spec.securityContext.runAsRoot |= false`,
|
||||
"hybrids/tc-05-01-expected.yaml",
|
||||
},
|
||||
}
|
||||
|
||||
return indentationTestCases
|
||||
@@ -169,22 +182,28 @@ func getTestCases() []indentationTestCase {
|
||||
|
||||
func TestApplyFixKeepsFormatting(t *testing.T) {
|
||||
testCases := getTestCases()
|
||||
getTestDataPath := func(filename string) string {
|
||||
currentFile := "testdata/" + filename
|
||||
return filepath.Join(testutils.CurrentDir(), currentFile)
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.inputFile, func(t *testing.T) {
|
||||
getTestDataPath := func(filename string) string {
|
||||
currentFile := "testdata/" + filename
|
||||
return filepath.Join(testutils.CurrentDir(), currentFile)
|
||||
inputFilename := getTestDataPath(tc.inputFile)
|
||||
input, err := os.ReadFile(inputFilename)
|
||||
if err != nil {
|
||||
t.Fatalf(`Unable to open file %s due to: %v`, inputFilename, err)
|
||||
}
|
||||
expectedFilename := getTestDataPath(tc.expectedFile)
|
||||
wantRaw, err := os.ReadFile(expectedFilename)
|
||||
if err != nil {
|
||||
t.Fatalf(`Unable to open file %s due to: %v`, expectedFilename, err)
|
||||
}
|
||||
|
||||
input, _ := os.ReadFile(getTestDataPath(tc.inputFile))
|
||||
wantRaw, _ := os.ReadFile(getTestDataPath(tc.expectedFile))
|
||||
want := string(wantRaw)
|
||||
expression := tc.yamlExpression
|
||||
|
||||
h, _ := NewFixHandlerMock()
|
||||
|
||||
got, _ := h.ApplyFixToContent(context.TODO(), string(input), expression)
|
||||
fileAsString := string(input)
|
||||
got, _ := ApplyFixToContent(context.TODO(), fileAsString, expression)
|
||||
|
||||
assert.Equalf(
|
||||
t, want, got,
|
||||
@@ -241,7 +260,7 @@ func Test_fixPathToValidYamlExpression(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := fixPathToValidYamlExpression(tt.args.fixPath, tt.args.value, tt.args.documentIndexInYaml); got != tt.want {
|
||||
if got := FixPathToValidYamlExpression(tt.args.fixPath, tt.args.value, tt.args.documentIndexInYaml); got != tt.want {
|
||||
t.Errorf("fixPathToValidYamlExpression() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
|
||||
22
core/pkg/fixhandler/testdata/hybrids/tc-05-00-input-leading-doc-separator.yaml
vendored
Normal file
22
core/pkg/fixhandler/testdata/hybrids/tc-05-00-input-leading-doc-separator.yaml
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
# Fix to Apply:
|
||||
# REMOVE:
|
||||
# "del(select(di==0).spec.containers[0].securityContext)"
|
||||
|
||||
# INSERT:
|
||||
# select(di==0).spec.securityContext.runAsRoot: false
|
||||
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: insert_to_mapping_node_1
|
||||
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx_container
|
||||
|
||||
image: nginx
|
||||
|
||||
securityContext:
|
||||
runAsRoot: true
|
||||
22
core/pkg/fixhandler/testdata/hybrids/tc-05-01-expected.yaml
vendored
Normal file
22
core/pkg/fixhandler/testdata/hybrids/tc-05-01-expected.yaml
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
# Fix to Apply:
|
||||
# REMOVE:
|
||||
# "del(select(di==0).spec.containers[0].securityContext)"
|
||||
|
||||
# INSERT:
|
||||
# select(di==0).spec.securityContext.runAsRoot: false
|
||||
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: insert_to_mapping_node_1
|
||||
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx_container
|
||||
|
||||
image: nginx
|
||||
securityContext:
|
||||
runAsRoot: false
|
||||
|
||||
10
core/pkg/fixhandler/testdata/inserts/tc-12-00-begin-with-document-separator.yaml
vendored
Normal file
10
core/pkg/fixhandler/testdata/inserts/tc-12-00-begin-with-document-separator.yaml
vendored
Normal file
@@ -0,0 +1,10 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: begin-with-document-separator
|
||||
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx_container
|
||||
image: nginx
|
||||
12
core/pkg/fixhandler/testdata/inserts/tc-12-01-expected.yaml
vendored
Normal file
12
core/pkg/fixhandler/testdata/inserts/tc-12-01-expected.yaml
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: begin-with-document-separator
|
||||
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx_container
|
||||
image: nginx
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -23,8 +23,6 @@ func decodeDocumentRoots(yamlAsString string) ([]yaml.Node, error) {
|
||||
var node yaml.Node
|
||||
err := dec.Decode(&node)
|
||||
|
||||
nodes = append(nodes, node)
|
||||
|
||||
if errors.Is(err, io.EOF) {
|
||||
break
|
||||
}
|
||||
@@ -32,6 +30,8 @@ func decodeDocumentRoots(yamlAsString string) ([]yaml.Node, error) {
|
||||
return nil, fmt.Errorf("Cannot Decode File as YAML")
|
||||
|
||||
}
|
||||
|
||||
nodes = append(nodes, node)
|
||||
}
|
||||
|
||||
return nodes, nil
|
||||
|
||||
@@ -6,13 +6,13 @@ metadata:
|
||||
k8s-app: kubescape-host-scanner
|
||||
kubernetes.io/metadata.name: kubescape-host-scanner
|
||||
tier: kubescape-host-scanner-control-plane
|
||||
name: kubescape-host-scanner
|
||||
name: kubescape
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
name: host-scanner
|
||||
namespace: kubescape-host-scanner
|
||||
namespace: kubescape
|
||||
labels:
|
||||
app: host-scanner
|
||||
k8s-app: kubescape-host-scanner
|
||||
@@ -32,7 +32,7 @@ spec:
|
||||
- operator: Exists
|
||||
containers:
|
||||
- name: host-sensor
|
||||
image: quay.io/kubescape/host-scanner:v1.0.54
|
||||
image: quay.io/kubescape/host-scanner:v1.0.61
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: true
|
||||
privileged: true
|
||||
@@ -52,12 +52,17 @@ spec:
|
||||
volumeMounts:
|
||||
- mountPath: /host_fs
|
||||
name: host-filesystem
|
||||
readinessProbe:
|
||||
startupProbe:
|
||||
httpGet:
|
||||
path: /kernelVersion
|
||||
path: /readyz
|
||||
port: 7888
|
||||
initialDelaySeconds: 1
|
||||
periodSeconds: 1
|
||||
failureThreshold: 30
|
||||
periodSeconds: 1
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 7888
|
||||
periodSeconds: 10
|
||||
terminationGracePeriodSeconds: 120
|
||||
dnsPolicy: ClusterFirstWithHostNet
|
||||
automountServiceAccountToken: false
|
||||
|
||||
@@ -17,6 +17,7 @@ import (
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
)
|
||||
|
||||
@@ -51,7 +52,7 @@ func NewHostSensorHandler(k8sObj *k8sinterface.KubernetesApi, hostSensorYAMLFile
|
||||
if hostSensorYAMLFile != "" {
|
||||
d, err := loadHostSensorFromFile(hostSensorYAMLFile)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to load host-scan yaml file, reason: %w", err)
|
||||
return nil, fmt.Errorf("failed to load host-scanner yaml file, reason: %w", err)
|
||||
}
|
||||
hostSensorYAML = d
|
||||
}
|
||||
@@ -82,7 +83,10 @@ func (hsh *HostSensorHandler) Init(ctx context.Context) error {
|
||||
// store pod names
|
||||
// make sure all pods are running, after X seconds treat has running anyway, and log an error on the pods not running yet
|
||||
logger.L().Info("Installing host scanner")
|
||||
logger.L().Debug("The host scanner is a DaemonSet that runs on each node in the cluster. The DaemonSet will be running in it's own namespace and will be deleted once the scan is completed. If you do not wish to install the host scanner, please run the scan without the --enable-host-scan flag.")
|
||||
|
||||
// log is used to avoid log duplication
|
||||
// coming from the different host-scanner instances
|
||||
log := NewLogCoupling()
|
||||
|
||||
cautils.StartSpinner()
|
||||
defer cautils.StopSpinner()
|
||||
@@ -91,9 +95,9 @@ func (hsh *HostSensorHandler) Init(ctx context.Context) error {
|
||||
return fmt.Errorf("failed to apply host scanner YAML, reason: %v", err)
|
||||
}
|
||||
|
||||
hsh.populatePodNamesToNodeNames(ctx)
|
||||
hsh.populatePodNamesToNodeNames(ctx, log)
|
||||
if err := hsh.checkPodForEachNode(); err != nil {
|
||||
logger.L().Ctx(ctx).Warning("failed to validate host-sensor pods status", helpers.Error(err))
|
||||
logger.L().Ctx(ctx).Warning(failedToValidateHostSensorPodStatus, helpers.Error(err))
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -131,7 +135,7 @@ func (hsh *HostSensorHandler) applyYAML(ctx context.Context) error {
|
||||
}
|
||||
|
||||
// Get namespace name
|
||||
namespaceName := ""
|
||||
namespaceName := cautils.GetConfigMapNamespace()
|
||||
for i := range workloads {
|
||||
if workloads[i].GetKind() == "Namespace" {
|
||||
namespaceName = workloads[i].GetName()
|
||||
@@ -149,6 +153,7 @@ func (hsh *HostSensorHandler) applyYAML(ctx context.Context) error {
|
||||
}
|
||||
// set namespace in all objects
|
||||
if w.GetKind() != "Namespace" {
|
||||
logger.L().Debug("Setting namespace", helpers.String("kind", w.GetKind()), helpers.String("name", w.GetName()), helpers.String("namespace", namespaceName))
|
||||
w.SetNamespace(namespaceName)
|
||||
}
|
||||
// Get container port
|
||||
@@ -156,7 +161,7 @@ func (hsh *HostSensorHandler) applyYAML(ctx context.Context) error {
|
||||
containers, err := w.GetContainers()
|
||||
if err != nil {
|
||||
if erra := hsh.tearDownNamespace(namespaceName); erra != nil {
|
||||
logger.L().Ctx(ctx).Warning("failed to tear down namespace", helpers.Error(erra))
|
||||
logger.L().Ctx(ctx).Warning(failedToTeardownNamespace, helpers.Error(erra))
|
||||
}
|
||||
return fmt.Errorf("container not found in DaemonSet: %v", err)
|
||||
}
|
||||
@@ -180,7 +185,7 @@ func (hsh *HostSensorHandler) applyYAML(ctx context.Context) error {
|
||||
}
|
||||
if e != nil {
|
||||
if erra := hsh.tearDownNamespace(namespaceName); erra != nil {
|
||||
logger.L().Ctx(ctx).Warning("failed to tear down namespace", helpers.Error(erra))
|
||||
logger.L().Ctx(ctx).Warning(failedToTeardownNamespace, helpers.Error(erra))
|
||||
}
|
||||
return fmt.Errorf("failed to create/update YAML, reason: %v", e)
|
||||
}
|
||||
@@ -190,14 +195,14 @@ func (hsh *HostSensorHandler) applyYAML(ctx context.Context) error {
|
||||
b, err := json.Marshal(newWorkload.GetObject())
|
||||
if err != nil {
|
||||
if erra := hsh.tearDownNamespace(namespaceName); erra != nil {
|
||||
logger.L().Ctx(ctx).Warning("failed to tear down namespace", helpers.Error(erra))
|
||||
logger.L().Ctx(ctx).Warning(failedToTeardownNamespace, helpers.Error(erra))
|
||||
}
|
||||
return fmt.Errorf("failed to Marshal YAML of DaemonSet, reason: %v", err)
|
||||
}
|
||||
var ds appsv1.DaemonSet
|
||||
if err := json.Unmarshal(b, &ds); err != nil {
|
||||
if erra := hsh.tearDownNamespace(namespaceName); erra != nil {
|
||||
logger.L().Ctx(ctx).Warning("failed to tear down namespace", helpers.Error(erra))
|
||||
logger.L().Ctx(ctx).Warning(failedToTeardownNamespace, helpers.Error(erra))
|
||||
}
|
||||
return fmt.Errorf("failed to Unmarshal YAML of DaemonSet, reason: %v", err)
|
||||
}
|
||||
@@ -228,7 +233,7 @@ func (hsh *HostSensorHandler) checkPodForEachNode() error {
|
||||
hsh.podListLock.RLock()
|
||||
podsMap := hsh.hostSensorPodNames
|
||||
hsh.podListLock.RUnlock()
|
||||
return fmt.Errorf("host-sensor pods number (%d) differ than nodes number (%d) after deadline exceeded. Kubescape will take data only from the pods below: %v",
|
||||
return fmt.Errorf("host-scanner pods number (%d) differ than nodes number (%d) after deadline exceeded. Kubescape will take data only from the pods below: %v",
|
||||
podsNum, len(nodesList.Items), podsMap)
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
@@ -238,7 +243,7 @@ func (hsh *HostSensorHandler) checkPodForEachNode() error {
|
||||
}
|
||||
|
||||
// initiating routine to keep pod list updated
|
||||
func (hsh *HostSensorHandler) populatePodNamesToNodeNames(ctx context.Context) {
|
||||
func (hsh *HostSensorHandler) populatePodNamesToNodeNames(ctx context.Context, log *LogsMap) {
|
||||
go func() {
|
||||
var watchRes watch.Interface
|
||||
var err error
|
||||
@@ -247,10 +252,10 @@ func (hsh *HostSensorHandler) populatePodNamesToNodeNames(ctx context.Context) {
|
||||
LabelSelector: fmt.Sprintf("name=%s", hsh.daemonSet.Spec.Template.Labels["name"]),
|
||||
})
|
||||
if err != nil {
|
||||
logger.L().Ctx(ctx).Warning("failed to watch over DaemonSet pods - are we missing watch pods permissions?", helpers.Error(err))
|
||||
logger.L().Ctx(ctx).Warning(failedToWatchOverDaemonSetPods, helpers.Error(err))
|
||||
}
|
||||
if watchRes == nil {
|
||||
logger.L().Ctx(ctx).Error("failed to watch over DaemonSet pods, will not be able to get host-sensor data")
|
||||
logger.L().Ctx(ctx).Error("failed to watch over DaemonSet pods, will not be able to get host-scanner data")
|
||||
return
|
||||
}
|
||||
|
||||
@@ -259,12 +264,12 @@ func (hsh *HostSensorHandler) populatePodNamesToNodeNames(ctx context.Context) {
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
go hsh.updatePodInListAtomic(ctx, eve.Type, pod)
|
||||
go hsh.updatePodInListAtomic(ctx, eve.Type, pod, log)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func (hsh *HostSensorHandler) updatePodInListAtomic(ctx context.Context, eventType watch.EventType, podObj *corev1.Pod) {
|
||||
func (hsh *HostSensorHandler) updatePodInListAtomic(ctx context.Context, eventType watch.EventType, podObj *corev1.Pod, log *LogsMap) {
|
||||
hsh.podListLock.Lock()
|
||||
defer hsh.podListLock.Unlock()
|
||||
|
||||
@@ -285,10 +290,11 @@ func (hsh *HostSensorHandler) updatePodInListAtomic(ctx context.Context, eventTy
|
||||
len(podObj.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchFields[0].Values) > 0 {
|
||||
nodeName = podObj.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchFields[0].Values[0]
|
||||
}
|
||||
logger.L().Ctx(ctx).Warning("One host-sensor pod is unable to schedule on node. We will fail to collect the data from this node",
|
||||
helpers.String("message", podObj.Status.Conditions[0].Message),
|
||||
helpers.String("nodeName", nodeName),
|
||||
helpers.String("podName", podObj.ObjectMeta.Name))
|
||||
if !log.isDuplicated(oneHostSensorPodIsUnabledToSchedule) {
|
||||
logger.L().Ctx(ctx).Warning(oneHostSensorPodIsUnabledToSchedule,
|
||||
helpers.String("message", podObj.Status.Conditions[0].Message))
|
||||
log.update(oneHostSensorPodIsUnabledToSchedule)
|
||||
}
|
||||
if nodeName != "" {
|
||||
hsh.hostSensorUnscheduledPodNames[podObj.ObjectMeta.Name] = nodeName
|
||||
}
|
||||
@@ -301,14 +307,84 @@ func (hsh *HostSensorHandler) updatePodInListAtomic(ctx context.Context, eventTy
|
||||
}
|
||||
}
|
||||
|
||||
// tearDownNamespace manage the host-scanner deletion.
|
||||
func (hsh *HostSensorHandler) tearDownHostScanner(namespace string) error {
|
||||
client := hsh.k8sObj.KubernetesClient
|
||||
|
||||
// delete host-scanner DaemonSet
|
||||
err := client.AppsV1().
|
||||
DaemonSets(namespace).
|
||||
Delete(
|
||||
hsh.k8sObj.Context,
|
||||
hsh.daemonSet.Name,
|
||||
metav1.DeleteOptions{
|
||||
GracePeriodSeconds: &hsh.gracePeriod,
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to delete host-scanner DaemonSet: %v", err)
|
||||
}
|
||||
|
||||
// wait for DaemonSet to be deleted
|
||||
err = hsh.waitHostScannerDeleted(hsh.k8sObj.Context)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to delete host-scanner DaemonSet: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// tearDownNamespace manage the given namespace deletion.
|
||||
// At first, it checks if the namespace was already present before installing host-scanner.
|
||||
// In that case skips the deletion.
|
||||
// If was not, then patches the namespace in order to remove the finalizers,
|
||||
// and finally delete the it.
|
||||
func (hsh *HostSensorHandler) tearDownNamespace(namespace string) error {
|
||||
// if namespace was already present on kubernetes (before installing host-scanner),
|
||||
// then we shouldn't delete it.
|
||||
if hsh.namespaceWasPresent() {
|
||||
return nil
|
||||
}
|
||||
if err := hsh.k8sObj.KubernetesClient.CoreV1().Namespaces().Delete(hsh.k8sObj.Context, namespace, metav1.DeleteOptions{GracePeriodSeconds: &hsh.gracePeriod}); err != nil {
|
||||
return fmt.Errorf("failed to delete host-sensor namespace: %v", err)
|
||||
// to make it more readable we store the object client in a variable
|
||||
client := hsh.k8sObj.KubernetesClient
|
||||
|
||||
// prepare patch json to remove finalizers from namespace
|
||||
patchData := `
|
||||
[
|
||||
{
|
||||
"op": "replace",
|
||||
"path": "/metadata/finalizers",
|
||||
"value": []
|
||||
}
|
||||
]
|
||||
`
|
||||
// patch namespace object removing finalizers
|
||||
_, err := client.CoreV1().
|
||||
Namespaces().
|
||||
Patch(
|
||||
hsh.k8sObj.Context,
|
||||
namespace,
|
||||
types.JSONPatchType,
|
||||
[]byte(patchData),
|
||||
metav1.PatchOptions{},
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to remove finalizers from Namespace: %v", err)
|
||||
}
|
||||
|
||||
// patch namespace object removing finalizers
|
||||
// delete namespace object
|
||||
err = client.CoreV1().
|
||||
Namespaces().
|
||||
Delete(
|
||||
hsh.k8sObj.Context,
|
||||
namespace,
|
||||
metav1.DeleteOptions{
|
||||
GracePeriodSeconds: &hsh.gracePeriod,
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to delete %s Namespace: %v", namespace, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -317,14 +393,13 @@ func (hsh *HostSensorHandler) tearDownNamespace(namespace string) error {
|
||||
func (hsh *HostSensorHandler) TearDown() error {
|
||||
namespace := hsh.GetNamespace()
|
||||
// delete DaemonSet
|
||||
if err := hsh.k8sObj.KubernetesClient.AppsV1().DaemonSets(hsh.GetNamespace()).Delete(hsh.k8sObj.Context, hsh.daemonSet.Name, metav1.DeleteOptions{GracePeriodSeconds: &hsh.gracePeriod}); err != nil {
|
||||
return fmt.Errorf("failed to delete host-sensor daemonset: %v", err)
|
||||
if err := hsh.tearDownHostScanner(namespace); err != nil {
|
||||
return fmt.Errorf("failed to delete host-scanner DaemonSet: %v", err)
|
||||
}
|
||||
// delete Namespace
|
||||
if err := hsh.tearDownNamespace(namespace); err != nil {
|
||||
return fmt.Errorf("failed to delete host-sensor daemonset: %v", err)
|
||||
return fmt.Errorf("failed to delete host-scanner Namespace: %v", err)
|
||||
}
|
||||
// TODO: wait for termination? may take up to 120 seconds!!!
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -344,3 +419,32 @@ func loadHostSensorFromFile(hostSensorYAMLFile string) (string, error) {
|
||||
// TODO - Add file validation
|
||||
return string(dat), err
|
||||
}
|
||||
|
||||
// waitHostScannerDeleted watch for host-scanner deletion.
|
||||
// In case it fails it returns an error.
|
||||
func (hsh *HostSensorHandler) waitHostScannerDeleted(ctx context.Context) error {
|
||||
labelSelector := fmt.Sprintf("name=%s", hsh.daemonSet.Name)
|
||||
opts := metav1.ListOptions{
|
||||
TypeMeta: metav1.TypeMeta{},
|
||||
LabelSelector: labelSelector,
|
||||
FieldSelector: "",
|
||||
}
|
||||
watcher, err := hsh.k8sObj.KubernetesClient.CoreV1().
|
||||
Pods(hsh.daemonSet.Namespace).
|
||||
Watch(ctx, opts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer watcher.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case event := <-watcher.ResultChan():
|
||||
if event.Type == watch.Deleted {
|
||||
return nil
|
||||
}
|
||||
case <-ctx.Done():
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -34,14 +34,14 @@ func TestHostSensorHandler(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("should return namespace", func(t *testing.T) {
|
||||
require.Equal(t, "kubescape-host-scanner", h.GetNamespace())
|
||||
require.Equal(t, "kubescape", h.GetNamespace())
|
||||
})
|
||||
|
||||
t.Run("should collect resources from pods - happy path", func(t *testing.T) {
|
||||
envelope, status, err := h.CollectResources(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Len(t, envelope, 11*2) // has cloud provider, no control plane requested
|
||||
require.Len(t, envelope, 9*2) // has cloud provider, no control plane requested
|
||||
require.Len(t, status, 0)
|
||||
|
||||
foundControl, foundProvider := false, false
|
||||
@@ -91,7 +91,7 @@ func TestHostSensorHandler(t *testing.T) {
|
||||
envelope, status, err := h.CollectResources(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Len(t, envelope, 12*2) // has empty cloud provider, has control plane info
|
||||
require.Len(t, envelope, 10*2) // has empty cloud provider, has control plane info
|
||||
require.Len(t, status, 0)
|
||||
|
||||
foundControl, foundProvider := false, false
|
||||
@@ -141,37 +141,6 @@ func TestHostSensorHandler(t *testing.T) {
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("should build host sensor with error in response from /kubeletConfigurations", func(t *testing.T) {
|
||||
k8s := NewKubernetesApiMock(WithNode(mockNode1()),
|
||||
WithPod(mockPod1()),
|
||||
WithPod(mockPod2()),
|
||||
WithResponses(mockResponsesNoCloudProvider()),
|
||||
WithErrorResponse(RestURL{"http", "pod1", "7888", "/kubeletConfigurations"}), // this endpoint will return an error from this pod
|
||||
)
|
||||
|
||||
h, err := NewHostSensorHandler(k8s, "")
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, h)
|
||||
|
||||
t.Run("should initialize host sensor", func(t *testing.T) {
|
||||
require.NoError(t, h.Init(ctx))
|
||||
|
||||
w, err := k8s.KubernetesClient.CoreV1().Pods(h.daemonSet.Namespace).Watch(ctx, metav1.ListOptions{})
|
||||
require.NoError(t, err)
|
||||
w.Stop()
|
||||
|
||||
require.Len(t, h.hostSensorPodNames, 2)
|
||||
})
|
||||
|
||||
t.Run("should collect resources from pods, with some errors", func(t *testing.T) {
|
||||
envelope, status, err := h.CollectResources(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Len(t, envelope, 12*2-1) // one resource is missing
|
||||
require.Len(t, status, 0) // error is not reported in status: this is due to the worker pool not bubbling up errors
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("should FAIL to build host sensor because there are no nodes", func(t *testing.T) {
|
||||
h, err := NewHostSensorHandler(NewKubernetesApiMock(), "")
|
||||
require.Error(t, err)
|
||||
|
||||
@@ -10,12 +10,9 @@ import (
|
||||
"sync"
|
||||
|
||||
logger "github.com/kubescape/go-logger"
|
||||
"github.com/kubescape/go-logger/helpers"
|
||||
"github.com/kubescape/k8s-interface/k8sinterface"
|
||||
"github.com/kubescape/opa-utils/objectsenvelopes/hostsensor"
|
||||
"github.com/kubescape/opa-utils/reporthandling/apis"
|
||||
|
||||
"sigs.k8s.io/yaml"
|
||||
)
|
||||
|
||||
// getPodList clones the internal list of pods being watched as a map of pod names.
|
||||
@@ -87,12 +84,17 @@ func (hsh *HostSensorHandler) sendAllPodsHTTPGETRequest(ctx context.Context, pat
|
||||
podList := hsh.getPodList()
|
||||
res := make([]hostsensor.HostSensorDataEnvelope, 0, len(podList))
|
||||
var wg sync.WaitGroup
|
||||
|
||||
// initialization of the channels
|
||||
hsh.workerPool.init(len(podList))
|
||||
|
||||
// log is used to avoid log duplication
|
||||
// coming from the different host-scanner instances
|
||||
log := NewLogCoupling()
|
||||
|
||||
hsh.workerPool.hostSensorApplyJobs(podList, path, requestKind)
|
||||
hsh.workerPool.hostSensorGetResults(&res)
|
||||
hsh.workerPool.createWorkerPool(ctx, hsh, &wg)
|
||||
hsh.workerPool.createWorkerPool(ctx, hsh, &wg, log)
|
||||
hsh.workerPool.waitForDone(&wg)
|
||||
|
||||
return res, nil
|
||||
@@ -165,27 +167,6 @@ func (hsh *HostSensorHandler) getCloudProviderInfo(ctx context.Context) ([]hosts
|
||||
return hsh.sendAllPodsHTTPGETRequest(ctx, "/cloudProviderInfo", CloudProviderInfo)
|
||||
}
|
||||
|
||||
// getKubeletCommandLine returns the list of kubelet command lines.
|
||||
func (hsh *HostSensorHandler) getKubeletCommandLine(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
|
||||
// loop over pods and port-forward it to each of them
|
||||
resps, err := hsh.sendAllPodsHTTPGETRequest(ctx, "/kubeletCommandLine", KubeletCommandLine)
|
||||
if err != nil {
|
||||
return resps, err
|
||||
}
|
||||
|
||||
for resp := range resps {
|
||||
var data = make(map[string]interface{})
|
||||
data["fullCommand"] = string(resps[resp].Data)
|
||||
resBytesMarshal, err := json.Marshal(data)
|
||||
// TODO catch error
|
||||
if err == nil {
|
||||
resps[resp].Data = stdjson.RawMessage(resBytesMarshal)
|
||||
}
|
||||
}
|
||||
|
||||
return resps, nil
|
||||
}
|
||||
|
||||
// getCNIInfo returns the list of CNI metadata
|
||||
func (hsh *HostSensorHandler) getCNIInfo(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
|
||||
// loop over pods and port-forward it to each of them
|
||||
@@ -204,22 +185,6 @@ func (hsh *HostSensorHandler) getOsReleaseFile(ctx context.Context) ([]hostsenso
|
||||
return hsh.sendAllPodsHTTPGETRequest(ctx, "/osRelease", "OsReleaseFile")
|
||||
}
|
||||
|
||||
// getKubeletConfigurations returns the list of kubelet configurations.
|
||||
func (hsh *HostSensorHandler) getKubeletConfigurations(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
|
||||
// loop over pods and port-forward it to each of them
|
||||
res, err := hsh.sendAllPodsHTTPGETRequest(ctx, "/kubeletConfigurations", "KubeletConfiguration") // empty kind, will be overridden
|
||||
for resIdx := range res {
|
||||
jsonBytes, ery := yaml.YAMLToJSON(res[resIdx].Data)
|
||||
if ery != nil {
|
||||
logger.L().Ctx(ctx).Warning("failed to convert kubelet configurations from yaml to json", helpers.Error(ery))
|
||||
continue
|
||||
}
|
||||
res[resIdx].SetData(jsonBytes)
|
||||
}
|
||||
|
||||
return res, err
|
||||
}
|
||||
|
||||
// hasCloudProviderInfo iterates over the []hostsensor.HostSensorDataEnvelope list to find info about the cloud provider.
|
||||
//
|
||||
// If information are found, then return true. Return false otherwise.
|
||||
@@ -259,14 +224,6 @@ func (hsh *HostSensorHandler) CollectResources(ctx context.Context) ([]hostsenso
|
||||
Query func(context.Context) ([]hostsensor.HostSensorDataEnvelope, error)
|
||||
}{
|
||||
// queries to the deployed host-scanner
|
||||
{
|
||||
Resource: KubeletConfiguration,
|
||||
Query: hsh.getKubeletConfigurations,
|
||||
},
|
||||
{
|
||||
Resource: KubeletCommandLine,
|
||||
Query: hsh.getKubeletCommandLine,
|
||||
},
|
||||
{
|
||||
Resource: OsReleaseFile,
|
||||
Query: hsh.getOsReleaseFile,
|
||||
|
||||
@@ -43,22 +43,23 @@ func (wp *workerPool) init(noOfPods ...int) {
|
||||
}
|
||||
|
||||
// The worker takes a job out of the chan, executes the request, and pushes the result to the results chan
|
||||
func (wp *workerPool) hostSensorWorker(ctx context.Context, hsh *HostSensorHandler, wg *sync.WaitGroup) {
|
||||
func (wp *workerPool) hostSensorWorker(ctx context.Context, hsh *HostSensorHandler, wg *sync.WaitGroup, log *LogsMap) {
|
||||
defer wg.Done()
|
||||
for job := range wp.jobs {
|
||||
hostSensorDataEnvelope, err := hsh.getResourcesFromPod(job.podName, job.nodeName, job.requestKind, job.path)
|
||||
if err != nil {
|
||||
logger.L().Ctx(ctx).Warning("failed to get data", helpers.String("path", job.path), helpers.String("podName", job.podName), helpers.Error(err))
|
||||
if err != nil && !log.isDuplicated(failedToGetData) {
|
||||
logger.L().Ctx(ctx).Warning(failedToGetData, helpers.String("path", job.path), helpers.Error(err))
|
||||
log.update(failedToGetData)
|
||||
continue
|
||||
}
|
||||
wp.results <- hostSensorDataEnvelope
|
||||
}
|
||||
}
|
||||
|
||||
func (wp *workerPool) createWorkerPool(ctx context.Context, hsh *HostSensorHandler, wg *sync.WaitGroup) {
|
||||
func (wp *workerPool) createWorkerPool(ctx context.Context, hsh *HostSensorHandler, wg *sync.WaitGroup, log *LogsMap) {
|
||||
for i := 0; i < noOfWorkers; i++ {
|
||||
wg.Add(1)
|
||||
go wp.hostSensorWorker(ctx, hsh, wg)
|
||||
go wp.hostSensorWorker(ctx, hsh, wg, log)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
51
core/pkg/hostsensorutils/log_coupling.go
Normal file
51
core/pkg/hostsensorutils/log_coupling.go
Normal file
@@ -0,0 +1,51 @@
|
||||
package hostsensorutils
|
||||
|
||||
import "sync"
|
||||
|
||||
type LogsMap struct {
|
||||
// use sync.Mutex to avoid read/write
|
||||
// access issues in multi-thread environments.
|
||||
sync.Mutex
|
||||
usedLogs map[string]int
|
||||
}
|
||||
|
||||
// NewLogCoupling return an empty LogsMap struct ready to be used.
|
||||
func NewLogCoupling() *LogsMap {
|
||||
return &LogsMap{
|
||||
usedLogs: make(map[string]int),
|
||||
}
|
||||
}
|
||||
|
||||
// update add the logContent to the internal map
|
||||
// and set the occurrencty to 1 (if it has never been used before),
|
||||
// increment its values otherwise.
|
||||
func (lm *LogsMap) update(logContent string) {
|
||||
lm.Lock()
|
||||
_, ok := lm.usedLogs[logContent]
|
||||
if !ok {
|
||||
lm.usedLogs[logContent] = 1
|
||||
} else {
|
||||
lm.usedLogs[logContent]++
|
||||
}
|
||||
lm.Unlock()
|
||||
}
|
||||
|
||||
// isDuplicated check if logContent is already present inside the internal map.
|
||||
// Return true in case logContent already exists, false otherwise.
|
||||
func (lm *LogsMap) isDuplicated(logContent string) bool {
|
||||
lm.Lock()
|
||||
_, ok := lm.usedLogs[logContent]
|
||||
lm.Unlock()
|
||||
return ok
|
||||
}
|
||||
|
||||
// GgtOccurrence retrieve the number of occurrences logContent has been used.
|
||||
func (lm *LogsMap) getOccurrence(logContent string) int {
|
||||
lm.Lock()
|
||||
occurrence, ok := lm.usedLogs[logContent]
|
||||
lm.Unlock()
|
||||
if !ok {
|
||||
return 0
|
||||
}
|
||||
return occurrence
|
||||
}
|
||||
100
core/pkg/hostsensorutils/log_coupling_test.go
Normal file
100
core/pkg/hostsensorutils/log_coupling_test.go
Normal file
@@ -0,0 +1,100 @@
|
||||
package hostsensorutils
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestLogsMap_Update(t *testing.T) {
|
||||
t.Parallel()
|
||||
tests := []struct {
|
||||
name string
|
||||
logs []string
|
||||
expectedLog string
|
||||
expected int
|
||||
}{
|
||||
{
|
||||
name: "test_1",
|
||||
logs: []string{
|
||||
"log_1",
|
||||
"log_1",
|
||||
"log_1",
|
||||
},
|
||||
expectedLog: "log_1",
|
||||
expected: 3,
|
||||
},
|
||||
{
|
||||
name: "test_2",
|
||||
logs: []string{},
|
||||
expectedLog: "log_2",
|
||||
expected: 0,
|
||||
},
|
||||
{
|
||||
name: "test_3",
|
||||
logs: []string{
|
||||
"log_3",
|
||||
},
|
||||
expectedLog: "log_3",
|
||||
expected: 1,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
lm := NewLogCoupling()
|
||||
for _, log := range tt.logs {
|
||||
lm.update(log)
|
||||
}
|
||||
if !assert.Equal(t, lm.getOccurrence(tt.expectedLog), tt.expected) {
|
||||
t.Log("log occurrences are different")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestLogsMap_IsDuplicated(t *testing.T) {
|
||||
t.Parallel()
|
||||
tests := []struct {
|
||||
name string
|
||||
logs []string
|
||||
expectedLog string
|
||||
expected bool
|
||||
}{
|
||||
{
|
||||
name: "test_1",
|
||||
logs: []string{
|
||||
"log_1",
|
||||
"log_1",
|
||||
"log_1",
|
||||
},
|
||||
expectedLog: "log_1",
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "test_2",
|
||||
logs: []string{
|
||||
"log_1",
|
||||
"log_1",
|
||||
},
|
||||
expectedLog: "log_2",
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "test_3",
|
||||
logs: []string{},
|
||||
expectedLog: "log_3",
|
||||
expected: false,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
lm := NewLogCoupling()
|
||||
for _, log := range tt.logs {
|
||||
lm.update(log)
|
||||
}
|
||||
if !assert.Equal(t, lm.isDuplicated(tt.expectedLog), tt.expected) {
|
||||
t.Log("duplication value differ from expected")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
10
core/pkg/hostsensorutils/logging_messages.go
Normal file
10
core/pkg/hostsensorutils/logging_messages.go
Normal file
@@ -0,0 +1,10 @@
|
||||
package hostsensorutils
|
||||
|
||||
// messages used for warnings
|
||||
var (
|
||||
failedToGetData = "failed to get data"
|
||||
failedToTeardownNamespace = "failed to teardown Namespace"
|
||||
oneHostSensorPodIsUnabledToSchedule = "One host-sensor pod is unable to schedule on node. We will fail to collect the data from this node"
|
||||
failedToWatchOverDaemonSetPods = "failed to watch over DaemonSet pods"
|
||||
failedToValidateHostSensorPodStatus = "failed to validate host-scanner pods status"
|
||||
)
|
||||
@@ -4,7 +4,7 @@ import (
|
||||
"context"
|
||||
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
"github.com/sigstore/cosign/pkg/cosign"
|
||||
"github.com/sigstore/cosign/v2/pkg/cosign"
|
||||
)
|
||||
|
||||
func has_signature(img string) bool {
|
||||
|
||||
@@ -6,12 +6,12 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
"github.com/sigstore/cosign/cmd/cosign/cli/options"
|
||||
"github.com/sigstore/cosign/cmd/cosign/cli/sign"
|
||||
"github.com/sigstore/cosign/pkg/cosign"
|
||||
"github.com/sigstore/cosign/pkg/cosign/pkcs11key"
|
||||
ociremote "github.com/sigstore/cosign/pkg/oci/remote"
|
||||
sigs "github.com/sigstore/cosign/pkg/signature"
|
||||
"github.com/sigstore/cosign/v2/cmd/cosign/cli/options"
|
||||
"github.com/sigstore/cosign/v2/cmd/cosign/cli/sign"
|
||||
"github.com/sigstore/cosign/v2/pkg/cosign"
|
||||
"github.com/sigstore/cosign/v2/pkg/cosign/pkcs11key"
|
||||
ociremote "github.com/sigstore/cosign/v2/pkg/oci/remote"
|
||||
sigs "github.com/sigstore/cosign/v2/pkg/signature"
|
||||
)
|
||||
|
||||
// VerifyCommand verifies a signature on a supplied container image
|
||||
|
||||
@@ -1,49 +1,42 @@
|
||||
package opaprocessor
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func Test_verify(t *testing.T) {
|
||||
type args struct {
|
||||
img string
|
||||
key string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want bool
|
||||
wantErr assert.ErrorAssertionFunc
|
||||
}{
|
||||
{
|
||||
"valid signature",
|
||||
args{
|
||||
img: "hisu/cosign-tests:signed",
|
||||
key: "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEGnMCUU0jGe6r4mPsPuyTXf61PE4e\nNwB/31SvUMmnoyd/1UxSqd+MRPXPU6pcub4k6E9G9SprVCuf6Sydcbyiqw==\n-----END PUBLIC KEY-----",
|
||||
},
|
||||
true,
|
||||
assert.NoError,
|
||||
},
|
||||
{
|
||||
"no signature",
|
||||
args{
|
||||
img: "hisu/cosign-tests:unsigned",
|
||||
key: "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEGnMCUU0jGe6r4mPsPuyTXf61PE4e\nNwB/31SvUMmnoyd/1UxSqd+MRPXPU6pcub4k6E9G9SprVCuf6Sydcbyiqw==\n-----END PUBLIC KEY-----",
|
||||
},
|
||||
false,
|
||||
assert.Error,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got, err := verify(tt.args.img, tt.args.key)
|
||||
if !tt.wantErr(t, err, fmt.Sprintf("verify(%v, %v)", tt.args.img, tt.args.key)) {
|
||||
return
|
||||
}
|
||||
assert.Equalf(t, tt.want, got, "verify(%v, %v)", tt.args.img, tt.args.key)
|
||||
})
|
||||
}
|
||||
}
|
||||
// func Test_verify(t *testing.T) {
|
||||
// type args struct {
|
||||
// img string
|
||||
// key string
|
||||
// }
|
||||
// tests := []struct {
|
||||
// name string
|
||||
// args args
|
||||
// want bool
|
||||
// wantErr assert.ErrorAssertionFunc
|
||||
// }{
|
||||
// {
|
||||
// "valid signature",
|
||||
// args{
|
||||
// img: "hisu/cosign-tests:signed",
|
||||
// key: "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEGnMCUU0jGe6r4mPsPuyTXf61PE4e\nNwB/31SvUMmnoyd/1UxSqd+MRPXPU6pcub4k6E9G9SprVCuf6Sydcbyiqw==\n-----END PUBLIC KEY-----",
|
||||
// },
|
||||
// true,
|
||||
// assert.NoError,
|
||||
// },
|
||||
// {
|
||||
// "no signature",
|
||||
// args{
|
||||
// img: "hisu/cosign-tests:unsigned",
|
||||
// key: "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEGnMCUU0jGe6r4mPsPuyTXf61PE4e\nNwB/31SvUMmnoyd/1UxSqd+MRPXPU6pcub4k6E9G9SprVCuf6Sydcbyiqw==\n-----END PUBLIC KEY-----",
|
||||
// },
|
||||
// false,
|
||||
// assert.Error,
|
||||
// },
|
||||
// }
|
||||
// for _, tt := range tests {
|
||||
// t.Run(tt.name, func(t *testing.T) {
|
||||
// got, err := verify(tt.args.img, tt.args.key)
|
||||
// if !tt.wantErr(t, err, fmt.Sprintf("verify(%v, %v)", tt.args.img, tt.args.key)) {
|
||||
// return
|
||||
// }
|
||||
// assert.Equalf(t, tt.want, got, "verify(%v, %v)", tt.args.img, tt.args.key)
|
||||
// })
|
||||
// }
|
||||
// }
|
||||
|
||||
13
core/pkg/opaprocessor/normalize_image_name.go
Normal file
13
core/pkg/opaprocessor/normalize_image_name.go
Normal file
@@ -0,0 +1,13 @@
|
||||
package opaprocessor
|
||||
|
||||
import (
|
||||
"github.com/docker/distribution/reference"
|
||||
)
|
||||
|
||||
func normalize_image_name(img string) (string, error) {
|
||||
name, err := reference.ParseNormalizedNamed(img)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return name.String(), nil
|
||||
}
|
||||
28
core/pkg/opaprocessor/normalize_image_name_test.go
Normal file
28
core/pkg/opaprocessor/normalize_image_name_test.go
Normal file
@@ -0,0 +1,28 @@
|
||||
package opaprocessor
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func Test_normalize_name(t *testing.T) {
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
img string
|
||||
want string
|
||||
}{
|
||||
{
|
||||
name: "Normalize image name",
|
||||
img: "nginx",
|
||||
want: "docker.io/library/nginx",
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
name, _ := normalize_image_name(tt.img)
|
||||
assert.Equal(t, tt.want, name, tt.name)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -3,9 +3,7 @@ package opaprocessor
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"runtime"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
logger "github.com/kubescape/go-logger"
|
||||
@@ -23,7 +21,6 @@ import (
|
||||
"github.com/open-policy-agent/opa/rego"
|
||||
"github.com/open-policy-agent/opa/storage"
|
||||
"go.opentelemetry.io/otel"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
const ScoreConfigPath = "/resources/config"
|
||||
@@ -58,8 +55,9 @@ func NewOPAProcessor(sessionObj *cautils.OPASessionObj, regoDependenciesData *re
|
||||
}
|
||||
}
|
||||
|
||||
func (opap *OPAProcessor) ProcessRulesListener(ctx context.Context, progressListener IJobProgressNotificationClient) error {
|
||||
opap.OPASessionObj.AllPolicies = ConvertFrameworksToPolicies(opap.Policies, cautils.BuildNumber)
|
||||
func (opap *OPAProcessor) ProcessRulesListener(ctx context.Context, progressListener IJobProgressNotificationClient, ScanInfo *cautils.ScanInfo) error {
|
||||
scanningScope := cautils.GetScanningScope(ScanInfo)
|
||||
opap.OPASessionObj.AllPolicies = ConvertFrameworksToPolicies(opap.Policies, cautils.BuildNumber, opap.ExcludedRules, scanningScope)
|
||||
|
||||
ConvertFrameworksToSummaryDetails(&opap.Report.SummaryDetails, opap.Policies, opap.OPASessionObj.AllPolicies)
|
||||
|
||||
@@ -86,130 +84,56 @@ func (opap *OPAProcessor) Process(ctx context.Context, policies *cautils.Policie
|
||||
opap.loggerStartScanning()
|
||||
defer opap.loggerDoneScanning()
|
||||
|
||||
cautils.StartSpinner()
|
||||
defer cautils.StopSpinner()
|
||||
|
||||
if progressListener != nil {
|
||||
progressListener.Start(len(policies.Controls))
|
||||
defer progressListener.Stop()
|
||||
}
|
||||
|
||||
// results to collect from controls being processed in parallel
|
||||
type results struct {
|
||||
resourceAssociatedControl map[string]resourcesresults.ResourceAssociatedControl
|
||||
allResources map[string]workloadinterface.IMetadata
|
||||
}
|
||||
|
||||
resultsChan := make(chan results)
|
||||
controlsGroup, groupCtx := errgroup.WithContext(ctx)
|
||||
controlsGroup.SetLimit(2 * runtime.NumCPU())
|
||||
|
||||
allResources := make(map[string]workloadinterface.IMetadata, max(len(opap.AllResources), heuristicAllocResources))
|
||||
for k, v := range opap.AllResources {
|
||||
allResources[k] = v
|
||||
}
|
||||
|
||||
var resultsCollector sync.WaitGroup
|
||||
resultsCollector.Add(1)
|
||||
go func() {
|
||||
// collects the results from processing all rules for all controls.
|
||||
//
|
||||
// NOTE: since policies.Controls is a map, iterating over it doesn't guarantee any
|
||||
// specific ordering. Therefore, if a conflict is possible on resources, e.g. 2 rules,
|
||||
// referencing the same resource, the eventual result of the merge is not guaranteed to be
|
||||
// stable. This behavior is consistent with the previous (unparallelized) processing.
|
||||
defer resultsCollector.Done()
|
||||
|
||||
for result := range resultsChan {
|
||||
// merge both maps in parallel
|
||||
var merger sync.WaitGroup
|
||||
merger.Add(1)
|
||||
go func() {
|
||||
// merge all resources
|
||||
defer merger.Done()
|
||||
for k, v := range result.allResources {
|
||||
allResources[k] = v
|
||||
}
|
||||
}()
|
||||
|
||||
merger.Add(1)
|
||||
go func() {
|
||||
defer merger.Done()
|
||||
// update resources with latest results
|
||||
for resourceID, controlResult := range result.resourceAssociatedControl {
|
||||
result, found := opap.ResourcesResult[resourceID]
|
||||
if !found {
|
||||
result = resourcesresults.Result{ResourceID: resourceID}
|
||||
}
|
||||
result.AssociatedControls = append(result.AssociatedControls, controlResult)
|
||||
opap.ResourcesResult[resourceID] = result
|
||||
}
|
||||
}()
|
||||
|
||||
merger.Wait()
|
||||
}
|
||||
}()
|
||||
|
||||
// processes rules for all controls in parallel
|
||||
for _, controlToPin := range policies.Controls {
|
||||
for _, toPin := range policies.Controls {
|
||||
if progressListener != nil {
|
||||
progressListener.ProgressJob(1, fmt.Sprintf("Control: %s", controlToPin.ControlID))
|
||||
progressListener.ProgressJob(1, fmt.Sprintf("Control: %s", toPin.ControlID))
|
||||
}
|
||||
|
||||
control := controlToPin
|
||||
control := toPin
|
||||
|
||||
controlsGroup.Go(func() error {
|
||||
resourceAssociatedControl, allResourcesFromControl, err := opap.processControl(groupCtx, &control)
|
||||
if err != nil {
|
||||
logger.L().Ctx(groupCtx).Warning(err.Error())
|
||||
resourcesAssociatedControl, err := opap.processControl(ctx, &control)
|
||||
if err != nil {
|
||||
logger.L().Ctx(ctx).Warning(err.Error())
|
||||
}
|
||||
|
||||
if len(resourcesAssociatedControl) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
// update resources with latest results
|
||||
for resourceID, controlResult := range resourcesAssociatedControl {
|
||||
if _, ok := opap.ResourcesResult[resourceID]; !ok {
|
||||
opap.ResourcesResult[resourceID] = resourcesresults.Result{ResourceID: resourceID}
|
||||
}
|
||||
|
||||
select {
|
||||
case resultsChan <- results{
|
||||
resourceAssociatedControl: resourceAssociatedControl,
|
||||
allResources: allResourcesFromControl,
|
||||
}:
|
||||
case <-groupCtx.Done(): // interrupted (NOTE: at this moment, this never happens since errors are muted)
|
||||
return groupCtx.Err()
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
t := opap.ResourcesResult[resourceID]
|
||||
t.AssociatedControls = append(t.AssociatedControls, controlResult)
|
||||
opap.ResourcesResult[resourceID] = t
|
||||
}
|
||||
}
|
||||
|
||||
// wait for all results from all rules to be collected
|
||||
err := controlsGroup.Wait()
|
||||
close(resultsChan)
|
||||
resultsCollector.Wait()
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// merge the final result in resources
|
||||
for k, v := range allResources {
|
||||
opap.AllResources[k] = v
|
||||
}
|
||||
opap.Report.ReportGenerationTime = time.Now().UTC()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (opap *OPAProcessor) loggerStartScanning() {
|
||||
targetScan := opap.OPASessionObj.Metadata.ScanMetadata.ScanningTarget
|
||||
if reporthandlingv2.Cluster == targetScan {
|
||||
logger.L().Info("Scanning", helpers.String(targetScan.String(), cautils.ClusterName))
|
||||
logger.L().Start("Scanning", helpers.String(targetScan.String(), cautils.ClusterName))
|
||||
} else {
|
||||
logger.L().Info("Scanning " + targetScan.String())
|
||||
logger.L().Start("Scanning " + targetScan.String())
|
||||
}
|
||||
}
|
||||
|
||||
func (opap *OPAProcessor) loggerDoneScanning() {
|
||||
targetScan := opap.OPASessionObj.Metadata.ScanMetadata.ScanningTarget
|
||||
if reporthandlingv2.Cluster == targetScan {
|
||||
logger.L().Success("Done scanning", helpers.String(targetScan.String(), cautils.ClusterName))
|
||||
logger.L().StopSuccess("Done scanning", helpers.String(targetScan.String(), cautils.ClusterName))
|
||||
} else {
|
||||
logger.L().Success("Done scanning " + targetScan.String())
|
||||
logger.L().StopSuccess("Done scanning " + targetScan.String())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -217,22 +141,16 @@ func (opap *OPAProcessor) loggerDoneScanning() {
|
||||
//
|
||||
// NOTE: the call to processControl no longer mutates the state of the current OPAProcessor instance,
|
||||
// but returns a map instead, to be merged by the caller.
|
||||
func (opap *OPAProcessor) processControl(ctx context.Context, control *reporthandling.Control) (map[string]resourcesresults.ResourceAssociatedControl, map[string]workloadinterface.IMetadata, error) {
|
||||
resourcesAssociatedControl := make(map[string]resourcesresults.ResourceAssociatedControl, heuristicAllocControls)
|
||||
allResources := make(map[string]workloadinterface.IMetadata, heuristicAllocResources)
|
||||
func (opap *OPAProcessor) processControl(ctx context.Context, control *reporthandling.Control) (map[string]resourcesresults.ResourceAssociatedControl, error) {
|
||||
resourcesAssociatedControl := make(map[string]resourcesresults.ResourceAssociatedControl)
|
||||
|
||||
for i := range control.Rules {
|
||||
resourceAssociatedRule, allResourcesFromRule, err := opap.processRule(ctx, &control.Rules[i], control.FixedInput)
|
||||
resourceAssociatedRule, err := opap.processRule(ctx, &control.Rules[i], control.FixedInput)
|
||||
if err != nil {
|
||||
logger.L().Ctx(ctx).Warning(err.Error())
|
||||
continue
|
||||
}
|
||||
|
||||
// merge all resources for all processed rules in this control
|
||||
for k, v := range allResourcesFromRule {
|
||||
allResources[k] = v
|
||||
}
|
||||
|
||||
// append failed rules to controls
|
||||
for resourceID, ruleResponse := range resourceAssociatedRule {
|
||||
var controlResult resourcesresults.ResourceAssociatedControl
|
||||
@@ -254,85 +172,101 @@ func (opap *OPAProcessor) processControl(ctx context.Context, control *reporthan
|
||||
}
|
||||
}
|
||||
|
||||
return resourcesAssociatedControl, allResources, nil
|
||||
return resourcesAssociatedControl, nil
|
||||
}
|
||||
|
||||
// processRule processes a single policy rule, with some extra fixed control inputs.
|
||||
//
|
||||
// NOTE: processRule no longer mutates the state of the current OPAProcessor instance,
|
||||
// and returns a map instead, to be merged by the caller.
|
||||
func (opap *OPAProcessor) processRule(ctx context.Context, rule *reporthandling.PolicyRule, fixedControlInputs map[string][]string) (map[string]*resourcesresults.ResourceAssociatedRule, map[string]workloadinterface.IMetadata, error) {
|
||||
func (opap *OPAProcessor) processRule(ctx context.Context, rule *reporthandling.PolicyRule, fixedControlInputs map[string][]string) (map[string]*resourcesresults.ResourceAssociatedRule, error) {
|
||||
resources := make(map[string]*resourcesresults.ResourceAssociatedRule)
|
||||
|
||||
ruleRegoDependenciesData := opap.makeRegoDeps(rule.ConfigInputs, fixedControlInputs)
|
||||
|
||||
inputResources, err := reporthandling.RegoResourcesAggregator(
|
||||
rule,
|
||||
getAllSupportedObjects(opap.K8SResources, opap.ArmoResource, opap.AllResources, rule), // NOTE: this uses the initial snapshot of AllResources
|
||||
)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("error getting aggregated k8sObjects: %w", err)
|
||||
}
|
||||
|
||||
if len(inputResources) == 0 {
|
||||
return nil, nil, nil // no resources found for testing
|
||||
}
|
||||
|
||||
inputRawResources := workloadinterface.ListMetaToMap(inputResources)
|
||||
|
||||
// the failed resources are a subgroup of the enumeratedData, so we store the enumeratedData like it was the input data
|
||||
enumeratedData, err := opap.enumerateData(ctx, rule, inputRawResources)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
inputResources = objectsenvelopes.ListMapToMeta(enumeratedData)
|
||||
resources := make(map[string]*resourcesresults.ResourceAssociatedRule, len(inputResources))
|
||||
allResources := make(map[string]workloadinterface.IMetadata, len(inputResources))
|
||||
|
||||
for i, inputResource := range inputResources {
|
||||
resources[inputResource.GetID()] = &resourcesresults.ResourceAssociatedRule{
|
||||
Name: rule.Name,
|
||||
ControlConfigurations: ruleRegoDependenciesData.PostureControlInputs,
|
||||
Status: apis.StatusPassed,
|
||||
resourcesPerNS := getAllSupportedObjects(opap.K8SResources, opap.ExternalResources, opap.AllResources, rule)
|
||||
for i := range resourcesPerNS {
|
||||
resourceToScan := resourcesPerNS[i]
|
||||
if _, ok := resourcesPerNS[clusterScope]; ok && i != clusterScope {
|
||||
resourceToScan = append(resourceToScan, resourcesPerNS[clusterScope]...)
|
||||
}
|
||||
inputResources, err := reporthandling.RegoResourcesAggregator(
|
||||
rule,
|
||||
resourceToScan, // NOTE: this uses the initial snapshot of AllResources
|
||||
)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
allResources[inputResource.GetID()] = inputResources[i]
|
||||
}
|
||||
|
||||
ruleResponses, err := opap.runOPAOnSingleRule(ctx, rule, inputRawResources, ruleData, ruleRegoDependenciesData)
|
||||
if err != nil {
|
||||
return resources, allResources, err
|
||||
}
|
||||
if len(inputResources) == 0 {
|
||||
continue // no resources found for testing
|
||||
}
|
||||
|
||||
// ruleResponse to ruleResult
|
||||
for _, ruleResponse := range ruleResponses {
|
||||
failedResources := objectsenvelopes.ListMapToMeta(ruleResponse.GetFailedResources())
|
||||
for _, failedResource := range failedResources {
|
||||
var ruleResult *resourcesresults.ResourceAssociatedRule
|
||||
if r, found := resources[failedResource.GetID()]; found {
|
||||
ruleResult = r
|
||||
} else {
|
||||
ruleResult = &resourcesresults.ResourceAssociatedRule{
|
||||
Paths: make([]armotypes.PosturePaths, 0, len(ruleResponse.FailedPaths)+len(ruleResponse.FixPaths)+1),
|
||||
inputRawResources := workloadinterface.ListMetaToMap(inputResources)
|
||||
|
||||
// the failed resources are a subgroup of the enumeratedData, so we store the enumeratedData like it was the input data
|
||||
enumeratedData, err := opap.enumerateData(ctx, rule, inputRawResources)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
inputResources = objectsenvelopes.ListMapToMeta(enumeratedData)
|
||||
|
||||
for i, inputResource := range inputResources {
|
||||
resources[inputResource.GetID()] = &resourcesresults.ResourceAssociatedRule{
|
||||
Name: rule.Name,
|
||||
ControlConfigurations: ruleRegoDependenciesData.PostureControlInputs,
|
||||
Status: apis.StatusPassed,
|
||||
}
|
||||
opap.AllResources[inputResource.GetID()] = inputResources[i]
|
||||
}
|
||||
|
||||
ruleResponses, err := opap.runOPAOnSingleRule(ctx, rule, inputRawResources, ruleData, ruleRegoDependenciesData)
|
||||
if err != nil {
|
||||
continue
|
||||
// return resources, allResources, err
|
||||
}
|
||||
|
||||
// ruleResponse to ruleResult
|
||||
for _, ruleResponse := range ruleResponses {
|
||||
failedResources := objectsenvelopes.ListMapToMeta(ruleResponse.GetFailedResources())
|
||||
for _, failedResource := range failedResources {
|
||||
var ruleResult *resourcesresults.ResourceAssociatedRule
|
||||
if r, found := resources[failedResource.GetID()]; found {
|
||||
ruleResult = r
|
||||
} else {
|
||||
ruleResult = &resourcesresults.ResourceAssociatedRule{
|
||||
Paths: make([]armotypes.PosturePaths, 0, len(ruleResponse.FailedPaths)+len(ruleResponse.FixPaths)+1),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ruleResult.SetStatus(apis.StatusFailed, nil)
|
||||
for _, failedPath := range ruleResponse.FailedPaths {
|
||||
ruleResult.Paths = append(ruleResult.Paths, armotypes.PosturePaths{FailedPath: failedPath})
|
||||
}
|
||||
ruleResult.SetStatus(apis.StatusFailed, nil)
|
||||
for _, failedPath := range ruleResponse.FailedPaths {
|
||||
ruleResult.Paths = append(ruleResult.Paths, armotypes.PosturePaths{FailedPath: failedPath})
|
||||
}
|
||||
|
||||
for _, fixPath := range ruleResponse.FixPaths {
|
||||
ruleResult.Paths = append(ruleResult.Paths, armotypes.PosturePaths{FixPath: fixPath})
|
||||
}
|
||||
for _, fixPath := range ruleResponse.FixPaths {
|
||||
ruleResult.Paths = append(ruleResult.Paths, armotypes.PosturePaths{FixPath: fixPath})
|
||||
}
|
||||
|
||||
if ruleResponse.FixCommand != "" {
|
||||
ruleResult.Paths = append(ruleResult.Paths, armotypes.PosturePaths{FixCommand: ruleResponse.FixCommand})
|
||||
}
|
||||
if ruleResponse.FixCommand != "" {
|
||||
ruleResult.Paths = append(ruleResult.Paths, armotypes.PosturePaths{FixCommand: ruleResponse.FixCommand})
|
||||
}
|
||||
// if ruleResponse has relatedObjects, add it to ruleResult
|
||||
if len(ruleResponse.RelatedObjects) > 0 {
|
||||
for _, relatedObject := range ruleResponse.RelatedObjects {
|
||||
wl := objectsenvelopes.NewObject(relatedObject.Object)
|
||||
if wl != nil {
|
||||
ruleResult.RelatedResourcesIDs = append(ruleResult.RelatedResourcesIDs, wl.GetID())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
resources[failedResource.GetID()] = ruleResult
|
||||
resources[failedResource.GetID()] = ruleResult
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return resources, allResources, nil
|
||||
return resources, nil
|
||||
}
|
||||
|
||||
func (opap *OPAProcessor) runOPAOnSingleRule(ctx context.Context, rule *reporthandling.PolicyRule, k8sObjects []map[string]interface{}, getRuleData func(*reporthandling.PolicyRule) string, ruleRegoDependenciesData resources.RegoDependenciesData) ([]reporthandling.RuleResponse, error) {
|
||||
@@ -355,6 +289,7 @@ func (opap *OPAProcessor) runRegoOnK8s(ctx context.Context, rule *reporthandling
|
||||
// register signature verification methods for the OPA ast engine (since these are package level symbols, we do it only once)
|
||||
rego.RegisterBuiltin2(cosignVerifySignatureDeclaration, cosignVerifySignatureDefinition)
|
||||
rego.RegisterBuiltin1(cosignHasSignatureDeclaration, cosignHasSignatureDefinition)
|
||||
rego.RegisterBuiltin1(imageNameNormalizeDeclaration, imageNameNormalizeDefinition)
|
||||
})
|
||||
|
||||
modules[rule.Name] = getRuleData(rule)
|
||||
|
||||
@@ -1,23 +1,176 @@
|
||||
package opaprocessor
|
||||
|
||||
import (
|
||||
"archive/zip"
|
||||
"context"
|
||||
_ "embed"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"runtime"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/kubescape/kubescape/v2/core/cautils"
|
||||
"github.com/kubescape/kubescape/v2/core/mocks"
|
||||
"github.com/kubescape/opa-utils/reporthandling"
|
||||
"github.com/kubescape/opa-utils/reporthandling/results/v1/resourcesresults"
|
||||
"github.com/kubescape/opa-utils/resources"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/kubescape/k8s-interface/workloadinterface"
|
||||
// _ "k8s.io/client-go/plugin/pkg/client/auth"
|
||||
)
|
||||
|
||||
func NewOPAProcessorMock() *OPAProcessor {
|
||||
return &OPAProcessor{}
|
||||
var (
|
||||
//go:embed testdata/opaSessionObjMock.json
|
||||
opaSessionObjMockData string
|
||||
//go:embed testdata/opaSessionObjMock1.json
|
||||
opaSessionObjMockData1 string
|
||||
//go:embed testdata/regoDependenciesDataMock.json
|
||||
regoDependenciesData string
|
||||
|
||||
allResourcesMockData []byte
|
||||
//go:embed testdata/resourcesMock1.json
|
||||
resourcesMock1 []byte
|
||||
)
|
||||
|
||||
func unzipAllResourcesTestDataAndSetVar(zipFilePath, destFilePath string) error {
|
||||
archive, err := zip.OpenReader(zipFilePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
os.RemoveAll(destFilePath)
|
||||
|
||||
f := archive.File[0]
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dstFile, err := os.OpenFile(destFilePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fileInArchive, err := f.Open()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = io.Copy(dstFile, fileInArchive) //nolint:gosec
|
||||
|
||||
dstFile.Close()
|
||||
fileInArchive.Close()
|
||||
archive.Close()
|
||||
|
||||
file, err := os.Open(destFilePath)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
allResourcesMockData, err = io.ReadAll(file)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
file.Close()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func NewOPAProcessorMock(opaSessionObjMock string, resourcesMock []byte) *OPAProcessor {
|
||||
opap := &OPAProcessor{}
|
||||
if err := json.Unmarshal([]byte(regoDependenciesData), &opap.regoDependenciesData); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
// no err check because Unmarshal will fail on AllResources field (expected)
|
||||
json.Unmarshal([]byte(opaSessionObjMock), &opap.OPASessionObj)
|
||||
opap.AllResources = make(map[string]workloadinterface.IMetadata)
|
||||
|
||||
allResources := make(map[string]map[string]interface{})
|
||||
if err := json.Unmarshal(resourcesMock, &allResources); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
for i := range allResources {
|
||||
opap.AllResources[i] = workloadinterface.NewWorkloadObj(allResources[i])
|
||||
}
|
||||
|
||||
return opap
|
||||
}
|
||||
|
||||
func monitorHeapSpace(maxHeap *uint64, quitChan chan bool) {
|
||||
for {
|
||||
select {
|
||||
case <-quitChan:
|
||||
return
|
||||
default:
|
||||
var m runtime.MemStats
|
||||
runtime.ReadMemStats(&m)
|
||||
heapSpace := m.HeapAlloc
|
||||
if heapSpace > *maxHeap {
|
||||
*maxHeap = heapSpace
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
goarch: arm64
|
||||
pkg: github.com/kubescape/kubescape/v2/core/pkg/opaprocessor
|
||||
|
||||
BenchmarkProcess/opaprocessor.Process_1-8 1 29714096083 ns/op 22309913416 B/op 498183685 allocs/op
|
||||
--- BENCH: BenchmarkProcess/opaprocessor.Process_1-8
|
||||
|
||||
processorhandler_test.go:178: opaprocessor.Process_1_max_heap_space_gb: 2.85
|
||||
processorhandler_test.go:179: opaprocessor.Process_1_execution_time_sec: 29.714054
|
||||
|
||||
BenchmarkProcess/opaprocessor.Process_4-8 1 25574892875 ns/op 22037035032 B/op 498167263 allocs/op
|
||||
--- BENCH: BenchmarkProcess/opaprocessor.Process_4-8
|
||||
|
||||
processorhandler_test.go:178: opaprocessor.Process_4_max_heap_space_gb: 6.76
|
||||
processorhandler_test.go:179: opaprocessor.Process_4_execution_time_sec: 25.574884
|
||||
|
||||
BenchmarkProcess/opaprocessor.Process_8-8 1 16534461291 ns/op 22308814384 B/op 498167171 allocs/op
|
||||
--- BENCH: BenchmarkProcess/opaprocessor.Process_8-8
|
||||
|
||||
processorhandler_test.go:178: opaprocessor.Process_8_max_heap_space_gb: 9.47
|
||||
processorhandler_test.go:179: opaprocessor.Process_8_execution_time_sec: 16.534455
|
||||
|
||||
BenchmarkProcess/opaprocessor.Process_16-8 1 18924050500 ns/op 22179562272 B/op 498167367 allocs/op
|
||||
--- BENCH: BenchmarkProcess/opaprocessor.Process_16-8
|
||||
|
||||
processorhandler_test.go:178: opaprocessor.Process_16_max_heap_space_gb: 11.69
|
||||
processorhandler_test.go:179: opaprocessor.Process_16_execution_time_sec: 16.321493
|
||||
*/
|
||||
func BenchmarkProcess(b *testing.B) {
|
||||
b.SetParallelism(1)
|
||||
|
||||
// since all resources JSON is a large file, we need to unzip it and set the variable before running the benchmark
|
||||
unzipAllResourcesTestDataAndSetVar("testdata/allResourcesMock.json.zip", "testdata/allResourcesMock.json")
|
||||
|
||||
maxGoRoutinesArr := []int{1, 4, 8, 16}
|
||||
for _, maxGoRoutines := range maxGoRoutinesArr {
|
||||
testName := fmt.Sprintf("opaprocessor.Process_%d", maxGoRoutines)
|
||||
b.Run(testName, func(b *testing.B) {
|
||||
// setup
|
||||
opap := NewOPAProcessorMock(opaSessionObjMockData, allResourcesMockData)
|
||||
b.ResetTimer()
|
||||
var maxHeap uint64
|
||||
quitChan := make(chan bool)
|
||||
go monitorHeapSpace(&maxHeap, quitChan)
|
||||
|
||||
// test
|
||||
opap.Process(context.Background(), opap.OPASessionObj.AllPolicies, nil)
|
||||
|
||||
// teardown
|
||||
quitChan <- true
|
||||
b.Log(fmt.Sprintf("%s_max_heap_space_gb: %.2f", testName, float64(maxHeap)/(1024*1024*1024)))
|
||||
b.Log(fmt.Sprintf("%s_execution_time_sec: %f", testName, b.Elapsed().Seconds()))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestProcessResourcesResult(t *testing.T) {
|
||||
|
||||
// set k8s
|
||||
@@ -32,10 +185,11 @@ func TestProcessResourcesResult(t *testing.T) {
|
||||
opaSessionObj := cautils.NewOPASessionObjMock()
|
||||
opaSessionObj.Policies = frameworks
|
||||
|
||||
policies := ConvertFrameworksToPolicies(opaSessionObj.Policies, "")
|
||||
scanningScope := cautils.GetScanningScope(&cautils.ScanInfo{InputPatterns: []string{""}})
|
||||
policies := ConvertFrameworksToPolicies(opaSessionObj.Policies, "", nil, scanningScope)
|
||||
ConvertFrameworksToSummaryDetails(&opaSessionObj.Report.SummaryDetails, opaSessionObj.Policies, policies)
|
||||
|
||||
opaSessionObj.K8SResources = &k8sResources
|
||||
opaSessionObj.K8SResources = k8sResources
|
||||
opaSessionObj.AllResources[deployment.GetID()] = deployment
|
||||
|
||||
opap := NewOPAProcessor(opaSessionObj, resources.NewRegoDependenciesDataMock())
|
||||
@@ -44,18 +198,18 @@ func TestProcessResourcesResult(t *testing.T) {
|
||||
|
||||
assert.Equal(t, 1, len(opaSessionObj.ResourcesResult))
|
||||
res := opaSessionObj.ResourcesResult[deployment.GetID()]
|
||||
assert.Equal(t, 2, res.ListControlsIDs(nil).All().Len())
|
||||
assert.Equal(t, 1, len(res.ListControlsIDs(nil).Failed()))
|
||||
assert.Equal(t, 1, len(res.ListControlsIDs(nil).Passed()))
|
||||
assert.Equal(t, 2, res.ListControlsIDs(nil).Len())
|
||||
assert.Equal(t, 1, res.ListControlsIDs(nil).Failed())
|
||||
assert.Equal(t, 1, res.ListControlsIDs(nil).Passed())
|
||||
assert.True(t, res.GetStatus(nil).IsFailed())
|
||||
assert.False(t, res.GetStatus(nil).IsPassed())
|
||||
assert.Equal(t, deployment.GetID(), opaSessionObj.ResourcesResult[deployment.GetID()].ResourceID)
|
||||
|
||||
opap.updateResults(context.TODO())
|
||||
res = opaSessionObj.ResourcesResult[deployment.GetID()]
|
||||
assert.Equal(t, 2, res.ListControlsIDs(nil).All().Len())
|
||||
assert.Equal(t, 1, len(res.ListControlsIDs(nil).Failed()))
|
||||
assert.Equal(t, 1, len(res.ListControlsIDs(nil).Passed()))
|
||||
assert.Equal(t, 2, res.ListControlsIDs(nil).Len())
|
||||
assert.Equal(t, 1, res.ListControlsIDs(nil).Failed())
|
||||
assert.Equal(t, 1, res.ListControlsIDs(nil).Passed())
|
||||
assert.True(t, res.GetStatus(nil).IsFailed())
|
||||
assert.False(t, res.GetStatus(nil).IsPassed())
|
||||
assert.Equal(t, deployment.GetID(), opaSessionObj.ResourcesResult[deployment.GetID()].ResourceID)
|
||||
@@ -68,32 +222,114 @@ func TestProcessResourcesResult(t *testing.T) {
|
||||
assert.Equal(t, 0, summaryDetails.NumberOfResources().Skipped())
|
||||
|
||||
// test resource listing
|
||||
assert.Equal(t, 1, summaryDetails.ListResourcesIDs().All().Len())
|
||||
assert.Equal(t, 1, len(summaryDetails.ListResourcesIDs().Failed()))
|
||||
assert.Equal(t, 0, len(summaryDetails.ListResourcesIDs().Passed()))
|
||||
assert.Equal(t, 0, len(summaryDetails.ListResourcesIDs().Skipped()))
|
||||
assert.Equal(t, 1, summaryDetails.ListResourcesIDs(nil).Len())
|
||||
assert.Equal(t, 1, summaryDetails.ListResourcesIDs(nil).Failed())
|
||||
assert.Equal(t, 0, summaryDetails.ListResourcesIDs(nil).Passed())
|
||||
assert.Equal(t, 0, summaryDetails.ListResourcesIDs(nil).Skipped())
|
||||
|
||||
// test control listing
|
||||
assert.Equal(t, res.ListControlsIDs(nil).All().Len(), summaryDetails.NumberOfControls().All())
|
||||
assert.Equal(t, len(res.ListControlsIDs(nil).Passed()), summaryDetails.NumberOfControls().Passed())
|
||||
assert.Equal(t, len(res.ListControlsIDs(nil).Skipped()), summaryDetails.NumberOfControls().Skipped())
|
||||
assert.Equal(t, len(res.ListControlsIDs(nil).Failed()), summaryDetails.NumberOfControls().Failed())
|
||||
assert.Equal(t, res.ListControlsIDs(nil).Len(), summaryDetails.NumberOfControls().All())
|
||||
assert.Equal(t, res.ListControlsIDs(nil).Passed(), summaryDetails.NumberOfControls().Passed())
|
||||
assert.Equal(t, res.ListControlsIDs(nil).Skipped(), summaryDetails.NumberOfControls().Skipped())
|
||||
assert.Equal(t, res.ListControlsIDs(nil).Failed(), summaryDetails.NumberOfControls().Failed())
|
||||
assert.True(t, summaryDetails.GetStatus().IsFailed())
|
||||
|
||||
opaSessionObj.Exceptions = []armotypes.PostureExceptionPolicy{*mocks.MockExceptionAllKinds(&armotypes.PosturePolicy{FrameworkName: frameworks[0].Name})}
|
||||
opap.updateResults(context.TODO())
|
||||
|
||||
res = opaSessionObj.ResourcesResult[deployment.GetID()]
|
||||
assert.Equal(t, 2, res.ListControlsIDs(nil).All().Len())
|
||||
assert.Equal(t, 2, len(res.ListControlsIDs(nil).Passed()))
|
||||
assert.Equal(t, 2, res.ListControlsIDs(nil).Len())
|
||||
assert.Equal(t, 2, res.ListControlsIDs(nil).Passed())
|
||||
assert.True(t, res.GetStatus(nil).IsPassed())
|
||||
assert.False(t, res.GetStatus(nil).IsFailed())
|
||||
assert.Equal(t, deployment.GetID(), opaSessionObj.ResourcesResult[deployment.GetID()].ResourceID)
|
||||
|
||||
// test resource listing
|
||||
summaryDetails = opaSessionObj.Report.SummaryDetails
|
||||
assert.Equal(t, 1, summaryDetails.ListResourcesIDs().All().Len())
|
||||
assert.Equal(t, 1, len(summaryDetails.ListResourcesIDs().Failed()))
|
||||
assert.Equal(t, 0, len(summaryDetails.ListResourcesIDs().Passed()))
|
||||
assert.Equal(t, 0, len(summaryDetails.ListResourcesIDs().Skipped()))
|
||||
assert.Equal(t, 1, summaryDetails.ListResourcesIDs(nil).Len())
|
||||
assert.Equal(t, 1, summaryDetails.ListResourcesIDs(nil).Failed())
|
||||
assert.Equal(t, 0, summaryDetails.ListResourcesIDs(nil).Passed())
|
||||
assert.Equal(t, 0, summaryDetails.ListResourcesIDs(nil).Skipped())
|
||||
}
|
||||
|
||||
// don't parallelize this test because it uses a global variable - allResourcesMockData
|
||||
func TestProcessRule(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
rule reporthandling.PolicyRule
|
||||
resourcesMock []byte
|
||||
opaSessionObjMock string
|
||||
expectedResult map[string]*resourcesresults.ResourceAssociatedRule
|
||||
}{
|
||||
{
|
||||
name: "TestRelatedResourcesIDs",
|
||||
rule: reporthandling.PolicyRule{
|
||||
PortalBase: armotypes.PortalBase{
|
||||
Name: "exposure-to-internet",
|
||||
Attributes: map[string]interface{}{
|
||||
"armoBuiltin": true,
|
||||
},
|
||||
},
|
||||
Rule: "package armo_builtins\n\n# Checks if NodePort or LoadBalancer is connected to a workload to expose something\ndeny[msga] {\n service := input[_]\n service.kind == \"Service\"\n is_exposed_service(service)\n \n wl := input[_]\n spec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Pod\", \"Job\", \"CronJob\"}\n spec_template_spec_patterns[wl.kind]\n wl_connected_to_service(wl, service)\n failPath := [\"spec.type\"]\n msga := {\n \"alertMessage\": sprintf(\"workload '%v' is exposed through service '%v'\", [wl.metadata.name, service.metadata.name]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 7,\n \"fixPaths\": [],\n \"failedPaths\": [],\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n \"relatedObjects\": [{\n \"object\": service,\n \"failedPaths\": failPath,\n }]\n }\n}\n\n# Checks if Ingress is connected to a service and a workload to expose something\ndeny[msga] {\n ingress := input[_]\n ingress.kind == \"Ingress\"\n \n svc := input[_]\n svc.kind == \"Service\"\n # avoid duplicate alerts\n # if service is already exposed through NodePort or LoadBalancer workload will fail on that\n not is_exposed_service(svc)\n\n wl := input[_]\n spec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Pod\", \"Job\", \"CronJob\"}\n spec_template_spec_patterns[wl.kind]\n wl_connected_to_service(wl, svc)\n\n result := svc_connected_to_ingress(svc, ingress)\n \n msga := {\n \"alertMessage\": sprintf(\"workload '%v' is exposed through ingress '%v'\", [wl.metadata.name, ingress.metadata.name]),\n \"packagename\": \"armo_builtins\",\n \"failedPaths\": [],\n \"fixPaths\": [],\n \"alertScore\": 7,\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n \"relatedObjects\": [{\n \"object\": ingress,\n \"failedPaths\": result,\n }]\n }\n} \n\n# ====================================================================================\n\nis_exposed_service(svc) {\n svc.spec.type == 
\"NodePort\"\n}\n\nis_exposed_service(svc) {\n svc.spec.type == \"LoadBalancer\"\n}\n\nwl_connected_to_service(wl, svc) {\n count({x | svc.spec.selector[x] == wl.metadata.labels[x]}) == count(svc.spec.selector)\n}\n\nwl_connected_to_service(wl, svc) {\n wl.spec.selector.matchLabels == svc.spec.selector\n}\n\n# check if service is connected to ingress\nsvc_connected_to_ingress(svc, ingress) = result {\n rule := ingress.spec.rules[i]\n paths := rule.http.paths[j]\n svc.metadata.name == paths.backend.service.name\n result := [sprintf(\"ingress.spec.rules[%d].http.paths[%d].backend.service.name\", [i,j])]\n}\n\n",
|
||||
Match: []reporthandling.RuleMatchObjects{
|
||||
{
|
||||
APIGroups: []string{""},
|
||||
APIVersions: []string{"v1"},
|
||||
Resources: []string{"Pod", "Service"},
|
||||
},
|
||||
{
|
||||
APIGroups: []string{"apps"},
|
||||
APIVersions: []string{"v1"},
|
||||
Resources: []string{"Deployment", "ReplicaSet", "DaemonSet", "StatefulSet"},
|
||||
},
|
||||
{
|
||||
APIGroups: []string{"batch"},
|
||||
APIVersions: []string{"*"},
|
||||
Resources: []string{"Job", "CronJob"},
|
||||
},
|
||||
{
|
||||
APIGroups: []string{"extensions", "networking.k8s.io"},
|
||||
APIVersions: []string{"v1beta1", "v1"},
|
||||
Resources: []string{"Ingress"},
|
||||
},
|
||||
},
|
||||
Description: "fails in case the running workload has binded Service or Ingress that are exposing it on Internet.",
|
||||
Remediation: "",
|
||||
RuleQuery: "armo_builtins",
|
||||
RuleLanguage: reporthandling.RegoLanguage,
|
||||
},
|
||||
resourcesMock: resourcesMock1,
|
||||
opaSessionObjMock: opaSessionObjMockData1,
|
||||
expectedResult: map[string]*resourcesresults.ResourceAssociatedRule{
|
||||
"/v1/default/Pod/fake-pod-1-22gck": {
|
||||
Name: "exposure-to-internet",
|
||||
ControlConfigurations: map[string][]string{},
|
||||
Status: "failed",
|
||||
SubStatus: "",
|
||||
Paths: nil,
|
||||
Exception: nil,
|
||||
RelatedResourcesIDs: []string{
|
||||
"/v1/default/Service/fake-service-1",
|
||||
},
|
||||
},
|
||||
"/v1/default/Service/fake-service-1": {
|
||||
Name: "exposure-to-internet",
|
||||
ControlConfigurations: map[string][]string{},
|
||||
Status: "passed",
|
||||
SubStatus: "",
|
||||
Paths: nil,
|
||||
Exception: nil,
|
||||
RelatedResourcesIDs: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tc := range testCases {
|
||||
// since all resources JSON is a large file, we need to unzip it and set the variable before running the benchmark
|
||||
unzipAllResourcesTestDataAndSetVar("testdata/allResourcesMock.json.zip", "testdata/allResourcesMock.json")
|
||||
opap := NewOPAProcessorMock(tc.opaSessionObjMock, tc.resourcesMock)
|
||||
resources, err := opap.processRule(context.Background(), &tc.rule, nil)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, tc.expectedResult, resources)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -16,6 +16,10 @@ import (
|
||||
"go.opentelemetry.io/otel"
|
||||
)
|
||||
|
||||
const clusterScope = "clusterScope"
|
||||
|
||||
var largeClusterSize int = -1
|
||||
|
||||
// updateResults updates the results objects and report objects. This is a critical function - DO NOT CHANGE
|
||||
//
|
||||
// The function:
|
||||
@@ -25,6 +29,10 @@ import (
|
||||
func (opap *OPAProcessor) updateResults(ctx context.Context) {
|
||||
_, span := otel.Tracer("").Start(ctx, "OPAProcessor.updateResults")
|
||||
defer span.End()
|
||||
defer logger.L().Ctx(ctx).Success("Done aggregating results")
|
||||
|
||||
cautils.StartSpinner()
|
||||
defer cautils.StopSpinner()
|
||||
|
||||
// remove data from all objects
|
||||
for i := range opap.AllResources {
|
||||
@@ -87,14 +95,21 @@ func isEmptyResources(counters reportsummary.ICounters) bool {
|
||||
return counters.Failed() == 0 && counters.Skipped() == 0 && counters.Passed() == 0
|
||||
}
|
||||
|
||||
func getAllSupportedObjects(k8sResources *cautils.K8SResources, ksResources *cautils.KSResources, allResources map[string]workloadinterface.IMetadata, rule *reporthandling.PolicyRule) []workloadinterface.IMetadata {
|
||||
k8sObjects := []workloadinterface.IMetadata{}
|
||||
k8sObjects = append(k8sObjects, getKubernetesObjects(k8sResources, allResources, rule.Match)...)
|
||||
k8sObjects = append(k8sObjects, getKSObjects(ksResources, allResources, rule.DynamicMatch)...)
|
||||
func getAllSupportedObjects(k8sResources cautils.K8SResources, externalResources cautils.ExternalResources, allResources map[string]workloadinterface.IMetadata, rule *reporthandling.PolicyRule) map[string][]workloadinterface.IMetadata {
|
||||
k8sObjects := getKubernetesObjects(k8sResources, allResources, rule.Match)
|
||||
externalObjs := getKubenetesObjectsFromExternalResources(externalResources, allResources, rule.DynamicMatch)
|
||||
if len(externalObjs) > 0 {
|
||||
l, ok := k8sObjects[clusterScope]
|
||||
if !ok {
|
||||
l = []workloadinterface.IMetadata{}
|
||||
}
|
||||
l = append(l, externalObjs...)
|
||||
k8sObjects[clusterScope] = l
|
||||
}
|
||||
return k8sObjects
|
||||
}
|
||||
|
||||
func getKSObjects(k8sResources *cautils.KSResources, allResources map[string]workloadinterface.IMetadata, match []reporthandling.RuleMatchObjects) []workloadinterface.IMetadata {
|
||||
func getKubenetesObjectsFromExternalResources(externalResources cautils.ExternalResources, allResources map[string]workloadinterface.IMetadata, match []reporthandling.RuleMatchObjects) []workloadinterface.IMetadata {
|
||||
k8sObjects := []workloadinterface.IMetadata{}
|
||||
|
||||
for m := range match {
|
||||
@@ -103,7 +118,7 @@ func getKSObjects(k8sResources *cautils.KSResources, allResources map[string]wor
|
||||
for _, resource := range match[m].Resources {
|
||||
groupResources := k8sinterface.ResourceGroupToString(groups, version, resource)
|
||||
for _, groupResource := range groupResources {
|
||||
if k8sObj, ok := (*k8sResources)[groupResource]; ok {
|
||||
if k8sObj, ok := externalResources[groupResource]; ok {
|
||||
for i := range k8sObj {
|
||||
k8sObjects = append(k8sObjects, allResources[k8sObj[i]])
|
||||
}
|
||||
@@ -114,11 +129,11 @@ func getKSObjects(k8sResources *cautils.KSResources, allResources map[string]wor
|
||||
}
|
||||
}
|
||||
|
||||
return filterOutChildResources(k8sObjects, match)
|
||||
return k8sObjects
|
||||
}
|
||||
|
||||
func getKubernetesObjects(k8sResources *cautils.K8SResources, allResources map[string]workloadinterface.IMetadata, match []reporthandling.RuleMatchObjects) []workloadinterface.IMetadata {
|
||||
k8sObjects := []workloadinterface.IMetadata{}
|
||||
func getKubernetesObjects(k8sResources cautils.K8SResources, allResources map[string]workloadinterface.IMetadata, match []reporthandling.RuleMatchObjects) map[string][]workloadinterface.IMetadata {
|
||||
k8sObjects := map[string][]workloadinterface.IMetadata{}
|
||||
|
||||
for m := range match {
|
||||
for _, groups := range match[m].APIGroups {
|
||||
@@ -126,14 +141,18 @@ func getKubernetesObjects(k8sResources *cautils.K8SResources, allResources map[s
|
||||
for _, resource := range match[m].Resources {
|
||||
groupResources := k8sinterface.ResourceGroupToString(groups, version, resource)
|
||||
for _, groupResource := range groupResources {
|
||||
if k8sObj, ok := (*k8sResources)[groupResource]; ok {
|
||||
/*
|
||||
if k8sObj == nil {
|
||||
// logger.L().Debug("skipping", helpers.String("resource", groupResource))
|
||||
if k8sObj, ok := k8sResources[groupResource]; ok {
|
||||
for i := range k8sObj {
|
||||
|
||||
obj := allResources[k8sObj[i]]
|
||||
ns := getNamespaceName(obj, len(allResources))
|
||||
|
||||
l, ok := k8sObjects[ns]
|
||||
if !ok {
|
||||
l = []workloadinterface.IMetadata{}
|
||||
}
|
||||
*/
|
||||
for i := range k8sObj {
|
||||
k8sObjects = append(k8sObjects, allResources[k8sObj[i]])
|
||||
l = append(l, obj)
|
||||
k8sObjects[ns] = l
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -142,34 +161,9 @@ func getKubernetesObjects(k8sResources *cautils.K8SResources, allResources map[s
|
||||
}
|
||||
}
|
||||
|
||||
return filterOutChildResources(k8sObjects, match)
|
||||
return k8sObjects
|
||||
// return filterOutChildResources(k8sObjects, match)
|
||||
}
|
||||
|
||||
// filterOutChildResources filter out child resources if the parent resource is in the list
|
||||
func filterOutChildResources(objects []workloadinterface.IMetadata, match []reporthandling.RuleMatchObjects) []workloadinterface.IMetadata {
|
||||
response := []workloadinterface.IMetadata{}
|
||||
owners := []string{}
|
||||
for m := range match {
|
||||
owners = append(owners, match[m].Resources...)
|
||||
}
|
||||
|
||||
for i := range objects {
|
||||
if !k8sinterface.IsTypeWorkload(objects[i].GetObject()) {
|
||||
response = append(response, objects[i])
|
||||
continue
|
||||
}
|
||||
w := workloadinterface.NewWorkloadObj(objects[i].GetObject())
|
||||
ownerReferences, err := w.GetOwnerReferences()
|
||||
if err != nil || len(ownerReferences) == 0 {
|
||||
response = append(response, w)
|
||||
} else if !k8sinterface.IsStringInSlice(owners, ownerReferences[0].Kind) {
|
||||
response = append(response, w)
|
||||
}
|
||||
}
|
||||
|
||||
return response
|
||||
}
|
||||
|
||||
func getRuleDependencies(ctx context.Context) (map[string]string, error) {
|
||||
modules := resources.LoadRegoModules()
|
||||
if len(modules) == 0 {
|
||||
@@ -240,3 +234,30 @@ func ruleData(rule *reporthandling.PolicyRule) string {
|
||||
func ruleEnumeratorData(rule *reporthandling.PolicyRule) string {
|
||||
return rule.ResourceEnumerator
|
||||
}
|
||||
|
||||
func getNamespaceName(obj workloadinterface.IMetadata, clusterSize int) string {
|
||||
|
||||
if !isLargeCluster(clusterSize) {
|
||||
return clusterScope
|
||||
}
|
||||
|
||||
// if the resource is in namespace scope, get the namespace
|
||||
if k8sinterface.IsResourceInNamespaceScope(obj.GetKind()) {
|
||||
return obj.GetNamespace()
|
||||
}
|
||||
if obj.GetKind() == "Namespace" {
|
||||
return obj.GetName()
|
||||
}
|
||||
|
||||
return clusterScope
|
||||
}
|
||||
|
||||
// isLargeCluster returns true if the cluster size is larger than the largeClusterSize
|
||||
// This code is a workaround for large clusters. The final solution will be to scan resources individually
|
||||
func isLargeCluster(clusterSize int) bool {
|
||||
if largeClusterSize < 0 {
|
||||
// initialize large cluster size
|
||||
largeClusterSize, _ = cautils.ParseIntEnvVar("LARGE_CLUSTER_SIZE", 2500)
|
||||
}
|
||||
return clusterSize > largeClusterSize
|
||||
}
|
||||
|
||||
BIN
core/pkg/opaprocessor/testdata/allResourcesMock.json.zip
vendored
Normal file
BIN
core/pkg/opaprocessor/testdata/allResourcesMock.json.zip
vendored
Normal file
Binary file not shown.
1
core/pkg/opaprocessor/testdata/opaSessionObjMock.json
vendored
Normal file
1
core/pkg/opaprocessor/testdata/opaSessionObjMock.json
vendored
Normal file
File diff suppressed because one or more lines are too long
40651
core/pkg/opaprocessor/testdata/opaSessionObjMock1.json
vendored
Normal file
40651
core/pkg/opaprocessor/testdata/opaSessionObjMock1.json
vendored
Normal file
File diff suppressed because one or more lines are too long
1
core/pkg/opaprocessor/testdata/regoDependenciesDataMock.json
vendored
Normal file
1
core/pkg/opaprocessor/testdata/regoDependenciesDataMock.json
vendored
Normal file
@@ -0,0 +1 @@
|
||||
{"clusterName":"kwok-kwok-cluster","postureControlInputs":{"cpu_limit_max":[],"cpu_limit_min":[],"cpu_request_max":[],"cpu_request_min":[],"imageRepositoryAllowList":[],"insecureCapabilities":["SETPCAP","NET_ADMIN","NET_RAW","SYS_MODULE","SYS_RAWIO","SYS_PTRACE","SYS_ADMIN","SYS_BOOT","MAC_OVERRIDE","MAC_ADMIN","PERFMON","ALL","BPF"],"k8sRecommendedLabels":["app.kubernetes.io/name","app.kubernetes.io/instance","app.kubernetes.io/version","app.kubernetes.io/component","app.kubernetes.io/part-of","app.kubernetes.io/managed-by","app.kubernetes.io/created-by"],"listOfDangerousArtifacts":["bin/bash","sbin/sh","bin/ksh","bin/tcsh","bin/zsh","usr/bin/scsh","bin/csh","bin/busybox","usr/bin/busybox"],"max_critical_vulnerabilities":["5"],"max_high_vulnerabilities":["10"],"memory_limit_max":[],"memory_limit_min":[],"memory_request_max":[],"memory_request_min":[],"publicRegistries":[],"recommendedLabels":["app","tier","phase","version","owner","env"],"sensitiveInterfaces":["nifi","argo-server","weave-scope-app","kubeflow","kubernetes-dashboard","jenkins","prometheus-deployment"],"sensitiveKeyNames":["aws_access_key_id","aws_secret_access_key","azure_batchai_storage_account","azure_batchai_storage_key","azure_batch_account","azure_batch_key","secret","key","password","pwd","token","jwt","bearer","credential"],"sensitiveValues":["BEGIN \\w+ PRIVATE KEY","PRIVATE 
KEY","eyJhbGciO","JWT","Bearer","_key_","_secret_"],"sensitiveValuesAllowed":[],"servicesNames":["nifi-service","argo-server","minio","postgres","workflow-controller-metrics","weave-scope-app","kubernetes-dashboard"],"trustedCosignPublicKeys":[],"untrustedRegistries":[],"wlKnownNames":["coredns","kube-proxy","event-exporter-gke","kube-dns","17-default-backend","metrics-server","ca-audit","ca-dashboard-aggregator","ca-notification-server","ca-ocimage","ca-oracle","ca-posture","ca-rbac","ca-vuln-scan","ca-webhook","ca-websocket","clair-clair"]},"dataControlInputs":null,"k8sconfig":{"token":"","ip":"","host":"https://127.0.0.1:32766","port":"","crtfile":"","clientcrtfile":"/Users/amirmalka/.kwok/clusters/kwok-cluster/pki/admin.crt","clientkeyfile":"/Users/amirmalka/.kwok/clusters/kwok-cluster/pki/admin.key"}}
|
||||
220
core/pkg/opaprocessor/testdata/resourcesMock1.json
vendored
Normal file
220
core/pkg/opaprocessor/testdata/resourcesMock1.json
vendored
Normal file
@@ -0,0 +1,220 @@
|
||||
{
|
||||
"/v1/default/Pod/fake-pod-1-22gck": {
|
||||
"apiVersion": "v1",
|
||||
"kind": "Pod",
|
||||
"metadata": {
|
||||
"annotations": {
|
||||
"kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"kind\":\"Pod\",\"metadata\":{\"annotations\":{},\"name\":\"fake-pod-1-22gck\",\"namespace\":\"default\"},\"spec\":{\"containers\":[{\"image\":\"redis\",\"name\":\"fake-pod-1-22gck\",\"volumeMounts\":[{\"mountPath\":\"/etc/foo\",\"name\":\"foo\",\"readOnly\":true}]}],\"volumes\":[{\"name\":\"foo\",\"secret\":{\"optional\":true,\"secretName\":\"mysecret\"}}]}}\n"
|
||||
},
|
||||
"creationTimestamp": "2023-06-22T07:47:38Z",
|
||||
"name": "fake-pod-1-22gck",
|
||||
"namespace": "default",
|
||||
"resourceVersion": "1087189",
|
||||
"uid": "046753fa-c7b6-46dd-ae18-dd68b8b20cd3",
|
||||
"labels": {"app": "argo-server"}
|
||||
},
|
||||
"spec": {
|
||||
"containers": [
|
||||
{
|
||||
"image": "redis",
|
||||
"imagePullPolicy": "Always",
|
||||
"name": "fake-pod-1-22gck",
|
||||
"resources": {},
|
||||
"terminationMessagePath": "/dev/termination-log",
|
||||
"terminationMessagePolicy": "File",
|
||||
"volumeMounts": [
|
||||
{
|
||||
"mountPath": "/etc/foo",
|
||||
"name": "foo",
|
||||
"readOnly": true
|
||||
},
|
||||
{
|
||||
"mountPath": "/var/run/secrets/kubernetes.io/serviceaccount",
|
||||
"name": "kube-api-access-lrpxm",
|
||||
"readOnly": true
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"dnsPolicy": "ClusterFirst",
|
||||
"enableServiceLinks": true,
|
||||
"nodeName": "minikube-yiscah",
|
||||
"preemptionPolicy": "PreemptLowerPriority",
|
||||
"priority": 0,
|
||||
"restartPolicy": "Always",
|
||||
"schedulerName": "default-scheduler",
|
||||
"securityContext": {},
|
||||
"serviceAccount": "default",
|
||||
"serviceAccountName": "default",
|
||||
"terminationGracePeriodSeconds": 30,
|
||||
"tolerations": [
|
||||
{
|
||||
"effect": "NoExecute",
|
||||
"key": "node.kubernetes.io/not-ready",
|
||||
"operator": "Exists",
|
||||
"tolerationSeconds": 300
|
||||
},
|
||||
{
|
||||
"effect": "NoExecute",
|
||||
"key": "node.kubernetes.io/unreachable",
|
||||
"operator": "Exists",
|
||||
"tolerationSeconds": 300
|
||||
}
|
||||
],
|
||||
"volumes": [
|
||||
{
|
||||
"name": "foo",
|
||||
"secret": {
|
||||
"defaultMode": 420,
|
||||
"optional": true,
|
||||
"secretName": "mysecret"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "kube-api-access-lrpxm",
|
||||
"projected": {
|
||||
"defaultMode": 420,
|
||||
"sources": [
|
||||
{
|
||||
"serviceAccountToken": {
|
||||
"expirationSeconds": 3607,
|
||||
"path": "token"
|
||||
}
|
||||
},
|
||||
{
|
||||
"configMap": {
|
||||
"items": [
|
||||
{
|
||||
"key": "ca.crt",
|
||||
"path": "ca.crt"
|
||||
}
|
||||
],
|
||||
"name": "kube-root-ca.crt"
|
||||
}
|
||||
},
|
||||
{
|
||||
"downwardAPI": {
|
||||
"items": [
|
||||
{
|
||||
"fieldRef": {
|
||||
"apiVersion": "v1",
|
||||
"fieldPath": "metadata.namespace"
|
||||
},
|
||||
"path": "namespace"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
"status": {
|
||||
"conditions": [
|
||||
{
|
||||
"lastProbeTime": null,
|
||||
"lastTransitionTime": "2023-06-22T07:47:38Z",
|
||||
"status": "True",
|
||||
"type": "Initialized"
|
||||
},
|
||||
{
|
||||
"lastProbeTime": null,
|
||||
"lastTransitionTime": "2023-07-18T05:07:57Z",
|
||||
"status": "True",
|
||||
"type": "Ready"
|
||||
},
|
||||
{
|
||||
"lastProbeTime": null,
|
||||
"lastTransitionTime": "2023-07-18T05:07:57Z",
|
||||
"status": "True",
|
||||
"type": "ContainersReady"
|
||||
},
|
||||
{
|
||||
"lastProbeTime": null,
|
||||
"lastTransitionTime": "2023-06-22T07:47:38Z",
|
||||
"status": "True",
|
||||
"type": "PodScheduled"
|
||||
}
|
||||
],
|
||||
"containerStatuses": [
|
||||
{
|
||||
"containerID": "docker://a3a1aac00031c6ab85f75cfa17d14ebd71ab15f1fc5c82a262449621a77d7a7e",
|
||||
"image": "redis:latest",
|
||||
"imageID": "docker-pullable://redis@sha256:08a82d4bf8a8b4dd94e8f5408cdbad9dd184c1cf311d34176cd3e9972c43f872",
|
||||
"lastState": {
|
||||
"terminated": {
|
||||
"containerID": "docker://1ae623f4faf8cda5dabdc65c342752dfdf1675cb173b46875596c2eb0dae472f",
|
||||
"exitCode": 255,
|
||||
"finishedAt": "2023-07-18T05:03:55Z",
|
||||
"reason": "Error",
|
||||
"startedAt": "2023-07-17T16:32:35Z"
|
||||
}
|
||||
},
|
||||
"name": "fake-pod-1-22gck",
|
||||
"ready": true,
|
||||
"restartCount": 9,
|
||||
"started": true,
|
||||
"state": {
|
||||
"running": {
|
||||
"startedAt": "2023-07-18T05:07:56Z"
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
"hostIP": "192.168.85.2",
|
||||
"phase": "Running",
|
||||
"podIP": "10.244.1.131",
|
||||
"podIPs": [
|
||||
{
|
||||
"ip": "10.244.1.131"
|
||||
}
|
||||
],
|
||||
"qosClass": "BestEffort",
|
||||
"startTime": "2023-06-22T07:47:38Z"
|
||||
}
|
||||
},
|
||||
"/v1/default/Service/fake-service-1": {
|
||||
"apiVersion": "v1",
|
||||
"kind": "Service",
|
||||
"metadata": {
|
||||
"annotations": {
|
||||
"kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"kind\":\"Service\",\"metadata\":{\"annotations\":{},\"name\":\"fake-service-1\",\"namespace\":\"default\"},\"spec\":{\"clusterIP\":\"10.96.0.11\",\"ports\":[{\"port\":80,\"protocol\":\"TCP\",\"targetPort\":9376}],\"selector\":{\"app\":\"argo-server\"},\"type\":\"LoadBalancer\"},\"status\":{\"loadBalancer\":{\"ingress\":[{\"ip\":\"192.0.2.127\"}]}}}\n"
|
||||
},
|
||||
"creationTimestamp": "2023-07-09T06:22:27Z",
|
||||
"name": "fake-service-1",
|
||||
"namespace": "default",
|
||||
"resourceVersion": "981856",
|
||||
"uid": "dd629eb1-6779-4298-a70f-0bdbd046d409"
|
||||
},
|
||||
"spec": {
|
||||
"allocateLoadBalancerNodePorts": true,
|
||||
"clusterIP": "10.96.0.11",
|
||||
"clusterIPs": [
|
||||
"10.96.0.11"
|
||||
],
|
||||
"externalTrafficPolicy": "Cluster",
|
||||
"internalTrafficPolicy": "Cluster",
|
||||
"ipFamilies": [
|
||||
"IPv4"
|
||||
],
|
||||
"ipFamilyPolicy": "SingleStack",
|
||||
"ports": [
|
||||
{
|
||||
"nodePort": 30706,
|
||||
"port": 80,
|
||||
"protocol": "TCP",
|
||||
"targetPort": 9376
|
||||
}
|
||||
],
|
||||
"selector": {
|
||||
"app": "argo-server"
|
||||
},
|
||||
"sessionAffinity": "None",
|
||||
"type": "LoadBalancer"
|
||||
},
|
||||
"status": {
|
||||
"loadBalancer": {}
|
||||
}
|
||||
}
|
||||
}
|
||||
1
core/pkg/opaprocessor/testdata/resourcesResultObjMock.json
vendored
Normal file
1
core/pkg/opaprocessor/testdata/resourcesResultObjMock.json
vendored
Normal file
File diff suppressed because one or more lines are too long
@@ -7,17 +7,19 @@ import (
|
||||
"github.com/kubescape/go-logger/helpers"
|
||||
"github.com/kubescape/kubescape/v2/core/cautils"
|
||||
"github.com/kubescape/opa-utils/reporthandling"
|
||||
"github.com/kubescape/opa-utils/reporthandling/apis"
|
||||
"github.com/kubescape/opa-utils/reporthandling/results/v1/reportsummary"
|
||||
"github.com/open-policy-agent/opa/ast"
|
||||
"github.com/open-policy-agent/opa/rego"
|
||||
"github.com/open-policy-agent/opa/topdown/builtins"
|
||||
"github.com/open-policy-agent/opa/types"
|
||||
"golang.org/x/exp/slices"
|
||||
)
|
||||
|
||||
// ConvertFrameworksToPolicies convert list of frameworks to list of policies
|
||||
func ConvertFrameworksToPolicies(frameworks []reporthandling.Framework, version string) *cautils.Policies {
|
||||
func ConvertFrameworksToPolicies(frameworks []reporthandling.Framework, version string, excludedRules map[string]bool, scanningScope reporthandling.ScanningScopeType) *cautils.Policies {
|
||||
policies := cautils.NewPolicies()
|
||||
policies.Set(frameworks, version)
|
||||
policies.Set(frameworks, version, excludedRules, scanningScope)
|
||||
return policies
|
||||
}
|
||||
|
||||
@@ -37,12 +39,19 @@ func ConvertFrameworksToSummaryDetails(summaryDetails *reportsummary.SummaryDeta
|
||||
ScoreFactor: frameworks[i].Controls[j].BaseScore,
|
||||
Description: frameworks[i].Controls[j].Description,
|
||||
Remediation: frameworks[i].Controls[j].Remediation,
|
||||
Category: frameworks[i].Controls[j].Category,
|
||||
}
|
||||
if frameworks[i].Controls[j].GetActionRequiredAttribute() == string(apis.SubStatusManualReview) {
|
||||
c.Status = apis.StatusSkipped
|
||||
c.StatusInfo.InnerStatus = apis.StatusSkipped
|
||||
c.StatusInfo.SubStatus = apis.SubStatusManualReview
|
||||
c.StatusInfo.InnerInfo = string(apis.SubStatusManualReviewInfo)
|
||||
}
|
||||
controls[frameworks[i].Controls[j].ControlID] = c
|
||||
summaryDetails.Controls[id] = c
|
||||
}
|
||||
}
|
||||
if cautils.StringInSlice(policies.Frameworks, frameworks[i].Name) != cautils.ValueNotFound {
|
||||
if slices.Contains(policies.Frameworks, frameworks[i].Name) {
|
||||
summaryDetails.Frameworks = append(summaryDetails.Frameworks, reportsummary.FrameworkSummary{
|
||||
Name: frameworks[i].Name,
|
||||
Controls: controls,
|
||||
@@ -86,3 +95,17 @@ var cosignHasSignatureDefinition = func(bctx rego.BuiltinContext, a *ast.Term) (
|
||||
}
|
||||
return ast.BooleanTerm(has_signature(string(aStr))), nil
|
||||
}
|
||||
|
||||
var imageNameNormalizeDeclaration = ®o.Function{
|
||||
Name: "image.parse_normalized_name",
|
||||
Decl: types.NewFunction(types.Args(types.S), types.S),
|
||||
Memoize: true,
|
||||
}
|
||||
var imageNameNormalizeDefinition = func(bctx rego.BuiltinContext, a *ast.Term) (*ast.Term, error) {
|
||||
aStr, err := builtins.StringOperand(a.Value, 1)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid parameter type: %v", err)
|
||||
}
|
||||
normalizedName, err := normalize_image_name(string(aStr))
|
||||
return ast.StringTerm(normalizedName), err
|
||||
}
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/kubescape/kubescape/v2/core/cautils"
|
||||
"github.com/kubescape/kubescape/v2/core/mocks"
|
||||
"github.com/kubescape/opa-utils/reporthandling"
|
||||
"github.com/kubescape/opa-utils/reporthandling/results/v1/reportsummary"
|
||||
@@ -13,18 +14,31 @@ import (
|
||||
func TestConvertFrameworksToPolicies(t *testing.T) {
|
||||
fw0 := mocks.MockFramework_0006_0013()
|
||||
fw1 := mocks.MockFramework_0044()
|
||||
policies := ConvertFrameworksToPolicies([]reporthandling.Framework{*fw0, *fw1}, "")
|
||||
scanningScope := cautils.GetScanningScope(&cautils.ScanInfo{InputPatterns: []string{""}})
|
||||
policies := ConvertFrameworksToPolicies([]reporthandling.Framework{*fw0, *fw1}, "", nil, scanningScope)
|
||||
assert.Equal(t, 2, len(policies.Frameworks))
|
||||
assert.Equal(t, 3, len(policies.Controls))
|
||||
|
||||
// with excluded rules map
|
||||
excludedRulesMap := map[string]bool{
|
||||
"alert-rw-hostpath": true,
|
||||
}
|
||||
fw0 = mocks.MockFramework_0006_0013()
|
||||
fw1 = mocks.MockFramework_0044()
|
||||
policies = ConvertFrameworksToPolicies([]reporthandling.Framework{*fw0, *fw1}, "", excludedRulesMap, scanningScope)
|
||||
assert.Equal(t, 2, len(policies.Frameworks))
|
||||
assert.Equal(t, 2, len(policies.Controls))
|
||||
|
||||
}
|
||||
func TestInitializeSummaryDetails(t *testing.T) {
|
||||
fw0 := mocks.MockFramework_0006_0013()
|
||||
fw1 := mocks.MockFramework_0044()
|
||||
scanningScope := cautils.GetScanningScope(&cautils.ScanInfo{InputPatterns: []string{""}})
|
||||
|
||||
summaryDetails := reportsummary.SummaryDetails{}
|
||||
frameworks := []reporthandling.Framework{*fw0, *fw1}
|
||||
policies := ConvertFrameworksToPolicies([]reporthandling.Framework{*fw0, *fw1}, "")
|
||||
policies := ConvertFrameworksToPolicies([]reporthandling.Framework{*fw0, *fw1}, "", nil, scanningScope)
|
||||
ConvertFrameworksToSummaryDetails(&summaryDetails, frameworks, policies)
|
||||
assert.Equal(t, 2, len(summaryDetails.Frameworks))
|
||||
assert.Equal(t, 3, len(summaryDetails.Controls))
|
||||
// assert.Equal(t, 3, len(summaryDetails.Controls))
|
||||
}
|
||||
|
||||
73
core/pkg/policyhandler/cache.go
Normal file
73
core/pkg/policyhandler/cache.go
Normal file
@@ -0,0 +1,73 @@
|
||||
package policyhandler
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// TimedCache provides functionality for managing a timed cache.
|
||||
// The timed cache holds a value for a specified time duration (TTL).
|
||||
// After the TTL has passed, the value is invalidated.
|
||||
//
|
||||
// The cache is thread safe.
|
||||
type TimedCache[T any] struct {
|
||||
value T
|
||||
isSet bool
|
||||
ttl time.Duration
|
||||
expiration int64
|
||||
mutex sync.RWMutex
|
||||
}
|
||||
|
||||
func NewTimedCache[T any](ttl time.Duration) *TimedCache[T] {
|
||||
cache := &TimedCache[T]{
|
||||
ttl: ttl,
|
||||
isSet: false,
|
||||
}
|
||||
|
||||
// start the invalidate task only when the ttl is greater than 0 (cache is enabled)
|
||||
if ttl > 0 {
|
||||
go cache.invalidateTask()
|
||||
}
|
||||
|
||||
return cache
|
||||
}
|
||||
|
||||
func (c *TimedCache[T]) Set(value T) {
|
||||
c.mutex.Lock()
|
||||
defer c.mutex.Unlock()
|
||||
|
||||
// cache is disabled
|
||||
if c.ttl == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
c.isSet = true
|
||||
c.value = value
|
||||
c.expiration = time.Now().Add(c.ttl).UnixNano()
|
||||
}
|
||||
|
||||
func (c *TimedCache[T]) Get() (T, bool) {
|
||||
c.mutex.RLock()
|
||||
defer c.mutex.RUnlock()
|
||||
|
||||
if !c.isSet || time.Now().UnixNano() > c.expiration {
|
||||
return c.value, false
|
||||
}
|
||||
return c.value, true
|
||||
}
|
||||
|
||||
func (c *TimedCache[T]) invalidateTask() {
|
||||
for {
|
||||
<-time.After(c.ttl)
|
||||
if time.Now().UnixNano() > c.expiration {
|
||||
c.Invalidate()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *TimedCache[T]) Invalidate() {
|
||||
c.mutex.Lock()
|
||||
defer c.mutex.Unlock()
|
||||
|
||||
c.isSet = false
|
||||
}
|
||||
75
core/pkg/policyhandler/cache_test.go
Normal file
75
core/pkg/policyhandler/cache_test.go
Normal file
@@ -0,0 +1,75 @@
|
||||
package policyhandler
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestTimedCache(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
// value ttl
|
||||
ttl time.Duration
|
||||
// value to set
|
||||
value int
|
||||
// time to wait before checking if value exists
|
||||
wait time.Duration
|
||||
// number of times to check if value exists (with wait in between)
|
||||
checks int
|
||||
// should the value exist in cache
|
||||
exists bool
|
||||
// expected cache value
|
||||
wantVal int
|
||||
}{
|
||||
{
|
||||
name: "value exists before ttl",
|
||||
ttl: time.Second * 5,
|
||||
value: 42,
|
||||
wait: time.Second * 1,
|
||||
checks: 2,
|
||||
exists: true,
|
||||
wantVal: 42,
|
||||
},
|
||||
{
|
||||
name: "value does not exist after ttl",
|
||||
ttl: time.Second * 3,
|
||||
value: 55,
|
||||
wait: time.Second * 4,
|
||||
checks: 1,
|
||||
exists: false,
|
||||
},
|
||||
{
|
||||
name: "cache is disabled (ttl = 0) always returns false",
|
||||
ttl: 0,
|
||||
value: 55,
|
||||
wait: 0,
|
||||
checks: 1,
|
||||
exists: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cache := NewTimedCache[int](tt.ttl)
|
||||
cache.Set(tt.value)
|
||||
|
||||
for i := 0; i < tt.checks; i++ {
|
||||
// Wait for the specified duration
|
||||
time.Sleep(tt.wait)
|
||||
|
||||
// Get the value from the cache
|
||||
value, exists := cache.Get()
|
||||
|
||||
// Check if value exists
|
||||
if exists != tt.exists {
|
||||
t.Errorf("Expected exists to be %v, got %v", tt.exists, exists)
|
||||
}
|
||||
|
||||
// Check value
|
||||
if exists && value != tt.wantVal {
|
||||
t.Errorf("Expected value to be %d, got %d", tt.wantVal, value)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -2,9 +2,11 @@ package policyhandler
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
logger "github.com/kubescape/go-logger"
|
||||
"github.com/kubescape/go-logger/helpers"
|
||||
"github.com/kubescape/kubescape/v2/core/cautils"
|
||||
@@ -14,46 +16,130 @@ import (
|
||||
"go.opentelemetry.io/otel"
|
||||
)
|
||||
|
||||
func (policyHandler *PolicyHandler) getPolicies(ctx context.Context, policyIdentifier []cautils.PolicyIdentifier, policiesAndResources *cautils.OPASessionObj) error {
|
||||
const (
	// PoliciesCacheTtlEnvVar names the environment variable that configures
	// the TTL of the policy/exception caches; unset means no caching
	// (see NewPolicyHandler).
	PoliciesCacheTtlEnvVar = "POLICIES_CACHE_TTL"
)
|
||||
|
||||
// policyHandlerInstance holds the lazily-created singleton returned by
// NewPolicyHandler.
var policyHandlerInstance *PolicyHandler
|
||||
|
||||
// PolicyHandler loads the policies, exceptions and control inputs needed
// for a scan, optionally serving each kind from its own timed cache.
type PolicyHandler struct {
	// getters supplies the downloaders for frameworks, exceptions and
	// control inputs; assigned on each CollectPolicies call.
	getters *cautils.Getters
	// cachedPolicyIdentifiers remembers which policy identifiers the
	// cached frameworks were downloaded for, so a request for different
	// policies bypasses the framework cache.
	cachedPolicyIdentifiers *TimedCache[[]string]
	// cachedFrameworks caches downloaded scan frameworks.
	cachedFrameworks *TimedCache[[]reporthandling.Framework]
	// cachedExceptions caches downloaded posture exception policies.
	cachedExceptions *TimedCache[[]armotypes.PostureExceptionPolicy]
	// cachedControlInputs caches downloaded posture control inputs.
	cachedControlInputs *TimedCache[map[string][]string]
}
|
||||
|
||||
// NewPolicyHandler creates and returns an instance of the `PolicyHandler`. The function initializes the `PolicyHandler` only if it hasn't been previously created.
// The PolicyHandler supports caching of downloaded policies and exceptions by setting the `POLICIES_CACHE_TTL` environment variable (default is no caching).
//
// NOTE(review): the lazy nil-check below is not goroutine-safe; concurrent
// first calls could each construct an instance. Confirm callers are
// single-threaded, or guard the initialization with sync.Once.
func NewPolicyHandler() *PolicyHandler {
	if policyHandlerInstance == nil {
		// getPoliciesCacheTtl presumably derives the TTL from the
		// PoliciesCacheTtlEnvVar environment variable (not visible here);
		// a TTL of 0 disables the TimedCaches.
		cacheTtl := getPoliciesCacheTtl()
		policyHandlerInstance = &PolicyHandler{
			cachedPolicyIdentifiers: NewTimedCache[[]string](cacheTtl),
			cachedFrameworks:        NewTimedCache[[]reporthandling.Framework](cacheTtl),
			cachedExceptions:        NewTimedCache[[]armotypes.PostureExceptionPolicy](cacheTtl),
			cachedControlInputs:     NewTimedCache[map[string][]string](cacheTtl),
		}
	}
	return policyHandlerInstance
}
|
||||
|
||||
func (policyHandler *PolicyHandler) CollectPolicies(ctx context.Context, policyIdentifier []cautils.PolicyIdentifier, scanInfo *cautils.ScanInfo) (*cautils.OPASessionObj, error) {
|
||||
opaSessionObj := cautils.NewOPASessionObj(ctx, nil, nil, scanInfo)
|
||||
|
||||
policyHandler.getters = &scanInfo.Getters
|
||||
|
||||
// get policies, exceptions and controls inputs
|
||||
policies, exceptions, controlInputs, err := policyHandler.getPolicies(ctx, policyIdentifier)
|
||||
if err != nil {
|
||||
return opaSessionObj, err
|
||||
}
|
||||
|
||||
opaSessionObj.Policies = policies
|
||||
opaSessionObj.Exceptions = exceptions
|
||||
opaSessionObj.RegoInputData.PostureControlInputs = controlInputs
|
||||
|
||||
return opaSessionObj, nil
|
||||
}
|
||||
|
||||
func (policyHandler *PolicyHandler) getPolicies(ctx context.Context, policyIdentifier []cautils.PolicyIdentifier) (policies []reporthandling.Framework, exceptions []armotypes.PostureExceptionPolicy, controlInputs map[string][]string, err error) {
|
||||
ctx, span := otel.Tracer("").Start(ctx, "policyHandler.getPolicies")
|
||||
defer span.End()
|
||||
logger.L().Info("Downloading/Loading policy definitions")
|
||||
|
||||
cautils.StartSpinner()
|
||||
defer cautils.StopSpinner()
|
||||
logger.L().Start("Loading policies")
|
||||
|
||||
policies, err := policyHandler.getScanPolicies(ctx, policyIdentifier)
|
||||
// get policies
|
||||
policies, err = policyHandler.getScanPolicies(ctx, policyIdentifier)
|
||||
if err != nil {
|
||||
return err
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
if len(policies) == 0 {
|
||||
return fmt.Errorf("failed to download policies: '%s'. Make sure the policy exist and you spelled it correctly. For more information, please feel free to contact ARMO team", strings.Join(policyIdentifierToSlice(policyIdentifier), ", "))
|
||||
return nil, nil, nil, fmt.Errorf("failed to download policies: '%s'. Make sure the policy exist and you spelled it correctly. For more information, please feel free to contact ARMO team", strings.Join(policyIdentifierToSlice(policyIdentifier), ", "))
|
||||
}
|
||||
|
||||
policiesAndResources.Policies = policies
|
||||
logger.L().StopSuccess("Loaded policies")
|
||||
logger.L().Start("Loading exceptions")
|
||||
|
||||
// get exceptions
|
||||
exceptionPolicies, err := policyHandler.getters.ExceptionsGetter.GetExceptions(cautils.ClusterName)
|
||||
if err == nil {
|
||||
policiesAndResources.Exceptions = exceptionPolicies
|
||||
} else {
|
||||
if exceptions, err = policyHandler.getExceptions(); err != nil {
|
||||
logger.L().Ctx(ctx).Warning("failed to load exceptions", helpers.Error(err))
|
||||
}
|
||||
|
||||
logger.L().StopSuccess("Loaded exceptions")
|
||||
logger.L().Start("Loading account configurations")
|
||||
|
||||
// get account configuration
|
||||
controlsInputs, err := policyHandler.getters.ControlsInputsGetter.GetControlsInputs(cautils.ClusterName)
|
||||
if err == nil {
|
||||
policiesAndResources.RegoInputData.PostureControlInputs = controlsInputs
|
||||
} else {
|
||||
if controlInputs, err = policyHandler.getControlInputs(); err != nil {
|
||||
logger.L().Ctx(ctx).Warning(err.Error())
|
||||
}
|
||||
cautils.StopSpinner()
|
||||
|
||||
logger.L().Success("Downloaded/Loaded policy")
|
||||
return nil
|
||||
logger.L().StopSuccess("Loaded account configurations")
|
||||
|
||||
return policies, exceptions, controlInputs, nil
|
||||
}
|
||||
|
||||
// getScanPolicies - get policies from cache or downloads them. The function returns an error if the policies could not be downloaded.
|
||||
func (policyHandler *PolicyHandler) getScanPolicies(ctx context.Context, policyIdentifier []cautils.PolicyIdentifier) ([]reporthandling.Framework, error) {
|
||||
policyIdentifiersSlice := policyIdentifierToSlice(policyIdentifier)
|
||||
// check if policies are cached
|
||||
if cachedPolicies, policiesExist := policyHandler.cachedFrameworks.Get(); policiesExist {
|
||||
// check if the cached policies are the same as the requested policies, otherwise download the policies
|
||||
if cachedIdentifiers, identifiersExist := policyHandler.cachedPolicyIdentifiers.Get(); identifiersExist && cautils.StringSlicesAreEqual(cachedIdentifiers, policyIdentifiersSlice) {
|
||||
logger.L().Info("Using cached policies")
|
||||
return deepCopyPolicies(cachedPolicies)
|
||||
}
|
||||
|
||||
logger.L().Debug("Cached policies are not the same as the requested policies")
|
||||
policyHandler.cachedPolicyIdentifiers.Invalidate()
|
||||
policyHandler.cachedFrameworks.Invalidate()
|
||||
}
|
||||
|
||||
policies, err := policyHandler.downloadScanPolicies(ctx, policyIdentifier)
|
||||
if err == nil {
|
||||
policyHandler.cachedFrameworks.Set(policies)
|
||||
policyHandler.cachedPolicyIdentifiers.Set(policyIdentifiersSlice)
|
||||
}
|
||||
|
||||
return policies, err
|
||||
}
|
||||
|
||||
func deepCopyPolicies(src []reporthandling.Framework) ([]reporthandling.Framework, error) {
|
||||
data, err := json.Marshal(src)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var dst []reporthandling.Framework
|
||||
err = json.Unmarshal(data, &dst)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return dst, nil
|
||||
}
|
||||
|
||||
func (policyHandler *PolicyHandler) downloadScanPolicies(ctx context.Context, policyIdentifier []cautils.PolicyIdentifier) ([]reporthandling.Framework, error) {
|
||||
frameworks := []reporthandling.Framework{}
|
||||
|
||||
switch getScanKind(policyIdentifier) {
|
||||
@@ -102,10 +188,30 @@ func (policyHandler *PolicyHandler) getScanPolicies(ctx context.Context, policyI
|
||||
return frameworks, nil
|
||||
}
|
||||
|
||||
func policyIdentifierToSlice(rules []cautils.PolicyIdentifier) []string {
|
||||
s := []string{}
|
||||
for i := range rules {
|
||||
s = append(s, fmt.Sprintf("%s: %s", rules[i].Kind, rules[i].Identifier))
|
||||
func (policyHandler *PolicyHandler) getExceptions() ([]armotypes.PostureExceptionPolicy, error) {
|
||||
if cachedExceptions, exist := policyHandler.cachedExceptions.Get(); exist {
|
||||
logger.L().Info("Using cached exceptions")
|
||||
return cachedExceptions, nil
|
||||
}
|
||||
return s
|
||||
|
||||
exceptions, err := policyHandler.getters.ExceptionsGetter.GetExceptions(cautils.ClusterName)
|
||||
if err == nil {
|
||||
policyHandler.cachedExceptions.Set(exceptions)
|
||||
}
|
||||
|
||||
return exceptions, err
|
||||
}
|
||||
|
||||
func (policyHandler *PolicyHandler) getControlInputs() (map[string][]string, error) {
|
||||
if cachedControlInputs, exist := policyHandler.cachedControlInputs.Get(); exist {
|
||||
logger.L().Info("Using cached control inputs")
|
||||
return cachedControlInputs, nil
|
||||
}
|
||||
|
||||
controlInputs, err := policyHandler.getters.ControlsInputsGetter.GetControlsInputs(cautils.ClusterName)
|
||||
if err == nil {
|
||||
policyHandler.cachedControlInputs.Set(controlInputs)
|
||||
}
|
||||
|
||||
return controlInputs, err
|
||||
}
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user