mirror of
https://github.com/kubescape/kubescape.git
synced 2026-02-15 02:20:03 +00:00
Compare commits
64 Commits
v2.3.8
...
github-act
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
e09bb2e310 | ||
|
|
f7b3cdcf35 | ||
|
|
d6a47a82d2 | ||
|
|
936cb26c06 | ||
|
|
9265a5d6d0 | ||
|
|
e6f5c7e0dd | ||
|
|
4e48148d40 | ||
|
|
3648ef286d | ||
|
|
d946662e57 | ||
|
|
51b37d5cbf | ||
|
|
9afae713ba | ||
|
|
1d64522607 | ||
|
|
225a923006 | ||
|
|
6c1a3fb89b | ||
|
|
df5f7db51d | ||
|
|
35c593a624 | ||
|
|
869f0ea109 | ||
|
|
cf08daf7fb | ||
|
|
266029eb23 | ||
|
|
4c9fec8ef4 | ||
|
|
6f07e63d3f | ||
|
|
addd66bf72 | ||
|
|
e2f96200e0 | ||
|
|
f799b63684 | ||
|
|
a088219954 | ||
|
|
1a2e16b895 | ||
|
|
7444acae11 | ||
|
|
8294694e09 | ||
|
|
12d7f18b79 | ||
|
|
83279484bd | ||
|
|
ba134ebc32 | ||
|
|
b44f0a76c9 | ||
|
|
226b4772a2 | ||
|
|
5379b9b0a6 | ||
|
|
98f68d8097 | ||
|
|
f8057b5c79 | ||
|
|
f36d8c31b0 | ||
|
|
3abf18acb7 | ||
|
|
28200b2744 | ||
|
|
678f21e33c | ||
|
|
467a84ddac | ||
|
|
925145724e | ||
|
|
e3677fc45c | ||
|
|
704de5bfc1 | ||
|
|
2494c1971c | ||
|
|
3b8bd7735e | ||
|
|
602591e7f2 | ||
|
|
e276e54d2b | ||
|
|
0c019819ff | ||
|
|
d9e946cf6d | ||
|
|
e3a8ebfe05 | ||
|
|
fd3703b21b | ||
|
|
6bcdda7d56 | ||
|
|
981430d65f | ||
|
|
e91ec69832 | ||
|
|
bbfa5d356a | ||
|
|
d2af7f47db | ||
|
|
d28afcb00c | ||
|
|
ca6bdb0bef | ||
|
|
e424bfa81b | ||
|
|
a37b1f7319 | ||
|
|
b730ef5154 | ||
|
|
3280173e95 | ||
|
|
d55a74c6b2 |
2
.dockerignore
Normal file
2
.dockerignore
Normal file
@@ -0,0 +1,2 @@
|
||||
git2go
|
||||
kubescape
|
||||
18
.github/workflows/00-pr-scanner.yaml
vendored
18
.github/workflows/00-pr-scanner.yaml
vendored
@@ -2,12 +2,9 @@ name: 00-pr_scanner
|
||||
on:
|
||||
pull_request:
|
||||
types: [opened, reopened, synchronize, ready_for_review]
|
||||
branches:
|
||||
- 'master'
|
||||
- 'main'
|
||||
- 'dev'
|
||||
paths-ignore:
|
||||
- '**.yaml'
|
||||
- '**.yml'
|
||||
- '**.md'
|
||||
- '**.sh'
|
||||
- 'website/*'
|
||||
@@ -29,3 +26,16 @@ jobs:
|
||||
RELEASE: ""
|
||||
CLIENT: test
|
||||
secrets: inherit
|
||||
|
||||
binary-build:
|
||||
uses: ./.github/workflows/b-binary-build-and-e2e-tests.yaml
|
||||
with:
|
||||
COMPONENT_NAME: kubescape
|
||||
CGO_ENABLED: 1
|
||||
GO111MODULE: ""
|
||||
GO_VERSION: "1.20"
|
||||
RELEASE: ""
|
||||
CLIENT: test
|
||||
ARCH_MATRIX: '[ "" ]'
|
||||
OS_MATRIX: '[ "ubuntu-20.04" ]'
|
||||
secrets: inherit
|
||||
|
||||
34
.github/workflows/01-pr-merged.yaml
vendored
34
.github/workflows/01-pr-merged.yaml
vendored
@@ -1,34 +0,0 @@
|
||||
name: 01-pr-merged
|
||||
|
||||
on:
|
||||
pull_request_target:
|
||||
types: [closed]
|
||||
branches:
|
||||
- 'master'
|
||||
- 'main'
|
||||
paths-ignore:
|
||||
- '**.yaml'
|
||||
- '**.md'
|
||||
- '**.sh'
|
||||
- 'website/*'
|
||||
- 'examples/*'
|
||||
- 'docs/*'
|
||||
- 'build/*'
|
||||
- '.github/*'
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
binary-build:
|
||||
if: ${{ github.event.pull_request.merged == true && contains( github.event.pull_request.labels.*.name, 'trigger-integration-test') && github.event.pull_request.base.ref == 'master' }} ## run only if labeled as "trigger-integration-test" and base branch is master
|
||||
uses: ./.github/workflows/b-binary-build-and-e2e-tests.yaml
|
||||
with:
|
||||
COMPONENT_NAME: kubescape
|
||||
CGO_ENABLED: 1
|
||||
GO111MODULE: ""
|
||||
GO_VERSION: "1.20"
|
||||
RELEASE: ""
|
||||
CLIENT: test
|
||||
secrets: inherit
|
||||
12
.github/workflows/a-pr-scanner.yaml
vendored
12
.github/workflows/a-pr-scanner.yaml
vendored
@@ -87,15 +87,3 @@ jobs:
|
||||
- Credentials scan: ${{ steps.credentials-scan.outcome }}
|
||||
- Vulnerabilities scan: ${{ steps.vulnerabilities-scan.outcome }}
|
||||
reactions: 'eyes'
|
||||
basic-tests:
|
||||
needs: scanners
|
||||
uses: ./.github/workflows/b-binary-build-and-e2e-tests.yaml
|
||||
with:
|
||||
COMPONENT_NAME: kubescape
|
||||
CGO_ENABLED: 1
|
||||
GO111MODULE: ""
|
||||
GO_VERSION: "1.20"
|
||||
RELEASE: ${{ inputs.RELEASE }}
|
||||
CLIENT: ${{ inputs.CLIENT }}
|
||||
CHECKOUT_REPO: ${{ github.repository }}
|
||||
secrets: inherit
|
||||
|
||||
@@ -1,5 +1,45 @@
|
||||
name: b-binary-build-and-e2e-tests
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
COMPONENT_NAME:
|
||||
required: false
|
||||
type: string
|
||||
default: "kubescape"
|
||||
RELEASE:
|
||||
required: false
|
||||
type: string
|
||||
default: ""
|
||||
CLIENT:
|
||||
required: false
|
||||
type: string
|
||||
default: "test"
|
||||
GO_VERSION:
|
||||
required: false
|
||||
type: string
|
||||
default: "1.20"
|
||||
GO111MODULE:
|
||||
required: false
|
||||
type: string
|
||||
default: ""
|
||||
CGO_ENABLED:
|
||||
type: number
|
||||
default: 1
|
||||
required: false
|
||||
OS_MATRIX:
|
||||
type: string
|
||||
required: false
|
||||
default: '[ "ubuntu-20.04", "macos-latest", "windows-latest"]'
|
||||
ARCH_MATRIX:
|
||||
type: string
|
||||
required: false
|
||||
default: '[ "", "arm64"]'
|
||||
BINARY_TESTS:
|
||||
type: string
|
||||
required: false
|
||||
default: '[ "scan_nsa", "scan_mitre", "scan_with_exceptions", "scan_repository", "scan_local_file", "scan_local_glob_files", "scan_local_list_of_files", "scan_nsa_and_submit_to_backend", "scan_mitre_and_submit_to_backend", "scan_local_repository_and_submit_to_backend", "scan_repository_from_url_and_submit_to_backend", "scan_with_exception_to_backend", "scan_with_custom_framework", "scan_customer_configuration", "host_scanner", "scan_compliance_score" ]'
|
||||
|
||||
workflow_call:
|
||||
inputs:
|
||||
COMPONENT_NAME:
|
||||
@@ -22,20 +62,26 @@ on:
|
||||
default: 1
|
||||
BINARY_TESTS:
|
||||
type: string
|
||||
default: '[ "scan_nsa", "scan_mitre", "scan_with_exceptions", "scan_repository", "scan_local_file", "scan_local_glob_files", "scan_local_list_of_files", "scan_nsa_and_submit_to_backend", "scan_mitre_and_submit_to_backend", "scan_local_repository_and_submit_to_backend", "scan_repository_from_url_and_submit_to_backend", "scan_with_exception_to_backend", "scan_with_custom_framework", "scan_customer_configuration", "host_scanner", "scan_compliance_score" ]'
|
||||
CHECKOUT_REPO:
|
||||
required: false
|
||||
default: '[ "scan_nsa", "scan_mitre", "scan_with_exceptions", "scan_repository", "scan_local_file", "scan_local_glob_files", "scan_local_list_of_files", "scan_nsa_and_submit_to_backend", "scan_mitre_and_submit_to_backend", "scan_local_repository_and_submit_to_backend", "scan_repository_from_url_and_submit_to_backend", "scan_with_exception_to_backend", "scan_with_custom_framework", "scan_customer_configuration", "host_scanner", "scan_compliance_score", "scan_custom_framework_scanning_file_scope_testing", "scan_custom_framework_scanning_cluster_scope_testing", "scan_custom_framework_scanning_cluster_and_file_scope_testing", "unified_configuration_config_view", "unified_configuration_config_set", "unified_configuration_config_delete" ]'
|
||||
OS_MATRIX:
|
||||
type: string
|
||||
|
||||
|
||||
|
||||
required: false
|
||||
default: '[ "ubuntu-20.04", "macos-latest", "windows-latest"]'
|
||||
ARCH_MATRIX:
|
||||
type: string
|
||||
required: false
|
||||
default: '[ "", "arm64"]'
|
||||
|
||||
jobs:
|
||||
wf-preparation:
|
||||
name: secret-validator
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
TEST_NAMES: ${{ steps.export_tests_to_env.outputs.TEST_NAMES }}
|
||||
OS_MATRIX: ${{ steps.export_os_to_env.outputs.OS_MATRIX }}
|
||||
ARCH_MATRIX: ${{ steps.export_arch_to_env.outputs.ARCH_MATRIX }}
|
||||
is-secret-set: ${{ steps.check-secret-set.outputs.is-secret-set }}
|
||||
|
||||
steps:
|
||||
- name: check if the necessary secrets are set in github secrets
|
||||
id: check-secret-set
|
||||
@@ -49,24 +95,39 @@ jobs:
|
||||
REGISTRY_PASSWORD: ${{ secrets.REGISTRY_PASSWORD }}
|
||||
run: "echo \"is-secret-set=${{ env.CUSTOMER != '' && \n env.USERNAME != '' &&\n env.PASSWORD != '' &&\n env.CLIENT_ID != '' &&\n env.SECRET_KEY != '' &&\n env.REGISTRY_USERNAME != '' &&\n env.REGISTRY_PASSWORD != ''\n }}\" >> $GITHUB_OUTPUT\n"
|
||||
|
||||
- id: export_os_to_env
|
||||
name: set test name
|
||||
run: |
|
||||
echo "OS_MATRIX=$input" >> $GITHUB_OUTPUT
|
||||
env:
|
||||
input: ${{ inputs.OS_MATRIX }}
|
||||
|
||||
- id: export_tests_to_env
|
||||
name: set test name
|
||||
run: |
|
||||
echo "TEST_NAMES=$input" >> $GITHUB_OUTPUT
|
||||
env:
|
||||
input: ${{ inputs.BINARY_TESTS }}
|
||||
|
||||
|
||||
- id: export_arch_to_env
|
||||
name: set test name
|
||||
run: |
|
||||
echo "ARCH_MATRIX=$input" >> $GITHUB_OUTPUT
|
||||
env:
|
||||
input: ${{ inputs.ARCH_MATRIX }}
|
||||
|
||||
|
||||
binary-build:
|
||||
name: Create cross-platform build
|
||||
needs: wf-preparation
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
GOARCH: ${{ matrix.arch }}
|
||||
runs-on: ${{ matrix.os }}
|
||||
strategy:
|
||||
matrix:
|
||||
os: [ubuntu-20.04, macos-latest, windows-latest]
|
||||
arch: ["", arm64]
|
||||
os: ${{ fromJson(needs.wf-preparation.outputs.OS_MATRIX) }}
|
||||
arch: ${{ fromJson(needs.wf-preparation.outputs.ARCH_MATRIX) }}
|
||||
exclude:
|
||||
- os: windows-latest
|
||||
arch: arm64
|
||||
@@ -74,7 +135,6 @@ jobs:
|
||||
|
||||
- uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # ratchet:actions/checkout@v3
|
||||
with:
|
||||
repository: ${{inputs.CHECKOUT_REPO}}
|
||||
fetch-depth: 0
|
||||
submodules: recursive
|
||||
|
||||
@@ -135,8 +195,8 @@ jobs:
|
||||
if: matrix.os == 'ubuntu-20.04' && matrix.arch != ''
|
||||
|
||||
- name: Install MSYS2 & libgit2 (Windows)
|
||||
shell: cmd
|
||||
run: .\build.bat all
|
||||
shell: pwsh
|
||||
run: .\build.ps1 all
|
||||
if: matrix.os == 'windows-latest'
|
||||
|
||||
- name: Install pkg-config (macOS)
|
||||
|
||||
34
.github/workflows/build-image.yaml
vendored
Normal file
34
.github/workflows/build-image.yaml
vendored
Normal file
@@ -0,0 +1,34 @@
|
||||
name: build-image
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
CLIENT:
|
||||
required: false
|
||||
type: string
|
||||
default: "test"
|
||||
IMAGE_TAG:
|
||||
required: true
|
||||
type: string
|
||||
CO_SIGN:
|
||||
type: boolean
|
||||
required: false
|
||||
default: false
|
||||
PLATFORMS:
|
||||
type: boolean
|
||||
required: false
|
||||
default: false
|
||||
jobs:
|
||||
publish-image:
|
||||
permissions:
|
||||
id-token: write
|
||||
packages: write
|
||||
contents: read
|
||||
uses: ./.github/workflows/d-publish-image.yaml
|
||||
with:
|
||||
client: ${{ inputs.CLIENT }}
|
||||
image_name: "quay.io/${{ github.repository_owner }}/kubescape"
|
||||
image_tag: ${{ inputs.IMAGE_TAG }}
|
||||
support_platforms: ${{ inputs.PLATFORMS }}
|
||||
cosign: ${{ inputs.CO_SIGN }}
|
||||
secrets: inherit
|
||||
30
.github/workflows/codesee-arch-diagram.yml
vendored
Normal file
30
.github/workflows/codesee-arch-diagram.yml
vendored
Normal file
@@ -0,0 +1,30 @@
|
||||
# This workflow was added by CodeSee. Learn more at https://codesee.io/
|
||||
# This is v2.0 of this workflow file
|
||||
on:
|
||||
pull_request_target:
|
||||
types: [opened, synchronize, reopened]
|
||||
paths-ignore:
|
||||
- '**.yaml'
|
||||
- '**.yml'
|
||||
- '**.md'
|
||||
- '**.sh'
|
||||
- 'website/*'
|
||||
- 'examples/*'
|
||||
- 'docs/*'
|
||||
- 'build/*'
|
||||
- '.github/*'
|
||||
|
||||
name: CodeSee
|
||||
|
||||
permissions: read-all
|
||||
|
||||
jobs:
|
||||
codesee:
|
||||
runs-on: ubuntu-latest
|
||||
continue-on-error: true
|
||||
name: Analyze the repo with CodeSee
|
||||
steps:
|
||||
- uses: Codesee-io/codesee-action@v2
|
||||
with:
|
||||
codesee-token: ${{ secrets.CODESEE_ARCH_DIAG_API_TOKEN }}
|
||||
codesee-url: https://app.codesee.io
|
||||
23
.github/workflows/comments.yaml
vendored
Normal file
23
.github/workflows/comments.yaml
vendored
Normal file
@@ -0,0 +1,23 @@
|
||||
name: pr-agent
|
||||
|
||||
on:
|
||||
issue_comment:
|
||||
|
||||
permissions:
|
||||
issues: write
|
||||
pull-requests: write
|
||||
|
||||
jobs:
|
||||
pr_agent:
|
||||
runs-on: ubuntu-latest
|
||||
name: Run pr agent on every pull request, respond to user comments
|
||||
steps:
|
||||
- name: PR Agent action step
|
||||
continue-on-error: true
|
||||
id: pragent
|
||||
uses: Codium-ai/pr-agent@main
|
||||
env:
|
||||
OPENAI_KEY: ${{ secrets.OPENAI_KEY }}
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
|
||||
51
build.bat
51
build.bat
@@ -1,51 +0,0 @@
|
||||
@ECHO OFF
|
||||
|
||||
IF "%1"=="install" goto Install
|
||||
IF "%1"=="build" goto Build
|
||||
IF "%1"=="all" goto All
|
||||
IF "%1"=="" goto Error ELSE goto Error
|
||||
|
||||
:Install
|
||||
|
||||
if exist C:\MSYS64\ (
|
||||
echo "MSYS2 already installed"
|
||||
) else (
|
||||
mkdir temp_install & cd temp_install
|
||||
|
||||
echo "Downloading MSYS2..."
|
||||
curl -L https://github.com/msys2/msys2-installer/releases/download/2022-06-03/msys2-x86_64-20220603.exe > msys2-x86_64-20220603.exe
|
||||
|
||||
echo "Installing MSYS2..."
|
||||
msys2-x86_64-20220603.exe install --root C:\MSYS64 --confirm-command
|
||||
|
||||
cd .. && rmdir /s /q temp_install
|
||||
)
|
||||
|
||||
|
||||
echo "Adding MSYS2 to path..."
|
||||
SET "PATH=C:\MSYS64\mingw64\bin;C:\MSYS64\usr\bin;%PATH%"
|
||||
echo %PATH%
|
||||
|
||||
echo "Installing MSYS2 packages..."
|
||||
pacman -S --needed --noconfirm make
|
||||
pacman -S --needed --noconfirm mingw-w64-x86_64-cmake
|
||||
pacman -S --needed --noconfirm mingw-w64-x86_64-gcc
|
||||
pacman -S --needed --noconfirm mingw-w64-x86_64-pkg-config
|
||||
pacman -S --needed --noconfirm msys2-w32api-runtime
|
||||
|
||||
IF "%1"=="all" GOTO Build
|
||||
GOTO End
|
||||
|
||||
:Build
|
||||
SET "PATH=C:\MSYS2\mingw64\bin;C:\MSYS2\usr\bin;%PATH%"
|
||||
make libgit2
|
||||
GOTO End
|
||||
|
||||
:All
|
||||
GOTO Install
|
||||
|
||||
:Error
|
||||
echo "Error: Unknown option"
|
||||
GOTO End
|
||||
|
||||
:End
|
||||
27
build.ps1
27
build.ps1
@@ -30,24 +30,25 @@ function Install {
|
||||
|
||||
# Install MSYS
|
||||
Write-Host "Installing MSYS2..." -ForegroundColor Cyan
|
||||
Start-Process -Filepath "$PSScriptRoot\temp_install\msys2-x86_64-20220603.exe" -ArgumentList @("install", "--root", "C:\MSYS64", "--confirm-command") -Wait -NoNewWindow
|
||||
Start-Process -Filepath "$PSScriptRoot\temp_install\msys2-x86_64-20220603.exe" -ArgumentList @("install", "--root", "C:\MSYS64", "--confirm-command") -Wait
|
||||
Write-Host "MSYS2 install complete" -ForegroundColor Green
|
||||
|
||||
# Set PATH
|
||||
$env:Path = "C:\MSYS64\mingw64\bin;C:\MSYS64\usr\bin;" + $env:Path
|
||||
|
||||
# Install MSYS packages
|
||||
Write-Host "Installing MSYS2 packages..." -ForegroundColor Cyan
|
||||
Start-Process -Filepath "pacman" -ArgumentList @("-S", "--needed", "--noconfirm", "make") -Wait -NoNewWindow
|
||||
Start-Process -Filepath "pacman" -ArgumentList @("-S", "--needed", "--noconfirm", "mingw-w64-x86_64-cmake") -Wait -NoNewWindow
|
||||
Start-Process -Filepath "pacman" -ArgumentList @("-S", "--needed", "--noconfirm", "mingw-w64-x86_64-gcc") -Wait -NoNewWindow
|
||||
Start-Process -Filepath "pacman" -ArgumentList @("-S", "--needed", "--noconfirm", "mingw-w64-x86_64-pkg-config") -Wait -NoNewWindow
|
||||
Start-Process -Filepath "pacman" -ArgumentList @("-S", "--needed", "--noconfirm", "msys2-w32api-runtime") -Wait -NoNewWindow
|
||||
Write-Host "MSYS2 packages install complete" -ForegroundColor Green
|
||||
|
||||
# Remove temp directory
|
||||
Remove-Item "$PSScriptRoot\temp_install" -Recurse
|
||||
}
|
||||
|
||||
# Set PATH
|
||||
$env:Path = "C:\MSYS64\mingw64\bin;C:\MSYS64\usr\bin;" + $env:Path
|
||||
|
||||
# Install MSYS packages
|
||||
Write-Host "Installing MSYS2 packages..." -ForegroundColor Cyan
|
||||
Start-Process -Filepath "pacman" -ArgumentList @("-S", "--needed", "--noconfirm", "make") -Wait
|
||||
Start-Process -Filepath "pacman" -ArgumentList @("-S", "--needed", "--noconfirm", "mingw-w64-x86_64-cmake") -Wait
|
||||
Start-Process -Filepath "pacman" -ArgumentList @("-S", "--needed", "--noconfirm", "mingw-w64-x86_64-gcc") -Wait
|
||||
Start-Process -Filepath "pacman" -ArgumentList @("-S", "--needed", "--noconfirm", "mingw-w64-x86_64-pkg-config") -Wait
|
||||
Start-Process -Filepath "pacman" -ArgumentList @("-S", "--needed", "--noconfirm", "msys2-w32api-runtime") -Wait
|
||||
Write-Host "MSYS2 packages install complete" -ForegroundColor Green
|
||||
|
||||
Write-Host "Install complete" -ForegroundColor Green
|
||||
}
|
||||
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
"github.com/kubescape/kubescape/v2/core/meta"
|
||||
v1 "github.com/kubescape/kubescape/v2/core/meta/datastructures/v1"
|
||||
"github.com/spf13/cobra"
|
||||
"golang.org/x/exp/slices"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -55,7 +56,7 @@ func GetDownloadCmd(ks meta.IKubescape) *cobra.Command {
|
||||
if len(args) < 1 {
|
||||
return fmt.Errorf("policy type required, supported: %v", supported)
|
||||
}
|
||||
if cautils.StringInSlice(core.DownloadSupportCommands(), args[0]) == cautils.ValueNotFound {
|
||||
if !slices.Contains(core.DownloadSupportCommands(), args[0]) {
|
||||
return fmt.Errorf("invalid parameter '%s'. Supported parameters: %s", args[0], supported)
|
||||
}
|
||||
return nil
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
"github.com/kubescape/kubescape/v2/core/meta"
|
||||
v1 "github.com/kubescape/kubescape/v2/core/meta/datastructures/v1"
|
||||
"github.com/spf13/cobra"
|
||||
"golang.org/x/exp/slices"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -43,7 +44,7 @@ func GetListCmd(ks meta.IKubescape) *cobra.Command {
|
||||
if len(args) < 1 {
|
||||
return fmt.Errorf("policy type requeued, supported: %s", supported)
|
||||
}
|
||||
if cautils.StringInSlice(core.ListSupportActions(), args[0]) == cautils.ValueNotFound {
|
||||
if !slices.Contains(core.ListSupportActions(), args[0]) {
|
||||
return fmt.Errorf("invalid parameter '%s'. Supported parameters: %s", args[0], supported)
|
||||
}
|
||||
return nil
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
apisv1 "github.com/kubescape/opa-utils/httpserver/apis/v1"
|
||||
reporthandlingapis "github.com/kubescape/opa-utils/reporthandling/apis"
|
||||
"github.com/kubescape/opa-utils/reporthandling/results/v1/reportsummary"
|
||||
"golang.org/x/exp/slices"
|
||||
|
||||
logger "github.com/kubescape/go-logger"
|
||||
"github.com/kubescape/go-logger/helpers"
|
||||
@@ -78,14 +79,15 @@ func getFrameworkCmd(ks meta.IKubescape, scanInfo *cautils.ScanInfo) *cobra.Comm
|
||||
|
||||
var frameworks []string
|
||||
|
||||
if len(args) == 0 { // scan all frameworks
|
||||
if len(args) == 0 {
|
||||
scanInfo.ScanAll = true
|
||||
} else {
|
||||
// Read frameworks from input args
|
||||
frameworks = strings.Split(args[0], ",")
|
||||
if cautils.StringInSlice(frameworks, "all") != cautils.ValueNotFound {
|
||||
if slices.Contains(frameworks, "all") {
|
||||
scanInfo.ScanAll = true
|
||||
frameworks = getter.NativeFrameworks
|
||||
|
||||
}
|
||||
if len(args) > 1 {
|
||||
if len(args[1:]) == 0 || args[1] != "-" {
|
||||
@@ -105,6 +107,7 @@ func getFrameworkCmd(ks meta.IKubescape, scanInfo *cautils.ScanInfo) *cobra.Comm
|
||||
}
|
||||
}
|
||||
}
|
||||
scanInfo.SetScanType(cautils.ScanTypeFramework)
|
||||
scanInfo.FrameworkScan = true
|
||||
|
||||
scanInfo.SetPolicyIdentifiers(frameworks, apisv1.KindFramework)
|
||||
@@ -118,7 +121,8 @@ func getFrameworkCmd(ks meta.IKubescape, scanInfo *cautils.ScanInfo) *cobra.Comm
|
||||
if err = results.HandleResults(ctx); err != nil {
|
||||
logger.L().Fatal(err.Error())
|
||||
}
|
||||
if !scanInfo.VerboseMode {
|
||||
|
||||
if !scanInfo.VerboseMode && scanInfo.ScanType == cautils.ScanTypeFramework {
|
||||
logger.L().Info("Run with '--verbose'/'-v' flag for detailed resources view\n")
|
||||
}
|
||||
if results.GetRiskScore() > float32(scanInfo.FailThreshold) {
|
||||
|
||||
117
cmd/scan/image.go
Normal file
117
cmd/scan/image.go
Normal file
@@ -0,0 +1,117 @@
|
||||
package scan
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
logger "github.com/kubescape/go-logger"
|
||||
"github.com/kubescape/go-logger/iconlogger"
|
||||
"github.com/kubescape/kubescape/v2/core/cautils"
|
||||
"github.com/kubescape/kubescape/v2/core/core"
|
||||
"github.com/kubescape/kubescape/v2/core/meta"
|
||||
"github.com/kubescape/kubescape/v2/core/pkg/resultshandling"
|
||||
"github.com/kubescape/kubescape/v2/pkg/imagescan"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
type imageScanInfo struct {
|
||||
Username string
|
||||
Password string
|
||||
}
|
||||
|
||||
// TODO(vladklokun): document image scanning on the Kubescape Docs Hub?
|
||||
var (
|
||||
imageExample = fmt.Sprintf(`
|
||||
This command is still in BETA. Feel free to contact the kubescape maintainers for more information.
|
||||
|
||||
Scan an image for vulnerabilities.
|
||||
|
||||
# Scan the 'nginx' image
|
||||
%[1]s scan image "nginx"
|
||||
|
||||
# Image scan documentation:
|
||||
# https://hub.armosec.io/docs/images
|
||||
`, cautils.ExecName())
|
||||
)
|
||||
|
||||
// imageCmd represents the image command
|
||||
func getImageCmd(ks meta.IKubescape, scanInfo *cautils.ScanInfo, imgScanInfo *imageScanInfo) *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "image <IMAGE_NAME>",
|
||||
Short: "Scan an image for vulnerabilities",
|
||||
Example: imageExample,
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) != 1 {
|
||||
return fmt.Errorf("the command takes exactly one image name as an argument")
|
||||
}
|
||||
return nil
|
||||
},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
if err := validateImageScanInfo(scanInfo); err != nil {
|
||||
return err
|
||||
}
|
||||
failOnSeverity := imagescan.ParseSeverity(scanInfo.FailThresholdSeverity)
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
logger.InitLogger(iconlogger.LoggerName)
|
||||
|
||||
dbCfg, _ := imagescan.NewDefaultDBConfig()
|
||||
svc := imagescan.NewScanService(dbCfg)
|
||||
|
||||
creds := imagescan.RegistryCredentials{
|
||||
Username: imgScanInfo.Username,
|
||||
Password: imgScanInfo.Password,
|
||||
}
|
||||
|
||||
userInput := args[0]
|
||||
|
||||
logger.L().Start(fmt.Sprintf("Scanning image: %s", userInput))
|
||||
scanResults, err := svc.Scan(ctx, userInput, creds)
|
||||
if err != nil {
|
||||
logger.L().StopError(fmt.Sprintf("Failed to scan image: %s", userInput))
|
||||
return err
|
||||
}
|
||||
logger.L().StopSuccess(fmt.Sprintf("Successfully scanned image: %s", userInput))
|
||||
|
||||
scanInfo.SetScanType(cautils.ScanTypeImage)
|
||||
|
||||
outputPrinters := core.GetOutputPrinters(scanInfo, ctx)
|
||||
|
||||
uiPrinter := core.GetUIPrinter(ctx, scanInfo)
|
||||
|
||||
resultsHandler := resultshandling.NewResultsHandler(nil, outputPrinters, uiPrinter)
|
||||
|
||||
resultsHandler.ImageScanData = []cautils.ImageScanData{
|
||||
{
|
||||
PresenterConfig: scanResults,
|
||||
Image: userInput,
|
||||
},
|
||||
}
|
||||
|
||||
resultsHandler.HandleResults(ctx)
|
||||
|
||||
if imagescan.ExceedsSeverityThreshold(scanResults, failOnSeverity) {
|
||||
terminateOnExceedingSeverity(scanInfo, logger.L())
|
||||
}
|
||||
|
||||
return err
|
||||
},
|
||||
}
|
||||
|
||||
cmd.PersistentFlags().StringVarP(&imgScanInfo.Username, "username", "u", "", "Username for registry login")
|
||||
cmd.PersistentFlags().StringVarP(&imgScanInfo.Password, "password", "p", "", "Password for registry login")
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
// validateImageScanInfo validates the ScanInfo struct for the `image` command
|
||||
func validateImageScanInfo(scanInfo *cautils.ScanInfo) error {
|
||||
severity := scanInfo.FailThresholdSeverity
|
||||
|
||||
if err := validateSeverity(severity); severity != "" && err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -1,6 +1,7 @@
|
||||
package scan
|
||||
|
||||
import (
|
||||
"context"
|
||||
"flag"
|
||||
"fmt"
|
||||
"strings"
|
||||
@@ -9,6 +10,7 @@ import (
|
||||
"github.com/kubescape/kubescape/v2/core/cautils"
|
||||
"github.com/kubescape/kubescape/v2/core/cautils/getter"
|
||||
"github.com/kubescape/kubescape/v2/core/meta"
|
||||
v1 "github.com/kubescape/opa-utils/httpserver/apis/v1"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
@@ -41,7 +43,8 @@ func GetScanCommand(ks meta.IKubescape) *cobra.Command {
|
||||
Long: `The action you want to perform`,
|
||||
Example: scanCmdExamples,
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) > 0 {
|
||||
// setting input patterns for framework scan is only relevancy for non-security view
|
||||
if len(args) > 0 && scanInfo.View != string(cautils.SecurityViewType) {
|
||||
if args[0] != "framework" && args[0] != "control" {
|
||||
return getFrameworkCmd(ks, &scanInfo).RunE(cmd, append([]string{strings.Join(getter.NativeFrameworks, ",")}, args...))
|
||||
}
|
||||
@@ -49,6 +52,11 @@ func GetScanCommand(ks meta.IKubescape) *cobra.Command {
|
||||
return nil
|
||||
},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
if scanInfo.View == string(cautils.SecurityViewType) {
|
||||
setSecurityViewScanInfo(args, &scanInfo)
|
||||
|
||||
return securityScan(scanInfo, ks)
|
||||
}
|
||||
|
||||
if len(args) == 0 {
|
||||
return getFrameworkCmd(ks, &scanInfo).RunE(cmd, []string{strings.Join(getter.NativeFrameworks, ",")})
|
||||
@@ -81,7 +89,7 @@ func GetScanCommand(ks meta.IKubescape) *cobra.Command {
|
||||
scanCmd.PersistentFlags().BoolVarP(&scanInfo.Local, "keep-local", "", false, "If you do not want your Kubescape results reported to configured backend.")
|
||||
scanCmd.PersistentFlags().StringVarP(&scanInfo.Output, "output", "o", "", "Output file. Print output to file and not stdout")
|
||||
scanCmd.PersistentFlags().BoolVarP(&scanInfo.VerboseMode, "verbose", "v", false, "Display all of the input resources and not only failed resources")
|
||||
scanCmd.PersistentFlags().StringVar(&scanInfo.View, "view", string(cautils.ResourceViewType), fmt.Sprintf("View results based on the %s/%s. default is --view=%s", cautils.ResourceViewType, cautils.ControlViewType, cautils.ResourceViewType))
|
||||
scanCmd.PersistentFlags().StringVar(&scanInfo.View, "view", string(cautils.ResourceViewType), fmt.Sprintf("View results based on the %s/%s/%s. default is --view=%s", cautils.ResourceViewType, cautils.ControlViewType, cautils.SecurityViewType, cautils.ResourceViewType))
|
||||
scanCmd.PersistentFlags().BoolVar(&scanInfo.UseDefault, "use-default", false, "Load local policy object from default path. If not used will download latest")
|
||||
scanCmd.PersistentFlags().StringSliceVar(&scanInfo.UseFrom, "use-from", nil, "Load local policy object from specified path. If not used will download latest")
|
||||
scanCmd.PersistentFlags().StringVar(&scanInfo.HostSensorYamlPath, "host-scan-yaml", "", "Override default host scanner DaemonSet. Use this flag cautiously")
|
||||
@@ -90,6 +98,7 @@ func GetScanCommand(ks meta.IKubescape) *cobra.Command {
|
||||
scanCmd.PersistentFlags().BoolVarP(&scanInfo.Submit, "submit", "", false, "Submit the scan results to Kubescape SaaS where you can see the results in a user-friendly UI, choose your preferred compliance framework, check risk results history and trends, manage exceptions, get remediation recommendations and much more. By default the results are not submitted")
|
||||
scanCmd.PersistentFlags().BoolVarP(&scanInfo.OmitRawResources, "omit-raw-resources", "", false, "Omit raw resources from the output. By default the raw resources are included in the output")
|
||||
scanCmd.PersistentFlags().BoolVarP(&scanInfo.PrintAttackTree, "print-attack-tree", "", false, "Print attack tree")
|
||||
scanCmd.PersistentFlags().BoolVarP(&scanInfo.ScanImages, "scan-images", "", false, "Scan resources images")
|
||||
|
||||
scanCmd.PersistentFlags().MarkDeprecated("silent", "use '--logger' flag instead. Flag will be removed at 1.May.2022")
|
||||
scanCmd.PersistentFlags().MarkDeprecated("fail-threshold", "use '--compliance-threshold' flag instead. Flag will be removed at 1.Dec.2023")
|
||||
@@ -117,6 +126,38 @@ func GetScanCommand(ks meta.IKubescape) *cobra.Command {
|
||||
|
||||
scanCmd.AddCommand(getControlCmd(ks, &scanInfo))
|
||||
scanCmd.AddCommand(getFrameworkCmd(ks, &scanInfo))
|
||||
scanCmd.AddCommand(getWorkloadCmd(ks, &scanInfo))
|
||||
|
||||
isi := &imageScanInfo{}
|
||||
scanCmd.AddCommand(getImageCmd(ks, &scanInfo, isi))
|
||||
|
||||
return scanCmd
|
||||
}
|
||||
|
||||
func setSecurityViewScanInfo(args []string, scanInfo *cautils.ScanInfo) {
|
||||
if len(args) > 0 {
|
||||
scanInfo.SetScanType(cautils.ScanTypeRepo)
|
||||
scanInfo.InputPatterns = args
|
||||
} else {
|
||||
scanInfo.SetScanType(cautils.ScanTypeCluster)
|
||||
}
|
||||
scanInfo.SetPolicyIdentifiers([]string{"clusterscan", "mitre", "nsa"}, v1.KindFramework)
|
||||
}
|
||||
|
||||
func securityScan(scanInfo cautils.ScanInfo, ks meta.IKubescape) error {
|
||||
|
||||
ctx := context.TODO()
|
||||
|
||||
results, err := ks.Scan(ctx, &scanInfo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = results.HandleResults(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
enforceSeverityThresholds(results.GetData().Report.SummaryDetails.GetResourcesSeverityCounters(), &scanInfo, terminateOnExceedingSeverity)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"github.com/kubescape/go-logger/helpers"
|
||||
|
||||
"github.com/kubescape/kubescape/v2/core/cautils"
|
||||
v1 "github.com/kubescape/opa-utils/httpserver/apis/v1"
|
||||
"github.com/kubescape/opa-utils/reporthandling/apis"
|
||||
"github.com/kubescape/opa-utils/reporthandling/results/v1/reportsummary"
|
||||
|
||||
@@ -184,17 +185,20 @@ type spyLogger struct {
|
||||
setItems []spyLogMessage
|
||||
}
|
||||
|
||||
func (l *spyLogger) Error(msg string, details ...helpers.IDetails) {}
|
||||
func (l *spyLogger) Success(msg string, details ...helpers.IDetails) {}
|
||||
func (l *spyLogger) Warning(msg string, details ...helpers.IDetails) {}
|
||||
func (l *spyLogger) Info(msg string, details ...helpers.IDetails) {}
|
||||
func (l *spyLogger) Debug(msg string, details ...helpers.IDetails) {}
|
||||
func (l *spyLogger) SetLevel(level string) error { return nil }
|
||||
func (l *spyLogger) GetLevel() string { return "" }
|
||||
func (l *spyLogger) SetWriter(w *os.File) {}
|
||||
func (l *spyLogger) GetWriter() *os.File { return &os.File{} }
|
||||
func (l *spyLogger) LoggerName() string { return "" }
|
||||
func (l *spyLogger) Ctx(_ context.Context) helpers.ILogger { return l }
|
||||
func (l *spyLogger) Error(msg string, details ...helpers.IDetails) {}
|
||||
func (l *spyLogger) Success(msg string, details ...helpers.IDetails) {}
|
||||
func (l *spyLogger) Warning(msg string, details ...helpers.IDetails) {}
|
||||
func (l *spyLogger) Info(msg string, details ...helpers.IDetails) {}
|
||||
func (l *spyLogger) Debug(msg string, details ...helpers.IDetails) {}
|
||||
func (l *spyLogger) SetLevel(level string) error { return nil }
|
||||
func (l *spyLogger) GetLevel() string { return "" }
|
||||
func (l *spyLogger) SetWriter(w *os.File) {}
|
||||
func (l *spyLogger) GetWriter() *os.File { return &os.File{} }
|
||||
func (l *spyLogger) LoggerName() string { return "" }
|
||||
func (l *spyLogger) Ctx(_ context.Context) helpers.ILogger { return l }
|
||||
func (l *spyLogger) Start(msg string, details ...helpers.IDetails) {}
|
||||
func (l *spyLogger) StopSuccess(msg string, details ...helpers.IDetails) {}
|
||||
func (l *spyLogger) StopError(msg string, details ...helpers.IDetails) {}
|
||||
|
||||
func (l *spyLogger) Fatal(msg string, details ...helpers.IDetails) {
|
||||
firstDetail := details[0]
|
||||
@@ -254,3 +258,106 @@ func Test_terminateOnExceedingSeverity(t *testing.T) {
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetSecurityViewScanInfo(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
args []string
|
||||
want *cautils.ScanInfo
|
||||
}{
|
||||
{
|
||||
name: "no args",
|
||||
args: []string{},
|
||||
want: &cautils.ScanInfo{
|
||||
InputPatterns: []string{},
|
||||
ScanType: cautils.ScanTypeCluster,
|
||||
PolicyIdentifier: []cautils.PolicyIdentifier{
|
||||
{
|
||||
Kind: v1.KindFramework,
|
||||
Identifier: "clusterscan",
|
||||
},
|
||||
{
|
||||
Kind: v1.KindFramework,
|
||||
Identifier: "mitre",
|
||||
},
|
||||
{
|
||||
Kind: v1.KindFramework,
|
||||
Identifier: "nsa",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "with args",
|
||||
args: []string{
|
||||
"file.yaml",
|
||||
"file2.yaml",
|
||||
},
|
||||
want: &cautils.ScanInfo{
|
||||
ScanType: cautils.ScanTypeRepo,
|
||||
InputPatterns: []string{
|
||||
"file.yaml",
|
||||
"file2.yaml",
|
||||
},
|
||||
PolicyIdentifier: []cautils.PolicyIdentifier{
|
||||
{
|
||||
Kind: v1.KindFramework,
|
||||
Identifier: "clusterscan",
|
||||
},
|
||||
{
|
||||
Kind: v1.KindFramework,
|
||||
Identifier: "mitre",
|
||||
},
|
||||
{
|
||||
Kind: v1.KindFramework,
|
||||
Identifier: "nsa",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := &cautils.ScanInfo{
|
||||
View: string(cautils.SecurityViewType),
|
||||
}
|
||||
setSecurityViewScanInfo(tt.args, got)
|
||||
|
||||
if len(tt.want.InputPatterns) != len(got.InputPatterns) {
|
||||
t.Errorf("in test: %s, got: %v, want: %v", tt.name, got.InputPatterns, tt.want.InputPatterns)
|
||||
}
|
||||
|
||||
if tt.want.ScanType != got.ScanType {
|
||||
t.Errorf("in test: %s, got: %v, want: %v", tt.name, got.ScanType, tt.want.ScanType)
|
||||
}
|
||||
|
||||
for i := range tt.want.InputPatterns {
|
||||
found := false
|
||||
for j := range tt.want.InputPatterns[i] {
|
||||
if tt.want.InputPatterns[i][j] == got.InputPatterns[i][j] {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
t.Errorf("in test: %s, got: %v, want: %v", tt.name, got.InputPatterns, tt.want.InputPatterns)
|
||||
}
|
||||
}
|
||||
|
||||
for i := range tt.want.PolicyIdentifier {
|
||||
found := false
|
||||
for j := range got.PolicyIdentifier {
|
||||
if tt.want.PolicyIdentifier[i].Kind == got.PolicyIdentifier[j].Kind && tt.want.PolicyIdentifier[i].Identifier == got.PolicyIdentifier[j].Identifier {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
t.Errorf("in test: %s, got: %v, want: %v", tt.name, got.PolicyIdentifier, tt.want.PolicyIdentifier)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -114,3 +114,27 @@ func Test_validateSeverity(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_validateWorkloadIdentifier(t *testing.T) {
|
||||
testCases := []struct {
|
||||
Description string
|
||||
Input string
|
||||
Want error
|
||||
}{
|
||||
{"valid workload identifier should be valid", "deployment/test", nil},
|
||||
{"invalid workload identifier missing kind", "deployment", ErrInvalidWorkloadIdentifier},
|
||||
{"invalid workload identifier with namespace", "ns/deployment/name", ErrInvalidWorkloadIdentifier},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
t.Run(testCase.Description, func(t *testing.T) {
|
||||
input := testCase.Input
|
||||
want := testCase.Want
|
||||
got := validateWorkloadIdentifier(input)
|
||||
|
||||
if got != want {
|
||||
t.Errorf("got: %v, want: %v", got, want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
126
cmd/scan/workload.go
Normal file
126
cmd/scan/workload.go
Normal file
@@ -0,0 +1,126 @@
|
||||
package scan
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
logger "github.com/kubescape/go-logger"
|
||||
"github.com/kubescape/kubescape/v2/core/cautils"
|
||||
"github.com/kubescape/kubescape/v2/core/meta"
|
||||
v1 "github.com/kubescape/opa-utils/httpserver/apis/v1"
|
||||
"github.com/kubescape/opa-utils/objectsenvelopes"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var (
|
||||
workloadExample = fmt.Sprintf(`
|
||||
This command is still in BETA. Feel free to contact the kubescape maintainers for more information.
|
||||
|
||||
Scan a workload for misconfigurations and image vulnerabilities.
|
||||
|
||||
# Scan an workload
|
||||
%[1]s scan workload <kind>/<name>
|
||||
|
||||
# Scan an workload in a specific namespace
|
||||
%[1]s scan workload <kind>/<name> --namespace <namespace>
|
||||
|
||||
# Scan an workload from a file path
|
||||
%[1]s scan workload <kind>/<name> --file-path <file path>
|
||||
|
||||
# Scan an workload from a helm-chart template
|
||||
%[1]s scan workload <kind>/<name> --chart-path <chart path> --file-path <file path>
|
||||
|
||||
|
||||
`, cautils.ExecName())
|
||||
|
||||
ErrInvalidWorkloadIdentifier = errors.New("invalid workload identifier")
|
||||
)
|
||||
|
||||
var namespace string
|
||||
|
||||
// getWorkloadCmd returns the `scan workload` cobra subcommand, which scans a
// single workload (identified as "<kind>/<name>") for misconfigurations and
// image vulnerabilities — in-cluster by default, or from a file / helm chart
// template via --file-path / --chart-path.
func getWorkloadCmd(ks meta.IKubescape, scanInfo *cautils.ScanInfo) *cobra.Command {
	workloadCmd := &cobra.Command{
		Use:     "workload <kind>/<name> [`<glob pattern>`/`-`] [flags]",
		Short:   "Scan a workload for misconfigurations and image vulnerabilities",
		Example: workloadExample,
		// Args validates the CLI input before RunE executes.
		Args: func(cmd *cobra.Command, args []string) error {
			if len(args) != 1 {
				return fmt.Errorf("usage: <kind>/<name> [`<glob pattern>`/`-`] [flags]")
			}

			// --chart-path is only meaningful together with --file-path.
			if scanInfo.ChartPath != "" && scanInfo.FilePath == "" {
				return fmt.Errorf("usage: --chart-path <chart path> --file-path <file path>")
			}

			return validateWorkloadIdentifier(args[0])
		},
		RunE: func(cmd *cobra.Command, args []string) error {

			kind, name, err := parseWorkloadIdentifierString(args[0])
			if err != nil {
				return fmt.Errorf("invalid input: %s", err.Error())
			}

			// Configure the shared ScanInfo for a single-workload scan.
			setWorkloadScanInfo(scanInfo, kind, name)

			// todo: add api version if provided
			ctx := context.TODO()
			results, err := ks.Scan(ctx, scanInfo)
			if err != nil {
				logger.L().Fatal(err.Error())
			}

			if err = results.HandleResults(ctx); err != nil {
				logger.L().Fatal(err.Error())
			}

			return nil
		},
	}
	workloadCmd.PersistentFlags().StringVarP(&namespace, "namespace", "n", "", "Namespace of the workload. Default will be empty.")
	workloadCmd.PersistentFlags().StringVar(&scanInfo.FilePath, "file-path", "", "Path to the workload file.")
	workloadCmd.PersistentFlags().StringVar(&scanInfo.ChartPath, "chart-path", "", "Path to the helm chart the workload is part of. Must be used with --file-path.")

	return workloadCmd
}
|
||||
|
||||
func setWorkloadScanInfo(scanInfo *cautils.ScanInfo, kind string, name string) {
|
||||
scanInfo.SetScanType(cautils.ScanTypeWorkload)
|
||||
scanInfo.ScanImages = true
|
||||
|
||||
scanInfo.ScanObject = &objectsenvelopes.ScanObject{}
|
||||
scanInfo.ScanObject.SetNamespace(namespace)
|
||||
scanInfo.ScanObject.SetKind(kind)
|
||||
scanInfo.ScanObject.SetName(name)
|
||||
|
||||
scanInfo.SetPolicyIdentifiers([]string{"workloadscan"}, v1.KindFramework)
|
||||
|
||||
if scanInfo.FilePath != "" {
|
||||
scanInfo.InputPatterns = []string{scanInfo.FilePath}
|
||||
}
|
||||
}
|
||||
|
||||
func validateWorkloadIdentifier(workloadIdentifier string) error {
|
||||
// workloadIdentifier is in the form of kind/name
|
||||
x := strings.Split(workloadIdentifier, "/")
|
||||
if len(x) != 2 || x[0] == "" || x[1] == "" {
|
||||
return ErrInvalidWorkloadIdentifier
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func parseWorkloadIdentifierString(workloadIdentifier string) (kind, name string, err error) {
|
||||
// workloadIdentifier is in the form of namespace/kind/name
|
||||
// example: default/Deployment/nginx-deployment
|
||||
x := strings.Split(workloadIdentifier, "/")
|
||||
if len(x) != 2 {
|
||||
return "", "", ErrInvalidWorkloadIdentifier
|
||||
}
|
||||
|
||||
return x[0], x[1], nil
|
||||
}
|
||||
69
cmd/scan/workload_test.go
Normal file
69
cmd/scan/workload_test.go
Normal file
@@ -0,0 +1,69 @@
|
||||
package scan
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/kubescape/kubescape/v2/core/cautils"
|
||||
v1 "github.com/kubescape/opa-utils/httpserver/apis/v1"
|
||||
"github.com/kubescape/opa-utils/objectsenvelopes"
|
||||
)
|
||||
|
||||
// TestSetWorkloadScanInfo verifies that setWorkloadScanInfo populates the
// scan type, the scan object (kind and name) and the single "workloadscan"
// framework policy identifier on the ScanInfo.
func TestSetWorkloadScanInfo(t *testing.T) {
	test := []struct {
		Description string
		kind        string
		name        string
		// want holds only the fields this test asserts on.
		want *cautils.ScanInfo
	}{
		{
			Description: "Set workload scan info",
			kind:        "Deployment",
			name:        "test",
			want: &cautils.ScanInfo{
				PolicyIdentifier: []cautils.PolicyIdentifier{
					{
						Identifier: "workloadscan",
						Kind:       v1.KindFramework,
					},
				},
				ScanType: cautils.ScanTypeWorkload,
				ScanObject: &objectsenvelopes.ScanObject{
					Kind: "Deployment",
					Metadata: objectsenvelopes.ScanObjectMetadata{
						Name: "test",
					},
				},
			},
		},
	}

	for _, tc := range test {
		t.Run(
			tc.Description,
			func(t *testing.T) {
				// Start from an empty ScanInfo and let the function under test fill it.
				scanInfo := &cautils.ScanInfo{}
				setWorkloadScanInfo(scanInfo, tc.kind, tc.name)

				if scanInfo.ScanType != tc.want.ScanType {
					t.Errorf("got: %v, want: %v", scanInfo.ScanType, tc.want.ScanType)
				}

				if scanInfo.ScanObject.Kind != tc.want.ScanObject.Kind {
					t.Errorf("got: %v, want: %v", scanInfo.ScanObject.Kind, tc.want.ScanObject.Kind)
				}

				if scanInfo.ScanObject.Metadata.Name != tc.want.ScanObject.Metadata.Name {
					t.Errorf("got: %v, want: %v", scanInfo.ScanObject.Metadata.Name, tc.want.ScanObject.Metadata.Name)
				}

				// Exactly one framework ("workloadscan") should be selected.
				if len(scanInfo.PolicyIdentifier) != 1 {
					t.Errorf("got: %v, want: %v", len(scanInfo.PolicyIdentifier), 1)
				}

				if scanInfo.PolicyIdentifier[0].Identifier != tc.want.PolicyIdentifier[0].Identifier {
					t.Errorf("got: %v, want: %v", scanInfo.PolicyIdentifier[0].Identifier, tc.want.PolicyIdentifier[0].Identifier)
				}
			},
		)
	}
}
|
||||
@@ -17,7 +17,11 @@ import (
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
const configFileName = "config"
|
||||
const (
|
||||
configFileName string = "config"
|
||||
kubescapeNamespace string = "kubescape"
|
||||
kubescapeConfigMapName string = "kubescape-config"
|
||||
)
|
||||
|
||||
func ConfigFileFullPath() string { return getter.GetDefaultPath(configFileName + ".json") }
|
||||
|
||||
@@ -29,7 +33,6 @@ type ConfigObj struct {
|
||||
AccountID string `json:"accountID,omitempty"`
|
||||
ClientID string `json:"clientID,omitempty"`
|
||||
SecretKey string `json:"secretKey,omitempty"`
|
||||
CustomerGUID string `json:"customerGUID,omitempty"` // Deprecated
|
||||
Token string `json:"invitationParam,omitempty"`
|
||||
CustomerAdminEMail string `json:"adminMail,omitempty"`
|
||||
ClusterName string `json:"clusterName,omitempty"`
|
||||
@@ -63,6 +66,35 @@ func (co *ConfigObj) Config() []byte {
|
||||
return []byte{}
|
||||
}
|
||||
|
||||
func (co *ConfigObj) updateEmptyFields(inCO *ConfigObj) error {
|
||||
if inCO.AccountID != "" {
|
||||
co.AccountID = inCO.AccountID
|
||||
}
|
||||
if inCO.CloudAPIURL != "" {
|
||||
co.CloudAPIURL = inCO.CloudAPIURL
|
||||
}
|
||||
if inCO.CloudAuthURL != "" {
|
||||
co.CloudAuthURL = inCO.CloudAuthURL
|
||||
}
|
||||
if inCO.CloudReportURL != "" {
|
||||
co.CloudReportURL = inCO.CloudReportURL
|
||||
}
|
||||
if inCO.CloudUIURL != "" {
|
||||
co.CloudUIURL = inCO.CloudUIURL
|
||||
}
|
||||
if inCO.ClusterName != "" {
|
||||
co.ClusterName = inCO.ClusterName
|
||||
}
|
||||
if inCO.CustomerAdminEMail != "" {
|
||||
co.CustomerAdminEMail = inCO.CustomerAdminEMail
|
||||
}
|
||||
if inCO.Token != "" {
|
||||
co.Token = inCO.Token
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ======================================================================================
|
||||
// =============================== interface ============================================
|
||||
// ======================================================================================
|
||||
@@ -245,15 +277,16 @@ func NewClusterConfig(k8s *k8sinterface.KubernetesApi, backendAPI getter.IBacken
|
||||
configMapNamespace: GetConfigMapNamespace(),
|
||||
}
|
||||
|
||||
// first, load from configMap
|
||||
if c.existsConfigMap() {
|
||||
c.loadConfigFromConfigMap()
|
||||
}
|
||||
|
||||
// second, load from file
|
||||
// first, load from file
|
||||
if existsConfigFile() { // get from file
|
||||
loadConfigFromFile(c.configObj)
|
||||
}
|
||||
|
||||
// second, load from configMap
|
||||
if c.existsConfigMap() {
|
||||
c.updateConfigEmptyFieldsFromConfigMap()
|
||||
}
|
||||
|
||||
updateCredentials(c.configObj, credentials)
|
||||
updateCloudURLs(c.configObj)
|
||||
|
||||
@@ -359,6 +392,22 @@ func (c *ClusterConfig) ToMapString() map[string]interface{} {
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func (c *ClusterConfig) updateConfigEmptyFieldsFromConfigMap() error {
|
||||
configMap, err := c.k8s.KubernetesClient.CoreV1().ConfigMaps(c.configMapNamespace).Get(context.Background(), c.configMapName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tempCO := ConfigObj{}
|
||||
if jsonConf, ok := configMap.Data["config.json"]; ok {
|
||||
json.Unmarshal([]byte(jsonConf), &tempCO)
|
||||
return c.configObj.updateEmptyFields(&tempCO)
|
||||
}
|
||||
return err
|
||||
|
||||
}
|
||||
|
||||
func (c *ClusterConfig) loadConfigFromConfigMap() error {
|
||||
configMap, err := c.k8s.KubernetesClient.CoreV1().ConfigMaps(c.configMapNamespace).Get(context.Background(), c.configMapName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
@@ -509,10 +558,6 @@ func readConfig(dat []byte, configObj *ConfigObj) error {
|
||||
if err := json.Unmarshal(dat, configObj); err != nil {
|
||||
return err
|
||||
}
|
||||
if configObj.AccountID == "" {
|
||||
configObj.AccountID = configObj.CustomerGUID
|
||||
}
|
||||
configObj.CustomerGUID = ""
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -554,7 +599,7 @@ func getConfigMapName() string {
|
||||
if n := os.Getenv("KS_DEFAULT_CONFIGMAP_NAME"); n != "" {
|
||||
return n
|
||||
}
|
||||
return "kubescape"
|
||||
return kubescapeConfigMapName
|
||||
}
|
||||
|
||||
// GetConfigMapNamespace returns the namespace of the cluster config, which is the same for all in-cluster components
|
||||
@@ -562,7 +607,7 @@ func GetConfigMapNamespace() string {
|
||||
if n := os.Getenv("KS_DEFAULT_CONFIGMAP_NAMESPACE"); n != "" {
|
||||
return n
|
||||
}
|
||||
return "default"
|
||||
return kubescapeNamespace
|
||||
}
|
||||
|
||||
func getAccountFromEnv(credentials *Credentials) {
|
||||
|
||||
@@ -308,12 +308,12 @@ func TestGetConfigMapNamespace(t *testing.T) {
|
||||
}{
|
||||
{
|
||||
name: "no env",
|
||||
want: "default",
|
||||
want: kubescapeNamespace,
|
||||
},
|
||||
{
|
||||
name: "default ns",
|
||||
env: "kubescape",
|
||||
want: "kubescape",
|
||||
env: kubescapeNamespace,
|
||||
want: kubescapeNamespace,
|
||||
},
|
||||
{
|
||||
name: "custom ns",
|
||||
@@ -330,3 +330,128 @@ func TestGetConfigMapNamespace(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
const (
|
||||
anyString string = "anyString"
|
||||
shouldNotUpdate string = "shouldNotUpdate"
|
||||
shouldUpdate string = "shouldUpdate"
|
||||
)
|
||||
|
||||
func checkIsUpdateCorrectly(t *testing.T, beforeField string, afterField string) {
|
||||
switch beforeField {
|
||||
case anyString:
|
||||
assert.Equal(t, anyString, afterField)
|
||||
case "":
|
||||
assert.Equal(t, shouldUpdate, afterField)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateEmptyFields(t *testing.T) {
|
||||
|
||||
tests := []struct {
|
||||
inCo *ConfigObj
|
||||
outCo *ConfigObj
|
||||
}{
|
||||
{
|
||||
outCo: &ConfigObj{
|
||||
AccountID: "",
|
||||
Token: "",
|
||||
CustomerAdminEMail: "",
|
||||
ClusterName: "",
|
||||
CloudReportURL: "",
|
||||
CloudAPIURL: "",
|
||||
CloudUIURL: "",
|
||||
CloudAuthURL: "",
|
||||
},
|
||||
inCo: &ConfigObj{
|
||||
AccountID: shouldUpdate,
|
||||
Token: shouldUpdate,
|
||||
CustomerAdminEMail: shouldUpdate,
|
||||
ClusterName: shouldUpdate,
|
||||
CloudReportURL: shouldUpdate,
|
||||
CloudAPIURL: shouldUpdate,
|
||||
CloudUIURL: shouldUpdate,
|
||||
CloudAuthURL: shouldUpdate,
|
||||
},
|
||||
},
|
||||
{
|
||||
outCo: &ConfigObj{
|
||||
AccountID: anyString,
|
||||
Token: anyString,
|
||||
CustomerAdminEMail: "",
|
||||
ClusterName: "",
|
||||
CloudReportURL: "",
|
||||
CloudAPIURL: "",
|
||||
CloudUIURL: "",
|
||||
CloudAuthURL: "",
|
||||
},
|
||||
inCo: &ConfigObj{
|
||||
AccountID: shouldNotUpdate,
|
||||
Token: shouldNotUpdate,
|
||||
CustomerAdminEMail: shouldUpdate,
|
||||
ClusterName: shouldUpdate,
|
||||
CloudReportURL: shouldUpdate,
|
||||
CloudAPIURL: shouldUpdate,
|
||||
CloudUIURL: shouldUpdate,
|
||||
CloudAuthURL: shouldUpdate,
|
||||
},
|
||||
},
|
||||
{
|
||||
outCo: &ConfigObj{
|
||||
AccountID: "",
|
||||
Token: "",
|
||||
CustomerAdminEMail: anyString,
|
||||
ClusterName: anyString,
|
||||
CloudReportURL: anyString,
|
||||
CloudAPIURL: anyString,
|
||||
CloudUIURL: anyString,
|
||||
CloudAuthURL: anyString,
|
||||
},
|
||||
inCo: &ConfigObj{
|
||||
AccountID: shouldUpdate,
|
||||
Token: shouldUpdate,
|
||||
CustomerAdminEMail: shouldNotUpdate,
|
||||
ClusterName: shouldNotUpdate,
|
||||
CloudReportURL: shouldNotUpdate,
|
||||
CloudAPIURL: shouldNotUpdate,
|
||||
CloudUIURL: shouldNotUpdate,
|
||||
CloudAuthURL: shouldNotUpdate,
|
||||
},
|
||||
},
|
||||
{
|
||||
outCo: &ConfigObj{
|
||||
AccountID: anyString,
|
||||
Token: anyString,
|
||||
CustomerAdminEMail: "",
|
||||
ClusterName: anyString,
|
||||
CloudReportURL: "",
|
||||
CloudAPIURL: anyString,
|
||||
CloudUIURL: "",
|
||||
CloudAuthURL: anyString,
|
||||
},
|
||||
inCo: &ConfigObj{
|
||||
AccountID: shouldNotUpdate,
|
||||
Token: shouldNotUpdate,
|
||||
CustomerAdminEMail: shouldUpdate,
|
||||
ClusterName: shouldNotUpdate,
|
||||
CloudReportURL: shouldUpdate,
|
||||
CloudAPIURL: shouldNotUpdate,
|
||||
CloudUIURL: shouldUpdate,
|
||||
CloudAuthURL: shouldNotUpdate,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for i := range tests {
|
||||
beforeChangesOutCO := tests[i].outCo
|
||||
tests[i].outCo.updateEmptyFields(tests[i].inCo)
|
||||
checkIsUpdateCorrectly(t, beforeChangesOutCO.AccountID, tests[i].outCo.AccountID)
|
||||
checkIsUpdateCorrectly(t, beforeChangesOutCO.CloudAPIURL, tests[i].outCo.CloudAPIURL)
|
||||
checkIsUpdateCorrectly(t, beforeChangesOutCO.CloudAuthURL, tests[i].outCo.CloudAuthURL)
|
||||
checkIsUpdateCorrectly(t, beforeChangesOutCO.CloudReportURL, tests[i].outCo.CloudReportURL)
|
||||
checkIsUpdateCorrectly(t, beforeChangesOutCO.CloudUIURL, tests[i].outCo.CloudUIURL)
|
||||
checkIsUpdateCorrectly(t, beforeChangesOutCO.ClusterName, tests[i].outCo.ClusterName)
|
||||
checkIsUpdateCorrectly(t, beforeChangesOutCO.CustomerAdminEMail, tests[i].outCo.CustomerAdminEMail)
|
||||
checkIsUpdateCorrectly(t, beforeChangesOutCO.Token, tests[i].outCo.Token)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,7 +2,9 @@ package cautils
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sort"
|
||||
|
||||
"github.com/anchore/grype/grype/presenter/models"
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/kubescape/k8s-interface/workloadinterface"
|
||||
"github.com/kubescape/opa-utils/reporthandling"
|
||||
@@ -15,12 +17,29 @@ import (
|
||||
|
||||
// K8SResources map[<api group>/<api version>/<resource>][]<resourceID>
|
||||
type K8SResources map[string][]string
|
||||
type KSResources map[string][]string
|
||||
type ExternalResources map[string][]string
|
||||
|
||||
type ImageScanData struct {
|
||||
PresenterConfig *models.PresenterConfig
|
||||
Image string
|
||||
}
|
||||
|
||||
type ScanTypes string
|
||||
|
||||
const (
|
||||
TopWorkloadsNumber = 5
|
||||
ScanTypeCluster ScanTypes = "cluster"
|
||||
ScanTypeRepo ScanTypes = "repo"
|
||||
ScanTypeImage ScanTypes = "image"
|
||||
ScanTypeWorkload ScanTypes = "workload"
|
||||
ScanTypeFramework ScanTypes = "framework"
|
||||
)
|
||||
|
||||
type OPASessionObj struct {
|
||||
K8SResources *K8SResources // input k8s objects
|
||||
ArmoResource *KSResources // input ARMO objects
|
||||
K8SResources K8SResources // input k8s objects
|
||||
ExternalResources ExternalResources // input non-k8s objects (external resources)
|
||||
AllPolicies *Policies // list of all frameworks
|
||||
ExcludedRules map[string]bool // rules to exclude map[rule name>]X
|
||||
AllResources map[string]workloadinterface.IMetadata // all scanned resources, map[<resource ID>]<resource>
|
||||
ResourcesResult map[string]resourcesresults.Result // resources scan results, map[<resource ID>]<resource result>
|
||||
ResourceSource map[string]reporthandling.Source // resources sources, map[<resource ID>]<resource result>
|
||||
@@ -36,9 +55,10 @@ type OPASessionObj struct {
|
||||
Policies []reporthandling.Framework // list of frameworks to scan
|
||||
Exceptions []armotypes.PostureExceptionPolicy // list of exceptions to apply on scan results
|
||||
OmitRawResources bool // omit raw resources from output
|
||||
SingleResourceScan workloadinterface.IWorkload // single resource scan
|
||||
}
|
||||
|
||||
func NewOPASessionObj(ctx context.Context, frameworks []reporthandling.Framework, k8sResources *K8SResources, scanInfo *ScanInfo) *OPASessionObj {
|
||||
func NewOPASessionObj(ctx context.Context, frameworks []reporthandling.Framework, k8sResources K8SResources, scanInfo *ScanInfo) *OPASessionObj {
|
||||
return &OPASessionObj{
|
||||
Report: &reporthandlingv2.PostureReport{},
|
||||
Policies: frameworks,
|
||||
@@ -55,6 +75,45 @@ func NewOPASessionObj(ctx context.Context, frameworks []reporthandling.Framework
|
||||
}
|
||||
}
|
||||
|
||||
// SetTopWorkloads sets the top workloads by score
|
||||
func (sessionObj *OPASessionObj) SetTopWorkloads() {
|
||||
count := 0
|
||||
|
||||
topWorkloadsSorted := make([]prioritization.PrioritizedResource, 0)
|
||||
|
||||
// create list in order to sort
|
||||
for _, wl := range sessionObj.ResourcesPrioritized {
|
||||
topWorkloadsSorted = append(topWorkloadsSorted, wl)
|
||||
}
|
||||
|
||||
// sort by score. If scores are equal, sort by resource ID
|
||||
sort.Slice(topWorkloadsSorted, func(i, j int) bool {
|
||||
if topWorkloadsSorted[i].Score == topWorkloadsSorted[j].Score {
|
||||
return topWorkloadsSorted[i].ResourceID < topWorkloadsSorted[j].ResourceID
|
||||
}
|
||||
return topWorkloadsSorted[i].Score > topWorkloadsSorted[j].Score
|
||||
})
|
||||
|
||||
if sessionObj.Report == nil {
|
||||
sessionObj.Report = &reporthandlingv2.PostureReport{}
|
||||
}
|
||||
|
||||
// set top workloads according to number of top workloads
|
||||
for i := 0; i < TopWorkloadsNumber; i++ {
|
||||
if i >= len(topWorkloadsSorted) {
|
||||
break
|
||||
}
|
||||
source := sessionObj.ResourceSource[topWorkloadsSorted[i].ResourceID]
|
||||
wlObj := &reporthandling.Resource{
|
||||
IMetadata: sessionObj.AllResources[topWorkloadsSorted[i].ResourceID],
|
||||
Source: &source,
|
||||
}
|
||||
|
||||
sessionObj.Report.SummaryDetails.TopWorkloadsByScore = append(sessionObj.Report.SummaryDetails.TopWorkloadsByScore, wlObj)
|
||||
count++
|
||||
}
|
||||
}
|
||||
|
||||
func (sessionObj *OPASessionObj) SetMapNamespaceToNumberOfResources(mapNamespaceToNumberOfResources map[string]int) {
|
||||
if sessionObj.Metadata.ContextMetadata.ClusterContextMetadata == nil {
|
||||
sessionObj.Metadata.ContextMetadata.ClusterContextMetadata = &reporthandlingv2.ClusterMetadata{}
|
||||
|
||||
@@ -4,6 +4,8 @@ import (
|
||||
"golang.org/x/mod/semver"
|
||||
|
||||
"github.com/armosec/utils-go/boolutils"
|
||||
cloudsupport "github.com/kubescape/k8s-interface/cloudsupport/v1"
|
||||
"github.com/kubescape/k8s-interface/k8sinterface"
|
||||
"github.com/kubescape/opa-utils/reporthandling"
|
||||
"github.com/kubescape/opa-utils/reporthandling/apis"
|
||||
)
|
||||
@@ -15,15 +17,25 @@ func NewPolicies() *Policies {
|
||||
}
|
||||
}
|
||||
|
||||
func (policies *Policies) Set(frameworks []reporthandling.Framework, version string) {
|
||||
func (policies *Policies) Set(frameworks []reporthandling.Framework, version string, excludedRules map[string]bool, scanningScope reporthandling.ScanningScopeType) {
|
||||
for i := range frameworks {
|
||||
if !isFrameworkFitToScanScope(frameworks[i], scanningScope) {
|
||||
continue
|
||||
}
|
||||
if frameworks[i].Name != "" && len(frameworks[i].Controls) > 0 {
|
||||
policies.Frameworks = append(policies.Frameworks, frameworks[i].Name)
|
||||
}
|
||||
for j := range frameworks[i].Controls {
|
||||
compatibleRules := []reporthandling.PolicyRule{}
|
||||
for r := range frameworks[i].Controls[j].Rules {
|
||||
if !ruleWithKSOpaDependency(frameworks[i].Controls[j].Rules[r].Attributes) && isRuleKubescapeVersionCompatible(frameworks[i].Controls[j].Rules[r].Attributes, version) {
|
||||
if excludedRules != nil {
|
||||
ruleName := frameworks[i].Controls[j].Rules[r].Name
|
||||
if _, exclude := excludedRules[ruleName]; exclude {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if !ruleWithKSOpaDependency(frameworks[i].Controls[j].Rules[r].Attributes) && isRuleKubescapeVersionCompatible(frameworks[i].Controls[j].Rules[r].Attributes, version) && isControlFitToScanScope(frameworks[i].Controls[j], scanningScope) {
|
||||
compatibleRules = append(compatibleRules, frameworks[i].Controls[j].Rules[r])
|
||||
}
|
||||
}
|
||||
@@ -76,3 +88,89 @@ func isRuleKubescapeVersionCompatible(attributes map[string]interface{}, version
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func getCloudType(scanInfo *ScanInfo) (bool, reporthandling.ScanningScopeType) {
|
||||
if cloudsupport.IsAKS() {
|
||||
return true, reporthandling.ScopeCloudAKS
|
||||
}
|
||||
if cloudsupport.IsEKS(k8sinterface.GetConfig()) {
|
||||
return true, reporthandling.ScopeCloudEKS
|
||||
}
|
||||
if cloudsupport.IsGKE(k8sinterface.GetConfig()) {
|
||||
return true, reporthandling.ScopeCloudGKE
|
||||
}
|
||||
return false, ""
|
||||
}
|
||||
|
||||
func GetScanningScope(scanInfo *ScanInfo) reporthandling.ScanningScopeType {
|
||||
var result reporthandling.ScanningScopeType
|
||||
|
||||
switch scanInfo.GetScanningContext() {
|
||||
case ContextCluster:
|
||||
isCloud, cloudType := getCloudType(scanInfo)
|
||||
if isCloud {
|
||||
result = cloudType
|
||||
} else {
|
||||
result = reporthandling.ScopeCluster
|
||||
}
|
||||
default:
|
||||
result = reporthandling.ScopeFile
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
func isScanningScopeMatchToControlScope(scanScope reporthandling.ScanningScopeType, controlScope reporthandling.ScanningScopeType) bool {
|
||||
result := false
|
||||
|
||||
switch controlScope {
|
||||
case reporthandling.ScopeFile:
|
||||
result = (reporthandling.ScopeFile == scanScope)
|
||||
case reporthandling.ScopeCluster:
|
||||
result = (reporthandling.ScopeCluster == scanScope) || (reporthandling.ScopeCloud == scanScope) || (reporthandling.ScopeCloudAKS == scanScope) || (reporthandling.ScopeCloudEKS == scanScope) || (reporthandling.ScopeCloudGKE == scanScope)
|
||||
case reporthandling.ScopeCloud:
|
||||
result = (reporthandling.ScopeCloud == scanScope) || (reporthandling.ScopeCloudAKS == scanScope) || (reporthandling.ScopeCloudEKS == scanScope) || (reporthandling.ScopeCloudGKE == scanScope)
|
||||
case reporthandling.ScopeCloudAKS:
|
||||
result = (reporthandling.ScopeCloudAKS == scanScope)
|
||||
case reporthandling.ScopeCloudEKS:
|
||||
result = (reporthandling.ScopeCloudEKS == scanScope)
|
||||
case reporthandling.ScopeCloudGKE:
|
||||
result = (reporthandling.ScopeCloudGKE == scanScope)
|
||||
default:
|
||||
result = true
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
func isControlFitToScanScope(control reporthandling.Control, scanScopeMatches reporthandling.ScanningScopeType) bool {
|
||||
// for backward compatibility - case: kubescape with scope(new one) and regolibrary without scope(old one)
|
||||
if control.ScanningScope == nil {
|
||||
return true
|
||||
}
|
||||
if len(control.ScanningScope.Matches) == 0 {
|
||||
return true
|
||||
}
|
||||
for i := range control.ScanningScope.Matches {
|
||||
if isScanningScopeMatchToControlScope(scanScopeMatches, control.ScanningScope.Matches[i]) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func isFrameworkFitToScanScope(framework reporthandling.Framework, scanScopeMatches reporthandling.ScanningScopeType) bool {
|
||||
// for backward compatibility - case: kubescape with scope(new one) and regolibrary without scope(old one)
|
||||
if framework.ScanningScope == nil {
|
||||
return true
|
||||
}
|
||||
if len(framework.ScanningScope.Matches) == 0 {
|
||||
return true
|
||||
}
|
||||
for i := range framework.ScanningScope.Matches {
|
||||
if isScanningScopeMatchToControlScope(scanScopeMatches, framework.ScanningScope.Matches[i]) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
104
core/cautils/datastructuresmethods_test.go
Normal file
104
core/cautils/datastructuresmethods_test.go
Normal file
@@ -0,0 +1,104 @@
|
||||
package cautils
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/kubescape/opa-utils/reporthandling"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// TestIsControlFitToScanScope checks isControlFitToScanScope against scopes
// derived from ScanInfo: file-based scans (non-empty InputPatterns) should
// match file-scoped controls, cluster scans should match cluster-scoped
// controls, and cloud-specific scopes should only match when the scan
// actually targets that cloud.
//
// NOTE(review): the cluster/cloud expectations assume the test environment is
// not detected as a managed cloud cluster — GetScanningScope consults the
// current kubeconfig via getCloudType. Confirm this holds in CI.
func TestIsControlFitToScanScope(t *testing.T) {
	tests := []struct {
		scanInfo     *ScanInfo
		Control      reporthandling.Control
		expected_res bool
	}{
		{
			// File scan vs. file-scoped control: match.
			scanInfo: &ScanInfo{
				InputPatterns: []string{
					"./testdata/any_file_for_test.json",
				},
			},
			Control: reporthandling.Control{
				ScanningScope: &reporthandling.ScanningScope{
					Matches: []reporthandling.ScanningScopeType{
						reporthandling.ScopeFile,
					},
				},
			},
			expected_res: true,
		},
		{
			// File scan vs. control scoped to cluster OR file: match.
			scanInfo: &ScanInfo{
				InputPatterns: []string{
					"./testdata/any_file_for_test.json",
				},
			},
			Control: reporthandling.Control{
				ScanningScope: &reporthandling.ScanningScope{

					Matches: []reporthandling.ScanningScopeType{
						reporthandling.ScopeCluster,
						reporthandling.ScopeFile,
					},
				},
			},
			expected_res: true,
		},
		{
			// Cluster scan (empty ScanInfo) vs. cluster-scoped control: match.
			scanInfo: &ScanInfo{},
			Control: reporthandling.Control{
				ScanningScope: &reporthandling.ScanningScope{

					Matches: []reporthandling.ScanningScopeType{
						reporthandling.ScopeCluster,
					},
				},
			},
			expected_res: true,
		},
		{
			// File scan vs. GKE-only control: no match.
			scanInfo: &ScanInfo{
				InputPatterns: []string{
					"./testdata/any_file_for_test.json",
				},
			},
			Control: reporthandling.Control{
				ScanningScope: &reporthandling.ScanningScope{

					Matches: []reporthandling.ScanningScopeType{
						reporthandling.ScopeCloudGKE,
					},
				},
			},
			expected_res: false,
		},
		{
			// Plain cluster scan vs. EKS-only control: no match.
			scanInfo: &ScanInfo{},
			Control: reporthandling.Control{
				ScanningScope: &reporthandling.ScanningScope{

					Matches: []reporthandling.ScanningScopeType{
						reporthandling.ScopeCloudEKS,
					},
				},
			},
			expected_res: false,
		},
		{
			// Plain cluster scan vs. generic cloud control: no match.
			scanInfo: &ScanInfo{},
			Control: reporthandling.Control{
				ScanningScope: &reporthandling.ScanningScope{
					Matches: []reporthandling.ScanningScopeType{
						reporthandling.ScopeCloud,
					},
				},
			},
			expected_res: false,
		}}
	for i := range tests {
		assert.Equal(t, isControlFitToScanScope(tests[i].Control, GetScanningScope(tests[i].scanInfo)), tests[i].expected_res, fmt.Sprintf("tests_true index %d", i))
	}
}
|
||||
@@ -1,25 +1,54 @@
|
||||
package cautils
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
spinnerpkg "github.com/briandowns/spinner"
|
||||
"github.com/fatih/color"
|
||||
"github.com/jwalton/gchalk"
|
||||
logger "github.com/kubescape/go-logger"
|
||||
"github.com/kubescape/go-logger/helpers"
|
||||
"github.com/mattn/go-isatty"
|
||||
"github.com/schollz/progressbar/v3"
|
||||
)
|
||||
|
||||
var FailureDisplay = color.New(color.Bold, color.FgHiRed).FprintfFunc()
|
||||
var WarningDisplay = color.New(color.Bold, color.FgHiYellow).FprintfFunc()
|
||||
var FailureTextDisplay = color.New(color.Faint, color.FgHiRed).FprintfFunc()
|
||||
var InfoDisplay = color.New(color.Bold, color.FgCyan).FprintfFunc()
|
||||
var InfoTextDisplay = color.New(color.Bold, color.FgHiYellow).FprintfFunc()
|
||||
var SimpleDisplay = color.New().FprintfFunc()
|
||||
var SuccessDisplay = color.New(color.Bold, color.FgHiGreen).FprintfFunc()
|
||||
var DescriptionDisplay = color.New(color.Faint, color.FgWhite).FprintfFunc()
|
||||
func FailureDisplay(w io.Writer, format string, a ...interface{}) {
|
||||
fmt.Fprintf(w, gchalk.WithBrightRed().Bold(format), a...)
|
||||
}
|
||||
|
||||
func WarningDisplay(w io.Writer, format string, a ...interface{}) {
|
||||
fmt.Fprintf(w, gchalk.WithBrightYellow().Bold(format), a...)
|
||||
}
|
||||
|
||||
func FailureTextDisplay(w io.Writer, format string, a ...interface{}) {
|
||||
fmt.Fprintf(w, gchalk.WithBrightRed().Dim(format), a...)
|
||||
}
|
||||
|
||||
func InfoDisplay(w io.Writer, format string, a ...interface{}) {
|
||||
fmt.Fprintf(w, gchalk.WithCyan().Bold(format), a...)
|
||||
}
|
||||
|
||||
func InfoTextDisplay(w io.Writer, format string, a ...interface{}) {
|
||||
fmt.Fprintf(w, gchalk.WithBrightYellow().Bold(format), a...)
|
||||
}
|
||||
|
||||
func SimpleDisplay(w io.Writer, format string, a ...interface{}) {
|
||||
fmt.Fprintf(w, gchalk.White(format), a...)
|
||||
}
|
||||
|
||||
func SuccessDisplay(w io.Writer, format string, a ...interface{}) {
|
||||
fmt.Fprintf(w, gchalk.WithBlue().Bold(format), a...)
|
||||
}
|
||||
|
||||
func DescriptionDisplay(w io.Writer, format string, a ...interface{}) {
|
||||
fmt.Fprintf(w, gchalk.WithWhite().Dim(format), a...)
|
||||
}
|
||||
|
||||
func BoldDisplay(w io.Writer, format string, a ...interface{}) {
|
||||
fmt.Fprintf(w, gchalk.Bold(format), a...)
|
||||
}
|
||||
|
||||
var spinner *spinnerpkg.Spinner
|
||||
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
|
||||
"github.com/kubescape/go-logger/helpers"
|
||||
"github.com/kubescape/k8s-interface/workloadinterface"
|
||||
"golang.org/x/exp/slices"
|
||||
|
||||
logger "github.com/kubescape/go-logger"
|
||||
"github.com/kubescape/opa-utils/objectsenvelopes"
|
||||
@@ -31,8 +32,13 @@ const (
|
||||
JSON_FILE_FORMAT FileFormat = "json"
|
||||
)
|
||||
|
||||
type Chart struct {
|
||||
Name string
|
||||
Path string
|
||||
}
|
||||
|
||||
// LoadResourcesFromHelmCharts scans a given path (recursively) for helm charts, renders the templates and returns a map of workloads and a map of chart names
|
||||
func LoadResourcesFromHelmCharts(ctx context.Context, basePath string) (map[string][]workloadinterface.IMetadata, map[string]string) {
|
||||
func LoadResourcesFromHelmCharts(ctx context.Context, basePath string) (map[string][]workloadinterface.IMetadata, map[string]Chart) {
|
||||
directories, _ := listDirs(basePath)
|
||||
helmDirectories := make([]string, 0)
|
||||
for _, dir := range directories {
|
||||
@@ -42,7 +48,7 @@ func LoadResourcesFromHelmCharts(ctx context.Context, basePath string) (map[stri
|
||||
}
|
||||
|
||||
sourceToWorkloads := map[string][]workloadinterface.IMetadata{}
|
||||
sourceToChartName := map[string]string{}
|
||||
sourceToChart := make(map[string]Chart, 0)
|
||||
for _, helmDir := range helmDirectories {
|
||||
chart, err := NewHelmChart(helmDir)
|
||||
if err == nil {
|
||||
@@ -55,11 +61,14 @@ func LoadResourcesFromHelmCharts(ctx context.Context, basePath string) (map[stri
|
||||
chartName := chart.GetName()
|
||||
for k, v := range wls {
|
||||
sourceToWorkloads[k] = v
|
||||
sourceToChartName[k] = chartName
|
||||
sourceToChart[k] = Chart{
|
||||
Name: chartName,
|
||||
Path: helmDir,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return sourceToWorkloads, sourceToChartName
|
||||
return sourceToWorkloads, sourceToChart
|
||||
}
|
||||
|
||||
// If the contents at given path is a Kustomize Directory, LoadResourcesFromKustomizeDirectory will
|
||||
@@ -284,11 +293,11 @@ func convertYamlToJson(i interface{}) interface{} {
|
||||
}
|
||||
|
||||
func IsYaml(filePath string) bool {
|
||||
return StringInSlice(YAML_PREFIX, strings.ReplaceAll(filepath.Ext(filePath), ".", "")) != ValueNotFound
|
||||
return slices.Contains(YAML_PREFIX, strings.ReplaceAll(filepath.Ext(filePath), ".", ""))
|
||||
}
|
||||
|
||||
func IsJson(filePath string) bool {
|
||||
return StringInSlice(JSON_PREFIX, strings.ReplaceAll(filepath.Ext(filePath), ".", "")) != ValueNotFound
|
||||
return slices.Contains(JSON_PREFIX, strings.ReplaceAll(filepath.Ext(filePath), ".", ""))
|
||||
}
|
||||
|
||||
func glob(root, pattern string, onlyDirectories bool) ([]string, error) {
|
||||
|
||||
@@ -53,7 +53,8 @@ func TestLoadResourcesFromHelmCharts(t *testing.T) {
|
||||
|
||||
w := workloads[0]
|
||||
assert.True(t, localworkload.IsTypeLocalWorkload(w.GetObject()), "Expected localworkload as object type")
|
||||
assert.Equal(t, "kubescape", sourceToChartName[file])
|
||||
assert.Equal(t, "kubescape", sourceToChartName[file].Name)
|
||||
assert.Equal(t, helmChartPath(), sourceToChartName[file].Path)
|
||||
|
||||
switch filepath.Base(file) {
|
||||
case "serviceaccount.yaml":
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/armosec/armoapi-go/identifiers"
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
"github.com/kubescape/kubescape/v2/internal/testutils"
|
||||
"github.com/kubescape/opa-utils/reporthandling"
|
||||
@@ -152,7 +153,7 @@ func mockExceptions() []armotypes.PostureExceptionPolicy {
|
||||
Actions: []armotypes.PostureExceptionPolicyActions{
|
||||
"alertOnly",
|
||||
},
|
||||
Resources: []armotypes.PortalDesignator{
|
||||
Resources: []identifiers.PortalDesignator{
|
||||
{
|
||||
DesignatorType: "Attributes",
|
||||
Attributes: map[string]string{
|
||||
@@ -187,7 +188,7 @@ func mockExceptions() []armotypes.PostureExceptionPolicy {
|
||||
Actions: []armotypes.PostureExceptionPolicyActions{
|
||||
"alertOnly",
|
||||
},
|
||||
Resources: []armotypes.PortalDesignator{
|
||||
Resources: []identifiers.PortalDesignator{
|
||||
{
|
||||
DesignatorType: "Attributes",
|
||||
Attributes: map[string]string{
|
||||
@@ -237,7 +238,7 @@ func mockCustomerConfig(cluster, scope string) func() *armotypes.CustomerConfig
|
||||
Attributes: map[string]interface{}{
|
||||
"label": "value",
|
||||
},
|
||||
Scope: armotypes.PortalDesignator{
|
||||
Scope: identifiers.PortalDesignator{
|
||||
DesignatorType: "Attributes",
|
||||
Attributes: map[string]string{
|
||||
"kind": "Cluster",
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -8,13 +8,13 @@ import (
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
giturl "github.com/kubescape/go-git-url"
|
||||
"github.com/kubescape/go-logger"
|
||||
"github.com/kubescape/go-logger/helpers"
|
||||
"github.com/kubescape/k8s-interface/k8sinterface"
|
||||
"github.com/kubescape/kubescape/v2/core/cautils/getter"
|
||||
apisv1 "github.com/kubescape/opa-utils/httpserver/apis/v1"
|
||||
"github.com/kubescape/opa-utils/objectsenvelopes"
|
||||
"github.com/kubescape/opa-utils/reporthandling"
|
||||
reporthandlingv2 "github.com/kubescape/opa-utils/reporthandling/v2"
|
||||
|
||||
@@ -87,52 +87,59 @@ func (bpf *BoolPtrFlag) Set(val string) error {
|
||||
|
||||
// TODO - UPDATE
|
||||
type ViewTypes string
|
||||
type EnvScopeTypes string
|
||||
type ManageClusterTypes string
|
||||
|
||||
const (
|
||||
ResourceViewType ViewTypes = "resource"
|
||||
SecurityViewType ViewTypes = "security"
|
||||
ControlViewType ViewTypes = "control"
|
||||
)
|
||||
|
||||
type PolicyIdentifier struct {
|
||||
Identifier string // policy Identifier e.g. c-0012 for control, nsa,mitre for frameworks
|
||||
Kind apisv1.NotificationPolicyKind // policy kind e.g. Framework,Control,Rule
|
||||
Designators armotypes.PortalDesignator
|
||||
Identifier string // policy Identifier e.g. c-0012 for control, nsa,mitre for frameworks
|
||||
Kind apisv1.NotificationPolicyKind // policy kind e.g. Framework,Control,Rule
|
||||
}
|
||||
|
||||
type ScanInfo struct {
|
||||
Getters // TODO - remove from object
|
||||
PolicyIdentifier []PolicyIdentifier // TODO - remove from object
|
||||
UseExceptions string // Load file with exceptions configuration
|
||||
ControlsInputs string // Load file with inputs for controls
|
||||
AttackTracks string // Load file with attack tracks
|
||||
UseFrom []string // Load framework from local file (instead of download). Use when running offline
|
||||
UseDefault bool // Load framework from cached file (instead of download). Use when running offline
|
||||
UseArtifactsFrom string // Load artifacts from local path. Use when running offline
|
||||
VerboseMode bool // Display all of the input resources and not only failed resources
|
||||
View string // Display all of the input resources and not only failed resources
|
||||
Format string // Format results (table, json, junit ...)
|
||||
Output string // Store results in an output file, Output file name
|
||||
FormatVersion string // Output object can be different between versions, this is for testing and backward compatibility
|
||||
CustomClusterName string // Set the custom name of the cluster
|
||||
ExcludedNamespaces string // used for host scanner namespace
|
||||
IncludeNamespaces string //
|
||||
InputPatterns []string // Yaml files input patterns
|
||||
Silent bool // Silent mode - Do not print progress logs
|
||||
FailThreshold float32 // DEPRECATED - Failure score threshold
|
||||
ComplianceThreshold float32 // Compliance score threshold
|
||||
FailThresholdSeverity string // Severity at and above which the command should fail
|
||||
Submit bool // Submit results to Kubescape Cloud BE
|
||||
CreateAccount bool // Create account in Kubescape Cloud BE if no account found in local cache
|
||||
ScanID string // Report id of the current scan
|
||||
HostSensorEnabled BoolPtrFlag // Deploy Kubescape K8s host scanner to collect data from certain controls
|
||||
HostSensorYamlPath string // Path to hostsensor file
|
||||
Local bool // Do not submit results
|
||||
Credentials Credentials // account ID
|
||||
KubeContext string // context name
|
||||
FrameworkScan bool // false if scanning control
|
||||
ScanAll bool // true if scan all frameworks
|
||||
OmitRawResources bool // true if omit raw resources from the output
|
||||
PrintAttackTree bool // true if print attack tree
|
||||
Getters // TODO - remove from object
|
||||
PolicyIdentifier []PolicyIdentifier // TODO - remove from object
|
||||
UseExceptions string // Load file with exceptions configuration
|
||||
ControlsInputs string // Load file with inputs for controls
|
||||
AttackTracks string // Load file with attack tracks
|
||||
UseFrom []string // Load framework from local file (instead of download). Use when running offline
|
||||
UseDefault bool // Load framework from cached file (instead of download). Use when running offline
|
||||
UseArtifactsFrom string // Load artifacts from local path. Use when running offline
|
||||
VerboseMode bool // Display all of the input resources and not only failed resources
|
||||
View string // Display all of the input resources and not only failed resources
|
||||
Format string // Format results (table, json, junit ...)
|
||||
Output string // Store results in an output file, Output file name
|
||||
FormatVersion string // Output object can be different between versions, this is for testing and backward compatibility
|
||||
CustomClusterName string // Set the custom name of the cluster
|
||||
ExcludedNamespaces string // used for host scanner namespace
|
||||
IncludeNamespaces string //
|
||||
InputPatterns []string // Yaml files input patterns
|
||||
Silent bool // Silent mode - Do not print progress logs
|
||||
FailThreshold float32 // DEPRECATED - Failure score threshold
|
||||
ComplianceThreshold float32 // Compliance score threshold
|
||||
FailThresholdSeverity string // Severity at and above which the command should fail
|
||||
Submit bool // Submit results to Kubescape Cloud BE
|
||||
CreateAccount bool // Create account in Kubescape Cloud BE if no account found in local cache
|
||||
ScanID string // Report id of the current scan
|
||||
HostSensorEnabled BoolPtrFlag // Deploy Kubescape K8s host scanner to collect data from certain controls
|
||||
HostSensorYamlPath string // Path to hostsensor file
|
||||
Local bool // Do not submit results
|
||||
Credentials Credentials // account ID
|
||||
KubeContext string // context name
|
||||
FrameworkScan bool // false if scanning control
|
||||
ScanAll bool // true if scan all frameworks
|
||||
OmitRawResources bool // true if omit raw resources from the output
|
||||
PrintAttackTree bool // true if print attack tree
|
||||
ScanObject *objectsenvelopes.ScanObject // identifies a single resource (k8s object) to be scanned
|
||||
ScanType ScanTypes
|
||||
ScanImages bool
|
||||
ChartPath string
|
||||
FilePath string
|
||||
}
|
||||
|
||||
type Getters struct {
|
||||
@@ -204,6 +211,10 @@ func (scanInfo *ScanInfo) Formats() []string {
|
||||
}
|
||||
}
|
||||
|
||||
func (scanInfo *ScanInfo) SetScanType(scanType ScanTypes) {
|
||||
scanInfo.ScanType = scanType
|
||||
}
|
||||
|
||||
func (scanInfo *ScanInfo) SetPolicyIdentifiers(policies []string, kind apisv1.NotificationPolicyKind) {
|
||||
for _, policy := range policies {
|
||||
if !scanInfo.contains(policy) {
|
||||
@@ -337,6 +348,11 @@ func setContextMetadata(ctx context.Context, contextMetadata *reporthandlingv2.C
|
||||
}
|
||||
contextMetadata.RepoContextMetadata = context
|
||||
case ContextDir:
|
||||
contextMetadata.DirectoryContextMetadata = &reporthandlingv2.DirectoryContextMetadata{
|
||||
BasePath: getAbsPath(input),
|
||||
HostName: getHostname(),
|
||||
}
|
||||
// add repo context for submitting
|
||||
contextMetadata.RepoContextMetadata = &reporthandlingv2.RepoContextMetadata{
|
||||
Provider: "none",
|
||||
Repo: fmt.Sprintf("path@%s", getAbsPath(input)),
|
||||
@@ -347,6 +363,11 @@ func setContextMetadata(ctx context.Context, contextMetadata *reporthandlingv2.C
|
||||
}
|
||||
|
||||
case ContextFile:
|
||||
contextMetadata.FileContextMetadata = &reporthandlingv2.FileContextMetadata{
|
||||
FilePath: getAbsPath(input),
|
||||
HostName: getHostname(),
|
||||
}
|
||||
// add repo context for submitting
|
||||
contextMetadata.RepoContextMetadata = &reporthandlingv2.RepoContextMetadata{
|
||||
Provider: "none",
|
||||
Repo: fmt.Sprintf("file@%s", getAbsPath(input)),
|
||||
|
||||
@@ -8,8 +8,6 @@ import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
const ValueNotFound = -1
|
||||
|
||||
func ConvertLabelsToString(labels map[string]string) string {
|
||||
labelsStr := ""
|
||||
delimiter := ""
|
||||
@@ -37,15 +35,6 @@ func ConvertStringToLabels(labelsStr string) map[string]string {
|
||||
return labels
|
||||
}
|
||||
|
||||
func StringInSlice(strSlice []string, str string) int {
|
||||
for i := range strSlice {
|
||||
if strSlice[i] == str {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return ValueNotFound
|
||||
}
|
||||
|
||||
func StringSlicesAreEqual(a, b []string) bool {
|
||||
if len(a) != len(b) {
|
||||
return false
|
||||
|
||||
0
core/cautils/testdata/any_file_for_test.json
vendored
Normal file
0
core/cautils/testdata/any_file_for_test.json
vendored
Normal file
@@ -31,7 +31,7 @@ type IVersionCheckHandler interface {
|
||||
|
||||
func NewIVersionCheckHandler(ctx context.Context) IVersionCheckHandler {
|
||||
if BuildNumber == "" {
|
||||
logger.L().Ctx(ctx).Warning("unknown build number, this might affect your scan results. Please make sure you are updated to latest version")
|
||||
logger.L().Ctx(ctx).Warning("Unknown build number, this might affect your scan results. Please make sure you are updated to latest version")
|
||||
}
|
||||
|
||||
if v, ok := os.LookupEnv(CLIENT_ENV); ok && v != "" {
|
||||
|
||||
@@ -32,9 +32,9 @@ var (
|
||||
}
|
||||
)
|
||||
|
||||
func MapKSResource(ksResourceMap *KSResources, resources []string) []string {
|
||||
func MapExternalResource(externalResourceMap ExternalResources, resources []string) []string {
|
||||
var hostResources []string
|
||||
for k := range *ksResourceMap {
|
||||
for k := range externalResourceMap {
|
||||
for _, resource := range resources {
|
||||
if strings.Contains(k, resource) {
|
||||
hostResources = append(hostResources, k)
|
||||
@@ -44,16 +44,16 @@ func MapKSResource(ksResourceMap *KSResources, resources []string) []string {
|
||||
return hostResources
|
||||
}
|
||||
|
||||
func MapHostResources(ksResourceMap *KSResources) []string {
|
||||
return MapKSResource(ksResourceMap, HostSensorResources)
|
||||
func MapHostResources(externalResourceMap ExternalResources) []string {
|
||||
return MapExternalResource(externalResourceMap, HostSensorResources)
|
||||
}
|
||||
|
||||
func MapImageVulnResources(ksResourceMap *KSResources) []string {
|
||||
return MapKSResource(ksResourceMap, ImageVulnResources)
|
||||
func MapImageVulnResources(externalResourceMap ExternalResources) []string {
|
||||
return MapExternalResource(externalResourceMap, ImageVulnResources)
|
||||
}
|
||||
|
||||
func MapCloudResources(ksResourceMap *KSResources) []string {
|
||||
return MapKSResource(ksResourceMap, CloudResources)
|
||||
func MapCloudResources(externalResourceMap ExternalResources) []string {
|
||||
return MapExternalResource(externalResourceMap, CloudResources)
|
||||
}
|
||||
|
||||
func SetInfoMapForResources(info string, resources []string, errorMap map[string]apis.StatusInfo) {
|
||||
|
||||
@@ -9,7 +9,7 @@ import (
|
||||
|
||||
func (ks *Kubescape) SetCachedConfig(setConfig *metav1.SetConfig) error {
|
||||
|
||||
tenant := getTenantConfig(nil, "", "", getKubernetesApi())
|
||||
tenant := getTenantConfig(nil, "", "", nil)
|
||||
|
||||
if setConfig.Account != "" {
|
||||
tenant.GetConfigObj().AccountID = setConfig.Account
|
||||
@@ -45,6 +45,6 @@ func (ks *Kubescape) ViewCachedConfig(viewConfig *metav1.ViewConfig) error {
|
||||
|
||||
func (ks *Kubescape) DeleteCachedConfig(ctx context.Context, deleteConfig *metav1.DeleteConfig) error {
|
||||
|
||||
tenant := getTenantConfig(nil, "", "", getKubernetesApi()) // change k8sinterface
|
||||
tenant := getTenantConfig(nil, "", "", nil) // change k8sinterface
|
||||
return tenant.DeleteCachedConfig(ctx)
|
||||
}
|
||||
|
||||
@@ -65,13 +65,13 @@ func getRBACHandler(tenantConfig cautils.ITenantConfig, k8s *k8sinterface.Kubern
|
||||
return nil
|
||||
}
|
||||
|
||||
func getReporter(ctx context.Context, tenantConfig cautils.ITenantConfig, reportID string, submit, fwScan bool, scanningContext cautils.ScanningContext) reporter.IReport {
|
||||
func getReporter(ctx context.Context, tenantConfig cautils.ITenantConfig, reportID string, submit, fwScan bool, scanInfo cautils.ScanInfo) reporter.IReport {
|
||||
_, span := otel.Tracer("").Start(ctx, "getReporter")
|
||||
defer span.End()
|
||||
|
||||
if submit {
|
||||
submitData := reporterv2.SubmitContextScan
|
||||
if scanningContext != cautils.ContextCluster {
|
||||
if scanInfo.GetScanningContext() != cautils.ContextCluster {
|
||||
submitData = reporterv2.SubmitContextRepository
|
||||
}
|
||||
return reporterv2.NewReportEventReceiver(tenantConfig.GetConfigObj(), reportID, submitData)
|
||||
@@ -81,7 +81,8 @@ func getReporter(ctx context.Context, tenantConfig cautils.ITenantConfig, report
|
||||
return reporterv2.NewReportMock("", "")
|
||||
}
|
||||
var message string
|
||||
if !fwScan {
|
||||
|
||||
if !fwScan && scanInfo.ScanType != cautils.ScanTypeWorkload {
|
||||
message = "Kubescape does not submit scan results when scanning controls"
|
||||
}
|
||||
|
||||
@@ -94,11 +95,12 @@ func getResourceHandler(ctx context.Context, scanInfo *cautils.ScanInfo, tenantC
|
||||
|
||||
if len(scanInfo.InputPatterns) > 0 || k8s == nil {
|
||||
// scanInfo.HostSensor.SetBool(false)
|
||||
return resourcehandler.NewFileResourceHandler(ctx, scanInfo.InputPatterns)
|
||||
return resourcehandler.NewFileResourceHandler()
|
||||
}
|
||||
|
||||
getter.GetKSCloudAPIConnector()
|
||||
rbacObjects := getRBACHandler(tenantConfig, k8s, scanInfo.Submit)
|
||||
return resourcehandler.NewK8sResourceHandler(k8s, getFieldSelector(scanInfo), hostSensorHandler, rbacObjects, registryAdaptors)
|
||||
return resourcehandler.NewK8sResourceHandler(k8s, hostSensorHandler, rbacObjects, registryAdaptors)
|
||||
}
|
||||
|
||||
// getHostSensorHandler yields a IHostSensor that knows how to collect a host's scanned resources.
|
||||
@@ -133,17 +135,6 @@ func getHostSensorHandler(ctx context.Context, scanInfo *cautils.ScanInfo, k8s *
|
||||
}
|
||||
}
|
||||
|
||||
func getFieldSelector(scanInfo *cautils.ScanInfo) resourcehandler.IFieldSelector {
|
||||
if scanInfo.IncludeNamespaces != "" {
|
||||
return resourcehandler.NewIncludeSelector(scanInfo.IncludeNamespaces)
|
||||
}
|
||||
if scanInfo.ExcludedNamespaces != "" {
|
||||
return resourcehandler.NewExcludeSelector(scanInfo.ExcludedNamespaces)
|
||||
}
|
||||
|
||||
return &resourcehandler.EmptySelector{}
|
||||
}
|
||||
|
||||
func policyIdentifierIdentities(pi []cautils.PolicyIdentifier) string {
|
||||
policiesIdentities := ""
|
||||
for i := range pi {
|
||||
@@ -188,6 +179,12 @@ func setSubmitBehavior(scanInfo *cautils.ScanInfo, tenantConfig cautils.ITenantC
|
||||
return
|
||||
}
|
||||
|
||||
// do not submit single resource scan to BE
|
||||
if scanInfo.ScanObject != nil {
|
||||
scanInfo.Submit = false
|
||||
return
|
||||
}
|
||||
|
||||
// If There is no account, or if the account is not legal, do not submit
|
||||
if _, err := uuid.Parse(tenantConfig.GetAccountID()); err != nil {
|
||||
scanInfo.Submit = false
|
||||
@@ -280,12 +277,12 @@ func getAttackTracksGetter(ctx context.Context, attackTracks, accountID string,
|
||||
}
|
||||
|
||||
// getUIPrinter returns a printer that will be used to print to the program’s UI (terminal)
|
||||
func getUIPrinter(ctx context.Context, verboseMode bool, formatVersion string, attackTree bool, viewType cautils.ViewTypes) printer.IPrinter {
|
||||
func GetUIPrinter(ctx context.Context, scanInfo *cautils.ScanInfo) printer.IPrinter {
|
||||
var p printer.IPrinter
|
||||
if helpers.ToLevel(logger.L().GetLevel()) >= helpers.WarningLevel {
|
||||
p = &printerv2.SilentPrinter{}
|
||||
} else {
|
||||
p = printerv2.NewPrettyPrinter(verboseMode, formatVersion, attackTree, viewType)
|
||||
p = printerv2.NewPrettyPrinter(scanInfo.VerboseMode, scanInfo.FormatVersion, scanInfo.PrintAttackTree, cautils.ViewTypes(scanInfo.View), scanInfo.ScanType, scanInfo.InputPatterns)
|
||||
|
||||
// Since the UI of the program is a CLI (Stdout), it means that it should always print to Stdout
|
||||
p.SetWriter(ctx, os.Stdout.Name())
|
||||
|
||||
@@ -81,7 +81,14 @@ func Test_getUIPrinter(t *testing.T) {
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
logger.L().SetLevel(tt.args.loggerLevel.String())
|
||||
got := getUIPrinter(tt.args.ctx, tt.args.verboseMode, tt.args.formatVersion, tt.args.printAttack, tt.args.viewType)
|
||||
scanInfo := &cautils.ScanInfo{
|
||||
FormatVersion: tt.args.formatVersion,
|
||||
VerboseMode: tt.args.verboseMode,
|
||||
PrintAttackTree: tt.args.printAttack,
|
||||
View: string(tt.args.viewType),
|
||||
}
|
||||
|
||||
got := GetUIPrinter(tt.args.ctx, scanInfo)
|
||||
|
||||
assert.Equal(t, tt.want.structType, reflect.TypeOf(got).String())
|
||||
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
metav1 "github.com/kubescape/kubescape/v2/core/meta/datastructures/v1"
|
||||
"github.com/kubescape/kubescape/v2/core/pkg/resultshandling/printer"
|
||||
v2 "github.com/kubescape/kubescape/v2/core/pkg/resultshandling/printer/v2"
|
||||
"github.com/kubescape/kubescape/v2/core/pkg/resultshandling/printer/v2/prettyprinter/tableprinter/utils"
|
||||
"github.com/olekukonko/tablewriter"
|
||||
)
|
||||
|
||||
@@ -87,19 +88,27 @@ func prettyPrintListFormat(ctx context.Context, targetPolicy string, policies []
|
||||
return
|
||||
}
|
||||
|
||||
header := fmt.Sprintf("Supported %s", targetPolicy)
|
||||
|
||||
policyTable := tablewriter.NewWriter(printer.GetWriter(ctx, ""))
|
||||
|
||||
policyTable.SetAutoWrapText(true)
|
||||
header := fmt.Sprintf("Supported %s", targetPolicy)
|
||||
policyTable.SetHeader([]string{header})
|
||||
policyTable.SetHeaderLine(true)
|
||||
policyTable.SetRowLine(true)
|
||||
policyTable.SetAlignment(tablewriter.ALIGN_CENTER)
|
||||
policyTable.SetUnicodeHV(tablewriter.Regular, tablewriter.Regular)
|
||||
data := v2.Matrix{}
|
||||
|
||||
controlRows := generatePolicyRows(policies)
|
||||
|
||||
var headerColors []tablewriter.Colors
|
||||
for range controlRows[0] {
|
||||
headerColors = append(headerColors, tablewriter.Colors{tablewriter.Bold, tablewriter.FgHiYellowColor})
|
||||
}
|
||||
policyTable.SetHeaderColor(headerColors...)
|
||||
|
||||
data = append(data, controlRows...)
|
||||
|
||||
policyTable.SetAlignment(tablewriter.ALIGN_CENTER)
|
||||
policyTable.AppendBulk(data)
|
||||
policyTable.Render()
|
||||
}
|
||||
@@ -112,13 +121,29 @@ func jsonListFormat(_ context.Context, _ string, policies []string) {
|
||||
|
||||
func prettyPrintControls(ctx context.Context, policies []string) {
|
||||
controlsTable := tablewriter.NewWriter(printer.GetWriter(ctx, ""))
|
||||
controlsTable.SetAutoWrapText(true)
|
||||
controlsTable.SetHeader([]string{"Control ID", "Control Name", "Docs", "Frameworks"})
|
||||
|
||||
controlsTable.SetAutoWrapText(false)
|
||||
controlsTable.SetHeaderLine(true)
|
||||
controlsTable.SetRowLine(true)
|
||||
data := v2.Matrix{}
|
||||
controlsTable.SetUnicodeHV(tablewriter.Regular, tablewriter.Regular)
|
||||
|
||||
controlRows := generateControlRows(policies)
|
||||
|
||||
short := utils.CheckShortTerminalWidth(controlRows, []string{"Control ID", "Control Name", "Docs", "Frameworks"})
|
||||
if short {
|
||||
controlsTable.SetAutoWrapText(false)
|
||||
controlsTable.SetHeader([]string{"Controls"})
|
||||
controlRows = shortFormatControlRows(controlRows)
|
||||
} else {
|
||||
controlsTable.SetHeader([]string{"Control ID", "Control Name", "Docs", "Frameworks"})
|
||||
}
|
||||
var headerColors []tablewriter.Colors
|
||||
for range controlRows[0] {
|
||||
headerColors = append(headerColors, tablewriter.Colors{tablewriter.Bold, tablewriter.FgHiYellowColor})
|
||||
}
|
||||
controlsTable.SetHeaderColor(headerColors...)
|
||||
|
||||
data := v2.Matrix{}
|
||||
data = append(data, controlRows...)
|
||||
|
||||
controlsTable.AppendBulk(data)
|
||||
@@ -134,7 +159,7 @@ func generateControlRows(policies []string) [][]string {
|
||||
|
||||
docs := cautils.GetControlLink(id)
|
||||
|
||||
currentRow := []string{id, control, docs, framework}
|
||||
currentRow := []string{id, control, docs, strings.Replace(framework, " ", "\n", -1)}
|
||||
|
||||
rows = append(rows, currentRow)
|
||||
}
|
||||
@@ -151,3 +176,11 @@ func generatePolicyRows(policies []string) [][]string {
|
||||
}
|
||||
return rows
|
||||
}
|
||||
|
||||
func shortFormatControlRows(controlRows [][]string) [][]string {
|
||||
rows := [][]string{}
|
||||
for _, controlRow := range controlRows {
|
||||
rows = append(rows, []string{fmt.Sprintf("Control ID"+strings.Repeat(" ", 3)+": %+v\nControl Name"+strings.Repeat(" ", 1)+": %+v\nDocs"+strings.Repeat(" ", 9)+": %+v\nFrameworks"+strings.Repeat(" ", 3)+": %+v", controlRow[0], controlRow[1], controlRow[2], strings.Replace(controlRow[3], "\n", " ", -1))})
|
||||
}
|
||||
return rows
|
||||
}
|
||||
|
||||
@@ -6,7 +6,9 @@ import (
|
||||
|
||||
"github.com/kubescape/go-logger"
|
||||
"github.com/kubescape/go-logger/helpers"
|
||||
"github.com/kubescape/go-logger/iconlogger"
|
||||
"github.com/kubescape/k8s-interface/k8sinterface"
|
||||
"github.com/kubescape/k8s-interface/workloadinterface"
|
||||
"github.com/kubescape/kubescape/v2/core/cautils"
|
||||
"github.com/kubescape/kubescape/v2/core/cautils/getter"
|
||||
"github.com/kubescape/kubescape/v2/core/pkg/hostsensorutils"
|
||||
@@ -17,8 +19,10 @@ import (
|
||||
"github.com/kubescape/kubescape/v2/core/pkg/resultshandling"
|
||||
"github.com/kubescape/kubescape/v2/core/pkg/resultshandling/printer"
|
||||
"github.com/kubescape/kubescape/v2/core/pkg/resultshandling/reporter"
|
||||
"github.com/kubescape/kubescape/v2/pkg/imagescan"
|
||||
apisv1 "github.com/kubescape/opa-utils/httpserver/apis/v1"
|
||||
"go.opentelemetry.io/otel"
|
||||
"golang.org/x/exp/slices"
|
||||
|
||||
"github.com/kubescape/opa-utils/resources"
|
||||
)
|
||||
@@ -79,6 +83,7 @@ func getInterfaces(ctx context.Context, scanInfo *cautils.ScanInfo) componentInt
|
||||
spanHostScanner.End()
|
||||
|
||||
// ================== setup registry adaptors ======================================
|
||||
|
||||
registryAdaptors, _ := resourcehandler.NewRegistryAdaptors()
|
||||
|
||||
// ================== setup resource collector object ======================================
|
||||
@@ -88,19 +93,12 @@ func getInterfaces(ctx context.Context, scanInfo *cautils.ScanInfo) componentInt
|
||||
// ================== setup reporter & printer objects ======================================
|
||||
|
||||
// reporting behavior - setup reporter
|
||||
reportHandler := getReporter(ctx, tenantConfig, scanInfo.ScanID, scanInfo.Submit, scanInfo.FrameworkScan, scanInfo.GetScanningContext())
|
||||
reportHandler := getReporter(ctx, tenantConfig, scanInfo.ScanID, scanInfo.Submit, scanInfo.FrameworkScan, *scanInfo)
|
||||
|
||||
// setup printers
|
||||
formats := scanInfo.Formats()
|
||||
outputPrinters := GetOutputPrinters(scanInfo, ctx)
|
||||
|
||||
outputPrinters := make([]printer.IPrinter, 0)
|
||||
for _, format := range formats {
|
||||
printerHandler := resultshandling.NewPrinter(ctx, format, scanInfo.FormatVersion, scanInfo.PrintAttackTree, scanInfo.VerboseMode, cautils.ViewTypes(scanInfo.View))
|
||||
printerHandler.SetWriter(ctx, scanInfo.Output)
|
||||
outputPrinters = append(outputPrinters, printerHandler)
|
||||
}
|
||||
|
||||
uiPrinter := getUIPrinter(ctx, scanInfo.VerboseMode, scanInfo.FormatVersion, scanInfo.PrintAttackTree, cautils.ViewTypes(scanInfo.View))
|
||||
uiPrinter := GetUIPrinter(ctx, scanInfo)
|
||||
|
||||
// ================== return interface ======================================
|
||||
|
||||
@@ -114,9 +112,22 @@ func getInterfaces(ctx context.Context, scanInfo *cautils.ScanInfo) componentInt
|
||||
}
|
||||
}
|
||||
|
||||
func GetOutputPrinters(scanInfo *cautils.ScanInfo, ctx context.Context) []printer.IPrinter {
|
||||
formats := scanInfo.Formats()
|
||||
|
||||
outputPrinters := make([]printer.IPrinter, 0)
|
||||
for _, format := range formats {
|
||||
printerHandler := resultshandling.NewPrinter(ctx, format, scanInfo.FormatVersion, scanInfo.PrintAttackTree, scanInfo.VerboseMode, cautils.ViewTypes(scanInfo.View))
|
||||
printerHandler.SetWriter(ctx, scanInfo.Output)
|
||||
outputPrinters = append(outputPrinters, printerHandler)
|
||||
}
|
||||
return outputPrinters
|
||||
}
|
||||
|
||||
func (ks *Kubescape) Scan(ctx context.Context, scanInfo *cautils.ScanInfo) (*resultshandling.ResultsHandler, error) {
|
||||
ctxInit, spanInit := otel.Tracer("").Start(ctx, "initialization")
|
||||
logger.L().Info("Kubescape scanner starting")
|
||||
logger.InitLogger(iconlogger.LoggerName)
|
||||
logger.L().Start("Kubescape scanner initializing")
|
||||
|
||||
// ===================== Initialization =====================
|
||||
scanInfo.Init(ctxInit) // initialize scan info
|
||||
@@ -144,10 +155,12 @@ func (ks *Kubescape) Scan(ctx context.Context, scanInfo *cautils.ScanInfo) (*res
|
||||
// remove host scanner components
|
||||
defer func() {
|
||||
if err := interfaces.hostSensorHandler.TearDown(); err != nil {
|
||||
logger.L().Ctx(ctx).Error("failed to tear down host scanner", helpers.Error(err))
|
||||
logger.L().Ctx(ctx).StopError("Failed to tear down host scanner", helpers.Error(err))
|
||||
}
|
||||
}()
|
||||
|
||||
logger.L().StopSuccess("Initialized scanner")
|
||||
|
||||
resultsHandling := resultshandling.NewResultsHandler(interfaces.report, interfaces.outputPrinters, interfaces.uiPrinter)
|
||||
|
||||
// ===================== policies =====================
|
||||
@@ -162,7 +175,7 @@ func (ks *Kubescape) Scan(ctx context.Context, scanInfo *cautils.ScanInfo) (*res
|
||||
|
||||
// ===================== resources =====================
|
||||
ctxResources, spanResources := otel.Tracer("").Start(ctxInit, "resources")
|
||||
err = resourcehandler.CollectResources(ctxResources, interfaces.resourceHandler, scanInfo.PolicyIdentifier, scanData, cautils.NewProgressHandler(""))
|
||||
err = resourcehandler.CollectResources(ctxResources, interfaces.resourceHandler, scanInfo.PolicyIdentifier, scanData, cautils.NewProgressHandler(""), scanInfo)
|
||||
if err != nil {
|
||||
spanInit.End()
|
||||
return resultsHandling, err
|
||||
@@ -176,22 +189,28 @@ func (ks *Kubescape) Scan(ctx context.Context, scanInfo *cautils.ScanInfo) (*res
|
||||
|
||||
deps := resources.NewRegoDependenciesData(k8sinterface.GetK8sConfig(), interfaces.tenantConfig.GetContextName())
|
||||
reportResults := opaprocessor.NewOPAProcessor(scanData, deps)
|
||||
if err := reportResults.ProcessRulesListener(ctxOpa, cautils.NewProgressHandler("")); err != nil {
|
||||
if err := reportResults.ProcessRulesListener(ctxOpa, cautils.NewProgressHandler(""), scanInfo); err != nil {
|
||||
// TODO - do something
|
||||
return resultsHandling, fmt.Errorf("%w", err)
|
||||
}
|
||||
|
||||
// ======================== prioritization ===================
|
||||
if scanInfo.PrintAttackTree {
|
||||
if scanInfo.PrintAttackTree || isPrioritizationScanType(scanInfo.ScanType) {
|
||||
_, spanPrioritization := otel.Tracer("").Start(ctxOpa, "prioritization")
|
||||
if priotizationHandler, err := resourcesprioritization.NewResourcesPrioritizationHandler(ctxOpa, scanInfo.Getters.AttackTracksGetter, scanInfo.PrintAttackTree); err != nil {
|
||||
logger.L().Ctx(ctx).Warning("failed to get attack tracks, this may affect the scanning results", helpers.Error(err))
|
||||
} else if err := priotizationHandler.PrioritizeResources(scanData); err != nil {
|
||||
return resultsHandling, fmt.Errorf("%w", err)
|
||||
}
|
||||
if err == nil && isPrioritizationScanType(scanInfo.ScanType) {
|
||||
scanData.SetTopWorkloads()
|
||||
}
|
||||
spanPrioritization.End()
|
||||
}
|
||||
|
||||
if scanInfo.ScanImages {
|
||||
scanImages(scanInfo.ScanType, scanData, ctx, resultsHandling)
|
||||
}
|
||||
// ========================= results handling =====================
|
||||
resultsHandling.SetData(scanData)
|
||||
|
||||
@@ -201,3 +220,62 @@ func (ks *Kubescape) Scan(ctx context.Context, scanInfo *cautils.ScanInfo) (*res
|
||||
|
||||
return resultsHandling, nil
|
||||
}
|
||||
|
||||
func scanImages(scanType cautils.ScanTypes, scanData *cautils.OPASessionObj, ctx context.Context, resultsHandling *resultshandling.ResultsHandler) {
|
||||
imagesToScan := []string{}
|
||||
|
||||
if scanType == cautils.ScanTypeWorkload {
|
||||
containers, err := workloadinterface.NewWorkloadObj(scanData.SingleResourceScan.GetObject()).GetContainers()
|
||||
if err != nil {
|
||||
logger.L().Error("failed to get containers", helpers.Error(err))
|
||||
return
|
||||
}
|
||||
for _, container := range containers {
|
||||
if !slices.Contains(imagesToScan, container.Image) {
|
||||
imagesToScan = append(imagesToScan, container.Image)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for _, workload := range scanData.AllResources {
|
||||
containers, err := workloadinterface.NewWorkloadObj(workload.GetObject()).GetContainers()
|
||||
if err != nil {
|
||||
logger.L().Error(fmt.Sprintf("failed to get containers for kind: %s, name: %s, namespace: %s", workload.GetKind(), workload.GetName(), workload.GetNamespace()), helpers.Error(err))
|
||||
continue
|
||||
}
|
||||
for _, container := range containers {
|
||||
if !slices.Contains(imagesToScan, container.Image) {
|
||||
imagesToScan = append(imagesToScan, container.Image)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
dbCfg, _ := imagescan.NewDefaultDBConfig()
|
||||
svc := imagescan.NewScanService(dbCfg)
|
||||
|
||||
for _, img := range imagesToScan {
|
||||
logger.L().Start("Scanning", helpers.String("image", img))
|
||||
if err := scanSingleImage(ctx, img, svc, resultsHandling); err != nil {
|
||||
logger.L().StopError("failed to scan", helpers.String("image", img), helpers.Error(err))
|
||||
}
|
||||
logger.L().StopSuccess("Scanned successfully", helpers.String("image", img))
|
||||
}
|
||||
}
|
||||
|
||||
func scanSingleImage(ctx context.Context, img string, svc imagescan.Service, resultsHandling *resultshandling.ResultsHandler) error {
|
||||
|
||||
scanResults, err := svc.Scan(ctx, img, imagescan.RegistryCredentials{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
resultsHandling.ImageScanData = append(resultsHandling.ImageScanData, cautils.ImageScanData{
|
||||
Image: img,
|
||||
PresenterConfig: scanResults,
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
func isPrioritizationScanType(scanType cautils.ScanTypes) bool {
|
||||
return scanType == cautils.ScanTypeCluster || scanType == cautils.ScanTypeRepo
|
||||
}
|
||||
|
||||
@@ -2,7 +2,7 @@ package metrics
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/kubescape/go-logger"
|
||||
@@ -31,7 +31,7 @@ func Init() {
|
||||
meterProvider := otel.GetMeterProvider()
|
||||
meter := meterProvider.Meter(METER_NAME)
|
||||
metricName := func(name string) string {
|
||||
return fmt.Sprintf("%s_%s", METRIC_NAME_PREFIX, name)
|
||||
return strings.Join([]string{METRIC_NAME_PREFIX, name}, "_")
|
||||
}
|
||||
|
||||
if kubernetesResourcesCount, err = meter.Int64UpDownCounter(metricName("kubernetes_resources_count")); err != nil {
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"encoding/json"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/armosec/armoapi-go/identifiers"
|
||||
"github.com/kubescape/k8s-interface/workloadinterface"
|
||||
"github.com/kubescape/opa-utils/reporthandling"
|
||||
)
|
||||
@@ -38,9 +39,17 @@ func MockFramework_0006_0013() *reporthandling.Framework {
|
||||
Name: "framework-0006-0013",
|
||||
},
|
||||
}
|
||||
c06 := &reporthandling.Control{}
|
||||
c06 := &reporthandling.Control{ScanningScope: &reporthandling.ScanningScope{
|
||||
Matches: []reporthandling.ScanningScopeType{
|
||||
reporthandling.ScopeCluster,
|
||||
},
|
||||
}}
|
||||
json.Unmarshal([]byte(mockControl_0006), c06)
|
||||
c13 := &reporthandling.Control{}
|
||||
c13 := &reporthandling.Control{ScanningScope: &reporthandling.ScanningScope{
|
||||
Matches: []reporthandling.ScanningScopeType{
|
||||
reporthandling.ScopeCluster,
|
||||
},
|
||||
}}
|
||||
json.Unmarshal([]byte(mockControl_0013), c13)
|
||||
fw.Controls = []reporthandling.Control{*c06, *c13}
|
||||
return fw
|
||||
@@ -53,7 +62,11 @@ func MockFramework_0044() *reporthandling.Framework {
|
||||
Name: "framework-0044",
|
||||
},
|
||||
}
|
||||
c44 := &reporthandling.Control{}
|
||||
c44 := &reporthandling.Control{ScanningScope: &reporthandling.ScanningScope{
|
||||
Matches: []reporthandling.ScanningScopeType{
|
||||
reporthandling.ScopeCluster,
|
||||
},
|
||||
}}
|
||||
json.Unmarshal([]byte(mockControl_0044), c44)
|
||||
|
||||
fw.Controls = []reporthandling.Control{*c44}
|
||||
@@ -73,11 +86,11 @@ func MockExceptionAllKinds(policy *armotypes.PosturePolicy) *armotypes.PostureEx
|
||||
return &armotypes.PostureExceptionPolicy{
|
||||
PosturePolicies: []armotypes.PosturePolicy{*policy},
|
||||
Actions: []armotypes.PostureExceptionPolicyActions{armotypes.AlertOnly},
|
||||
Resources: []armotypes.PortalDesignator{
|
||||
Resources: []identifiers.PortalDesignator{
|
||||
{
|
||||
DesignatorType: armotypes.DesignatorAttributes,
|
||||
DesignatorType: identifiers.DesignatorAttributes,
|
||||
Attributes: map[string]string{
|
||||
armotypes.AttributeKind: ".*",
|
||||
identifiers.AttributeKind: ".*",
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
@@ -3,7 +3,7 @@ package containerscan
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/armosec/armoapi-go/identifiers"
|
||||
)
|
||||
|
||||
func (layer *ScanResultLayer) GetFilesByPackage(pkgname string) (files *PkgFiles) {
|
||||
@@ -24,11 +24,11 @@ func (layer *ScanResultLayer) GetPackagesNames() []string {
|
||||
return pkgsNames
|
||||
}
|
||||
|
||||
func (scanresult *ScanResultReport) GetDesignatorsNContext() (*armotypes.PortalDesignator, []armotypes.ArmoContext) {
|
||||
designatorsObj := armotypes.AttributesDesignatorsFromWLID(scanresult.WLID)
|
||||
func (scanresult *ScanResultReport) GetDesignatorsNContext() (*identifiers.PortalDesignator, []identifiers.ArmoContext) {
|
||||
designatorsObj := identifiers.AttributesDesignatorsFromWLID(scanresult.WLID)
|
||||
designatorsObj.Attributes["containerName"] = scanresult.ContainerName
|
||||
designatorsObj.Attributes["customerGUID"] = scanresult.CustomerGUID
|
||||
contextObj := armotypes.DesignatorToArmoContext(designatorsObj, "designators")
|
||||
contextObj := identifiers.DesignatorToArmoContext(designatorsObj, "designators")
|
||||
return designatorsObj, contextObj
|
||||
}
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
package containerscan
|
||||
|
||||
import (
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/armosec/armoapi-go/identifiers"
|
||||
cautils "github.com/armosec/utils-k8s-go/armometadata"
|
||||
)
|
||||
|
||||
@@ -69,8 +69,8 @@ func (scanresult *ScanResultReport) Summarize() *ElasticContainerScanSummaryResu
|
||||
ListOfDangerousArtifcats: scanresult.ListOfDangerousArtifcats,
|
||||
}
|
||||
|
||||
summary.Cluster = designatorsObj.Attributes[armotypes.AttributeCluster]
|
||||
summary.Namespace = designatorsObj.Attributes[armotypes.AttributeNamespace]
|
||||
summary.Cluster = designatorsObj.Attributes[identifiers.AttributeCluster]
|
||||
summary.Namespace = designatorsObj.Attributes[identifiers.AttributeNamespace]
|
||||
|
||||
imageInfo, e2 := cautils.ImageTagToImageInfo(scanresult.ImgTag)
|
||||
if e2 == nil {
|
||||
|
||||
@@ -1,10 +1,12 @@
|
||||
package containerscan
|
||||
|
||||
import "github.com/armosec/armoapi-go/armotypes"
|
||||
import (
|
||||
"github.com/armosec/armoapi-go/identifiers"
|
||||
)
|
||||
|
||||
type ElasticContainerVulnerabilityResult struct {
|
||||
Designators armotypes.PortalDesignator `json:"designators"`
|
||||
Context []armotypes.ArmoContext `json:"context"`
|
||||
Designators identifiers.PortalDesignator `json:"designators"`
|
||||
Context []identifiers.ArmoContext `json:"context"`
|
||||
|
||||
WLID string `json:"wlid"`
|
||||
ContainerScanID string `json:"containersScanID"`
|
||||
@@ -35,8 +37,8 @@ type SeverityStats struct {
|
||||
}
|
||||
|
||||
type ElasticContainerScanSeveritySummary struct {
|
||||
Designators armotypes.PortalDesignator `json:"designators"`
|
||||
Context []armotypes.ArmoContext `json:"context"`
|
||||
Designators identifiers.PortalDesignator `json:"designators"`
|
||||
Context []identifiers.ArmoContext `json:"context"`
|
||||
|
||||
SeverityStats
|
||||
CustomerGUID string `json:"customerGUID"`
|
||||
@@ -57,8 +59,8 @@ type ElasticContainerScanSeveritySummary struct {
|
||||
|
||||
type ElasticContainerScanSummaryResult struct {
|
||||
SeverityStats
|
||||
Designators armotypes.PortalDesignator `json:"designators"`
|
||||
Context []armotypes.ArmoContext `json:"context"`
|
||||
Designators identifiers.PortalDesignator `json:"designators"`
|
||||
Context []identifiers.ArmoContext `json:"context"`
|
||||
|
||||
CustomerGUID string `json:"customerGUID"`
|
||||
ContainerScanID string `json:"containersScanID"`
|
||||
|
||||
@@ -73,19 +73,17 @@ func isSupportedScanningTarget(report *reporthandlingv2.PostureReport) error {
|
||||
}
|
||||
|
||||
func getLocalPath(report *reporthandlingv2.PostureReport) string {
|
||||
if report.Metadata.ScanMetadata.ScanningTarget == reporthandlingv2.GitLocal {
|
||||
|
||||
switch report.Metadata.ScanMetadata.ScanningTarget {
|
||||
case reporthandlingv2.GitLocal:
|
||||
return report.Metadata.ContextMetadata.RepoContextMetadata.LocalRootPath
|
||||
}
|
||||
|
||||
if report.Metadata.ScanMetadata.ScanningTarget == reporthandlingv2.Directory {
|
||||
case reporthandlingv2.Directory:
|
||||
return report.Metadata.ContextMetadata.DirectoryContextMetadata.BasePath
|
||||
}
|
||||
|
||||
if report.Metadata.ScanMetadata.ScanningTarget == reporthandlingv2.File {
|
||||
case reporthandlingv2.File:
|
||||
return filepath.Dir(report.Metadata.ContextMetadata.FileContextMetadata.FilePath)
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
func (h *FixHandler) buildResourcesMap() map[string]*reporthandling.Resource {
|
||||
|
||||
@@ -4,7 +4,7 @@ import (
|
||||
"context"
|
||||
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
"github.com/sigstore/cosign/pkg/cosign"
|
||||
"github.com/sigstore/cosign/v2/pkg/cosign"
|
||||
)
|
||||
|
||||
func has_signature(img string) bool {
|
||||
|
||||
@@ -6,12 +6,12 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
"github.com/sigstore/cosign/cmd/cosign/cli/options"
|
||||
"github.com/sigstore/cosign/cmd/cosign/cli/sign"
|
||||
"github.com/sigstore/cosign/pkg/cosign"
|
||||
"github.com/sigstore/cosign/pkg/cosign/pkcs11key"
|
||||
ociremote "github.com/sigstore/cosign/pkg/oci/remote"
|
||||
sigs "github.com/sigstore/cosign/pkg/signature"
|
||||
"github.com/sigstore/cosign/v2/cmd/cosign/cli/options"
|
||||
"github.com/sigstore/cosign/v2/cmd/cosign/cli/sign"
|
||||
"github.com/sigstore/cosign/v2/pkg/cosign"
|
||||
"github.com/sigstore/cosign/v2/pkg/cosign/pkcs11key"
|
||||
ociremote "github.com/sigstore/cosign/v2/pkg/oci/remote"
|
||||
sigs "github.com/sigstore/cosign/v2/pkg/signature"
|
||||
)
|
||||
|
||||
// VerifyCommand verifies a signature on a supplied container image
|
||||
|
||||
13
core/pkg/opaprocessor/normalize_image_name.go
Normal file
13
core/pkg/opaprocessor/normalize_image_name.go
Normal file
@@ -0,0 +1,13 @@
|
||||
package opaprocessor
|
||||
|
||||
import (
|
||||
"github.com/docker/distribution/reference"
|
||||
)
|
||||
|
||||
func normalize_image_name(img string) (string, error) {
|
||||
name, err := reference.ParseNormalizedNamed(img)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return name.String(), nil
|
||||
}
|
||||
28
core/pkg/opaprocessor/normalize_image_name_test.go
Normal file
28
core/pkg/opaprocessor/normalize_image_name_test.go
Normal file
@@ -0,0 +1,28 @@
|
||||
package opaprocessor
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func Test_normalize_name(t *testing.T) {
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
img string
|
||||
want string
|
||||
}{
|
||||
{
|
||||
name: "Normalize image name",
|
||||
img: "nginx",
|
||||
want: "docker.io/library/nginx",
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
name, _ := normalize_image_name(tt.img)
|
||||
assert.Equal(t, tt.want, name, tt.name)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -3,9 +3,7 @@ package opaprocessor
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"runtime"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
logger "github.com/kubescape/go-logger"
|
||||
@@ -23,7 +21,6 @@ import (
|
||||
"github.com/open-policy-agent/opa/rego"
|
||||
"github.com/open-policy-agent/opa/storage"
|
||||
"go.opentelemetry.io/otel"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
const ScoreConfigPath = "/resources/config"
|
||||
@@ -58,18 +55,14 @@ func NewOPAProcessor(sessionObj *cautils.OPASessionObj, regoDependenciesData *re
|
||||
}
|
||||
}
|
||||
|
||||
func (opap *OPAProcessor) ProcessRulesListener(ctx context.Context, progressListener IJobProgressNotificationClient) error {
|
||||
opap.OPASessionObj.AllPolicies = ConvertFrameworksToPolicies(opap.Policies, cautils.BuildNumber)
|
||||
func (opap *OPAProcessor) ProcessRulesListener(ctx context.Context, progressListener IJobProgressNotificationClient, ScanInfo *cautils.ScanInfo) error {
|
||||
scanningScope := cautils.GetScanningScope(ScanInfo)
|
||||
opap.OPASessionObj.AllPolicies = ConvertFrameworksToPolicies(opap.Policies, cautils.BuildNumber, opap.ExcludedRules, scanningScope)
|
||||
|
||||
ConvertFrameworksToSummaryDetails(&opap.Report.SummaryDetails, opap.Policies, opap.OPASessionObj.AllPolicies)
|
||||
|
||||
maxGoRoutines, err := cautils.ParseIntEnvVar("RULE_PROCESSING_GOMAXPROCS", 2*runtime.NumCPU())
|
||||
if err != nil {
|
||||
logger.L().Ctx(ctx).Warning(err.Error())
|
||||
}
|
||||
|
||||
// process
|
||||
if err := opap.Process(ctx, opap.OPASessionObj.AllPolicies, progressListener, maxGoRoutines); err != nil {
|
||||
if err := opap.Process(ctx, opap.OPASessionObj.AllPolicies, progressListener); err != nil {
|
||||
logger.L().Ctx(ctx).Warning(err.Error())
|
||||
// Return error?
|
||||
}
|
||||
@@ -85,136 +78,62 @@ func (opap *OPAProcessor) ProcessRulesListener(ctx context.Context, progressList
|
||||
}
|
||||
|
||||
// Process OPA policies (rules) on all configured controls.
|
||||
func (opap *OPAProcessor) Process(ctx context.Context, policies *cautils.Policies, progressListener IJobProgressNotificationClient, maxGoRoutines int) error {
|
||||
func (opap *OPAProcessor) Process(ctx context.Context, policies *cautils.Policies, progressListener IJobProgressNotificationClient) error {
|
||||
ctx, span := otel.Tracer("").Start(ctx, "OPAProcessor.Process")
|
||||
defer span.End()
|
||||
opap.loggerStartScanning()
|
||||
defer opap.loggerDoneScanning()
|
||||
|
||||
cautils.StartSpinner()
|
||||
defer cautils.StopSpinner()
|
||||
|
||||
if progressListener != nil {
|
||||
progressListener.Start(len(policies.Controls))
|
||||
defer progressListener.Stop()
|
||||
}
|
||||
|
||||
// results to collect from controls being processed in parallel
|
||||
type results struct {
|
||||
resourceAssociatedControl map[string]resourcesresults.ResourceAssociatedControl
|
||||
allResources map[string]workloadinterface.IMetadata
|
||||
}
|
||||
|
||||
resultsChan := make(chan results)
|
||||
controlsGroup, groupCtx := errgroup.WithContext(ctx)
|
||||
controlsGroup.SetLimit(maxGoRoutines)
|
||||
|
||||
allResources := make(map[string]workloadinterface.IMetadata, max(len(opap.AllResources), heuristicAllocResources))
|
||||
for k, v := range opap.AllResources {
|
||||
allResources[k] = v
|
||||
}
|
||||
|
||||
var resultsCollector sync.WaitGroup
|
||||
resultsCollector.Add(1)
|
||||
go func() {
|
||||
// collects the results from processing all rules for all controls.
|
||||
//
|
||||
// NOTE: since policies.Controls is a map, iterating over it doesn't guarantee any
|
||||
// specific ordering. Therefore, if a conflict is possible on resources, e.g. 2 rules,
|
||||
// referencing the same resource, the eventual result of the merge is not guaranteed to be
|
||||
// stable. This behavior is consistent with the previous (unparallelized) processing.
|
||||
defer resultsCollector.Done()
|
||||
|
||||
for result := range resultsChan {
|
||||
// merge both maps in parallel
|
||||
var merger sync.WaitGroup
|
||||
merger.Add(1)
|
||||
go func() {
|
||||
// merge all resources
|
||||
defer merger.Done()
|
||||
for k, v := range result.allResources {
|
||||
allResources[k] = v
|
||||
}
|
||||
}()
|
||||
|
||||
merger.Add(1)
|
||||
go func() {
|
||||
defer merger.Done()
|
||||
// update resources with latest results
|
||||
for resourceID, controlResult := range result.resourceAssociatedControl {
|
||||
result, found := opap.ResourcesResult[resourceID]
|
||||
if !found {
|
||||
result = resourcesresults.Result{ResourceID: resourceID}
|
||||
}
|
||||
result.AssociatedControls = append(result.AssociatedControls, controlResult)
|
||||
opap.ResourcesResult[resourceID] = result
|
||||
}
|
||||
}()
|
||||
|
||||
merger.Wait()
|
||||
}
|
||||
}()
|
||||
|
||||
// processes rules for all controls in parallel
|
||||
for _, controlToPin := range policies.Controls {
|
||||
for _, toPin := range policies.Controls {
|
||||
if progressListener != nil {
|
||||
progressListener.ProgressJob(1, fmt.Sprintf("Control: %s", controlToPin.ControlID))
|
||||
progressListener.ProgressJob(1, fmt.Sprintf("Control: %s", toPin.ControlID))
|
||||
}
|
||||
|
||||
control := controlToPin
|
||||
control := toPin
|
||||
|
||||
controlsGroup.Go(func() error {
|
||||
resourceAssociatedControl, allResourcesFromControl, err := opap.processControl(groupCtx, &control)
|
||||
if err != nil {
|
||||
logger.L().Ctx(groupCtx).Warning(err.Error())
|
||||
resourcesAssociatedControl, err := opap.processControl(ctx, &control)
|
||||
if err != nil {
|
||||
logger.L().Ctx(ctx).Warning(err.Error())
|
||||
}
|
||||
|
||||
if len(resourcesAssociatedControl) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
// update resources with latest results
|
||||
for resourceID, controlResult := range resourcesAssociatedControl {
|
||||
if _, ok := opap.ResourcesResult[resourceID]; !ok {
|
||||
opap.ResourcesResult[resourceID] = resourcesresults.Result{ResourceID: resourceID}
|
||||
}
|
||||
|
||||
select {
|
||||
case resultsChan <- results{
|
||||
resourceAssociatedControl: resourceAssociatedControl,
|
||||
allResources: allResourcesFromControl,
|
||||
}:
|
||||
case <-groupCtx.Done(): // interrupted (NOTE: at this moment, this never happens since errors are muted)
|
||||
return groupCtx.Err()
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
t := opap.ResourcesResult[resourceID]
|
||||
t.AssociatedControls = append(t.AssociatedControls, controlResult)
|
||||
opap.ResourcesResult[resourceID] = t
|
||||
}
|
||||
}
|
||||
|
||||
// wait for all results from all rules to be collected
|
||||
err := controlsGroup.Wait()
|
||||
close(resultsChan)
|
||||
resultsCollector.Wait()
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// merge the final result in resources
|
||||
for k, v := range allResources {
|
||||
opap.AllResources[k] = v
|
||||
}
|
||||
opap.Report.ReportGenerationTime = time.Now().UTC()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (opap *OPAProcessor) loggerStartScanning() {
|
||||
targetScan := opap.OPASessionObj.Metadata.ScanMetadata.ScanningTarget
|
||||
if reporthandlingv2.Cluster == targetScan {
|
||||
logger.L().Info("Scanning", helpers.String(targetScan.String(), cautils.ClusterName))
|
||||
logger.L().Start("Scanning", helpers.String(targetScan.String(), cautils.ClusterName))
|
||||
} else {
|
||||
logger.L().Info("Scanning " + targetScan.String())
|
||||
logger.L().Start("Scanning " + targetScan.String())
|
||||
}
|
||||
}
|
||||
|
||||
func (opap *OPAProcessor) loggerDoneScanning() {
|
||||
targetScan := opap.OPASessionObj.Metadata.ScanMetadata.ScanningTarget
|
||||
if reporthandlingv2.Cluster == targetScan {
|
||||
logger.L().Success("Done scanning", helpers.String(targetScan.String(), cautils.ClusterName))
|
||||
logger.L().StopSuccess("Done scanning", helpers.String(targetScan.String(), cautils.ClusterName))
|
||||
} else {
|
||||
logger.L().Success("Done scanning " + targetScan.String())
|
||||
logger.L().StopSuccess("Done scanning " + targetScan.String())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -222,22 +141,16 @@ func (opap *OPAProcessor) loggerDoneScanning() {
|
||||
//
|
||||
// NOTE: the call to processControl no longer mutates the state of the current OPAProcessor instance,
|
||||
// but returns a map instead, to be merged by the caller.
|
||||
func (opap *OPAProcessor) processControl(ctx context.Context, control *reporthandling.Control) (map[string]resourcesresults.ResourceAssociatedControl, map[string]workloadinterface.IMetadata, error) {
|
||||
resourcesAssociatedControl := make(map[string]resourcesresults.ResourceAssociatedControl, heuristicAllocControls)
|
||||
allResources := make(map[string]workloadinterface.IMetadata, heuristicAllocResources)
|
||||
func (opap *OPAProcessor) processControl(ctx context.Context, control *reporthandling.Control) (map[string]resourcesresults.ResourceAssociatedControl, error) {
|
||||
resourcesAssociatedControl := make(map[string]resourcesresults.ResourceAssociatedControl)
|
||||
|
||||
for i := range control.Rules {
|
||||
resourceAssociatedRule, allResourcesFromRule, err := opap.processRule(ctx, &control.Rules[i], control.FixedInput)
|
||||
resourceAssociatedRule, err := opap.processRule(ctx, &control.Rules[i], control.FixedInput)
|
||||
if err != nil {
|
||||
logger.L().Ctx(ctx).Warning(err.Error())
|
||||
continue
|
||||
}
|
||||
|
||||
// merge all resources for all processed rules in this control
|
||||
for k, v := range allResourcesFromRule {
|
||||
allResources[k] = v
|
||||
}
|
||||
|
||||
// append failed rules to controls
|
||||
for resourceID, ruleResponse := range resourceAssociatedRule {
|
||||
var controlResult resourcesresults.ResourceAssociatedControl
|
||||
@@ -259,94 +172,101 @@ func (opap *OPAProcessor) processControl(ctx context.Context, control *reporthan
|
||||
}
|
||||
}
|
||||
|
||||
return resourcesAssociatedControl, allResources, nil
|
||||
return resourcesAssociatedControl, nil
|
||||
}
|
||||
|
||||
// processRule processes a single policy rule, with some extra fixed control inputs.
|
||||
//
|
||||
// NOTE: processRule no longer mutates the state of the current OPAProcessor instance,
|
||||
// and returns a map instead, to be merged by the caller.
|
||||
func (opap *OPAProcessor) processRule(ctx context.Context, rule *reporthandling.PolicyRule, fixedControlInputs map[string][]string) (map[string]*resourcesresults.ResourceAssociatedRule, map[string]workloadinterface.IMetadata, error) {
|
||||
func (opap *OPAProcessor) processRule(ctx context.Context, rule *reporthandling.PolicyRule, fixedControlInputs map[string][]string) (map[string]*resourcesresults.ResourceAssociatedRule, error) {
|
||||
resources := make(map[string]*resourcesresults.ResourceAssociatedRule)
|
||||
|
||||
ruleRegoDependenciesData := opap.makeRegoDeps(rule.ConfigInputs, fixedControlInputs)
|
||||
|
||||
inputResources, err := reporthandling.RegoResourcesAggregator(
|
||||
rule,
|
||||
getAllSupportedObjects(opap.K8SResources, opap.ArmoResource, opap.AllResources, rule), // NOTE: this uses the initial snapshot of AllResources
|
||||
)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("error getting aggregated k8sObjects: %w", err)
|
||||
}
|
||||
|
||||
if len(inputResources) == 0 {
|
||||
return nil, nil, nil // no resources found for testing
|
||||
}
|
||||
|
||||
inputRawResources := workloadinterface.ListMetaToMap(inputResources)
|
||||
|
||||
// the failed resources are a subgroup of the enumeratedData, so we store the enumeratedData like it was the input data
|
||||
enumeratedData, err := opap.enumerateData(ctx, rule, inputRawResources)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
inputResources = objectsenvelopes.ListMapToMeta(enumeratedData)
|
||||
resources := make(map[string]*resourcesresults.ResourceAssociatedRule, len(inputResources))
|
||||
allResources := make(map[string]workloadinterface.IMetadata, len(inputResources))
|
||||
|
||||
for i, inputResource := range inputResources {
|
||||
resources[inputResource.GetID()] = &resourcesresults.ResourceAssociatedRule{
|
||||
Name: rule.Name,
|
||||
ControlConfigurations: ruleRegoDependenciesData.PostureControlInputs,
|
||||
Status: apis.StatusPassed,
|
||||
resourcesPerNS := getAllSupportedObjects(opap.K8SResources, opap.ExternalResources, opap.AllResources, rule)
|
||||
for i := range resourcesPerNS {
|
||||
resourceToScan := resourcesPerNS[i]
|
||||
if _, ok := resourcesPerNS[clusterScope]; ok && i != clusterScope {
|
||||
resourceToScan = append(resourceToScan, resourcesPerNS[clusterScope]...)
|
||||
}
|
||||
inputResources, err := reporthandling.RegoResourcesAggregator(
|
||||
rule,
|
||||
resourceToScan, // NOTE: this uses the initial snapshot of AllResources
|
||||
)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
allResources[inputResource.GetID()] = inputResources[i]
|
||||
}
|
||||
|
||||
ruleResponses, err := opap.runOPAOnSingleRule(ctx, rule, inputRawResources, ruleData, ruleRegoDependenciesData)
|
||||
if err != nil {
|
||||
return resources, allResources, err
|
||||
}
|
||||
if len(inputResources) == 0 {
|
||||
continue // no resources found for testing
|
||||
}
|
||||
|
||||
// ruleResponse to ruleResult
|
||||
for _, ruleResponse := range ruleResponses {
|
||||
failedResources := objectsenvelopes.ListMapToMeta(ruleResponse.GetFailedResources())
|
||||
for _, failedResource := range failedResources {
|
||||
var ruleResult *resourcesresults.ResourceAssociatedRule
|
||||
if r, found := resources[failedResource.GetID()]; found {
|
||||
ruleResult = r
|
||||
} else {
|
||||
ruleResult = &resourcesresults.ResourceAssociatedRule{
|
||||
Paths: make([]armotypes.PosturePaths, 0, len(ruleResponse.FailedPaths)+len(ruleResponse.FixPaths)+1),
|
||||
}
|
||||
inputRawResources := workloadinterface.ListMetaToMap(inputResources)
|
||||
|
||||
// the failed resources are a subgroup of the enumeratedData, so we store the enumeratedData like it was the input data
|
||||
enumeratedData, err := opap.enumerateData(ctx, rule, inputRawResources)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
inputResources = objectsenvelopes.ListMapToMeta(enumeratedData)
|
||||
|
||||
for i, inputResource := range inputResources {
|
||||
resources[inputResource.GetID()] = &resourcesresults.ResourceAssociatedRule{
|
||||
Name: rule.Name,
|
||||
ControlConfigurations: ruleRegoDependenciesData.PostureControlInputs,
|
||||
Status: apis.StatusPassed,
|
||||
}
|
||||
opap.AllResources[inputResource.GetID()] = inputResources[i]
|
||||
}
|
||||
|
||||
ruleResult.SetStatus(apis.StatusFailed, nil)
|
||||
for _, failedPath := range ruleResponse.FailedPaths {
|
||||
ruleResult.Paths = append(ruleResult.Paths, armotypes.PosturePaths{FailedPath: failedPath})
|
||||
}
|
||||
ruleResponses, err := opap.runOPAOnSingleRule(ctx, rule, inputRawResources, ruleData, ruleRegoDependenciesData)
|
||||
if err != nil {
|
||||
continue
|
||||
// return resources, allResources, err
|
||||
}
|
||||
|
||||
for _, fixPath := range ruleResponse.FixPaths {
|
||||
ruleResult.Paths = append(ruleResult.Paths, armotypes.PosturePaths{FixPath: fixPath})
|
||||
}
|
||||
|
||||
if ruleResponse.FixCommand != "" {
|
||||
ruleResult.Paths = append(ruleResult.Paths, armotypes.PosturePaths{FixCommand: ruleResponse.FixCommand})
|
||||
}
|
||||
// if ruleResponse has relatedObjects, add it to ruleResult
|
||||
if len(ruleResponse.RelatedObjects) > 0 {
|
||||
for _, relatedObject := range ruleResponse.RelatedObjects {
|
||||
wl := objectsenvelopes.NewObject(relatedObject.Object)
|
||||
if wl != nil {
|
||||
ruleResult.RelatedResourcesIDs = append(ruleResult.RelatedResourcesIDs, wl.GetID())
|
||||
// ruleResponse to ruleResult
|
||||
for _, ruleResponse := range ruleResponses {
|
||||
failedResources := objectsenvelopes.ListMapToMeta(ruleResponse.GetFailedResources())
|
||||
for _, failedResource := range failedResources {
|
||||
var ruleResult *resourcesresults.ResourceAssociatedRule
|
||||
if r, found := resources[failedResource.GetID()]; found {
|
||||
ruleResult = r
|
||||
} else {
|
||||
ruleResult = &resourcesresults.ResourceAssociatedRule{
|
||||
Paths: make([]armotypes.PosturePaths, 0, len(ruleResponse.FailedPaths)+len(ruleResponse.FixPaths)+1),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
resources[failedResource.GetID()] = ruleResult
|
||||
ruleResult.SetStatus(apis.StatusFailed, nil)
|
||||
for _, failedPath := range ruleResponse.FailedPaths {
|
||||
ruleResult.Paths = append(ruleResult.Paths, armotypes.PosturePaths{FailedPath: failedPath})
|
||||
}
|
||||
|
||||
for _, fixPath := range ruleResponse.FixPaths {
|
||||
ruleResult.Paths = append(ruleResult.Paths, armotypes.PosturePaths{FixPath: fixPath})
|
||||
}
|
||||
|
||||
if ruleResponse.FixCommand != "" {
|
||||
ruleResult.Paths = append(ruleResult.Paths, armotypes.PosturePaths{FixCommand: ruleResponse.FixCommand})
|
||||
}
|
||||
// if ruleResponse has relatedObjects, add it to ruleResult
|
||||
if len(ruleResponse.RelatedObjects) > 0 {
|
||||
for _, relatedObject := range ruleResponse.RelatedObjects {
|
||||
wl := objectsenvelopes.NewObject(relatedObject.Object)
|
||||
if wl != nil {
|
||||
ruleResult.RelatedResourcesIDs = append(ruleResult.RelatedResourcesIDs, wl.GetID())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
resources[failedResource.GetID()] = ruleResult
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return resources, allResources, nil
|
||||
return resources, nil
|
||||
}
|
||||
|
||||
func (opap *OPAProcessor) runOPAOnSingleRule(ctx context.Context, rule *reporthandling.PolicyRule, k8sObjects []map[string]interface{}, getRuleData func(*reporthandling.PolicyRule) string, ruleRegoDependenciesData resources.RegoDependenciesData) ([]reporthandling.RuleResponse, error) {
|
||||
@@ -369,6 +289,7 @@ func (opap *OPAProcessor) runRegoOnK8s(ctx context.Context, rule *reporthandling
|
||||
// register signature verification methods for the OPA ast engine (since these are package level symbols, we do it only once)
|
||||
rego.RegisterBuiltin2(cosignVerifySignatureDeclaration, cosignVerifySignatureDefinition)
|
||||
rego.RegisterBuiltin1(cosignHasSignatureDeclaration, cosignHasSignatureDefinition)
|
||||
rego.RegisterBuiltin1(imageNameNormalizeDeclaration, imageNameNormalizeDefinition)
|
||||
})
|
||||
|
||||
modules[rule.Name] = getRuleData(rule)
|
||||
|
||||
@@ -161,7 +161,7 @@ func BenchmarkProcess(b *testing.B) {
|
||||
go monitorHeapSpace(&maxHeap, quitChan)
|
||||
|
||||
// test
|
||||
opap.Process(context.Background(), opap.OPASessionObj.AllPolicies, nil, maxGoRoutines)
|
||||
opap.Process(context.Background(), opap.OPASessionObj.AllPolicies, nil)
|
||||
|
||||
// teardown
|
||||
quitChan <- true
|
||||
@@ -185,15 +185,16 @@ func TestProcessResourcesResult(t *testing.T) {
|
||||
opaSessionObj := cautils.NewOPASessionObjMock()
|
||||
opaSessionObj.Policies = frameworks
|
||||
|
||||
policies := ConvertFrameworksToPolicies(opaSessionObj.Policies, "")
|
||||
scanningScope := cautils.GetScanningScope(&cautils.ScanInfo{InputPatterns: []string{""}})
|
||||
policies := ConvertFrameworksToPolicies(opaSessionObj.Policies, "", nil, scanningScope)
|
||||
ConvertFrameworksToSummaryDetails(&opaSessionObj.Report.SummaryDetails, opaSessionObj.Policies, policies)
|
||||
|
||||
opaSessionObj.K8SResources = &k8sResources
|
||||
opaSessionObj.K8SResources = k8sResources
|
||||
opaSessionObj.AllResources[deployment.GetID()] = deployment
|
||||
|
||||
opap := NewOPAProcessor(opaSessionObj, resources.NewRegoDependenciesDataMock())
|
||||
opap.AllPolicies = policies
|
||||
opap.Process(context.TODO(), policies, nil, 1)
|
||||
opap.Process(context.TODO(), policies, nil)
|
||||
|
||||
assert.Equal(t, 1, len(opaSessionObj.ResourcesResult))
|
||||
res := opaSessionObj.ResourcesResult[deployment.GetID()]
|
||||
@@ -327,7 +328,7 @@ func TestProcessRule(t *testing.T) {
|
||||
// since all resources JSON is a large file, we need to unzip it and set the variable before running the benchmark
|
||||
unzipAllResourcesTestDataAndSetVar("testdata/allResourcesMock.json.zip", "testdata/allResourcesMock.json")
|
||||
opap := NewOPAProcessorMock(tc.opaSessionObjMock, tc.resourcesMock)
|
||||
resources, _, err := opap.processRule(context.Background(), &tc.rule, nil)
|
||||
resources, err := opap.processRule(context.Background(), &tc.rule, nil)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, tc.expectedResult, resources)
|
||||
}
|
||||
|
||||
@@ -16,6 +16,10 @@ import (
|
||||
"go.opentelemetry.io/otel"
|
||||
)
|
||||
|
||||
const clusterScope = "clusterScope"
|
||||
|
||||
var largeClusterSize int = -1
|
||||
|
||||
// updateResults updates the results objects and report objects. This is a critical function - DO NOT CHANGE
|
||||
//
|
||||
// The function:
|
||||
@@ -25,6 +29,10 @@ import (
|
||||
func (opap *OPAProcessor) updateResults(ctx context.Context) {
|
||||
_, span := otel.Tracer("").Start(ctx, "OPAProcessor.updateResults")
|
||||
defer span.End()
|
||||
defer logger.L().Ctx(ctx).Success("Done aggregating results")
|
||||
|
||||
cautils.StartSpinner()
|
||||
defer cautils.StopSpinner()
|
||||
|
||||
// remove data from all objects
|
||||
for i := range opap.AllResources {
|
||||
@@ -87,14 +95,21 @@ func isEmptyResources(counters reportsummary.ICounters) bool {
|
||||
return counters.Failed() == 0 && counters.Skipped() == 0 && counters.Passed() == 0
|
||||
}
|
||||
|
||||
func getAllSupportedObjects(k8sResources *cautils.K8SResources, ksResources *cautils.KSResources, allResources map[string]workloadinterface.IMetadata, rule *reporthandling.PolicyRule) []workloadinterface.IMetadata {
|
||||
k8sObjects := []workloadinterface.IMetadata{}
|
||||
k8sObjects = append(k8sObjects, getKubernetesObjects(k8sResources, allResources, rule.Match)...)
|
||||
k8sObjects = append(k8sObjects, getKSObjects(ksResources, allResources, rule.DynamicMatch)...)
|
||||
func getAllSupportedObjects(k8sResources cautils.K8SResources, externalResources cautils.ExternalResources, allResources map[string]workloadinterface.IMetadata, rule *reporthandling.PolicyRule) map[string][]workloadinterface.IMetadata {
|
||||
k8sObjects := getKubernetesObjects(k8sResources, allResources, rule.Match)
|
||||
externalObjs := getKubenetesObjectsFromExternalResources(externalResources, allResources, rule.DynamicMatch)
|
||||
if len(externalObjs) > 0 {
|
||||
l, ok := k8sObjects[clusterScope]
|
||||
if !ok {
|
||||
l = []workloadinterface.IMetadata{}
|
||||
}
|
||||
l = append(l, externalObjs...)
|
||||
k8sObjects[clusterScope] = l
|
||||
}
|
||||
return k8sObjects
|
||||
}
|
||||
|
||||
func getKSObjects(k8sResources *cautils.KSResources, allResources map[string]workloadinterface.IMetadata, match []reporthandling.RuleMatchObjects) []workloadinterface.IMetadata {
|
||||
func getKubenetesObjectsFromExternalResources(externalResources cautils.ExternalResources, allResources map[string]workloadinterface.IMetadata, match []reporthandling.RuleMatchObjects) []workloadinterface.IMetadata {
|
||||
k8sObjects := []workloadinterface.IMetadata{}
|
||||
|
||||
for m := range match {
|
||||
@@ -103,7 +118,7 @@ func getKSObjects(k8sResources *cautils.KSResources, allResources map[string]wor
|
||||
for _, resource := range match[m].Resources {
|
||||
groupResources := k8sinterface.ResourceGroupToString(groups, version, resource)
|
||||
for _, groupResource := range groupResources {
|
||||
if k8sObj, ok := (*k8sResources)[groupResource]; ok {
|
||||
if k8sObj, ok := externalResources[groupResource]; ok {
|
||||
for i := range k8sObj {
|
||||
k8sObjects = append(k8sObjects, allResources[k8sObj[i]])
|
||||
}
|
||||
@@ -114,11 +129,11 @@ func getKSObjects(k8sResources *cautils.KSResources, allResources map[string]wor
|
||||
}
|
||||
}
|
||||
|
||||
return filterOutChildResources(k8sObjects, match)
|
||||
return k8sObjects
|
||||
}
|
||||
|
||||
func getKubernetesObjects(k8sResources *cautils.K8SResources, allResources map[string]workloadinterface.IMetadata, match []reporthandling.RuleMatchObjects) []workloadinterface.IMetadata {
|
||||
k8sObjects := []workloadinterface.IMetadata{}
|
||||
func getKubernetesObjects(k8sResources cautils.K8SResources, allResources map[string]workloadinterface.IMetadata, match []reporthandling.RuleMatchObjects) map[string][]workloadinterface.IMetadata {
|
||||
k8sObjects := map[string][]workloadinterface.IMetadata{}
|
||||
|
||||
for m := range match {
|
||||
for _, groups := range match[m].APIGroups {
|
||||
@@ -126,14 +141,18 @@ func getKubernetesObjects(k8sResources *cautils.K8SResources, allResources map[s
|
||||
for _, resource := range match[m].Resources {
|
||||
groupResources := k8sinterface.ResourceGroupToString(groups, version, resource)
|
||||
for _, groupResource := range groupResources {
|
||||
if k8sObj, ok := (*k8sResources)[groupResource]; ok {
|
||||
/*
|
||||
if k8sObj == nil {
|
||||
// logger.L().Debug("skipping", helpers.String("resource", groupResource))
|
||||
if k8sObj, ok := k8sResources[groupResource]; ok {
|
||||
for i := range k8sObj {
|
||||
|
||||
obj := allResources[k8sObj[i]]
|
||||
ns := getNamespaceName(obj, len(allResources))
|
||||
|
||||
l, ok := k8sObjects[ns]
|
||||
if !ok {
|
||||
l = []workloadinterface.IMetadata{}
|
||||
}
|
||||
*/
|
||||
for i := range k8sObj {
|
||||
k8sObjects = append(k8sObjects, allResources[k8sObj[i]])
|
||||
l = append(l, obj)
|
||||
k8sObjects[ns] = l
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -142,34 +161,9 @@ func getKubernetesObjects(k8sResources *cautils.K8SResources, allResources map[s
|
||||
}
|
||||
}
|
||||
|
||||
return filterOutChildResources(k8sObjects, match)
|
||||
return k8sObjects
|
||||
// return filterOutChildResources(k8sObjects, match)
|
||||
}
|
||||
|
||||
// filterOutChildResources filter out child resources if the parent resource is in the list
|
||||
func filterOutChildResources(objects []workloadinterface.IMetadata, match []reporthandling.RuleMatchObjects) []workloadinterface.IMetadata {
|
||||
response := []workloadinterface.IMetadata{}
|
||||
owners := []string{}
|
||||
for m := range match {
|
||||
owners = append(owners, match[m].Resources...)
|
||||
}
|
||||
|
||||
for i := range objects {
|
||||
if !k8sinterface.IsTypeWorkload(objects[i].GetObject()) {
|
||||
response = append(response, objects[i])
|
||||
continue
|
||||
}
|
||||
w := workloadinterface.NewWorkloadObj(objects[i].GetObject())
|
||||
ownerReferences, err := w.GetOwnerReferences()
|
||||
if err != nil || len(ownerReferences) == 0 {
|
||||
response = append(response, w)
|
||||
} else if !k8sinterface.IsStringInSlice(owners, ownerReferences[0].Kind) {
|
||||
response = append(response, w)
|
||||
}
|
||||
}
|
||||
|
||||
return response
|
||||
}
|
||||
|
||||
func getRuleDependencies(ctx context.Context) (map[string]string, error) {
|
||||
modules := resources.LoadRegoModules()
|
||||
if len(modules) == 0 {
|
||||
@@ -240,3 +234,30 @@ func ruleData(rule *reporthandling.PolicyRule) string {
|
||||
func ruleEnumeratorData(rule *reporthandling.PolicyRule) string {
|
||||
return rule.ResourceEnumerator
|
||||
}
|
||||
|
||||
func getNamespaceName(obj workloadinterface.IMetadata, clusterSize int) string {
|
||||
|
||||
if !isLargeCluster(clusterSize) {
|
||||
return clusterScope
|
||||
}
|
||||
|
||||
// if the resource is in namespace scope, get the namespace
|
||||
if k8sinterface.IsResourceInNamespaceScope(obj.GetKind()) {
|
||||
return obj.GetNamespace()
|
||||
}
|
||||
if obj.GetKind() == "Namespace" {
|
||||
return obj.GetName()
|
||||
}
|
||||
|
||||
return clusterScope
|
||||
}
|
||||
|
||||
// isLargeCluster returns true if the cluster size is larger than the largeClusterSize
|
||||
// This code is a workaround for large clusters. The final solution will be to scan resources individually
|
||||
func isLargeCluster(clusterSize int) bool {
|
||||
if largeClusterSize < 0 {
|
||||
// initialize large cluster size
|
||||
largeClusterSize, _ = cautils.ParseIntEnvVar("LARGE_CLUSTER_SIZE", 2500)
|
||||
}
|
||||
return clusterSize > largeClusterSize
|
||||
}
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -13,12 +13,13 @@ import (
|
||||
"github.com/open-policy-agent/opa/rego"
|
||||
"github.com/open-policy-agent/opa/topdown/builtins"
|
||||
"github.com/open-policy-agent/opa/types"
|
||||
"golang.org/x/exp/slices"
|
||||
)
|
||||
|
||||
// ConvertFrameworksToPolicies convert list of frameworks to list of policies
|
||||
func ConvertFrameworksToPolicies(frameworks []reporthandling.Framework, version string) *cautils.Policies {
|
||||
func ConvertFrameworksToPolicies(frameworks []reporthandling.Framework, version string, excludedRules map[string]bool, scanningScope reporthandling.ScanningScopeType) *cautils.Policies {
|
||||
policies := cautils.NewPolicies()
|
||||
policies.Set(frameworks, version)
|
||||
policies.Set(frameworks, version, excludedRules, scanningScope)
|
||||
return policies
|
||||
}
|
||||
|
||||
@@ -38,6 +39,7 @@ func ConvertFrameworksToSummaryDetails(summaryDetails *reportsummary.SummaryDeta
|
||||
ScoreFactor: frameworks[i].Controls[j].BaseScore,
|
||||
Description: frameworks[i].Controls[j].Description,
|
||||
Remediation: frameworks[i].Controls[j].Remediation,
|
||||
Category: frameworks[i].Controls[j].Category,
|
||||
}
|
||||
if frameworks[i].Controls[j].GetActionRequiredAttribute() == string(apis.SubStatusManualReview) {
|
||||
c.Status = apis.StatusSkipped
|
||||
@@ -49,7 +51,7 @@ func ConvertFrameworksToSummaryDetails(summaryDetails *reportsummary.SummaryDeta
|
||||
summaryDetails.Controls[id] = c
|
||||
}
|
||||
}
|
||||
if cautils.StringInSlice(policies.Frameworks, frameworks[i].Name) != cautils.ValueNotFound {
|
||||
if slices.Contains(policies.Frameworks, frameworks[i].Name) {
|
||||
summaryDetails.Frameworks = append(summaryDetails.Frameworks, reportsummary.FrameworkSummary{
|
||||
Name: frameworks[i].Name,
|
||||
Controls: controls,
|
||||
@@ -93,3 +95,17 @@ var cosignHasSignatureDefinition = func(bctx rego.BuiltinContext, a *ast.Term) (
|
||||
}
|
||||
return ast.BooleanTerm(has_signature(string(aStr))), nil
|
||||
}
|
||||
|
||||
var imageNameNormalizeDeclaration = ®o.Function{
|
||||
Name: "image.parse_normalized_name",
|
||||
Decl: types.NewFunction(types.Args(types.S), types.S),
|
||||
Memoize: true,
|
||||
}
|
||||
var imageNameNormalizeDefinition = func(bctx rego.BuiltinContext, a *ast.Term) (*ast.Term, error) {
|
||||
aStr, err := builtins.StringOperand(a.Value, 1)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid parameter type: %v", err)
|
||||
}
|
||||
normalizedName, err := normalize_image_name(string(aStr))
|
||||
return ast.StringTerm(normalizedName), err
|
||||
}
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/kubescape/kubescape/v2/core/cautils"
|
||||
"github.com/kubescape/kubescape/v2/core/mocks"
|
||||
"github.com/kubescape/opa-utils/reporthandling"
|
||||
"github.com/kubescape/opa-utils/reporthandling/results/v1/reportsummary"
|
||||
@@ -13,18 +14,31 @@ import (
|
||||
func TestConvertFrameworksToPolicies(t *testing.T) {
|
||||
fw0 := mocks.MockFramework_0006_0013()
|
||||
fw1 := mocks.MockFramework_0044()
|
||||
policies := ConvertFrameworksToPolicies([]reporthandling.Framework{*fw0, *fw1}, "")
|
||||
scanningScope := cautils.GetScanningScope(&cautils.ScanInfo{InputPatterns: []string{""}})
|
||||
policies := ConvertFrameworksToPolicies([]reporthandling.Framework{*fw0, *fw1}, "", nil, scanningScope)
|
||||
assert.Equal(t, 2, len(policies.Frameworks))
|
||||
assert.Equal(t, 3, len(policies.Controls))
|
||||
|
||||
// with excluded rules map
|
||||
excludedRulesMap := map[string]bool{
|
||||
"alert-rw-hostpath": true,
|
||||
}
|
||||
fw0 = mocks.MockFramework_0006_0013()
|
||||
fw1 = mocks.MockFramework_0044()
|
||||
policies = ConvertFrameworksToPolicies([]reporthandling.Framework{*fw0, *fw1}, "", excludedRulesMap, scanningScope)
|
||||
assert.Equal(t, 2, len(policies.Frameworks))
|
||||
assert.Equal(t, 2, len(policies.Controls))
|
||||
|
||||
}
|
||||
func TestInitializeSummaryDetails(t *testing.T) {
|
||||
fw0 := mocks.MockFramework_0006_0013()
|
||||
fw1 := mocks.MockFramework_0044()
|
||||
scanningScope := cautils.GetScanningScope(&cautils.ScanInfo{InputPatterns: []string{""}})
|
||||
|
||||
summaryDetails := reportsummary.SummaryDetails{}
|
||||
frameworks := []reporthandling.Framework{*fw0, *fw1}
|
||||
policies := ConvertFrameworksToPolicies([]reporthandling.Framework{*fw0, *fw1}, "")
|
||||
policies := ConvertFrameworksToPolicies([]reporthandling.Framework{*fw0, *fw1}, "", nil, scanningScope)
|
||||
ConvertFrameworksToSummaryDetails(&summaryDetails, frameworks, policies)
|
||||
assert.Equal(t, 2, len(summaryDetails.Frameworks))
|
||||
assert.Equal(t, 3, len(summaryDetails.Controls))
|
||||
// assert.Equal(t, 3, len(summaryDetails.Controls))
|
||||
}
|
||||
|
||||
@@ -2,6 +2,7 @@ package policyhandler
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
@@ -66,10 +67,8 @@ func (policyHandler *PolicyHandler) CollectPolicies(ctx context.Context, policyI
|
||||
func (policyHandler *PolicyHandler) getPolicies(ctx context.Context, policyIdentifier []cautils.PolicyIdentifier) (policies []reporthandling.Framework, exceptions []armotypes.PostureExceptionPolicy, controlInputs map[string][]string, err error) {
|
||||
ctx, span := otel.Tracer("").Start(ctx, "policyHandler.getPolicies")
|
||||
defer span.End()
|
||||
logger.L().Info("Downloading/Loading policy definitions")
|
||||
|
||||
cautils.StartSpinner()
|
||||
defer cautils.StopSpinner()
|
||||
logger.L().Start("Loading policies")
|
||||
|
||||
// get policies
|
||||
policies, err = policyHandler.getScanPolicies(ctx, policyIdentifier)
|
||||
@@ -80,18 +79,23 @@ func (policyHandler *PolicyHandler) getPolicies(ctx context.Context, policyIdent
|
||||
return nil, nil, nil, fmt.Errorf("failed to download policies: '%s'. Make sure the policy exist and you spelled it correctly. For more information, please feel free to contact ARMO team", strings.Join(policyIdentifierToSlice(policyIdentifier), ", "))
|
||||
}
|
||||
|
||||
logger.L().StopSuccess("Loaded policies")
|
||||
logger.L().Start("Loading exceptions")
|
||||
|
||||
// get exceptions
|
||||
if exceptions, err = policyHandler.getExceptions(); err != nil {
|
||||
logger.L().Ctx(ctx).Warning("failed to load exceptions", helpers.Error(err))
|
||||
}
|
||||
|
||||
logger.L().StopSuccess("Loaded exceptions")
|
||||
logger.L().Start("Loading account configurations")
|
||||
|
||||
// get account configuration
|
||||
if controlInputs, err = policyHandler.getControlInputs(); err != nil {
|
||||
logger.L().Ctx(ctx).Warning(err.Error())
|
||||
}
|
||||
|
||||
cautils.StopSpinner()
|
||||
logger.L().Success("Downloaded/Loaded policy")
|
||||
logger.L().StopSuccess("Loaded account configurations")
|
||||
|
||||
return policies, exceptions, controlInputs, nil
|
||||
}
|
||||
@@ -104,7 +108,7 @@ func (policyHandler *PolicyHandler) getScanPolicies(ctx context.Context, policyI
|
||||
// check if the cached policies are the same as the requested policies, otherwise download the policies
|
||||
if cachedIdentifiers, identifiersExist := policyHandler.cachedPolicyIdentifiers.Get(); identifiersExist && cautils.StringSlicesAreEqual(cachedIdentifiers, policyIdentifiersSlice) {
|
||||
logger.L().Info("Using cached policies")
|
||||
return cachedPolicies, nil
|
||||
return deepCopyPolicies(cachedPolicies)
|
||||
}
|
||||
|
||||
logger.L().Debug("Cached policies are not the same as the requested policies")
|
||||
@@ -121,6 +125,20 @@ func (policyHandler *PolicyHandler) getScanPolicies(ctx context.Context, policyI
|
||||
return policies, err
|
||||
}
|
||||
|
||||
func deepCopyPolicies(src []reporthandling.Framework) ([]reporthandling.Framework, error) {
|
||||
data, err := json.Marshal(src)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var dst []reporthandling.Framework
|
||||
err = json.Unmarshal(data, &dst)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return dst, nil
|
||||
}
|
||||
|
||||
func (policyHandler *PolicyHandler) downloadScanPolicies(ctx context.Context, policyIdentifier []cautils.PolicyIdentifier) ([]reporthandling.Framework, error) {
|
||||
frameworks := []reporthandling.Framework{}
|
||||
|
||||
|
||||
@@ -1,13 +1,18 @@
|
||||
package resourcehandler
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/kubescape/k8s-interface/k8sinterface"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
)
|
||||
|
||||
const (
|
||||
FieldSelectorsSeparator = ","
|
||||
FieldSelectorsEqualsOperator = "=="
|
||||
FieldSelectorsNotEqualsOperator = "!="
|
||||
)
|
||||
|
||||
type IFieldSelector interface {
|
||||
GetNamespacesSelectors(*schema.GroupVersionResource) []string
|
||||
GetClusterScope(*schema.GroupVersionResource) bool
|
||||
@@ -52,9 +57,9 @@ func (is *IncludeSelector) GetClusterScope(resource *schema.GroupVersionResource
|
||||
|
||||
func (es *ExcludeSelector) GetNamespacesSelectors(resource *schema.GroupVersionResource) []string {
|
||||
fieldSelectors := ""
|
||||
for _, n := range strings.Split(es.namespace, ",") {
|
||||
for _, n := range strings.Split(es.namespace, FieldSelectorsSeparator) {
|
||||
if n != "" {
|
||||
fieldSelectors += getNamespacesSelector(resource, n, "!=") + ","
|
||||
fieldSelectors = combineFieldSelectors(fieldSelectors, getNamespacesSelector(resource.Resource, n, FieldSelectorsNotEqualsOperator))
|
||||
}
|
||||
}
|
||||
return []string{fieldSelectors}
|
||||
@@ -63,23 +68,44 @@ func (es *ExcludeSelector) GetNamespacesSelectors(resource *schema.GroupVersionR
|
||||
|
||||
func (is *IncludeSelector) GetNamespacesSelectors(resource *schema.GroupVersionResource) []string {
|
||||
fieldSelectors := []string{}
|
||||
for _, n := range strings.Split(is.namespace, ",") {
|
||||
for _, n := range strings.Split(is.namespace, FieldSelectorsSeparator) {
|
||||
if n != "" {
|
||||
fieldSelectors = append(fieldSelectors, getNamespacesSelector(resource, n, "=="))
|
||||
fieldSelectors = append(fieldSelectors, getNamespacesSelector(resource.Resource, n, FieldSelectorsEqualsOperator))
|
||||
}
|
||||
}
|
||||
return fieldSelectors
|
||||
}
|
||||
|
||||
func getNamespacesSelector(resource *schema.GroupVersionResource, ns, operator string) string {
|
||||
fieldSelector := "metadata."
|
||||
if resource.Resource == "namespaces" {
|
||||
fieldSelector += "name"
|
||||
} else if k8sinterface.IsResourceInNamespaceScope(resource.Resource) {
|
||||
fieldSelector += "namespace"
|
||||
} else {
|
||||
func getNamespacesSelector(kind, ns, operator string) string {
|
||||
if ns == "" {
|
||||
return ""
|
||||
}
|
||||
return fmt.Sprintf("%s%s%s", fieldSelector, operator, ns)
|
||||
|
||||
if kind == "namespaces" || kind == "Namespace" {
|
||||
return getNameFieldSelectorString(ns, operator)
|
||||
}
|
||||
|
||||
if k8sinterface.IsResourceInNamespaceScope(kind) {
|
||||
return getNamespaceFieldSelectorString(ns, operator)
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
func getNameFieldSelectorString(resourceName, operator string) string {
|
||||
return strings.Join([]string{"metadata.name", resourceName}, operator)
|
||||
}
|
||||
|
||||
func getNamespaceFieldSelectorString(namespace, operator string) string {
|
||||
return strings.Join([]string{"metadata.namespace", namespace}, operator)
|
||||
}
|
||||
|
||||
func combineFieldSelectors(selectors ...string) string {
|
||||
var nonEmptyStrings []string
|
||||
for i := range selectors {
|
||||
if selectors[i] != "" {
|
||||
nonEmptyStrings = append(nonEmptyStrings, selectors[i])
|
||||
}
|
||||
}
|
||||
return strings.Join(nonEmptyStrings, FieldSelectorsSeparator)
|
||||
}
|
||||
|
||||
@@ -10,8 +10,13 @@ import (
|
||||
|
||||
func TestGetNamespacesSelector(t *testing.T) {
|
||||
k8sinterface.InitializeMapResourcesMock()
|
||||
assert.Equal(t, "metadata.namespace==default", getNamespacesSelector(&schema.GroupVersionResource{Version: "v1", Resource: "pods"}, "default", "=="))
|
||||
assert.Equal(t, "", getNamespacesSelector(&schema.GroupVersionResource{Version: "v1", Resource: "nodes"}, "default", "=="))
|
||||
assert.Equal(t, "", getNamespacesSelector("pods", "", "=="))
|
||||
assert.Equal(t, "metadata.namespace==default", getNamespacesSelector("pods", "default", "=="))
|
||||
assert.Equal(t, "metadata.namespace==default", getNamespacesSelector("Pod", "default", "=="))
|
||||
assert.Equal(t, "", getNamespacesSelector("nodes", "default", "=="))
|
||||
assert.Equal(t, "", getNamespacesSelector("Node", "default", "=="))
|
||||
assert.Equal(t, "metadata.name==kube-system", getNamespacesSelector("namespaces", "kube-system", "=="))
|
||||
assert.Equal(t, "metadata.name==kube-system", getNamespacesSelector("Namespace", "kube-system", "=="))
|
||||
}
|
||||
|
||||
func TestExcludedNamespacesSelectors(t *testing.T) {
|
||||
@@ -20,11 +25,11 @@ func TestExcludedNamespacesSelectors(t *testing.T) {
|
||||
es := NewExcludeSelector("default,ingress")
|
||||
selectors := es.GetNamespacesSelectors(&schema.GroupVersionResource{Resource: "pods"})
|
||||
assert.Equal(t, 1, len(selectors))
|
||||
assert.Equal(t, "metadata.namespace!=default,metadata.namespace!=ingress,", selectors[0])
|
||||
assert.Equal(t, "metadata.namespace!=default,metadata.namespace!=ingress", selectors[0])
|
||||
|
||||
selectors2 := es.GetNamespacesSelectors(&schema.GroupVersionResource{Resource: "namespaces"})
|
||||
assert.Equal(t, 1, len(selectors2))
|
||||
assert.Equal(t, "metadata.name!=default,metadata.name!=ingress,", selectors2[0])
|
||||
assert.Equal(t, "metadata.name!=default,metadata.name!=ingress", selectors2[0])
|
||||
}
|
||||
|
||||
func TestIncludeNamespacesSelectors(t *testing.T) {
|
||||
|
||||
@@ -5,8 +5,8 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/kubescape/k8s-interface/workloadinterface"
|
||||
"github.com/kubescape/opa-utils/reporthandling"
|
||||
"k8s.io/apimachinery/pkg/version"
|
||||
@@ -19,40 +19,40 @@ import (
|
||||
)
|
||||
|
||||
// FileResourceHandler handle resources from files and URLs
|
||||
type FileResourceHandler struct {
|
||||
inputPatterns []string
|
||||
}
|
||||
type FileResourceHandler struct{}
|
||||
|
||||
func NewFileResourceHandler(_ context.Context, inputPatterns []string) *FileResourceHandler {
|
||||
func NewFileResourceHandler() *FileResourceHandler {
|
||||
k8sinterface.InitializeMapResourcesMock() // initialize the resource map
|
||||
return &FileResourceHandler{
|
||||
inputPatterns: inputPatterns,
|
||||
}
|
||||
return &FileResourceHandler{}
|
||||
}
|
||||
|
||||
func (fileHandler *FileResourceHandler) GetResources(ctx context.Context, sessionObj *cautils.OPASessionObj, _ *armotypes.PortalDesignator, progressListener opaprocessor.IJobProgressNotificationClient) (*cautils.K8SResources, map[string]workloadinterface.IMetadata, *cautils.KSResources, error) {
|
||||
|
||||
//
|
||||
// build resources map
|
||||
// map resources based on framework required resources: map["/group/version/kind"][]<k8s workloads ids>
|
||||
k8sResources := setK8sResourceMap(sessionObj.Policies)
|
||||
func (fileHandler *FileResourceHandler) GetResources(ctx context.Context, sessionObj *cautils.OPASessionObj, progressListener opaprocessor.IJobProgressNotificationClient, scanInfo *cautils.ScanInfo) (cautils.K8SResources, map[string]workloadinterface.IMetadata, cautils.ExternalResources, map[string]bool, error) {
|
||||
allResources := map[string]workloadinterface.IMetadata{}
|
||||
ksResources := &cautils.KSResources{}
|
||||
externalResources := cautils.ExternalResources{}
|
||||
|
||||
if len(fileHandler.inputPatterns) == 0 {
|
||||
return nil, nil, nil, fmt.Errorf("missing input")
|
||||
if len(scanInfo.InputPatterns) == 0 {
|
||||
return nil, nil, nil, nil, fmt.Errorf("missing input")
|
||||
}
|
||||
|
||||
logger.L().Info("Accessing local objects")
|
||||
cautils.StartSpinner()
|
||||
logger.L().Start("Accessing local objects")
|
||||
|
||||
for path := range fileHandler.inputPatterns {
|
||||
workloadIDToSource, workloads, err := getResourcesFromPath(ctx, fileHandler.inputPatterns[path])
|
||||
if err != nil {
|
||||
return nil, allResources, nil, err
|
||||
// load resources from all input paths
|
||||
mappedResources := map[string][]workloadinterface.IMetadata{}
|
||||
for path := range scanInfo.InputPatterns {
|
||||
var workloadIDToSource map[string]reporthandling.Source
|
||||
var workloads []workloadinterface.IMetadata
|
||||
var err error
|
||||
|
||||
if scanInfo.ChartPath != "" && scanInfo.FilePath != "" {
|
||||
workloadIDToSource, workloads, err = getWorkloadFromHelmChart(ctx, scanInfo.ChartPath, scanInfo.FilePath)
|
||||
} else {
|
||||
workloadIDToSource, workloads, err = getResourcesFromPath(ctx, scanInfo.InputPatterns[path])
|
||||
if err != nil {
|
||||
return nil, allResources, nil, nil, err
|
||||
}
|
||||
}
|
||||
if len(workloads) == 0 {
|
||||
logger.L().Debug("path ignored because contains only a non-kubernetes file", helpers.String("path", fileHandler.inputPatterns[path]))
|
||||
logger.L().Debug("path ignored because contains only a non-kubernetes file", helpers.String("path", scanInfo.InputPatterns[path]))
|
||||
}
|
||||
|
||||
for k, v := range workloadIDToSource {
|
||||
@@ -60,26 +60,115 @@ func (fileHandler *FileResourceHandler) GetResources(ctx context.Context, sessio
|
||||
}
|
||||
|
||||
// map all resources: map["/apiVersion/version/kind"][]<k8s workloads>
|
||||
mappedResources := mapResources(workloads)
|
||||
|
||||
// save only relevant resources
|
||||
for i := range mappedResources {
|
||||
if _, ok := (*k8sResources)[i]; ok {
|
||||
ids := []string{}
|
||||
for j := range mappedResources[i] {
|
||||
ids = append(ids, mappedResources[i][j].GetID())
|
||||
allResources[mappedResources[i][j].GetID()] = mappedResources[i][j]
|
||||
}
|
||||
(*k8sResources)[i] = append((*k8sResources)[i], ids...)
|
||||
}
|
||||
}
|
||||
|
||||
addWorkloadsToResourcesMap(mappedResources, workloads)
|
||||
}
|
||||
|
||||
cautils.StopSpinner()
|
||||
logger.L().Success("Done accessing local objects")
|
||||
// locate input k8s object in the mapped resources - if not found or not a valid resource, return error
|
||||
var err error
|
||||
if sessionObj.SingleResourceScan, err = findScanObjectResource(mappedResources, scanInfo.ScanObject); err != nil {
|
||||
return nil, nil, nil, nil, err
|
||||
}
|
||||
|
||||
return k8sResources, allResources, ksResources, nil
|
||||
if sessionObj.SingleResourceScan != nil && k8sinterface.WorkloadHasParent(sessionObj.SingleResourceScan) {
|
||||
return nil, nil, nil, nil, fmt.Errorf("resource %s has a parent and cannot be scanned", sessionObj.SingleResourceScan.GetID())
|
||||
}
|
||||
|
||||
// build a resources map, based on the policies
|
||||
// map resources based on framework required resources: map["/group/version/kind"][]<k8s workloads ids>
|
||||
resourceToQuery, excludedRulesMap := getQueryableResourceMapFromPolicies(sessionObj.Policies, sessionObj.SingleResourceScan)
|
||||
k8sResources := resourceToQuery.ToK8sResourceMap()
|
||||
|
||||
// save only relevant resources
|
||||
for i := range mappedResources {
|
||||
if _, ok := k8sResources[i]; ok {
|
||||
ids := []string{}
|
||||
for j := range mappedResources[i] {
|
||||
ids = append(ids, mappedResources[i][j].GetID())
|
||||
allResources[mappedResources[i][j].GetID()] = mappedResources[i][j]
|
||||
}
|
||||
k8sResources[i] = append(k8sResources[i], ids...)
|
||||
}
|
||||
}
|
||||
|
||||
logger.L().StopSuccess("Done accessing local objects")
|
||||
// save input resource in resource maps
|
||||
addSingleResourceToResourceMaps(k8sResources, allResources, sessionObj.SingleResourceScan)
|
||||
|
||||
return k8sResources, allResources, externalResources, excludedRulesMap, nil
|
||||
}
|
||||
|
||||
func getWorkloadFromHelmChart(ctx context.Context, helmPath, workloadPath string) (map[string]reporthandling.Source, []workloadinterface.IMetadata, error) {
|
||||
clonedRepo, err := cloneGitRepo(&helmPath)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if clonedRepo != "" {
|
||||
defer os.RemoveAll(clonedRepo)
|
||||
}
|
||||
|
||||
// Get repo root
|
||||
repoRoot, gitRepo := extractGitRepo(helmPath)
|
||||
|
||||
helmSourceToWorkloads, helmSourceToChart := cautils.LoadResourcesFromHelmCharts(ctx, helmPath)
|
||||
|
||||
if clonedRepo != "" {
|
||||
workloadPath = clonedRepo + workloadPath
|
||||
}
|
||||
|
||||
wlSource, ok := helmSourceToWorkloads[workloadPath]
|
||||
if !ok {
|
||||
return nil, nil, fmt.Errorf("workload %s not found in chart %s", workloadPath, helmPath)
|
||||
}
|
||||
|
||||
if len(wlSource) != 1 {
|
||||
return nil, nil, fmt.Errorf("workload %s found multiple times in chart %s", workloadPath, helmPath)
|
||||
}
|
||||
|
||||
helmChart, ok := helmSourceToChart[workloadPath]
|
||||
if !ok {
|
||||
return nil, nil, fmt.Errorf("helmChart not found for workload %s", workloadPath)
|
||||
}
|
||||
|
||||
workloadSource := getWorkloadSourceHelmChart(repoRoot, helmPath, gitRepo, helmChart)
|
||||
|
||||
workloadIDToSource := make(map[string]reporthandling.Source, 1)
|
||||
workloadIDToSource[wlSource[0].GetID()] = workloadSource
|
||||
|
||||
workloads := []workloadinterface.IMetadata{}
|
||||
workloads = append(workloads, wlSource...)
|
||||
|
||||
return workloadIDToSource, workloads, nil
|
||||
|
||||
}
|
||||
|
||||
func getWorkloadSourceHelmChart(repoRoot string, source string, gitRepo *cautils.LocalGitRepository, helmChart cautils.Chart) reporthandling.Source {
|
||||
relSource, err := filepath.Rel(repoRoot, source)
|
||||
if err == nil {
|
||||
source = relSource
|
||||
}
|
||||
|
||||
var lastCommit reporthandling.LastCommit
|
||||
if gitRepo != nil {
|
||||
commitInfo, _ := gitRepo.GetFileLastCommit(source)
|
||||
if commitInfo != nil {
|
||||
lastCommit = reporthandling.LastCommit{
|
||||
Hash: commitInfo.SHA,
|
||||
Date: commitInfo.Author.Date,
|
||||
CommitterName: commitInfo.Author.Name,
|
||||
CommitterEmail: commitInfo.Author.Email,
|
||||
Message: commitInfo.Message,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return reporthandling.Source{
|
||||
Path: repoRoot,
|
||||
HelmPath: helmChart.Path,
|
||||
RelativePath: source,
|
||||
FileType: reporthandling.SourceTypeHelmChart,
|
||||
HelmChartName: helmChart.Name,
|
||||
LastCommit: lastCommit,
|
||||
}
|
||||
}
|
||||
|
||||
func getResourcesFromPath(ctx context.Context, path string) (map[string]reporthandling.Source, []workloadinterface.IMetadata, error) {
|
||||
@@ -95,13 +184,7 @@ func getResourcesFromPath(ctx context.Context, path string) (map[string]reportha
|
||||
}
|
||||
|
||||
// Get repo root
|
||||
repoRoot := ""
|
||||
gitRepo, err := cautils.NewLocalGitRepository(path)
|
||||
if err == nil && gitRepo != nil {
|
||||
repoRoot, _ = gitRepo.GetRootDir()
|
||||
} else {
|
||||
repoRoot, _ = filepath.Abs(path)
|
||||
}
|
||||
repoRoot, gitRepo := extractGitRepo(path)
|
||||
|
||||
// when scanning a single file, we consider the repository root to be
|
||||
// the directory of the scanned file
|
||||
@@ -136,9 +219,7 @@ func getResourcesFromPath(ctx context.Context, path string) (map[string]reportha
|
||||
if gitRepo != nil {
|
||||
commitInfo, err := gitRepo.GetFileLastCommit(source)
|
||||
if err != nil && !warnIssued {
|
||||
cautils.StopSpinner()
|
||||
logger.L().Ctx(ctx).Warning("git scan skipped", helpers.Error(err))
|
||||
cautils.StartSpinner()
|
||||
logger.L().Ctx(ctx).Warning("Git scan skipped", helpers.Error(err))
|
||||
warnIssued = true // croak only once
|
||||
}
|
||||
|
||||
@@ -153,10 +234,21 @@ func getResourcesFromPath(ctx context.Context, path string) (map[string]reportha
|
||||
}
|
||||
}
|
||||
|
||||
workloadSource := reporthandling.Source{
|
||||
RelativePath: relSource,
|
||||
FileType: filetype,
|
||||
LastCommit: lastCommit,
|
||||
var workloadSource reporthandling.Source
|
||||
if clonedRepo != "" {
|
||||
workloadSource = reporthandling.Source{
|
||||
Path: "",
|
||||
RelativePath: relSource,
|
||||
FileType: filetype,
|
||||
LastCommit: lastCommit,
|
||||
}
|
||||
} else {
|
||||
workloadSource = reporthandling.Source{
|
||||
Path: repoRoot,
|
||||
RelativePath: relSource,
|
||||
FileType: filetype,
|
||||
LastCommit: lastCommit,
|
||||
}
|
||||
}
|
||||
|
||||
for i := range ws {
|
||||
@@ -169,36 +261,23 @@ func getResourcesFromPath(ctx context.Context, path string) (map[string]reportha
|
||||
}
|
||||
|
||||
// load resources from helm charts
|
||||
helmSourceToWorkloads, helmSourceToChartName := cautils.LoadResourcesFromHelmCharts(ctx, path)
|
||||
helmSourceToWorkloads, helmSourceToChart := cautils.LoadResourcesFromHelmCharts(ctx, path)
|
||||
for source, ws := range helmSourceToWorkloads {
|
||||
workloads = append(workloads, ws...)
|
||||
helmChartName := helmSourceToChartName[source]
|
||||
helmChart := helmSourceToChart[source]
|
||||
|
||||
relSource, err := filepath.Rel(repoRoot, source)
|
||||
if err == nil {
|
||||
source = relSource
|
||||
}
|
||||
|
||||
var lastCommit reporthandling.LastCommit
|
||||
if gitRepo != nil {
|
||||
commitInfo, _ := gitRepo.GetFileLastCommit(source)
|
||||
if commitInfo != nil {
|
||||
lastCommit = reporthandling.LastCommit{
|
||||
Hash: commitInfo.SHA,
|
||||
Date: commitInfo.Author.Date,
|
||||
CommitterName: commitInfo.Author.Name,
|
||||
CommitterEmail: commitInfo.Author.Email,
|
||||
Message: commitInfo.Message,
|
||||
}
|
||||
if clonedRepo != "" {
|
||||
url, err := gitRepo.GetRemoteUrl()
|
||||
if err != nil {
|
||||
logger.L().Warning("failed to get remote url", helpers.Error(err))
|
||||
break
|
||||
}
|
||||
helmChart.Path = strings.TrimSuffix(url, ".git")
|
||||
repoRoot = ""
|
||||
source = strings.TrimPrefix(source, fmt.Sprintf("%s/", clonedRepo))
|
||||
}
|
||||
|
||||
workloadSource := reporthandling.Source{
|
||||
RelativePath: source,
|
||||
FileType: reporthandling.SourceTypeHelmChart,
|
||||
HelmChartName: helmChartName,
|
||||
LastCommit: lastCommit,
|
||||
}
|
||||
workloadSource := getWorkloadSourceHelmChart(repoRoot, source, gitRepo, helmChart)
|
||||
|
||||
for i := range ws {
|
||||
workloadIDToSource[ws[i].GetID()] = workloadSource
|
||||
@@ -250,6 +329,17 @@ func getResourcesFromPath(ctx context.Context, path string) (map[string]reportha
|
||||
return workloadIDToSource, workloads, nil
|
||||
}
|
||||
|
||||
func extractGitRepo(path string) (string, *cautils.LocalGitRepository) {
|
||||
repoRoot := ""
|
||||
gitRepo, err := cautils.NewLocalGitRepository(path)
|
||||
if err == nil && gitRepo != nil {
|
||||
repoRoot, _ = gitRepo.GetRootDir()
|
||||
} else {
|
||||
repoRoot, _ = filepath.Abs(path)
|
||||
}
|
||||
return repoRoot, gitRepo
|
||||
}
|
||||
|
||||
func (fileHandler *FileResourceHandler) GetClusterAPIServerInfo(_ context.Context) *version.Info {
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
"github.com/kubescape/k8s-interface/k8sinterface"
|
||||
"github.com/kubescape/k8s-interface/workloadinterface"
|
||||
"github.com/kubescape/kubescape/v2/core/cautils"
|
||||
"github.com/kubescape/opa-utils/objectsenvelopes"
|
||||
)
|
||||
|
||||
// Clone git repository
|
||||
@@ -33,10 +34,7 @@ func cloneGitRepo(path *string) (string, error) {
|
||||
return clonedDir, nil
|
||||
}
|
||||
|
||||
// build resources map
|
||||
func mapResources(workloads []workloadinterface.IMetadata) map[string][]workloadinterface.IMetadata {
|
||||
|
||||
allResources := map[string][]workloadinterface.IMetadata{}
|
||||
func addWorkloadsToResourcesMap(allResources map[string][]workloadinterface.IMetadata, workloads []workloadinterface.IMetadata) {
|
||||
for i := range workloads {
|
||||
groupVersionResource, err := k8sinterface.GetGroupVersionResource(workloads[i].GetKind())
|
||||
if err != nil {
|
||||
@@ -58,8 +56,6 @@ func mapResources(workloads []workloadinterface.IMetadata) map[string][]workload
|
||||
allResources[resourceTriplets] = []workloadinterface.IMetadata{workloads[i]}
|
||||
}
|
||||
}
|
||||
return allResources
|
||||
|
||||
}
|
||||
|
||||
/* unused for now
|
||||
@@ -85,3 +81,55 @@ func addCommitData(input string, workloadIDToSource map[string]reporthandling.So
|
||||
}
|
||||
}
|
||||
*/
|
||||
|
||||
// findScanObjectResource finds the requested k8s object to be scanned in the resources map
|
||||
func findScanObjectResource(mappedResources map[string][]workloadinterface.IMetadata, resource *objectsenvelopes.ScanObject) (workloadinterface.IWorkload, error) {
|
||||
if resource == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
logger.L().Debug("Single resource scan", helpers.String("resource", resource.GetID()))
|
||||
|
||||
wls := []workloadinterface.IWorkload{}
|
||||
for _, resources := range mappedResources {
|
||||
for _, r := range resources {
|
||||
if r.GetKind() == resource.GetKind() && r.GetName() == resource.GetName() {
|
||||
if resource.GetNamespace() != "" && resource.GetNamespace() != r.GetNamespace() {
|
||||
continue
|
||||
}
|
||||
if resource.GetApiVersion() != "" && resource.GetApiVersion() != r.GetApiVersion() {
|
||||
continue
|
||||
}
|
||||
|
||||
if k8sinterface.IsTypeWorkload(r.GetObject()) {
|
||||
wl := workloadinterface.NewWorkloadObj(r.GetObject())
|
||||
wls = append(wls, wl)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(wls) == 0 {
|
||||
return nil, fmt.Errorf("k8s resource '%s' not found", getReadableID(resource))
|
||||
} else if len(wls) > 1 {
|
||||
return nil, fmt.Errorf("more than one k8s resource found for '%s'", getReadableID(resource))
|
||||
}
|
||||
|
||||
return wls[0], nil
|
||||
}
|
||||
|
||||
// TODO: move this to k8s-interface
|
||||
func getReadableID(obj *objectsenvelopes.ScanObject) string {
|
||||
var ID string
|
||||
if obj.GetApiVersion() != "" {
|
||||
ID += fmt.Sprintf("%s/", k8sinterface.JoinGroupVersion(k8sinterface.SplitApiVersion(obj.GetApiVersion())))
|
||||
}
|
||||
|
||||
if obj.GetNamespace() != "" {
|
||||
ID += fmt.Sprintf("%s/", obj.GetNamespace())
|
||||
}
|
||||
|
||||
ID += fmt.Sprintf("%s/%s", obj.GetKind(), obj.GetName())
|
||||
|
||||
return ID
|
||||
}
|
||||
|
||||
107
core/pkg/resourcehandler/filesloaderutils_test.go
Normal file
107
core/pkg/resourcehandler/filesloaderutils_test.go
Normal file
@@ -0,0 +1,107 @@
|
||||
package resourcehandler
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/kubescape/k8s-interface/workloadinterface"
|
||||
"github.com/kubescape/opa-utils/objectsenvelopes"
|
||||
"github.com/kubescape/opa-utils/reporthandling"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func mockWorkloadWithSource(apiVersion, kind, namespace, name, source string) workloadinterface.IMetadata {
|
||||
wl := mockWorkload(apiVersion, kind, namespace, name)
|
||||
resource := reporthandling.NewResourceIMetadata(wl)
|
||||
resource.SetSource(&reporthandling.Source{
|
||||
Path: source,
|
||||
RelativePath: source,
|
||||
})
|
||||
|
||||
return resource
|
||||
}
|
||||
|
||||
func TestFindScanObjectResource(t *testing.T) {
|
||||
mappedResources := map[string][]workloadinterface.IMetadata{
|
||||
"/v1/pods": {
|
||||
mockWorkloadWithSource("v1", "Pod", "default", "nginx", "/fileA.yaml"),
|
||||
mockWorkloadWithSource("v1", "Pod", "default", "nginx", "/fileB.yaml"),
|
||||
mockWorkloadWithSource("v1", "Pod", "", "mariadb", "/fileB.yaml"),
|
||||
},
|
||||
}
|
||||
tt := []struct {
|
||||
name string
|
||||
scanObject *objectsenvelopes.ScanObject
|
||||
expectedResourceName string
|
||||
expectErr bool
|
||||
expectedErrorString string
|
||||
}{
|
||||
{
|
||||
name: "scan object is nil",
|
||||
scanObject: nil,
|
||||
expectedResourceName: "",
|
||||
expectErr: false,
|
||||
},
|
||||
{
|
||||
name: "multiple resources match",
|
||||
scanObject: &objectsenvelopes.ScanObject{
|
||||
Kind: "Pod",
|
||||
ApiVersion: "v1",
|
||||
Metadata: objectsenvelopes.ScanObjectMetadata{
|
||||
Namespace: "default",
|
||||
|
||||
Name: "nginx",
|
||||
},
|
||||
},
|
||||
expectedResourceName: "",
|
||||
expectErr: true,
|
||||
expectedErrorString: "more than one k8s resource found for '/v1/default/Pod/nginx'",
|
||||
},
|
||||
{
|
||||
name: "single resource match",
|
||||
scanObject: &objectsenvelopes.ScanObject{
|
||||
Kind: "Pod",
|
||||
ApiVersion: "v1",
|
||||
Metadata: objectsenvelopes.ScanObjectMetadata{
|
||||
Name: "mariadb",
|
||||
Namespace: "",
|
||||
},
|
||||
},
|
||||
expectedResourceName: "mariadb",
|
||||
expectErr: false,
|
||||
expectedErrorString: "",
|
||||
},
|
||||
{
|
||||
name: "no workload match",
|
||||
scanObject: &objectsenvelopes.ScanObject{
|
||||
Kind: "Deployment",
|
||||
ApiVersion: "apps/v1",
|
||||
Metadata: objectsenvelopes.ScanObjectMetadata{
|
||||
Namespace: "",
|
||||
Name: "notfound",
|
||||
},
|
||||
},
|
||||
expectedResourceName: "",
|
||||
expectErr: true,
|
||||
expectedErrorString: "not found",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tt {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
resource, err := findScanObjectResource(mappedResources, tc.scanObject)
|
||||
if (err != nil) != tc.expectErr {
|
||||
t.Errorf("findScanObjectResource() error = %v, expectErr %v", err, tc.expectErr)
|
||||
return
|
||||
}
|
||||
|
||||
if tc.expectErr {
|
||||
assert.ErrorContains(t, err, tc.expectedErrorString)
|
||||
}
|
||||
|
||||
if tc.expectedResourceName != "" {
|
||||
assert.Equal(t, tc.expectedResourceName, resource.GetName())
|
||||
}
|
||||
})
|
||||
|
||||
}
|
||||
}
|
||||
@@ -211,7 +211,7 @@ func Test_isAKS(t *testing.T) {
|
||||
/* unused for now.
|
||||
type iResourceHandlerMock struct{}
|
||||
|
||||
func (*iResourceHandlerMock) GetResources(*cautils.OPASessionObj, *armotypes.PortalDesignator) (*cautils.K8SResources, map[string]workloadinterface.IMetadata, *cautils.KSResources, error) {
|
||||
func (*iResourceHandlerMock) GetResources(*cautils.OPASessionObj, *identifiers.PortalDesignator) (*cautils.K8SResources, map[string]workloadinterface.IMetadata, *cautils.KSResources, error) {
|
||||
return nil, nil, nil, nil
|
||||
}
|
||||
func (*iResourceHandlerMock) GetClusterAPIServerInfo() *version.Info {
|
||||
@@ -232,7 +232,7 @@ func getResourceHandlerMock() *K8sResourceHandler {
|
||||
Context: context.Background(),
|
||||
}
|
||||
|
||||
return NewK8sResourceHandler(k8s, &EmptySelector{}, nil, nil, nil)
|
||||
return NewK8sResourceHandler(k8s, nil, nil, nil)
|
||||
}
|
||||
func Test_CollectResources(t *testing.T) {
|
||||
resourceHandler := getResourceHandlerMock()
|
||||
@@ -246,15 +246,14 @@ func Test_CollectResources(t *testing.T) {
|
||||
ClusterAPIServerInfo: nil,
|
||||
},
|
||||
}
|
||||
policyIdentifier := []cautils.PolicyIdentifier{{}}
|
||||
|
||||
assert.NotPanics(t, func() {
|
||||
CollectResources(context.TODO(), resourceHandler, policyIdentifier, objSession, cautils.NewProgressHandler(""))
|
||||
CollectResources(context.TODO(), resourceHandler, []cautils.PolicyIdentifier{}, objSession, cautils.NewProgressHandler(""), &cautils.ScanInfo{})
|
||||
}, "Cluster named .*eks.* without a cloud config panics on cluster scan !")
|
||||
|
||||
assert.NotPanics(t, func() {
|
||||
objSession.Metadata.ScanMetadata.ScanningTarget = reportv2.File
|
||||
CollectResources(context.TODO(), resourceHandler, policyIdentifier, objSession, cautils.NewProgressHandler(""))
|
||||
CollectResources(context.TODO(), resourceHandler, []cautils.PolicyIdentifier{}, objSession, cautils.NewProgressHandler(""), &cautils.ScanInfo{})
|
||||
}, "Cluster named .*eks.* without a cloud config panics on non-cluster scan !")
|
||||
|
||||
}
|
||||
|
||||
@@ -19,8 +19,7 @@ import (
|
||||
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
|
||||
)
|
||||
|
||||
// CollectResources uses the provided resource handler to collect resources and returns an updated OPASessionObj
|
||||
func CollectResources(ctx context.Context, rsrcHandler IResourceHandler, policyIdentifier []cautils.PolicyIdentifier, opaSessionObj *cautils.OPASessionObj, progressListener opaprocessor.IJobProgressNotificationClient) error {
|
||||
func CollectResources(ctx context.Context, rsrcHandler IResourceHandler, policyIdentifier []cautils.PolicyIdentifier, opaSessionObj *cautils.OPASessionObj, progressListener opaprocessor.IJobProgressNotificationClient, scanInfo *cautils.ScanInfo) error {
|
||||
ctx, span := otel.Tracer("").Start(ctx, "resourcehandler.CollectResources")
|
||||
defer span.End()
|
||||
opaSessionObj.Report.ClusterAPIServerInfo = rsrcHandler.GetClusterAPIServerInfo(ctx)
|
||||
@@ -30,16 +29,17 @@ func CollectResources(ctx context.Context, rsrcHandler IResourceHandler, policyI
|
||||
setCloudMetadata(opaSessionObj)
|
||||
}
|
||||
|
||||
resourcesMap, allResources, ksResources, err := rsrcHandler.GetResources(ctx, opaSessionObj, &policyIdentifier[0].Designators, progressListener)
|
||||
resourcesMap, allResources, externalResources, excludedRulesMap, err := rsrcHandler.GetResources(ctx, opaSessionObj, progressListener, scanInfo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
opaSessionObj.K8SResources = resourcesMap
|
||||
opaSessionObj.AllResources = allResources
|
||||
opaSessionObj.ArmoResource = ksResources
|
||||
opaSessionObj.ExternalResources = externalResources
|
||||
opaSessionObj.ExcludedRules = excludedRulesMap
|
||||
|
||||
if (opaSessionObj.K8SResources == nil || len(*opaSessionObj.K8SResources) == 0) && (opaSessionObj.ArmoResource == nil || len(*opaSessionObj.ArmoResource) == 0) {
|
||||
if (opaSessionObj.K8SResources == nil || len(opaSessionObj.K8SResources) == 0) && (opaSessionObj.ExternalResources == nil || len(opaSessionObj.ExternalResources) == 0) {
|
||||
return fmt.Errorf("empty list of resources")
|
||||
}
|
||||
|
||||
@@ -52,6 +52,9 @@ func setCloudMetadata(opaSessionObj *cautils.OPASessionObj) {
|
||||
return
|
||||
}
|
||||
cloudMetadata := reportv2.NewCloudMetadata(iCloudMetadata)
|
||||
if opaSessionObj.Metadata.ContextMetadata.ClusterContextMetadata == nil {
|
||||
opaSessionObj.Metadata.ContextMetadata.ClusterContextMetadata = &reportv2.ClusterMetadata{}
|
||||
}
|
||||
opaSessionObj.Metadata.ContextMetadata.ClusterContextMetadata.CloudMetadata = cloudMetadata
|
||||
opaSessionObj.Metadata.ClusterMetadata.CloudMetadata = cloudMetadata // deprecated - fallback
|
||||
opaSessionObj.Report.ClusterCloudProvider = iCloudMetadata.Provider().ToString() // deprecated - fallback
|
||||
|
||||
@@ -3,7 +3,6 @@ package resourcehandler
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/kubescape/k8s-interface/workloadinterface"
|
||||
"github.com/kubescape/kubescape/v2/core/cautils"
|
||||
"github.com/kubescape/kubescape/v2/core/pkg/opaprocessor"
|
||||
@@ -11,6 +10,6 @@ import (
|
||||
)
|
||||
|
||||
type IResourceHandler interface {
|
||||
GetResources(context.Context, *cautils.OPASessionObj, *armotypes.PortalDesignator, opaprocessor.IJobProgressNotificationClient) (*cautils.K8SResources, map[string]workloadinterface.IMetadata, *cautils.KSResources, error)
|
||||
GetResources(context.Context, *cautils.OPASessionObj, opaprocessor.IJobProgressNotificationClient, *cautils.ScanInfo) (cautils.K8SResources, map[string]workloadinterface.IMetadata, cautils.ExternalResources, map[string]bool, error)
|
||||
GetClusterAPIServerInfo(ctx context.Context) *version.Info
|
||||
}
|
||||
@@ -20,15 +20,12 @@ import (
|
||||
"github.com/kubescape/k8s-interface/k8sinterface"
|
||||
"github.com/kubescape/k8s-interface/workloadinterface"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
k8slabels "k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/version"
|
||||
"k8s.io/client-go/dynamic"
|
||||
)
|
||||
|
||||
type cloudResourceGetter func(string, string) (workloadinterface.IMetadata, error)
|
||||
@@ -43,47 +40,48 @@ var cloudResourceGetterMapping = map[string]cloudResourceGetter{
|
||||
type K8sResourceHandler struct {
|
||||
k8s *k8sinterface.KubernetesApi
|
||||
hostSensorHandler hostsensorutils.IHostSensor
|
||||
fieldSelector IFieldSelector
|
||||
rbacObjectsAPI *cautils.RBACObjects
|
||||
registryAdaptors *RegistryAdaptors
|
||||
}
|
||||
|
||||
func NewK8sResourceHandler(k8s *k8sinterface.KubernetesApi, fieldSelector IFieldSelector, hostSensorHandler hostsensorutils.IHostSensor, rbacObjects *cautils.RBACObjects, registryAdaptors *RegistryAdaptors) *K8sResourceHandler {
|
||||
func NewK8sResourceHandler(k8s *k8sinterface.KubernetesApi, hostSensorHandler hostsensorutils.IHostSensor, rbacObjects *cautils.RBACObjects, registryAdaptors *RegistryAdaptors) *K8sResourceHandler {
|
||||
return &K8sResourceHandler{
|
||||
k8s: k8s,
|
||||
fieldSelector: fieldSelector,
|
||||
hostSensorHandler: hostSensorHandler,
|
||||
rbacObjectsAPI: rbacObjects,
|
||||
registryAdaptors: registryAdaptors,
|
||||
}
|
||||
}
|
||||
|
||||
func (k8sHandler *K8sResourceHandler) GetResources(ctx context.Context, sessionObj *cautils.OPASessionObj, designator *armotypes.PortalDesignator, progressListener opaprocessor.IJobProgressNotificationClient) (*cautils.K8SResources, map[string]workloadinterface.IMetadata, *cautils.KSResources, error) {
|
||||
allResources := map[string]workloadinterface.IMetadata{}
|
||||
func (k8sHandler *K8sResourceHandler) GetResources(ctx context.Context, sessionObj *cautils.OPASessionObj, progressListener opaprocessor.IJobProgressNotificationClient, scanInfo *cautils.ScanInfo) (cautils.K8SResources, map[string]workloadinterface.IMetadata, cautils.ExternalResources, map[string]bool, error) {
|
||||
logger.L().Start("Accessing Kubernetes objects")
|
||||
var err error
|
||||
|
||||
// get k8s resources
|
||||
logger.L().Info("Accessing Kubernetes objects")
|
||||
globalFieldSelectors := getFieldSelectorFromScanInfo(scanInfo)
|
||||
sessionObj.SingleResourceScan, err = k8sHandler.findScanObjectResource(scanInfo.ScanObject, globalFieldSelectors)
|
||||
if err != nil {
|
||||
return nil, nil, nil, nil, err
|
||||
}
|
||||
|
||||
cautils.StartSpinner()
|
||||
resourceToControl := make(map[string][]string)
|
||||
// build resources map
|
||||
// map resources based on framework required resources: map["/group/version/kind"][]<k8s workloads ids>
|
||||
k8sResourcesMap := setK8sResourceMap(sessionObj.Policies)
|
||||
|
||||
// get namespace and labels from designator (ignore cluster labels)
|
||||
_, namespace, labels := armotypes.DigestPortalDesignator(designator)
|
||||
|
||||
// pull k8s recourses
|
||||
queryableResources, excludedRulesMap := getQueryableResourceMapFromPolicies(sessionObj.Policies, sessionObj.SingleResourceScan)
|
||||
ksResourceMap := setKSResourceMap(sessionObj.Policies, resourceToControl)
|
||||
|
||||
// map of Kubescape resources to control_ids
|
||||
sessionObj.ResourceToControlsMap = resourceToControl
|
||||
|
||||
if err := k8sHandler.pullResources(k8sResourcesMap, allResources, namespace, labels); err != nil {
|
||||
// pull k8s resources
|
||||
k8sResourcesMap, allResources, err := k8sHandler.pullResources(queryableResources, globalFieldSelectors)
|
||||
if err != nil {
|
||||
cautils.StopSpinner()
|
||||
return k8sResourcesMap, allResources, ksResourceMap, err
|
||||
return k8sResourcesMap, allResources, ksResourceMap, excludedRulesMap, err
|
||||
}
|
||||
|
||||
// add single resource to k8s resources map (for single resource scan)
|
||||
addSingleResourceToResourceMaps(k8sResourcesMap, allResources, sessionObj.SingleResourceScan)
|
||||
|
||||
metrics.UpdateKubernetesResourcesCount(ctx, int64(len(allResources)))
|
||||
numberOfWorkerNodes, err := k8sHandler.pullWorkerNodesNumber()
|
||||
|
||||
@@ -94,8 +92,7 @@ func (k8sHandler *K8sResourceHandler) GetResources(ctx context.Context, sessionO
|
||||
metrics.UpdateWorkerNodesCount(ctx, int64(numberOfWorkerNodes))
|
||||
}
|
||||
|
||||
cautils.StopSpinner()
|
||||
logger.L().Success("Accessed to Kubernetes objects")
|
||||
logger.L().StopSuccess("Accessed Kubernetes objects")
|
||||
|
||||
// backswords compatibility - get image vulnerability resources
|
||||
if k8sHandler.registryAdaptors != nil {
|
||||
@@ -153,32 +150,74 @@ func (k8sHandler *K8sResourceHandler) GetResources(ctx context.Context, sessionO
|
||||
}
|
||||
}
|
||||
|
||||
return k8sResourcesMap, allResources, ksResourceMap, nil
|
||||
return k8sResourcesMap, allResources, ksResourceMap, excludedRulesMap, nil
|
||||
}
|
||||
|
||||
func (k8sHandler *K8sResourceHandler) collectCloudResources(ctx context.Context, sessionObj *cautils.OPASessionObj, allResources map[string]workloadinterface.IMetadata, ksResourceMap *cautils.KSResources, cloudResources []string, progressListener opaprocessor.IJobProgressNotificationClient) error {
|
||||
// findScanObjectResource pulls the requested k8s object to be scanned from the api server
|
||||
func (k8sHandler *K8sResourceHandler) findScanObjectResource(resource *objectsenvelopes.ScanObject, globalFieldSelector IFieldSelector) (workloadinterface.IWorkload, error) {
|
||||
if resource == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
logger.L().Debug("Single resource scan", helpers.String("resource", resource.GetID()))
|
||||
|
||||
var wlIdentifierString string
|
||||
if resource.GetApiVersion() != "" {
|
||||
wlIdentifierString = strings.Join([]string{resource.GetApiVersion(), resource.GetKind()}, "/")
|
||||
} else {
|
||||
wlIdentifierString = resource.GetKind()
|
||||
}
|
||||
|
||||
gvr, err := k8sinterface.GetGroupVersionResource(wlIdentifierString)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fieldSelectors := getNameFieldSelectorString(resource.GetName(), FieldSelectorsEqualsOperator)
|
||||
if resource.GetNamespace() != "" && k8sinterface.IsNamespaceScope(&gvr) {
|
||||
fieldSelectors = combineFieldSelectors(fieldSelectors, getNamespaceFieldSelectorString(resource.GetNamespace(), FieldSelectorsEqualsOperator))
|
||||
}
|
||||
result, err := k8sHandler.pullSingleResource(&gvr, nil, fieldSelectors, globalFieldSelector)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get resource %s, reason: %v", getReadableID(resource), err)
|
||||
}
|
||||
|
||||
if len(result) == 0 {
|
||||
return nil, fmt.Errorf("resource %s was not found", getReadableID(resource))
|
||||
}
|
||||
|
||||
metaObjs := ConvertMapListToMeta(k8sinterface.ConvertUnstructuredSliceToMap(result))
|
||||
if len(metaObjs) == 0 {
|
||||
return nil, fmt.Errorf("resource %s has a parent and cannot be scanned", getReadableID(resource))
|
||||
}
|
||||
|
||||
if len(metaObjs) > 1 {
|
||||
return nil, fmt.Errorf("more than one resource found for %s", getReadableID(resource))
|
||||
}
|
||||
|
||||
if !k8sinterface.IsTypeWorkload(metaObjs[0].GetObject()) {
|
||||
return nil, fmt.Errorf("%s is not a valid Kubernetes workload", getReadableID(resource))
|
||||
}
|
||||
|
||||
wl := workloadinterface.NewWorkloadObj(metaObjs[0].GetObject())
|
||||
return wl, nil
|
||||
}
|
||||
|
||||
func (k8sHandler *K8sResourceHandler) collectCloudResources(ctx context.Context, sessionObj *cautils.OPASessionObj, allResources map[string]workloadinterface.IMetadata, externalResourceMap cautils.ExternalResources, cloudResources []string, progressListener opaprocessor.IJobProgressNotificationClient) error {
|
||||
clusterName := cautils.ClusterName
|
||||
provider := cloudsupport.GetCloudProvider(clusterName)
|
||||
if provider == "" {
|
||||
return fmt.Errorf("failed to get cloud provider, cluster: %s", clusterName)
|
||||
}
|
||||
|
||||
logger.L().Start("Downloading cloud resources")
|
||||
|
||||
if sessionObj.Metadata != nil && sessionObj.Metadata.ContextMetadata.ClusterContextMetadata != nil {
|
||||
sessionObj.Metadata.ContextMetadata.ClusterContextMetadata.CloudProvider = provider
|
||||
}
|
||||
logger.L().Debug("cloud", helpers.String("cluster", clusterName), helpers.String("clusterName", clusterName), helpers.String("provider", provider))
|
||||
|
||||
logger.L().Info("Downloading cloud resources")
|
||||
// start progressbar during pull of cloud resources (this can take a while).
|
||||
if progressListener != nil {
|
||||
progressListener.Start(len(cloudResources))
|
||||
defer progressListener.Stop()
|
||||
}
|
||||
for resourceKind, resourceGetter := range cloudResourceGetterMapping {
|
||||
// set way to progress
|
||||
if progressListener != nil {
|
||||
progressListener.ProgressJob(1, fmt.Sprintf("Cloud Resource: %s", resourceKind))
|
||||
}
|
||||
if !cloudResourceRequired(cloudResources, resourceKind) {
|
||||
continue
|
||||
}
|
||||
@@ -197,13 +236,13 @@ func (k8sHandler *K8sResourceHandler) collectCloudResources(ctx context.Context,
|
||||
}
|
||||
|
||||
allResources[wl.GetID()] = wl
|
||||
(*ksResourceMap)[fmt.Sprintf("%s/%s", wl.GetApiVersion(), wl.GetKind())] = []string{wl.GetID()}
|
||||
externalResourceMap[fmt.Sprintf("%s/%s", wl.GetApiVersion(), wl.GetKind())] = []string{wl.GetID()}
|
||||
}
|
||||
logger.L().Success("Downloaded cloud resources")
|
||||
logger.L().StopSuccess("Downloaded cloud resources")
|
||||
|
||||
// get api server info resource
|
||||
if cloudResourceRequired(cloudResources, string(cloudsupport.TypeApiServerInfo)) {
|
||||
if err := k8sHandler.collectAPIServerInfoResource(allResources, ksResourceMap); err != nil {
|
||||
if err := k8sHandler.collectAPIServerInfoResource(allResources, externalResourceMap); err != nil {
|
||||
logger.L().Ctx(ctx).Warning("failed to collect api server info resource", helpers.Error(err))
|
||||
|
||||
return err
|
||||
@@ -222,14 +261,14 @@ func cloudResourceRequired(cloudResources []string, resource string) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (k8sHandler *K8sResourceHandler) collectAPIServerInfoResource(allResources map[string]workloadinterface.IMetadata, ksResourceMap *cautils.KSResources) error {
|
||||
func (k8sHandler *K8sResourceHandler) collectAPIServerInfoResource(allResources map[string]workloadinterface.IMetadata, externalResourceMap cautils.ExternalResources) error {
|
||||
clusterAPIServerInfo, err := k8sHandler.k8s.DiscoveryClient.ServerVersion()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
resource := cloudsupport.NewApiServerVersionInfo(clusterAPIServerInfo)
|
||||
allResources[resource.GetID()] = resource
|
||||
(*ksResourceMap)[fmt.Sprintf("%s/%s", resource.GetApiVersion(), resource.GetKind())] = []string{resource.GetID()}
|
||||
externalResourceMap[fmt.Sprintf("%s/%s", resource.GetApiVersion(), resource.GetKind())] = []string{resource.GetID()}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -267,13 +306,15 @@ func setMapNamespaceToNumOfResources(ctx context.Context, allResources map[strin
|
||||
sessionObj.SetMapNamespaceToNumberOfResources(mapNamespaceToNumberOfResources)
|
||||
}
|
||||
|
||||
func (k8sHandler *K8sResourceHandler) pullResources(k8sResources *cautils.K8SResources, allResources map[string]workloadinterface.IMetadata, namespace string, labels map[string]string) error {
|
||||
func (k8sHandler *K8sResourceHandler) pullResources(queryableResources QueryableResources, globalFieldSelectors IFieldSelector) (cautils.K8SResources, map[string]workloadinterface.IMetadata, error) {
|
||||
k8sResources := queryableResources.ToK8sResourceMap()
|
||||
allResources := map[string]workloadinterface.IMetadata{}
|
||||
|
||||
var errs error
|
||||
for groupResource := range *k8sResources {
|
||||
apiGroup, apiVersion, resource := k8sinterface.StringToResourceGroup(groupResource)
|
||||
for i := range queryableResources {
|
||||
apiGroup, apiVersion, resource := k8sinterface.StringToResourceGroup(queryableResources[i].GroupVersionResourceTriplet)
|
||||
gvr := schema.GroupVersionResource{Group: apiGroup, Version: apiVersion, Resource: resource}
|
||||
result, err := k8sHandler.pullSingleResource(&gvr, namespace, labels)
|
||||
result, err := k8sHandler.pullSingleResource(&gvr, nil, queryableResources[i].FieldSelectors, globalFieldSelectors)
|
||||
if err != nil {
|
||||
if !strings.Contains(err.Error(), "the server could not find the requested resource") {
|
||||
// handle error
|
||||
@@ -290,19 +331,28 @@ func (k8sHandler *K8sResourceHandler) pullResources(k8sResources *cautils.K8SRes
|
||||
for i := range metaObjs {
|
||||
allResources[metaObjs[i].GetID()] = metaObjs[i]
|
||||
}
|
||||
(*k8sResources)[groupResource] = workloadinterface.ListMetaIDs(metaObjs)
|
||||
|
||||
key := queryableResources[i].GroupVersionResourceTriplet
|
||||
if _, ok := k8sResources[key]; !ok {
|
||||
k8sResources[key] = workloadinterface.ListMetaIDs(metaObjs)
|
||||
} else {
|
||||
k8sResources[key] = append(k8sResources[key], workloadinterface.ListMetaIDs(metaObjs)...)
|
||||
}
|
||||
}
|
||||
return errs
|
||||
return k8sResources, allResources, errs
|
||||
}
|
||||
|
||||
func (k8sHandler *K8sResourceHandler) pullSingleResource(resource *schema.GroupVersionResource, namespace string, labels map[string]string) ([]unstructured.Unstructured, error) {
|
||||
func (k8sHandler *K8sResourceHandler) pullSingleResource(resource *schema.GroupVersionResource, labels map[string]string, fields string, fieldSelector IFieldSelector) ([]unstructured.Unstructured, error) {
|
||||
resourceList := []unstructured.Unstructured{}
|
||||
// set labels
|
||||
listOptions := metav1.ListOptions{}
|
||||
fieldSelectors := k8sHandler.fieldSelector.GetNamespacesSelectors(resource)
|
||||
fieldSelectors := fieldSelector.GetNamespacesSelectors(resource)
|
||||
for i := range fieldSelectors {
|
||||
|
||||
listOptions.FieldSelector = fieldSelectors[i]
|
||||
if fieldSelectors[i] != "" {
|
||||
listOptions.FieldSelector = combineFieldSelectors(fieldSelectors[i], fields)
|
||||
} else if fields != "" {
|
||||
listOptions.FieldSelector = fields
|
||||
}
|
||||
|
||||
if len(labels) > 0 {
|
||||
set := k8slabels.Set(labels)
|
||||
@@ -310,21 +360,12 @@ func (k8sHandler *K8sResourceHandler) pullSingleResource(resource *schema.GroupV
|
||||
}
|
||||
|
||||
// set dynamic object
|
||||
var clientResource dynamic.ResourceInterface
|
||||
if namespace != "" {
|
||||
clientResource = k8sHandler.k8s.DynamicClient.Resource(*resource)
|
||||
} else if k8sinterface.IsNamespaceScope(resource) {
|
||||
clientResource = k8sHandler.k8s.DynamicClient.Resource(*resource).Namespace(namespace)
|
||||
} else if k8sHandler.fieldSelector.GetClusterScope(resource) {
|
||||
clientResource = k8sHandler.k8s.DynamicClient.Resource(*resource)
|
||||
} else {
|
||||
continue
|
||||
}
|
||||
clientResource := k8sHandler.k8s.DynamicClient.Resource(*resource)
|
||||
|
||||
// list resources
|
||||
result, err := clientResource.List(context.Background(), listOptions)
|
||||
if err != nil || result == nil {
|
||||
return nil, fmt.Errorf("failed to get resource: %v, namespace: %s, labelSelector: %v, reason: %v", resource, namespace, listOptions.LabelSelector, err)
|
||||
return nil, fmt.Errorf("failed to get resource: %v, labelSelector: %v, fieldSelector: %v, reason: %v", resource, listOptions.LabelSelector, listOptions.FieldSelector, err)
|
||||
}
|
||||
|
||||
resourceList = append(resourceList, result.Items...)
|
||||
@@ -337,14 +378,23 @@ func (k8sHandler *K8sResourceHandler) pullSingleResource(resource *schema.GroupV
|
||||
func ConvertMapListToMeta(resourceMap []map[string]interface{}) []workloadinterface.IMetadata {
|
||||
workloads := []workloadinterface.IMetadata{}
|
||||
for i := range resourceMap {
|
||||
if w := objectsenvelopes.NewObject(resourceMap[i]); w != nil {
|
||||
r := resourceMap[i]
|
||||
|
||||
// skip workloads with parents. e.g. Pod with a ReplicaSet ownerReference. This will not skip resources with CRDs asa parents
|
||||
if k8sinterface.IsTypeWorkload(r) {
|
||||
if k8sinterface.WorkloadHasParent(workloadinterface.NewWorkloadObj(r)) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if w := objectsenvelopes.NewObject(r); w != nil {
|
||||
workloads = append(workloads, w)
|
||||
}
|
||||
}
|
||||
return workloads
|
||||
}
|
||||
|
||||
func (k8sHandler *K8sResourceHandler) collectHostResources(ctx context.Context, allResources map[string]workloadinterface.IMetadata, ksResourceMap *cautils.KSResources) (map[string]apis.StatusInfo, error) {
|
||||
func (k8sHandler *K8sResourceHandler) collectHostResources(ctx context.Context, allResources map[string]workloadinterface.IMetadata, externalResourceMap cautils.ExternalResources) (map[string]apis.StatusInfo, error) {
|
||||
logger.L().Debug("Collecting host scanner resources")
|
||||
hostResources, infoMap, err := k8sHandler.hostSensorHandler.CollectResources(ctx)
|
||||
if err != nil {
|
||||
@@ -356,17 +406,17 @@ func (k8sHandler *K8sResourceHandler) collectHostResources(ctx context.Context,
|
||||
groupResource := k8sinterface.JoinResourceTriplets(group, version, hostResources[rscIdx].GetKind())
|
||||
allResources[hostResources[rscIdx].GetID()] = &hostResources[rscIdx]
|
||||
|
||||
grpResourceList, ok := (*ksResourceMap)[groupResource]
|
||||
grpResourceList, ok := externalResourceMap[groupResource]
|
||||
if !ok {
|
||||
grpResourceList = make([]string, 0)
|
||||
}
|
||||
(*ksResourceMap)[groupResource] = append(grpResourceList, hostResources[rscIdx].GetID())
|
||||
externalResourceMap[groupResource] = append(grpResourceList, hostResources[rscIdx].GetID())
|
||||
}
|
||||
return infoMap, nil
|
||||
}
|
||||
|
||||
func (k8sHandler *K8sResourceHandler) collectRbacResources(allResources map[string]workloadinterface.IMetadata) error {
|
||||
logger.L().Debug("Collecting rbac resources")
|
||||
logger.L().Start("Collecting RBAC resources")
|
||||
|
||||
if k8sHandler.rbacObjectsAPI == nil {
|
||||
return nil
|
||||
@@ -378,6 +428,9 @@ func (k8sHandler *K8sResourceHandler) collectRbacResources(allResources map[stri
|
||||
for k, v := range allRbacResources {
|
||||
allResources[k] = v
|
||||
}
|
||||
|
||||
logger.L().StopSuccess("Collected RBAC resources")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -51,10 +51,10 @@ var (
|
||||
}
|
||||
)
|
||||
|
||||
func isEmptyImgVulns(ksResourcesMap cautils.KSResources) bool {
|
||||
imgVulnResources := cautils.MapImageVulnResources(&ksResourcesMap)
|
||||
func isEmptyImgVulns(externalResourcesMap cautils.ExternalResources) bool {
|
||||
imgVulnResources := cautils.MapImageVulnResources(externalResourcesMap)
|
||||
for _, resource := range imgVulnResources {
|
||||
if val, ok := ksResourcesMap[resource]; ok {
|
||||
if val, ok := externalResourcesMap[resource]; ok {
|
||||
if len(val) > 0 {
|
||||
return false
|
||||
}
|
||||
@@ -63,50 +63,20 @@ func isEmptyImgVulns(ksResourcesMap cautils.KSResources) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func setK8sResourceMap(frameworks []reporthandling.Framework) *cautils.K8SResources {
|
||||
k8sResources := make(cautils.K8SResources)
|
||||
complexMap := setComplexK8sResourceMap(frameworks)
|
||||
for group := range complexMap {
|
||||
for version := range complexMap[group] {
|
||||
for resource := range complexMap[group][version] {
|
||||
groupResources := k8sinterface.ResourceGroupToString(group, version, resource)
|
||||
for _, groupResource := range groupResources {
|
||||
k8sResources[groupResource] = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return &k8sResources
|
||||
}
|
||||
|
||||
func setKSResourceMap(frameworks []reporthandling.Framework, resourceToControl map[string][]string) *cautils.KSResources {
|
||||
ksResources := make(cautils.KSResources)
|
||||
func setKSResourceMap(frameworks []reporthandling.Framework, resourceToControl map[string][]string) cautils.ExternalResources {
|
||||
externalResources := make(cautils.ExternalResources)
|
||||
complexMap := setComplexKSResourceMap(frameworks, resourceToControl)
|
||||
for group := range complexMap {
|
||||
for version := range complexMap[group] {
|
||||
for resource := range complexMap[group][version] {
|
||||
groupResources := k8sinterface.ResourceGroupToString(group, version, resource)
|
||||
for _, groupResource := range groupResources {
|
||||
ksResources[groupResource] = nil
|
||||
externalResources[groupResource] = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return &ksResources
|
||||
}
|
||||
|
||||
func setComplexK8sResourceMap(frameworks []reporthandling.Framework) map[string]map[string]map[string]interface{} {
|
||||
k8sResources := make(map[string]map[string]map[string]interface{})
|
||||
for _, framework := range frameworks {
|
||||
for _, control := range framework.Controls {
|
||||
for _, rule := range control.Rules {
|
||||
for _, match := range rule.Match {
|
||||
insertResources(k8sResources, match)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return k8sResources
|
||||
return externalResources
|
||||
}
|
||||
|
||||
// [group][versionn][resource]
|
||||
@@ -152,24 +122,6 @@ func insertControls(resource string, resourceToControl map[string][]string, cont
|
||||
}
|
||||
}
|
||||
|
||||
func insertResources(k8sResources map[string]map[string]map[string]interface{}, match reporthandling.RuleMatchObjects) {
|
||||
for _, apiGroup := range match.APIGroups {
|
||||
if v, ok := k8sResources[apiGroup]; !ok || v == nil {
|
||||
k8sResources[apiGroup] = make(map[string]map[string]interface{})
|
||||
}
|
||||
for _, apiVersions := range match.APIVersions {
|
||||
if v, ok := k8sResources[apiGroup][apiVersions]; !ok || v == nil {
|
||||
k8sResources[apiGroup][apiVersions] = make(map[string]interface{})
|
||||
}
|
||||
for _, resource := range match.Resources {
|
||||
if _, ok := k8sResources[apiGroup][apiVersions][resource]; !ok {
|
||||
k8sResources[apiGroup][apiVersions][resource] = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func insertKSResourcesAndControls(k8sResources map[string]map[string]map[string]interface{}, match reporthandling.RuleMatchObjects, resourceToControl map[string][]string, control reporthandling.Control) {
|
||||
for _, apiGroup := range match.APIGroups {
|
||||
if v, ok := k8sResources[apiGroup]; !ok || v == nil {
|
||||
@@ -200,3 +152,14 @@ func getGroupNVersion(apiVersion string) (string, string) {
|
||||
}
|
||||
return group, version
|
||||
}
|
||||
|
||||
func getFieldSelectorFromScanInfo(scanInfo *cautils.ScanInfo) IFieldSelector {
|
||||
if scanInfo.IncludeNamespaces != "" {
|
||||
return NewIncludeSelector(scanInfo.IncludeNamespaces)
|
||||
}
|
||||
if scanInfo.ExcludedNamespaces != "" {
|
||||
return NewExcludeSelector(scanInfo.ExcludedNamespaces)
|
||||
}
|
||||
|
||||
return &EmptySelector{}
|
||||
}
|
||||
|
||||
@@ -1,96 +1,21 @@
|
||||
package resourcehandler
|
||||
|
||||
import (
|
||||
"github.com/kubescape/k8s-interface/k8sinterface"
|
||||
"github.com/kubescape/kubescape/v2/core/cautils"
|
||||
"github.com/kubescape/opa-utils/reporthandling"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestGetK8sResources(t *testing.T) {
|
||||
// getK8sResources
|
||||
}
|
||||
func TestSetResourceMap(t *testing.T) {
|
||||
k8sinterface.InitializeMapResourcesMock()
|
||||
framework := reporthandling.MockFrameworkA()
|
||||
k8sResources := setK8sResourceMap([]reporthandling.Framework{*framework})
|
||||
resources := k8sinterface.ResourceGroupToString("*", "v1", "Pod")
|
||||
if len(resources) == 0 {
|
||||
t.Error("expected resources")
|
||||
}
|
||||
_, ok := (*k8sResources)[resources[0]]
|
||||
if !ok {
|
||||
t.Errorf("missing: 'apps'. k8sResources: %v", k8sResources)
|
||||
}
|
||||
|
||||
}
|
||||
func TestSsEmptyImgVulns(t *testing.T) {
|
||||
ksResourcesMap := make(cautils.KSResources, 0)
|
||||
ksResourcesMap["container.googleapis.com/v1"] = []string{"fsdfds"}
|
||||
assert.Equal(t, true, isEmptyImgVulns(ksResourcesMap))
|
||||
externalResourcesMap := make(cautils.ExternalResources, 0)
|
||||
externalResourcesMap["container.googleapis.com/v1"] = []string{"fsdfds"}
|
||||
assert.Equal(t, true, isEmptyImgVulns(externalResourcesMap))
|
||||
|
||||
ksResourcesMap["armo.vuln.images/v1/ImageVulnerabilities"] = []string{"dada"}
|
||||
assert.Equal(t, false, isEmptyImgVulns(ksResourcesMap))
|
||||
externalResourcesMap["armo.vuln.images/v1/ImageVulnerabilities"] = []string{"dada"}
|
||||
assert.Equal(t, false, isEmptyImgVulns(externalResourcesMap))
|
||||
|
||||
ksResourcesMap["armo.vuln.images/v1/ImageVulnerabilities"] = []string{}
|
||||
ksResourcesMap["bla"] = []string{"blu"}
|
||||
assert.Equal(t, true, isEmptyImgVulns(ksResourcesMap))
|
||||
}
|
||||
|
||||
func TestInsertK8sResources(t *testing.T) {
|
||||
// insertK8sResources
|
||||
k8sResources := make(map[string]map[string]map[string]interface{})
|
||||
match1 := reporthandling.RuleMatchObjects{
|
||||
APIGroups: []string{"apps"},
|
||||
APIVersions: []string{"v1", "v1beta"},
|
||||
Resources: []string{"pods"},
|
||||
}
|
||||
match2 := reporthandling.RuleMatchObjects{
|
||||
APIGroups: []string{"apps"},
|
||||
APIVersions: []string{"v1"},
|
||||
Resources: []string{"deployments"},
|
||||
}
|
||||
match3 := reporthandling.RuleMatchObjects{
|
||||
APIGroups: []string{"core"},
|
||||
APIVersions: []string{"v1"},
|
||||
Resources: []string{"secrets"},
|
||||
}
|
||||
insertResources(k8sResources, match1)
|
||||
insertResources(k8sResources, match2)
|
||||
insertResources(k8sResources, match3)
|
||||
|
||||
apiGroup1, ok := k8sResources["apps"]
|
||||
if !ok {
|
||||
t.Errorf("missing: 'apps'. k8sResources: %v", k8sResources)
|
||||
return
|
||||
}
|
||||
apiVersion1, ok := apiGroup1["v1"]
|
||||
if !ok {
|
||||
t.Errorf("missing: 'v1'. k8sResources: %v", k8sResources)
|
||||
return
|
||||
}
|
||||
_, ok = apiVersion1["pods"]
|
||||
if !ok {
|
||||
t.Errorf("missing: 'pods'. k8sResources: %v", k8sResources)
|
||||
}
|
||||
_, ok = apiVersion1["deployments"]
|
||||
if !ok {
|
||||
t.Errorf("missing: 'deployments'. k8sResources: %v", k8sResources)
|
||||
}
|
||||
apiVersion2, ok := apiGroup1["v1beta"]
|
||||
if !ok {
|
||||
t.Errorf("missing: 'v1beta'. k8sResources: %v", k8sResources)
|
||||
return
|
||||
}
|
||||
_, ok = apiVersion2["pods"]
|
||||
if !ok {
|
||||
t.Errorf("missing: 'pods'. k8sResources: %v", k8sResources)
|
||||
}
|
||||
_, ok = k8sResources["core"]
|
||||
if !ok {
|
||||
t.Errorf("missing: 'core'. k8sResources: %v", k8sResources)
|
||||
return
|
||||
}
|
||||
externalResourcesMap["armo.vuln.images/v1/ImageVulnerabilities"] = []string{}
|
||||
externalResourcesMap["bla"] = []string{"blu"}
|
||||
assert.Equal(t, true, isEmptyImgVulns(externalResourcesMap))
|
||||
}
|
||||
|
||||
56
core/pkg/resourcehandler/queryableresource.go
Normal file
56
core/pkg/resourcehandler/queryableresource.go
Normal file
@@ -0,0 +1,56 @@
|
||||
package resourcehandler
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/kubescape/kubescape/v2/core/cautils"
|
||||
)
|
||||
|
||||
type QueryableResources map[string]QueryableResource
|
||||
|
||||
// QueryableResource is a struct that holds a representation of a resource we would like to query (from the K8S API, or from other sources)
|
||||
type QueryableResource struct {
|
||||
// <api group/api version/resource>
|
||||
GroupVersionResourceTriplet string
|
||||
// metadata.name==<resource name>, metadata.namespace==<resource namespace> etc.
|
||||
FieldSelectors string
|
||||
}
|
||||
|
||||
func (qr *QueryableResource) String() string {
|
||||
if qr.FieldSelectors == "" {
|
||||
return qr.GroupVersionResourceTriplet
|
||||
}
|
||||
return strings.Join([]string{qr.GroupVersionResourceTriplet, qr.FieldSelectors}, "/")
|
||||
}
|
||||
|
||||
func (qr *QueryableResource) Copy() QueryableResource {
|
||||
return QueryableResource{
|
||||
GroupVersionResourceTriplet: qr.GroupVersionResourceTriplet,
|
||||
FieldSelectors: qr.FieldSelectors,
|
||||
}
|
||||
}
|
||||
|
||||
func (qr *QueryableResource) AddFieldSelector(fieldSelector string) {
|
||||
if fieldSelector == "" {
|
||||
return
|
||||
}
|
||||
|
||||
if qr.FieldSelectors == "" {
|
||||
qr.FieldSelectors = fieldSelector
|
||||
return
|
||||
}
|
||||
|
||||
qr.FieldSelectors = combineFieldSelectors(qr.FieldSelectors, fieldSelector)
|
||||
}
|
||||
|
||||
func (qrm QueryableResources) ToK8sResourceMap() cautils.K8SResources {
|
||||
resources := make(cautils.K8SResources)
|
||||
for _, qr := range qrm {
|
||||
resources[qr.GroupVersionResourceTriplet] = nil
|
||||
}
|
||||
return resources
|
||||
}
|
||||
|
||||
func (qrm QueryableResources) Add(qr QueryableResource) {
|
||||
qrm[qr.String()] = qr
|
||||
}
|
||||
126
core/pkg/resourcehandler/queryableresource_test.go
Normal file
126
core/pkg/resourcehandler/queryableresource_test.go
Normal file
@@ -0,0 +1,126 @@
|
||||
package resourcehandler
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/kubescape/kubescape/v2/core/cautils"
|
||||
)
|
||||
|
||||
func TestString(t *testing.T) {
|
||||
tt := []struct {
|
||||
name string
|
||||
input QueryableResource
|
||||
output string
|
||||
}{
|
||||
{
|
||||
name: "Empty field selectors",
|
||||
input: QueryableResource{GroupVersionResourceTriplet: "/v1/pods", FieldSelectors: ""},
|
||||
output: "/v1/pods",
|
||||
},
|
||||
{
|
||||
name: "Non-empty field selectors",
|
||||
input: QueryableResource{GroupVersionResourceTriplet: "/v1/pods", FieldSelectors: "fs1"},
|
||||
output: "/v1/pods/fs1",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tt {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
result := tc.input.String()
|
||||
if result != tc.output {
|
||||
t.Errorf("Expected: %s, got: %s", tc.output, result)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCopy(t *testing.T) {
|
||||
rsrc := &QueryableResource{GroupVersionResourceTriplet: "gvr1", FieldSelectors: "fs1"}
|
||||
copy := rsrc.Copy()
|
||||
|
||||
if copy != *rsrc {
|
||||
t.Errorf("Expected: %v, got: %v", *rsrc, copy)
|
||||
}
|
||||
|
||||
if fmt.Sprintf("%p", rsrc) == fmt.Sprintf("%p", ©) {
|
||||
t.Errorf("pointers of original object and copy should not be same. object: %p, copy: %p", rsrc, ©)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAddFieldSelector(t *testing.T) {
|
||||
tt := []struct {
|
||||
name string
|
||||
initial QueryableResource
|
||||
fieldSelector string
|
||||
expected QueryableResource
|
||||
}{
|
||||
{
|
||||
name: "Add to empty FieldSelectors",
|
||||
initial: QueryableResource{GroupVersionResourceTriplet: "gvr1", FieldSelectors: ""},
|
||||
fieldSelector: "fs1",
|
||||
expected: QueryableResource{GroupVersionResourceTriplet: "gvr1", FieldSelectors: "fs1"},
|
||||
},
|
||||
{
|
||||
name: "Add to non-empty FieldSelectors",
|
||||
initial: QueryableResource{GroupVersionResourceTriplet: "gvr1", FieldSelectors: "fs1"},
|
||||
fieldSelector: "fs2",
|
||||
expected: QueryableResource{GroupVersionResourceTriplet: "gvr1", FieldSelectors: "fs1,fs2"},
|
||||
},
|
||||
{
|
||||
name: "Add empty FieldSelector to non-empty FieldSelectors",
|
||||
initial: QueryableResource{GroupVersionResourceTriplet: "gvr1", FieldSelectors: "fs1"},
|
||||
fieldSelector: "",
|
||||
expected: QueryableResource{GroupVersionResourceTriplet: "gvr1", FieldSelectors: "fs1"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tt {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
tc.initial.AddFieldSelector(tc.fieldSelector)
|
||||
|
||||
if tc.initial != tc.expected {
|
||||
t.Errorf("Expected: %v, got: %v", tc.expected, tc.initial)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestToK8sResourceMap(t *testing.T) {
|
||||
qrm := make(QueryableResources)
|
||||
qrm.Add(QueryableResource{GroupVersionResourceTriplet: "/v1/pods", FieldSelectors: "metadata.namespace=kube-system"})
|
||||
qrm.Add(QueryableResource{GroupVersionResourceTriplet: "/v1/pods", FieldSelectors: "metadata.namespace=default"})
|
||||
qrm.Add(QueryableResource{GroupVersionResourceTriplet: "/v1/nodes", FieldSelectors: ""})
|
||||
qrm.Add(QueryableResource{GroupVersionResourceTriplet: "batch/v1/jobs", FieldSelectors: ""})
|
||||
|
||||
expectedResult := cautils.K8SResources{
|
||||
"/v1/pods": nil,
|
||||
"/v1/nodes": nil,
|
||||
"batch/v1/jobs": nil,
|
||||
}
|
||||
|
||||
result := qrm.ToK8sResourceMap()
|
||||
|
||||
if len(result) != len(expectedResult) {
|
||||
t.Fatalf("Expected: %v, got: %v", expectedResult, result)
|
||||
}
|
||||
|
||||
for k, v := range result {
|
||||
if _, ok := expectedResult[k]; !ok || v != nil {
|
||||
t.Fatalf("Expected: %v, got: %v", expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestAdd(t *testing.T) {
|
||||
qrMap := make(QueryableResources)
|
||||
qr := QueryableResource{GroupVersionResourceTriplet: "/v1/pods", FieldSelectors: "metadata.namespace=default"}
|
||||
qrMap.Add(qr)
|
||||
|
||||
if resource, ok := qrMap["/v1/pods/metadata.namespace=default"]; !ok {
|
||||
t.Fatalf("Expected resource was not added to the map")
|
||||
} else if !reflect.DeepEqual(resource, qr) {
|
||||
t.Fatalf("Expected: %v, got: %v", qr, resource)
|
||||
}
|
||||
}
|
||||
@@ -40,7 +40,7 @@ func NewRegistryAdaptors() (*RegistryAdaptors, error) {
|
||||
return registryAdaptors, nil
|
||||
}
|
||||
|
||||
func (registryAdaptors *RegistryAdaptors) collectImagesVulnerabilities(k8sResourcesMap *cautils.K8SResources, allResources map[string]workloadinterface.IMetadata, ksResourceMap *cautils.KSResources) error {
|
||||
func (registryAdaptors *RegistryAdaptors) collectImagesVulnerabilities(k8sResourcesMap cautils.K8SResources, allResources map[string]workloadinterface.IMetadata, externalResourceMap cautils.ExternalResources) error {
|
||||
logger.L().Debug("Collecting images vulnerabilities")
|
||||
|
||||
if len(registryAdaptors.adaptors) == 0 {
|
||||
@@ -80,7 +80,7 @@ func (registryAdaptors *RegistryAdaptors) collectImagesVulnerabilities(k8sResour
|
||||
for i := range metaObjs {
|
||||
allResources[metaObjs[i].GetID()] = metaObjs[i]
|
||||
}
|
||||
(*ksResourceMap)[k8sinterface.JoinResourceTriplets(ImagevulnerabilitiesObjectGroup, ImagevulnerabilitiesObjectVersion, ImagevulnerabilitiesObjectKind)] = workloadinterface.ListMetaIDs(metaObjs)
|
||||
externalResourceMap[k8sinterface.JoinResourceTriplets(ImagevulnerabilitiesObjectGroup, ImagevulnerabilitiesObjectVersion, ImagevulnerabilitiesObjectKind)] = workloadinterface.ListMetaIDs(metaObjs)
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -106,9 +106,9 @@ func vulnerabilityToIMetadata(imageTag string, vulnerabilities []registryvulnera
|
||||
}
|
||||
|
||||
// list all images tags
|
||||
func listImagesTags(k8sResourcesMap *cautils.K8SResources, allResources map[string]workloadinterface.IMetadata) []string {
|
||||
func listImagesTags(k8sResourcesMap cautils.K8SResources, allResources map[string]workloadinterface.IMetadata) []string {
|
||||
images := []string{}
|
||||
for _, resources := range *k8sResourcesMap {
|
||||
for _, resources := range k8sResourcesMap {
|
||||
for j := range resources {
|
||||
if resource, ok := allResources[resources[j]]; ok {
|
||||
if resource.GetObjectType() == workloadinterface.TypeWorkloadObject {
|
||||
|
||||
151
core/pkg/resourcehandler/resourcehandlerutils.go
Normal file
151
core/pkg/resourcehandler/resourcehandlerutils.go
Normal file
@@ -0,0 +1,151 @@
|
||||
package resourcehandler
|
||||
|
||||
import (
|
||||
"github.com/kubescape/k8s-interface/k8sinterface"
|
||||
"github.com/kubescape/k8s-interface/workloadinterface"
|
||||
"github.com/kubescape/kubescape/v2/core/cautils"
|
||||
"github.com/kubescape/opa-utils/reporthandling"
|
||||
)
|
||||
|
||||
// utils which are common to all resource handlers
|
||||
func addSingleResourceToResourceMaps(k8sResources cautils.K8SResources, allResources map[string]workloadinterface.IMetadata, wl workloadinterface.IWorkload) {
|
||||
if wl == nil {
|
||||
return
|
||||
}
|
||||
// if k8sinterface.WorkloadHasParent(wl) {
|
||||
// return
|
||||
// }
|
||||
|
||||
allResources[wl.GetID()] = wl
|
||||
|
||||
resourceGroup := k8sinterface.ResourceGroupToSlice(wl.GetGroup(), wl.GetVersion(), wl.GetKind())[0]
|
||||
k8sResources[resourceGroup] = append(k8sResources[resourceGroup], wl.GetID())
|
||||
}
|
||||
|
||||
func getQueryableResourceMapFromPolicies(frameworks []reporthandling.Framework, resource workloadinterface.IWorkload) (QueryableResources, map[string]bool) {
|
||||
queryableResources := make(QueryableResources)
|
||||
excludedRulesMap := make(map[string]bool)
|
||||
namespace := getScannedResourceNamespace(resource)
|
||||
|
||||
for _, framework := range frameworks {
|
||||
for _, control := range framework.Controls {
|
||||
for _, rule := range control.Rules {
|
||||
var resourcesFilterMap map[string]bool = nil
|
||||
// for single resource scan, we need to filter the rules and which resources to query according to the given resource
|
||||
if resource != nil {
|
||||
if resourcesFilterMap = filterRuleMatchesForResource(resource.GetKind(), rule.Match); resourcesFilterMap == nil {
|
||||
// rule does not apply to this resource
|
||||
excludedRulesMap[rule.Name] = false
|
||||
continue
|
||||
}
|
||||
}
|
||||
for _, match := range rule.Match {
|
||||
updateQueryableResourcesMapFromRuleMatchObject(&match, resourcesFilterMap, queryableResources, namespace)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return queryableResources, excludedRulesMap
|
||||
}
|
||||
|
||||
// getScannedResourceNamespace returns the namespace of the scanned resource.
|
||||
// If input is nil (e.g. cluster scan), returns an empty string
|
||||
// If the resource is a namespaced or the Namespace itself, returns the namespace name
|
||||
// In all other cases, returns an empty string
|
||||
func getScannedResourceNamespace(workload workloadinterface.IWorkload) string {
|
||||
if workload == nil {
|
||||
return ""
|
||||
}
|
||||
if workload.GetKind() == "Namespace" {
|
||||
return workload.GetName()
|
||||
}
|
||||
|
||||
if k8sinterface.IsResourceInNamespaceScope(workload.GetKind()) {
|
||||
return workload.GetNamespace()
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
// filterRuleMatchesForResource returns a map, of which resources should be queried for a given resource
|
||||
// The map is of the form: map[<resource>]bool (The bool value indicates whether the resource should be queried or not)
|
||||
// The function will return a nil map if the rule does not apply to the given workload
|
||||
func filterRuleMatchesForResource(resourceKind string, matchObjects []reporthandling.RuleMatchObjects) map[string]bool {
|
||||
resourceMap := make(map[string]bool)
|
||||
for _, match := range matchObjects {
|
||||
for _, resource := range match.Resources {
|
||||
resourceMap[resource] = false
|
||||
}
|
||||
}
|
||||
|
||||
// rule does not apply to this workload
|
||||
if _, exists := resourceMap[resourceKind]; !exists {
|
||||
return nil
|
||||
}
|
||||
|
||||
workloadKinds := map[string]bool{
|
||||
"Pod": false,
|
||||
"DaemonSet": false,
|
||||
"Deployment": false,
|
||||
"ReplicaSet": false,
|
||||
"StatefulSet": false,
|
||||
"CronJob": false,
|
||||
"Job": false,
|
||||
}
|
||||
|
||||
_, isInputResourceWorkload := workloadKinds[resourceKind]
|
||||
|
||||
for r := range resourceMap {
|
||||
// we don't need to query the same resource
|
||||
if r == resourceKind {
|
||||
continue
|
||||
}
|
||||
|
||||
_, isCurrentResourceWorkload := workloadKinds[r]
|
||||
resourceMap[r] = !isCurrentResourceWorkload || !isInputResourceWorkload
|
||||
}
|
||||
|
||||
return resourceMap
|
||||
}
|
||||
|
||||
// updateQueryableResourcesMapFromMatch updates the queryableResources map with the relevant resources from the match object.
|
||||
// if namespace is not empty, the namespace filter is added to the queryable resources (which are namespaced)
|
||||
// if resourcesFilterMap is not nil, only the resources with value 'true' will be added to the queryable resources
|
||||
func updateQueryableResourcesMapFromRuleMatchObject(match *reporthandling.RuleMatchObjects, resourcesFilterMap map[string]bool, queryableResources QueryableResources, namespace string) {
|
||||
for _, apiGroup := range match.APIGroups {
|
||||
for _, apiVersions := range match.APIVersions {
|
||||
for _, resource := range match.Resources {
|
||||
if resourcesFilterMap != nil {
|
||||
if relevant := resourcesFilterMap[resource]; !relevant {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
groupResources := k8sinterface.ResourceGroupToString(apiGroup, apiVersions, resource)
|
||||
// if namespace filter is set, we are scanning a workload in a specific namespace
|
||||
// calling the getNamespacesSelector will add the namespace field selector (or name for Namespace resource)
|
||||
globalFieldSelector := getNamespacesSelector(resource, namespace, "=")
|
||||
|
||||
for _, groupResource := range groupResources {
|
||||
queryableResource := QueryableResource{
|
||||
GroupVersionResourceTriplet: groupResource,
|
||||
}
|
||||
queryableResource.AddFieldSelector(globalFieldSelector)
|
||||
|
||||
if match.FieldSelector == nil || len(match.FieldSelector) == 0 {
|
||||
queryableResources.Add(queryableResource)
|
||||
continue
|
||||
}
|
||||
|
||||
for _, fieldSelector := range match.FieldSelector {
|
||||
qrCopy := queryableResource.Copy()
|
||||
qrCopy.AddFieldSelector(fieldSelector)
|
||||
queryableResources.Add(qrCopy)
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
499
core/pkg/resourcehandler/resourcehandlerutils_test.go
Normal file
499
core/pkg/resourcehandler/resourcehandlerutils_test.go
Normal file
@@ -0,0 +1,499 @@
|
||||
package resourcehandler
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/kubescape/k8s-interface/k8sinterface"
|
||||
"github.com/kubescape/k8s-interface/workloadinterface"
|
||||
"github.com/kubescape/opa-utils/reporthandling"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func mockMatch(i int) reporthandling.RuleMatchObjects {
|
||||
switch i {
|
||||
case 1:
|
||||
return reporthandling.RuleMatchObjects{
|
||||
APIGroups: []string{"apps"},
|
||||
APIVersions: []string{"v1", "v1beta"},
|
||||
Resources: []string{"Pod"},
|
||||
}
|
||||
case 2:
|
||||
return reporthandling.RuleMatchObjects{
|
||||
APIGroups: []string{"apps"},
|
||||
APIVersions: []string{"v1"},
|
||||
Resources: []string{"Deployment", "ReplicaSet"},
|
||||
}
|
||||
case 3:
|
||||
return reporthandling.RuleMatchObjects{
|
||||
APIGroups: []string{"core"},
|
||||
APIVersions: []string{"v1"},
|
||||
Resources: []string{"Secret"},
|
||||
}
|
||||
case 4:
|
||||
return reporthandling.RuleMatchObjects{
|
||||
APIGroups: []string{"core"},
|
||||
APIVersions: []string{"v1"},
|
||||
Resources: []string{"Secret"},
|
||||
FieldSelector: []string{"metadata.name=secret1", "metadata.name=secret2,metadata.namespace=default"},
|
||||
}
|
||||
case 5:
|
||||
return reporthandling.RuleMatchObjects{
|
||||
APIGroups: []string{"rbac.authorization.k8s.io"},
|
||||
APIVersions: []string{"v1"},
|
||||
Resources: []string{"ClusterRoleBinding", "RoleBinding"},
|
||||
FieldSelector: []string{"metadata.name=test123"},
|
||||
}
|
||||
case 6:
|
||||
return reporthandling.RuleMatchObjects{
|
||||
APIGroups: []string{""},
|
||||
APIVersions: []string{"v1"},
|
||||
Resources: []string{"Namespace"},
|
||||
FieldSelector: []string{},
|
||||
}
|
||||
case 7:
|
||||
return reporthandling.RuleMatchObjects{
|
||||
APIGroups: []string{""},
|
||||
APIVersions: []string{"v1"},
|
||||
Resources: []string{"Node"},
|
||||
FieldSelector: []string{},
|
||||
}
|
||||
|
||||
default:
|
||||
panic("invalid index")
|
||||
}
|
||||
}
|
||||
|
||||
func mockRule(ruleName string, matches []reporthandling.RuleMatchObjects, ruleRego string) reporthandling.PolicyRule {
|
||||
rule := reporthandling.PolicyRule{
|
||||
PortalBase: *armotypes.MockPortalBase("aaaaaaaa-bbbb-cccc-dddd-000000000001", ruleName, nil),
|
||||
RuleLanguage: reporthandling.RegoLanguage,
|
||||
Match: matches,
|
||||
RuleDependencies: []reporthandling.RuleDependency{
|
||||
{
|
||||
PackageName: "kubernetes.api.client",
|
||||
},
|
||||
},
|
||||
}
|
||||
if ruleRego != "" {
|
||||
rule.Rule = ruleRego
|
||||
} else {
|
||||
rule.Rule = reporthandling.MockRegoPrivilegedPods()
|
||||
}
|
||||
return rule
|
||||
}
|
||||
|
||||
func mockControl(controlName string, rules []reporthandling.PolicyRule) reporthandling.Control {
|
||||
return reporthandling.Control{
|
||||
PortalBase: *armotypes.MockPortalBase("aaaaaaaa-bbbb-cccc-dddd-000000000001", controlName, nil),
|
||||
Rules: rules,
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func mockFramework(frameworkName string, controls []reporthandling.Control) *reporthandling.Framework {
|
||||
return &reporthandling.Framework{
|
||||
PortalBase: *armotypes.MockPortalBase("aaaaaaaa-bbbb-cccc-dddd-000000000001", frameworkName, nil),
|
||||
CreationTime: "",
|
||||
Description: "mock framework description",
|
||||
Controls: controls,
|
||||
}
|
||||
}
|
||||
|
||||
func mockWorkload(apiVersion, kind, namespace, name string) workloadinterface.IWorkload {
|
||||
mock := workloadinterface.NewWorkloadMock(nil)
|
||||
mock.SetKind(kind)
|
||||
mock.SetApiVersion(apiVersion)
|
||||
mock.SetName(name)
|
||||
mock.SetNamespace(namespace)
|
||||
|
||||
if ok := k8sinterface.IsTypeWorkload(mock.GetObject()); !ok {
|
||||
panic("mocked object is not a valid workload")
|
||||
}
|
||||
|
||||
return mock
|
||||
}
|
||||
|
||||
func TestGetQueryableResourceMapFromPolicies(t *testing.T) {
|
||||
k8sinterface.InitializeMapResourcesMock()
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
workload workloadinterface.IWorkload
|
||||
controls []reporthandling.Control
|
||||
expectedResourceGroups []string
|
||||
expectedExcludedRules []string
|
||||
}{
|
||||
{
|
||||
name: "no workload - all resources groups are queryable",
|
||||
workload: nil,
|
||||
controls: []reporthandling.Control{
|
||||
mockControl("1", []reporthandling.PolicyRule{
|
||||
mockRule("rule-a", []reporthandling.RuleMatchObjects{
|
||||
mockMatch(1), mockMatch(2), mockMatch(3), mockMatch(4),
|
||||
}, ""),
|
||||
mockRule("rule-b", []reporthandling.RuleMatchObjects{
|
||||
mockMatch(6),
|
||||
}, ""),
|
||||
}),
|
||||
},
|
||||
expectedExcludedRules: []string{},
|
||||
expectedResourceGroups: []string{
|
||||
"/v1/namespaces",
|
||||
"apps/v1/deployments",
|
||||
"apps/v1/pods",
|
||||
"apps/v1/replicasets",
|
||||
"apps/v1beta/pods",
|
||||
"core/v1/secrets",
|
||||
"core/v1/secrets/metadata.name=secret1",
|
||||
"core/v1/secrets/metadata.name=secret2,metadata.namespace=default",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "workload - Namespace",
|
||||
workload: mockWorkload("v1", "Namespace", "", "ns1"),
|
||||
controls: []reporthandling.Control{
|
||||
mockControl("1", []reporthandling.PolicyRule{
|
||||
mockRule("rule-a", []reporthandling.RuleMatchObjects{
|
||||
mockMatch(1), mockMatch(2), mockMatch(3), mockMatch(4),
|
||||
}, ""),
|
||||
mockRule("rule-b", []reporthandling.RuleMatchObjects{
|
||||
mockMatch(6), mockMatch(3), mockMatch(2), mockMatch(7),
|
||||
}, ""),
|
||||
}),
|
||||
},
|
||||
expectedExcludedRules: []string{
|
||||
"rule-a",
|
||||
},
|
||||
expectedResourceGroups: []string{
|
||||
"/v1/nodes",
|
||||
"core/v1/secrets/metadata.namespace=ns1",
|
||||
"apps/v1/deployments/metadata.namespace=ns1",
|
||||
"apps/v1/replicasets/metadata.namespace=ns1",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "workload - Deployment",
|
||||
workload: mockWorkload("apps/v1", "Deployment", "ns1", "deploy1"),
|
||||
controls: []reporthandling.Control{
|
||||
mockControl("1", []reporthandling.PolicyRule{
|
||||
mockRule("rule-b", []reporthandling.RuleMatchObjects{
|
||||
mockMatch(6), mockMatch(3), mockMatch(2), mockMatch(7),
|
||||
}, ""),
|
||||
}),
|
||||
},
|
||||
expectedExcludedRules: []string{},
|
||||
expectedResourceGroups: []string{
|
||||
"core/v1/secrets/metadata.namespace=ns1",
|
||||
"/v1/namespaces/metadata.name=ns1",
|
||||
"/v1/nodes",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "workload - Node",
|
||||
workload: mockWorkload("v1", "Node", "", "node1"),
|
||||
controls: []reporthandling.Control{
|
||||
mockControl("1", []reporthandling.PolicyRule{
|
||||
mockRule("rule-b", []reporthandling.RuleMatchObjects{
|
||||
mockMatch(6), mockMatch(3), mockMatch(2), mockMatch(7),
|
||||
}, ""),
|
||||
}),
|
||||
},
|
||||
expectedExcludedRules: []string{},
|
||||
expectedResourceGroups: []string{
|
||||
"core/v1/secrets",
|
||||
"/v1/namespaces",
|
||||
"apps/v1/deployments",
|
||||
"apps/v1/replicasets",
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, testCase := range testCases {
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
resourceGroups, excludedRulesMap := getQueryableResourceMapFromPolicies([]reporthandling.Framework{*mockFramework("test", testCase.controls)}, testCase.workload) // TODO check second param
|
||||
assert.Equalf(t, len(testCase.expectedExcludedRules), len(excludedRulesMap), "excludedRulesMap length is not as expected")
|
||||
for _, expectedExcludedRuleName := range testCase.expectedExcludedRules {
|
||||
assert.Contains(t, excludedRulesMap, expectedExcludedRuleName, "excludedRulesMap does not contain expected rule name")
|
||||
}
|
||||
|
||||
assert.Equalf(t, len(testCase.expectedResourceGroups), len(resourceGroups), "queryableResourceMap length is not as expected")
|
||||
for _, expected := range testCase.expectedResourceGroups {
|
||||
assert.Contains(t, resourceGroups, expected, "queryableResourceMap does not contain expected resource group")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateQueryableResourcesMapFromRuleMatchObject(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
matches []reporthandling.RuleMatchObjects
|
||||
resourcesFilterMap map[string]bool
|
||||
namespace string
|
||||
expectedQueryableResourceGroups []string
|
||||
expectedK8SResourceGroups []string
|
||||
}{
|
||||
{
|
||||
name: "filter map is nil - query all",
|
||||
matches: []reporthandling.RuleMatchObjects{
|
||||
mockMatch(1), mockMatch(2), mockMatch(3), mockMatch(4),
|
||||
},
|
||||
resourcesFilterMap: nil,
|
||||
namespace: "",
|
||||
expectedQueryableResourceGroups: []string{
|
||||
"apps/v1/pods",
|
||||
"apps/v1beta/pods",
|
||||
"apps/v1/deployments",
|
||||
"apps/v1/replicasets",
|
||||
"core/v1/secrets",
|
||||
"core/v1/secrets/metadata.name=secret1",
|
||||
"core/v1/secrets/metadata.name=secret2,metadata.namespace=default",
|
||||
},
|
||||
expectedK8SResourceGroups: []string{
|
||||
"apps/v1/pods",
|
||||
"apps/v1beta/pods",
|
||||
"apps/v1/deployments",
|
||||
"apps/v1/replicasets",
|
||||
"core/v1/secrets",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "filter map not nil - query only secrets and pods",
|
||||
matches: []reporthandling.RuleMatchObjects{
|
||||
mockMatch(1), mockMatch(2), mockMatch(3), mockMatch(4),
|
||||
},
|
||||
namespace: "",
|
||||
resourcesFilterMap: map[string]bool{
|
||||
"Secret": true,
|
||||
"Pod": true,
|
||||
"ReplicaSet": false,
|
||||
"Deployment": false,
|
||||
},
|
||||
expectedQueryableResourceGroups: []string{
|
||||
"apps/v1/pods",
|
||||
"apps/v1beta/pods",
|
||||
"core/v1/secrets",
|
||||
"core/v1/secrets/metadata.name=secret1",
|
||||
"core/v1/secrets/metadata.name=secret2,metadata.namespace=default",
|
||||
},
|
||||
expectedK8SResourceGroups: []string{
|
||||
"apps/v1/pods",
|
||||
"apps/v1beta/pods",
|
||||
"core/v1/secrets",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "namespace field selector for namespaced resources",
|
||||
matches: []reporthandling.RuleMatchObjects{
|
||||
mockMatch(5),
|
||||
},
|
||||
namespace: "ns1",
|
||||
resourcesFilterMap: map[string]bool{
|
||||
"RoleBinding": true,
|
||||
"ClusterRoleBinding": true,
|
||||
},
|
||||
expectedQueryableResourceGroups: []string{
|
||||
|
||||
"rbac.authorization.k8s.io/v1/clusterrolebindings/metadata.name=test123",
|
||||
"rbac.authorization.k8s.io/v1/rolebindings/metadata.namespace=ns1,metadata.name=test123",
|
||||
},
|
||||
expectedK8SResourceGroups: []string{
|
||||
"rbac.authorization.k8s.io/v1/clusterrolebindings",
|
||||
"rbac.authorization.k8s.io/v1/rolebindings",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "name field selector for Namespace resource",
|
||||
matches: []reporthandling.RuleMatchObjects{
|
||||
mockMatch(2), mockMatch(6),
|
||||
},
|
||||
namespace: "ns1",
|
||||
resourcesFilterMap: map[string]bool{
|
||||
"Deployment": true,
|
||||
"ReplicaSet": false,
|
||||
"Namespace": true,
|
||||
},
|
||||
expectedQueryableResourceGroups: []string{
|
||||
"apps/v1/deployments/metadata.namespace=ns1",
|
||||
"/v1/namespaces/metadata.name=ns1",
|
||||
},
|
||||
expectedK8SResourceGroups: []string{
|
||||
"apps/v1/deployments",
|
||||
"/v1/namespaces",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
queryableResources := make(QueryableResources)
|
||||
for i := range testCase.matches {
|
||||
updateQueryableResourcesMapFromRuleMatchObject(&testCase.matches[i], testCase.resourcesFilterMap, queryableResources, testCase.namespace)
|
||||
}
|
||||
|
||||
assert.Equal(t, len(testCase.expectedQueryableResourceGroups), len(queryableResources))
|
||||
for _, resourceGroup := range testCase.expectedQueryableResourceGroups {
|
||||
assert.Contains(t, queryableResources, resourceGroup)
|
||||
}
|
||||
|
||||
k8sResources := queryableResources.ToK8sResourceMap()
|
||||
assert.Equal(t, len(testCase.expectedK8SResourceGroups), len(k8sResources))
|
||||
for _, resourceGroup := range testCase.expectedK8SResourceGroups {
|
||||
assert.Contains(t, k8sResources, resourceGroup)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestFilterRuleMatchesForResource(t *testing.T) {
|
||||
testCases := []struct {
|
||||
resourceKind string
|
||||
matchResources []string
|
||||
expectedMap map[string]bool
|
||||
}{
|
||||
{
|
||||
resourceKind: "Pod",
|
||||
matchResources: []string{
|
||||
"Node", "Pod", "DaemonSet", "Deployment", "ReplicaSet", "StatefulSet", "CronJob", "Job", "PodSecurityPolicy",
|
||||
},
|
||||
expectedMap: map[string]bool{
|
||||
"Node": true,
|
||||
"PodSecurityPolicy": true,
|
||||
"Pod": false,
|
||||
"DaemonSet": false,
|
||||
"Deployment": false,
|
||||
"ReplicaSet": false,
|
||||
"StatefulSet": false,
|
||||
"CronJob": false,
|
||||
"Job": false,
|
||||
},
|
||||
},
|
||||
{
|
||||
resourceKind: "Deployment",
|
||||
matchResources: []string{
|
||||
"Node", "Pod", "DaemonSet", "Deployment", "ReplicaSet", "StatefulSet", "CronJob", "Job", "PodSecurityPolicy",
|
||||
},
|
||||
expectedMap: map[string]bool{
|
||||
"Node": true,
|
||||
"PodSecurityPolicy": true,
|
||||
"Pod": false,
|
||||
"DaemonSet": false,
|
||||
"Deployment": false,
|
||||
"ReplicaSet": false,
|
||||
"StatefulSet": false,
|
||||
"CronJob": false,
|
||||
"Job": false,
|
||||
},
|
||||
},
|
||||
{
|
||||
resourceKind: "Deployment",
|
||||
matchResources: []string{
|
||||
"Deployment", "ReplicaSet",
|
||||
},
|
||||
expectedMap: map[string]bool{
|
||||
"Deployment": false,
|
||||
"ReplicaSet": false,
|
||||
},
|
||||
},
|
||||
{
|
||||
resourceKind: "ReplicaSet",
|
||||
matchResources: []string{
|
||||
"Node", "Pod", "DaemonSet", "Deployment", "ReplicaSet", "StatefulSet", "CronJob", "Job", "PodSecurityPolicy",
|
||||
},
|
||||
expectedMap: map[string]bool{
|
||||
"Node": true,
|
||||
"PodSecurityPolicy": true,
|
||||
"Pod": false,
|
||||
"DaemonSet": false,
|
||||
"Deployment": false,
|
||||
"ReplicaSet": false,
|
||||
"StatefulSet": false,
|
||||
"CronJob": false,
|
||||
"Job": false,
|
||||
},
|
||||
},
|
||||
{
|
||||
resourceKind: "ClusterRole",
|
||||
matchResources: []string{
|
||||
"Node", "Pod", "DaemonSet", "Deployment", "ReplicaSet", "StatefulSet", "CronJob", "Job", "PodSecurityPolicy",
|
||||
},
|
||||
expectedMap: nil, // rule does not apply to workload
|
||||
},
|
||||
{
|
||||
resourceKind: "Node",
|
||||
matchResources: []string{
|
||||
"Node", "Pod", "DaemonSet", "Deployment", "ReplicaSet", "StatefulSet", "CronJob", "Job", "PodSecurityPolicy",
|
||||
},
|
||||
expectedMap: map[string]bool{
|
||||
"Node": false,
|
||||
"PodSecurityPolicy": true,
|
||||
"Pod": true,
|
||||
"DaemonSet": true,
|
||||
"Deployment": true,
|
||||
"ReplicaSet": true,
|
||||
"StatefulSet": true,
|
||||
"CronJob": true,
|
||||
"Job": true,
|
||||
},
|
||||
},
|
||||
{
|
||||
resourceKind: "Pod",
|
||||
matchResources: []string{
|
||||
"PodSecurityPolicy", "Pod",
|
||||
},
|
||||
expectedMap: map[string]bool{
|
||||
"PodSecurityPolicy": true,
|
||||
"Pod": false,
|
||||
},
|
||||
},
|
||||
{
|
||||
resourceKind: "Pod",
|
||||
matchResources: []string{
|
||||
"PodSecurityPolicy", "Pod", "ReplicaSet",
|
||||
},
|
||||
expectedMap: map[string]bool{
|
||||
"PodSecurityPolicy": true,
|
||||
"Pod": false,
|
||||
"ReplicaSet": false,
|
||||
},
|
||||
},
|
||||
{
|
||||
resourceKind: "Deployment",
|
||||
matchResources: []string{
|
||||
"PodSecurityPolicy", "Pod",
|
||||
},
|
||||
expectedMap: nil, // rule does not apply to workload
|
||||
},
|
||||
{
|
||||
resourceKind: "PodSecurityPolicy",
|
||||
matchResources: []string{
|
||||
"PodSecurityPolicy", "Pod",
|
||||
},
|
||||
expectedMap: map[string]bool{
|
||||
"PodSecurityPolicy": false,
|
||||
"Pod": true,
|
||||
},
|
||||
},
|
||||
}
|
||||
for i, testCase := range testCases {
|
||||
t.Run(fmt.Sprintf("%v", i), func(t *testing.T) {
|
||||
matches := []reporthandling.RuleMatchObjects{
|
||||
{
|
||||
Resources: testCase.matchResources,
|
||||
},
|
||||
}
|
||||
|
||||
result := filterRuleMatchesForResource(testCase.resourceKind, matches)
|
||||
if testCase.expectedMap == nil {
|
||||
assert.Nil(t, result, "expected nil (rule does not apply to the resource)")
|
||||
return
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(result, testCase.expectedMap) {
|
||||
t.Errorf("expected %v, got %v", testCase.expectedMap, result)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -24,7 +24,8 @@ const (
|
||||
)
|
||||
|
||||
type IPrinter interface {
|
||||
ActionPrint(ctx context.Context, opaSessionObj *cautils.OPASessionObj)
|
||||
PrintNextSteps()
|
||||
ActionPrint(ctx context.Context, opaSessionObj *cautils.OPASessionObj, imageScanData []cautils.ImageScanData)
|
||||
SetWriter(ctx context.Context, outputFile string)
|
||||
Score(score float32)
|
||||
}
|
||||
|
||||
@@ -18,6 +18,8 @@ const (
|
||||
jsonOutputExt = ".json"
|
||||
)
|
||||
|
||||
var _ printer.IPrinter = &JsonPrinter{}
|
||||
|
||||
type JsonPrinter struct {
|
||||
writer *os.File
|
||||
}
|
||||
@@ -40,7 +42,11 @@ func (jsonPrinter *JsonPrinter) Score(score float32) {
|
||||
fmt.Fprintf(os.Stderr, "\nOverall compliance-score (100- Excellent, 0- All failed): %d\n", cautils.Float32ToInt(score))
|
||||
}
|
||||
|
||||
func (jsonPrinter *JsonPrinter) ActionPrint(ctx context.Context, opaSessionObj *cautils.OPASessionObj) {
|
||||
func (jsonPrinter *JsonPrinter) PrintNextSteps() {
|
||||
|
||||
}
|
||||
|
||||
func (jsonPrinter *JsonPrinter) ActionPrint(ctx context.Context, opaSessionObj *cautils.OPASessionObj, _ []cautils.ImageScanData) {
|
||||
report := cautils.ReportV2ToV1(opaSessionObj)
|
||||
|
||||
var postureReportStr []byte
|
||||
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/fatih/color"
|
||||
"github.com/jwalton/gchalk"
|
||||
"github.com/kubescape/kubescape/v2/core/cautils"
|
||||
"github.com/kubescape/kubescape/v2/core/pkg/resultshandling/gotree"
|
||||
"github.com/kubescape/opa-utils/reporthandling/apis"
|
||||
@@ -47,14 +47,12 @@ func (prettyPrinter *PrettyPrinter) createFailedControlList(node v1alpha1.IAttac
|
||||
func (prettyPrinter *PrettyPrinter) buildTreeFromAttackTrackStep(tree gotree.Tree, node v1alpha1.IAttackTrackStep) gotree.Tree {
|
||||
nodeName := node.GetName()
|
||||
if len(node.GetControls()) > 0 {
|
||||
red := color.New(color.Bold, color.FgRed).SprintFunc()
|
||||
nodeName = red(nodeName)
|
||||
nodeName = gchalk.WithRed().Bold(nodeName)
|
||||
}
|
||||
|
||||
controlText := prettyPrinter.createFailedControlList(node)
|
||||
if len(controlText) > 0 {
|
||||
controlStyle := color.New(color.FgWhite, color.Faint).SprintFunc()
|
||||
controlText = controlStyle(fmt.Sprintf(" (%s)", controlText))
|
||||
controlText = gchalk.WithWhite().Dim(fmt.Sprintf(" (%s)", controlText))
|
||||
}
|
||||
|
||||
subTree := gotree.New(nodeName + controlText)
|
||||
|
||||
@@ -3,8 +3,9 @@ package printer
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/fatih/color"
|
||||
"github.com/jwalton/gchalk"
|
||||
"github.com/kubescape/kubescape/v2/core/cautils"
|
||||
"github.com/kubescape/opa-utils/reporthandling/apis"
|
||||
"github.com/kubescape/opa-utils/reporthandling/results/v1/reportsummary"
|
||||
@@ -37,10 +38,21 @@ func generateRow(controlSummary reportsummary.IControlSummary, infoToPrintInfo [
|
||||
row[columnCounterFailed] = fmt.Sprintf("%d", controlSummary.NumberOfResources().Failed())
|
||||
row[columnCounterAll] = fmt.Sprintf("%d", controlSummary.NumberOfResources().All())
|
||||
row[columnComplianceScore] = getComplianceScoreColumn(controlSummary, infoToPrintInfo)
|
||||
if row[columnComplianceScore] == "-1%" {
|
||||
row[columnComplianceScore] = "N/A"
|
||||
}
|
||||
|
||||
return row
|
||||
}
|
||||
|
||||
func shortFormatRow(dataRows [][]string) [][]string {
|
||||
rows := [][]string{}
|
||||
for _, dataRow := range dataRows {
|
||||
rows = append(rows, []string{fmt.Sprintf("Severity"+strings.Repeat(" ", 11)+": %+v\nControl Name"+strings.Repeat(" ", 7)+": %+v\nFailed Resources"+strings.Repeat(" ", 3)+": %+v\nAll Resources"+strings.Repeat(" ", 6)+": %+v\n%% Compliance-Score"+strings.Repeat(" ", 1)+": %+v", dataRow[columnSeverity], dataRow[columnName], dataRow[columnCounterFailed], dataRow[columnCounterAll], dataRow[columnComplianceScore])})
|
||||
}
|
||||
return rows
|
||||
}
|
||||
|
||||
func generateRowPdf(controlSummary reportsummary.IControlSummary, infoToPrintInfo []infoStars, verbose bool) []string {
|
||||
row := make([]string, _rowLen)
|
||||
|
||||
@@ -79,20 +91,21 @@ func getComplianceScoreColumn(controlSummary reportsummary.IControlSummary, info
|
||||
}
|
||||
|
||||
func getSeverityColumn(controlSummary reportsummary.IControlSummary) string {
|
||||
return color.New(getColor(apis.ControlSeverityToInt(controlSummary.GetScoreFactor())), color.Bold).SprintFunc()(apis.ControlSeverityToString(controlSummary.GetScoreFactor()))
|
||||
return getColor(apis.ControlSeverityToInt(controlSummary.GetScoreFactor()))(apis.ControlSeverityToString(controlSummary.GetScoreFactor()))
|
||||
}
|
||||
func getColor(controlSeverity int) color.Attribute {
|
||||
|
||||
func getColor(controlSeverity int) (func(...string) string) {
|
||||
switch controlSeverity {
|
||||
case apis.SeverityCritical:
|
||||
return color.FgRed
|
||||
return gchalk.WithAnsi256(1).Bold
|
||||
case apis.SeverityHigh:
|
||||
return color.FgYellow
|
||||
return gchalk.WithAnsi256(196).Bold
|
||||
case apis.SeverityMedium:
|
||||
return color.FgCyan
|
||||
return gchalk.WithAnsi256(166).Bold
|
||||
case apis.SeverityLow:
|
||||
return color.FgWhite
|
||||
return gchalk.WithAnsi256(220).Bold
|
||||
default:
|
||||
return color.FgWhite
|
||||
return gchalk.WithAnsi256(16).Bold
|
||||
}
|
||||
}
|
||||
|
||||
@@ -124,13 +137,19 @@ func getSortedControlsNames(controls reportsummary.ControlSummaries) [][]string
|
||||
}
|
||||
*/
|
||||
|
||||
func getControlTableHeaders() []string {
|
||||
headers := make([]string, _rowLen)
|
||||
headers[columnName] = "CONTROL NAME"
|
||||
headers[columnCounterFailed] = "FAILED RESOURCES"
|
||||
headers[columnCounterAll] = "ALL RESOURCES"
|
||||
headers[columnSeverity] = "SEVERITY"
|
||||
headers[columnComplianceScore] = "% COMPLIANCE-SCORE"
|
||||
func getControlTableHeaders(short bool) []string {
|
||||
var headers []string
|
||||
if short {
|
||||
headers = make([]string, 1)
|
||||
headers[0] = "CONTROLS"
|
||||
} else {
|
||||
headers = make([]string, _rowLen)
|
||||
headers[columnName] = "CONTROL NAME"
|
||||
headers[columnCounterFailed] = "FAILED RESOURCES"
|
||||
headers[columnCounterAll] = "ALL RESOURCES"
|
||||
headers[columnSeverity] = "SEVERITY"
|
||||
headers[columnComplianceScore] = "% COMPLIANCE-SCORE"
|
||||
}
|
||||
return headers
|
||||
}
|
||||
|
||||
|
||||
@@ -51,7 +51,11 @@ func (hp *HtmlPrinter) SetWriter(ctx context.Context, outputFile string) {
|
||||
hp.writer = printer.GetWriter(ctx, outputFile)
|
||||
}
|
||||
|
||||
func (hp *HtmlPrinter) ActionPrint(ctx context.Context, opaSessionObj *cautils.OPASessionObj) {
|
||||
func (hp *HtmlPrinter) PrintNextSteps() {
|
||||
|
||||
}
|
||||
|
||||
func (hp *HtmlPrinter) ActionPrint(ctx context.Context, opaSessionObj *cautils.OPASessionObj, imageScanData []cautils.ImageScanData) {
|
||||
tplFuncMap := template.FuncMap{
|
||||
"sum": func(nums ...int) int {
|
||||
total := 0
|
||||
|
||||
@@ -8,6 +8,8 @@ import (
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/anchore/grype/grype/presenter"
|
||||
"github.com/anchore/grype/grype/presenter/models"
|
||||
logger "github.com/kubescape/go-logger"
|
||||
"github.com/kubescape/go-logger/helpers"
|
||||
"github.com/kubescape/kubescape/v2/core/cautils"
|
||||
@@ -41,17 +43,44 @@ func (jp *JsonPrinter) SetWriter(ctx context.Context, outputFile string) {
|
||||
|
||||
func (jp *JsonPrinter) Score(score float32) {
|
||||
fmt.Fprintf(os.Stderr, "\nOverall compliance-score (100- Excellent, 0- All failed): %d\n", cautils.Float32ToInt(score))
|
||||
|
||||
}
|
||||
|
||||
func (jp *JsonPrinter) ActionPrint(ctx context.Context, opaSessionObj *cautils.OPASessionObj) {
|
||||
r, err := json.Marshal(FinalizeResults(opaSessionObj))
|
||||
if err != nil {
|
||||
logger.L().Ctx(ctx).Fatal("failed to Marshal posture report object")
|
||||
func (jp *JsonPrinter) ActionPrint(ctx context.Context, opaSessionObj *cautils.OPASessionObj, imageScanData []cautils.ImageScanData) {
|
||||
var err error
|
||||
if opaSessionObj != nil {
|
||||
err = printConfigurationsScanning(opaSessionObj, ctx, jp)
|
||||
} else if imageScanData != nil {
|
||||
err = jp.PrintImageScan(ctx, imageScanData[0].PresenterConfig)
|
||||
} else {
|
||||
err = fmt.Errorf("failed to write results, no data provided")
|
||||
}
|
||||
|
||||
if _, err := jp.writer.Write(r); err != nil {
|
||||
if err != nil {
|
||||
logger.L().Ctx(ctx).Error("failed to write results", helpers.Error(err))
|
||||
return
|
||||
}
|
||||
|
||||
printer.LogOutputFile(jp.writer.Name())
|
||||
}
|
||||
|
||||
func printConfigurationsScanning(opaSessionObj *cautils.OPASessionObj, ctx context.Context, jp *JsonPrinter) error {
|
||||
r, err := json.Marshal(FinalizeResults(opaSessionObj))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = jp.writer.Write(r)
|
||||
return err
|
||||
}
|
||||
|
||||
func (jp *JsonPrinter) PrintImageScan(ctx context.Context, scanResults *models.PresenterConfig) error {
|
||||
presenterConfig, _ := presenter.ValidatedConfig("json", "", false)
|
||||
pres := presenter.GetPresenter(presenterConfig, *scanResults)
|
||||
|
||||
return pres.Present(jp.writer)
|
||||
}
|
||||
|
||||
func (jp *JsonPrinter) PrintNextSteps() {
|
||||
|
||||
}
|
||||
|
||||
@@ -112,7 +112,11 @@ func (jp *JunitPrinter) Score(score float32) {
|
||||
fmt.Fprintf(os.Stderr, "\nOverall compliance-score (100- Excellent, 0- All failed): %d\n", cautils.Float32ToInt(score))
|
||||
}
|
||||
|
||||
func (jp *JunitPrinter) ActionPrint(ctx context.Context, opaSessionObj *cautils.OPASessionObj) {
|
||||
func (jp *JunitPrinter) PrintNextSteps() {
|
||||
|
||||
}
|
||||
|
||||
func (jp *JunitPrinter) ActionPrint(ctx context.Context, opaSessionObj *cautils.OPASessionObj, imageScanData []cautils.ImageScanData) {
|
||||
junitResult := testsSuites(opaSessionObj)
|
||||
postureReportStr, err := xml.Marshal(junitResult)
|
||||
if err != nil {
|
||||
|
||||
@@ -63,10 +63,10 @@ func (pp *PdfPrinter) printInfo(m pdf.Maroto, summaryDetails *reportsummary.Summ
|
||||
if infoMap[i].info != "" {
|
||||
m.Row(5, func() {
|
||||
m.Col(12, func() {
|
||||
m.Text(fmt.Sprintf("%v %v", infoMap[i].stars, infoMap[i].info),props.Text{
|
||||
Style: consts.Bold,
|
||||
Align: consts.Left,
|
||||
Size: 8,
|
||||
m.Text(fmt.Sprintf("%v %v", infoMap[i].stars, infoMap[i].info), props.Text{
|
||||
Style: consts.Bold,
|
||||
Align: consts.Left,
|
||||
Size: 8,
|
||||
Extrapolate: false,
|
||||
Color: color.Color{
|
||||
Red: 0,
|
||||
@@ -85,7 +85,11 @@ func (pp *PdfPrinter) printInfo(m pdf.Maroto, summaryDetails *reportsummary.Summ
|
||||
|
||||
}
|
||||
|
||||
func (pp *PdfPrinter) ActionPrint(ctx context.Context, opaSessionObj *cautils.OPASessionObj) {
|
||||
func (pp *PdfPrinter) PrintNextSteps() {
|
||||
|
||||
}
|
||||
|
||||
func (pp *PdfPrinter) ActionPrint(ctx context.Context, opaSessionObj *cautils.OPASessionObj, imageScanData []cautils.ImageScanData) {
|
||||
sortedControlIDs := getSortedControlsIDs(opaSessionObj.Report.SummaryDetails.Controls)
|
||||
|
||||
infoToPrintInfo := mapInfoToPrintInfo(opaSessionObj.Report.SummaryDetails.Controls)
|
||||
@@ -161,7 +165,7 @@ func (pp *PdfPrinter) printFramework(m pdf.Maroto, frameworks []reportsummary.IF
|
||||
|
||||
// printTable creates the PDF table
|
||||
func (pp *PdfPrinter) printTable(m pdf.Maroto, summaryDetails *reportsummary.SummaryDetails, sortedControlIDs [][]string) {
|
||||
headers := getControlTableHeaders()
|
||||
headers := getControlTableHeaders(false)
|
||||
infoToPrintInfoMap := mapInfoToPrintInfo(summaryDetails.Controls)
|
||||
var controls [][]string
|
||||
for i := len(sortedControlIDs) - 1; i >= 0; i-- {
|
||||
|
||||
@@ -8,20 +8,25 @@ import (
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/anchore/grype/grype/presenter/models"
|
||||
"github.com/enescakir/emoji"
|
||||
logger "github.com/kubescape/go-logger"
|
||||
"github.com/kubescape/go-logger/helpers"
|
||||
"github.com/kubescape/k8s-interface/workloadinterface"
|
||||
"github.com/kubescape/kubescape/v2/core/cautils"
|
||||
"github.com/kubescape/kubescape/v2/core/pkg/resultshandling/printer"
|
||||
"github.com/kubescape/kubescape/v2/core/pkg/resultshandling/printer/v2/prettyprinter"
|
||||
"github.com/kubescape/kubescape/v2/core/pkg/resultshandling/printer/v2/prettyprinter/tableprinter/imageprinter"
|
||||
"github.com/kubescape/opa-utils/objectsenvelopes"
|
||||
"github.com/kubescape/opa-utils/reporthandling/apis"
|
||||
helpersv1 "github.com/kubescape/opa-utils/reporthandling/helpers/v1"
|
||||
"github.com/kubescape/opa-utils/reporthandling/results/v1/reportsummary"
|
||||
"github.com/olekukonko/tablewriter"
|
||||
"k8s.io/utils/strings/slices"
|
||||
)
|
||||
|
||||
const (
|
||||
prettyPrinterOutputFile = "report"
|
||||
prettyPrinterOutputExt = ".txt"
|
||||
prettyPrinterOutputFile = "report"
|
||||
prettyPrinterOutputExt = ".txt"
|
||||
clusterScanningScopeInformationLink = "https://github.com/kubescape/regolibrary/tree/master#add-a-framework"
|
||||
)
|
||||
|
||||
var _ printer.IPrinter = &PrettyPrinter{}
|
||||
@@ -32,40 +37,135 @@ type PrettyPrinter struct {
|
||||
viewType cautils.ViewTypes
|
||||
verboseMode bool
|
||||
printAttackTree bool
|
||||
scanType cautils.ScanTypes
|
||||
inputPatterns []string
|
||||
mainPrinter prettyprinter.MainPrinter
|
||||
}
|
||||
|
||||
func NewPrettyPrinter(verboseMode bool, formatVersion string, attackTree bool, viewType cautils.ViewTypes) *PrettyPrinter {
|
||||
return &PrettyPrinter{
|
||||
func NewPrettyPrinter(verboseMode bool, formatVersion string, attackTree bool, viewType cautils.ViewTypes, scanType cautils.ScanTypes, inputPatterns []string) *PrettyPrinter {
|
||||
prettyPrinter := &PrettyPrinter{
|
||||
verboseMode: verboseMode,
|
||||
formatVersion: formatVersion,
|
||||
viewType: viewType,
|
||||
printAttackTree: attackTree,
|
||||
scanType: scanType,
|
||||
inputPatterns: inputPatterns,
|
||||
}
|
||||
|
||||
return prettyPrinter
|
||||
}
|
||||
|
||||
func (pp *PrettyPrinter) SetMainPrinter() {
|
||||
switch pp.scanType {
|
||||
case cautils.ScanTypeCluster:
|
||||
pp.mainPrinter = prettyprinter.NewClusterPrinter(pp.writer)
|
||||
case cautils.ScanTypeRepo:
|
||||
pp.mainPrinter = prettyprinter.NewRepoPrinter(pp.writer, pp.inputPatterns)
|
||||
case cautils.ScanTypeImage:
|
||||
pp.mainPrinter = prettyprinter.NewImagePrinter(pp.writer, pp.verboseMode)
|
||||
case cautils.ScanTypeWorkload:
|
||||
pp.mainPrinter = prettyprinter.NewWorkloadPrinter(pp.writer)
|
||||
default:
|
||||
pp.mainPrinter = prettyprinter.NewSummaryPrinter(pp.writer, pp.verboseMode)
|
||||
}
|
||||
}
|
||||
|
||||
func (pp *PrettyPrinter) ActionPrint(_ context.Context, opaSessionObj *cautils.OPASessionObj) {
|
||||
fmt.Fprintf(pp.writer, "\n"+getSeparator("^")+"\n")
|
||||
func (pp *PrettyPrinter) PrintNextSteps() {
|
||||
pp.mainPrinter.PrintNextSteps()
|
||||
}
|
||||
|
||||
sortedControlIDs := getSortedControlsIDs(opaSessionObj.Report.SummaryDetails.Controls) // ListControls().All())
|
||||
// convertToImageScanSummary takes a list of image scan data and converts it to a single image scan summary
|
||||
func (pp *PrettyPrinter) convertToImageScanSummary(imageScanData []cautils.ImageScanData) (*imageprinter.ImageScanSummary, error) {
|
||||
imageScanSummary := imageprinter.ImageScanSummary{
|
||||
CVEs: []imageprinter.CVE{},
|
||||
PackageScores: map[string]*imageprinter.PackageScore{},
|
||||
MapsSeverityToSummary: map[string]*imageprinter.SeveritySummary{},
|
||||
}
|
||||
|
||||
switch pp.viewType {
|
||||
case cautils.ControlViewType:
|
||||
pp.printResults(&opaSessionObj.Report.SummaryDetails.Controls, opaSessionObj.AllResources, sortedControlIDs)
|
||||
case cautils.ResourceViewType:
|
||||
if pp.verboseMode {
|
||||
pp.resourceTable(opaSessionObj)
|
||||
for i := range imageScanData {
|
||||
if !slices.Contains(imageScanSummary.Images, imageScanData[i].Image) {
|
||||
imageScanSummary.Images = append(imageScanSummary.Images, imageScanData[i].Image)
|
||||
}
|
||||
|
||||
presenterConfig := imageScanData[i].PresenterConfig
|
||||
doc, err := models.NewDocument(presenterConfig.Packages, presenterConfig.Context, presenterConfig.Matches, presenterConfig.IgnoredMatches, presenterConfig.MetadataProvider, nil, presenterConfig.DBStatus)
|
||||
if err != nil {
|
||||
logger.L().Error(fmt.Sprintf("failed to create document for image: %v", imageScanData[i].Image), helpers.Error(err))
|
||||
continue
|
||||
}
|
||||
|
||||
CVEs := extractCVEs(doc.Matches)
|
||||
imageScanSummary.CVEs = append(imageScanSummary.CVEs, CVEs...)
|
||||
|
||||
setPkgNameToScoreMap(doc.Matches, imageScanSummary.PackageScores)
|
||||
|
||||
setSeverityToSummaryMap(CVEs, imageScanSummary.MapsSeverityToSummary)
|
||||
}
|
||||
|
||||
return &imageScanSummary, nil
|
||||
}
|
||||
|
||||
func (pp *PrettyPrinter) PrintImageScan(imageScanData []cautils.ImageScanData) {
|
||||
imageScanSummary, err := pp.convertToImageScanSummary(imageScanData)
|
||||
if err != nil {
|
||||
logger.L().Error("failed to convert to image scan summary", helpers.Error(err))
|
||||
return
|
||||
}
|
||||
pp.mainPrinter.PrintImageScanning(imageScanSummary)
|
||||
}
|
||||
|
||||
func (pp *PrettyPrinter) ActionPrint(_ context.Context, opaSessionObj *cautils.OPASessionObj, imageScanData []cautils.ImageScanData) {
|
||||
if opaSessionObj != nil {
|
||||
fmt.Fprintf(pp.writer, "\n"+getSeparator("^")+"\n")
|
||||
|
||||
sortedControlIDs := getSortedControlsIDs(opaSessionObj.Report.SummaryDetails.Controls) // ListControls().All())
|
||||
|
||||
switch pp.viewType {
|
||||
case cautils.ControlViewType:
|
||||
pp.printResults(&opaSessionObj.Report.SummaryDetails.Controls, opaSessionObj.AllResources, sortedControlIDs)
|
||||
case cautils.ResourceViewType:
|
||||
if pp.verboseMode {
|
||||
pp.resourceTable(opaSessionObj)
|
||||
}
|
||||
}
|
||||
|
||||
pp.printOverview(opaSessionObj, pp.verboseMode)
|
||||
|
||||
pp.mainPrinter.PrintConfigurationsScanning(&opaSessionObj.Report.SummaryDetails, sortedControlIDs)
|
||||
|
||||
// When writing to Stdout, we aren’t really writing to an output file,
|
||||
// so no need to print that we are
|
||||
if pp.writer.Name() != os.Stdout.Name() {
|
||||
printer.LogOutputFile(pp.writer.Name())
|
||||
}
|
||||
|
||||
pp.printAttackTracks(opaSessionObj)
|
||||
}
|
||||
|
||||
if len(imageScanData) > 0 {
|
||||
pp.PrintImageScan(imageScanData)
|
||||
}
|
||||
}
|
||||
|
||||
func (pp *PrettyPrinter) printOverview(opaSessionObj *cautils.OPASessionObj, printExtraLine bool) {
|
||||
if printExtraLine {
|
||||
fmt.Fprintf(pp.writer, "\n")
|
||||
}
|
||||
|
||||
pp.printHeader(opaSessionObj)
|
||||
}
|
||||
|
||||
func (pp *PrettyPrinter) printHeader(opaSessionObj *cautils.OPASessionObj) {
|
||||
if pp.scanType == cautils.ScanTypeCluster || pp.scanType == cautils.ScanTypeRepo {
|
||||
cautils.InfoDisplay(pp.writer, "\nSecurity Overview\n\n")
|
||||
} else if pp.scanType == cautils.ScanTypeWorkload {
|
||||
ns := opaSessionObj.SingleResourceScan.GetNamespace()
|
||||
if ns == "" {
|
||||
cautils.InfoDisplay(pp.writer, "Workload - Kind: %s, Name: %s\n\n", opaSessionObj.SingleResourceScan.GetKind(), opaSessionObj.SingleResourceScan.GetName())
|
||||
} else {
|
||||
cautils.InfoDisplay(pp.writer, "Workload - Namespace: %s, Kind: %s, Name: %s\n\n", opaSessionObj.SingleResourceScan.GetNamespace(), opaSessionObj.SingleResourceScan.GetKind(), opaSessionObj.SingleResourceScan.GetName())
|
||||
}
|
||||
}
|
||||
|
||||
pp.printSummaryTable(&opaSessionObj.Report.SummaryDetails, sortedControlIDs)
|
||||
|
||||
// When writing to Stdout, we aren’t really writing to an output file,
|
||||
// so no need to print that we are
|
||||
if pp.writer.Name() != os.Stdout.Name() {
|
||||
printer.LogOutputFile(pp.writer.Name())
|
||||
}
|
||||
|
||||
pp.printAttackTracks(opaSessionObj)
|
||||
}
|
||||
|
||||
func (pp *PrettyPrinter) SetWriter(ctx context.Context, outputFile string) {
|
||||
@@ -74,6 +174,7 @@ func (pp *PrettyPrinter) SetWriter(ctx context.Context, outputFile string) {
|
||||
// otherwise
|
||||
if outputFile == os.Stdout.Name() {
|
||||
pp.writer = printer.GetWriter(ctx, "")
|
||||
pp.SetMainPrinter()
|
||||
return
|
||||
}
|
||||
|
||||
@@ -85,6 +186,8 @@ func (pp *PrettyPrinter) SetWriter(ctx context.Context, outputFile string) {
|
||||
}
|
||||
|
||||
pp.writer = printer.GetWriter(ctx, outputFile)
|
||||
|
||||
pp.SetMainPrinter()
|
||||
}
|
||||
|
||||
func (pp *PrettyPrinter) Score(score float32) {
|
||||
@@ -113,6 +216,7 @@ func (prettyPrinter *PrettyPrinter) printSummary(controlName string, controlSumm
|
||||
cautils.DescriptionDisplay(prettyPrinter.writer, "\n")
|
||||
|
||||
}
|
||||
|
||||
func (prettyPrinter *PrettyPrinter) printTitle(controlSummary reportsummary.IControlSummary) {
|
||||
cautils.InfoDisplay(prettyPrinter.writer, "[control: %s - %s] ", controlSummary.GetName(), cautils.GetControlLink(controlSummary.GetID()))
|
||||
statusDetails := ""
|
||||
@@ -132,6 +236,7 @@ func (prettyPrinter *PrettyPrinter) printTitle(controlSummary reportsummary.ICon
|
||||
cautils.WarningDisplay(prettyPrinter.writer, "Reason: %v\n", controlSummary.GetStatus().Info())
|
||||
}
|
||||
}
|
||||
|
||||
func (pp *PrettyPrinter) printResources(controlSummary reportsummary.IControlSummary, allResources map[string]workloadinterface.IMetadata) {
|
||||
|
||||
workloadsSummary := listResultSummary(controlSummary, allResources)
|
||||
@@ -199,67 +304,6 @@ func generateRelatedObjectsStr(workload WorkloadSummary) string {
|
||||
}
|
||||
return relatedStr
|
||||
}
|
||||
func generateFooter(summaryDetails *reportsummary.SummaryDetails) []string {
|
||||
// Severity | Control name | failed resources | all resources | % success
|
||||
row := make([]string, _rowLen)
|
||||
row[columnName] = "Resource Summary"
|
||||
row[columnCounterFailed] = fmt.Sprintf("%d", summaryDetails.NumberOfResources().Failed())
|
||||
row[columnCounterAll] = fmt.Sprintf("%d", summaryDetails.NumberOfResources().All())
|
||||
row[columnSeverity] = " "
|
||||
row[columnComplianceScore] = fmt.Sprintf("%.2f%s", summaryDetails.ComplianceScore, "%")
|
||||
|
||||
return row
|
||||
}
|
||||
func (pp *PrettyPrinter) printSummaryTable(summaryDetails *reportsummary.SummaryDetails, sortedControlIDs [][]string) {
|
||||
|
||||
if summaryDetails.NumberOfControls().All() == 0 {
|
||||
fmt.Fprintf(pp.writer, "\nKubescape did not scan any of the resources, make sure you are scanning valid kubernetes manifests (Deployments, Pods, etc.)\n")
|
||||
return
|
||||
}
|
||||
cautils.InfoTextDisplay(pp.writer, "\n"+controlCountersForSummary(summaryDetails.NumberOfControls())+"\n")
|
||||
cautils.InfoTextDisplay(pp.writer, renderSeverityCountersSummary(summaryDetails.GetResourcesSeverityCounters())+"\n\n")
|
||||
|
||||
// cautils.InfoTextDisplay(prettyPrinter.writer, "\n"+"Severities: SOME OTHER"+"\n\n")
|
||||
|
||||
summaryTable := tablewriter.NewWriter(pp.writer)
|
||||
summaryTable.SetAutoWrapText(false)
|
||||
summaryTable.SetHeader(getControlTableHeaders())
|
||||
summaryTable.SetHeaderLine(true)
|
||||
summaryTable.SetColumnAlignment(getColumnsAlignments())
|
||||
|
||||
printAll := pp.verboseMode
|
||||
if summaryDetails.NumberOfResources().Failed() == 0 {
|
||||
// if there are no failed controls, print the resource table and detailed information
|
||||
printAll = true
|
||||
}
|
||||
|
||||
infoToPrintInfo := mapInfoToPrintInfo(summaryDetails.Controls)
|
||||
for i := len(sortedControlIDs) - 1; i >= 0; i-- {
|
||||
for _, c := range sortedControlIDs[i] {
|
||||
row := generateRow(summaryDetails.Controls.GetControl(reportsummary.EControlCriteriaID, c), infoToPrintInfo, printAll)
|
||||
if len(row) > 0 {
|
||||
summaryTable.Append(row)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
summaryTable.SetFooter(generateFooter(summaryDetails))
|
||||
|
||||
summaryTable.Render()
|
||||
|
||||
// When scanning controls the framework list will be empty
|
||||
cautils.InfoTextDisplay(pp.writer, frameworksScoresToString(summaryDetails.ListFrameworks()))
|
||||
|
||||
pp.printInfo(infoToPrintInfo)
|
||||
|
||||
}
|
||||
|
||||
func (pp *PrettyPrinter) printInfo(infoToPrintInfo []infoStars) {
|
||||
fmt.Println()
|
||||
for i := range infoToPrintInfo {
|
||||
cautils.InfoDisplay(pp.writer, fmt.Sprintf("%s %s\n", infoToPrintInfo[i].stars, infoToPrintInfo[i].info))
|
||||
}
|
||||
}
|
||||
|
||||
func frameworksScoresToString(frameworks []reportsummary.IFrameworkSummary) string {
|
||||
if len(frameworks) == 1 {
|
||||
@@ -279,26 +323,6 @@ func frameworksScoresToString(frameworks []reportsummary.IFrameworkSummary) stri
|
||||
return ""
|
||||
}
|
||||
|
||||
// renderSeverityCountersSummary renders the string that reports severity counters summary
|
||||
func renderSeverityCountersSummary(counters reportsummary.ISeverityCounters) string {
|
||||
critical := counters.NumberOfCriticalSeverity()
|
||||
high := counters.NumberOfHighSeverity()
|
||||
medium := counters.NumberOfMediumSeverity()
|
||||
low := counters.NumberOfLowSeverity()
|
||||
|
||||
return fmt.Sprintf(
|
||||
"Failed Resources by Severity: Critical — %d, High — %d, Medium — %d, Low — %d",
|
||||
critical, high, medium, low,
|
||||
)
|
||||
}
|
||||
|
||||
func controlCountersForSummary(counters reportsummary.ICounters) string {
|
||||
return fmt.Sprintf("Controls: %d (Failed: %d, Passed: %d, Action Required: %d)", counters.All(), counters.Failed(), counters.Passed(), counters.Skipped())
|
||||
}
|
||||
|
||||
func controlCountersForResource(l *helpersv1.AllLists) string {
|
||||
return fmt.Sprintf("Controls: %d (Failed: %d, action required: %d)", l.Len(), l.Failed(), l.Skipped())
|
||||
}
|
||||
func getSeparator(sep string) string {
|
||||
s := ""
|
||||
for i := 0; i < 80; i++ {
|
||||
|
||||
@@ -0,0 +1,71 @@
|
||||
package prettyprinter
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/kubescape/kubescape/v2/core/cautils"
|
||||
"github.com/kubescape/kubescape/v2/core/pkg/resultshandling/printer/v2/prettyprinter/tableprinter/configurationprinter"
|
||||
"github.com/kubescape/kubescape/v2/core/pkg/resultshandling/printer/v2/prettyprinter/tableprinter/imageprinter"
|
||||
"github.com/kubescape/opa-utils/reporthandling/results/v1/reportsummary"
|
||||
)
|
||||
|
||||
type ClusterPrinter struct {
|
||||
writer *os.File
|
||||
categoriesTablePrinter configurationprinter.TablePrinter
|
||||
}
|
||||
|
||||
func NewClusterPrinter(writer *os.File) *ClusterPrinter {
|
||||
return &ClusterPrinter{
|
||||
writer: writer,
|
||||
categoriesTablePrinter: configurationprinter.NewClusterPrinter(),
|
||||
}
|
||||
}
|
||||
|
||||
var _ MainPrinter = &ClusterPrinter{}
|
||||
|
||||
func (cp *ClusterPrinter) PrintImageScanning(summary *imageprinter.ImageScanSummary) {
|
||||
printImageScanningSummary(cp.writer, *summary, false)
|
||||
printImagesCommands(cp.writer, *summary)
|
||||
}
|
||||
|
||||
func (cp *ClusterPrinter) PrintConfigurationsScanning(summaryDetails *reportsummary.SummaryDetails, sortedControlIDs [][]string) {
|
||||
|
||||
cp.categoriesTablePrinter.PrintCategoriesTables(cp.writer, summaryDetails, sortedControlIDs)
|
||||
|
||||
printComplianceScore(cp.writer, filterComplianceFrameworks(summaryDetails.ListFrameworks()))
|
||||
|
||||
if len(summaryDetails.TopWorkloadsByScore) > 0 {
|
||||
cp.printTopWorkloads(summaryDetails)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (cp *ClusterPrinter) PrintNextSteps() {
|
||||
printNextSteps(cp.writer, cp.getNextSteps(), false)
|
||||
}
|
||||
|
||||
func (cp *ClusterPrinter) getNextSteps() []string {
|
||||
return []string{
|
||||
configScanVerboseRunText,
|
||||
installHelmText,
|
||||
CICDSetupText,
|
||||
}
|
||||
}
|
||||
|
||||
func (cp *ClusterPrinter) printTopWorkloads(summaryDetails *reportsummary.SummaryDetails) {
|
||||
cautils.InfoTextDisplay(cp.writer, getTopWorkloadsTitle(len(summaryDetails.TopWorkloadsByScore)))
|
||||
|
||||
for i, wl := range summaryDetails.TopWorkloadsByScore {
|
||||
ns := wl.GetNamespace()
|
||||
name := wl.GetName()
|
||||
kind := wl.GetKind()
|
||||
cautils.SimpleDisplay(cp.writer, fmt.Sprintf("%d. namespace: %s, name: %s, kind: %s - '%s'\n", i+1, ns, name, kind, getCallToActionString(cp.getWorkloadScanCommand(ns, kind, name))))
|
||||
}
|
||||
|
||||
cautils.InfoTextDisplay(cp.writer, "\n")
|
||||
}
|
||||
|
||||
func (cp *ClusterPrinter) getWorkloadScanCommand(namespace, kind, name string) string {
|
||||
return fmt.Sprintf("$ kubescape scan workload %s/%s --namespace %s", kind, name, namespace)
|
||||
}
|
||||
@@ -0,0 +1,35 @@
|
||||
package prettyprinter
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestClusterScan_getNextSteps(t *testing.T) {
|
||||
clusterPrinter := &ClusterPrinter{}
|
||||
|
||||
nextSteps := clusterPrinter.getNextSteps()
|
||||
|
||||
if len(nextSteps) != 3 {
|
||||
t.Errorf("Expected 3 next steps, got %d", len(nextSteps))
|
||||
}
|
||||
|
||||
if nextSteps[0] != configScanVerboseRunText {
|
||||
t.Errorf("Expected %s, got %s", configScanVerboseRunText, nextSteps[0])
|
||||
}
|
||||
|
||||
if nextSteps[1] != installHelmText {
|
||||
t.Errorf("Expected %s, got %s", installHelmText, nextSteps[1])
|
||||
}
|
||||
|
||||
if nextSteps[2] != CICDSetupText {
|
||||
t.Errorf("Expected %s, got %s", CICDSetupText, nextSteps[2])
|
||||
}
|
||||
}
|
||||
|
||||
func TestClusterScan_getWorkloadScanCommand(t *testing.T) {
|
||||
clusterPrinter := &ClusterPrinter{}
|
||||
|
||||
command := clusterPrinter.getWorkloadScanCommand("ns", "kind", "name")
|
||||
|
||||
if command != "$ kubescape scan workload kind/name --namespace ns" {
|
||||
t.Errorf("Expected $ kubescape scan workload kind/name --namespace ns, got %s", command)
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,39 @@
|
||||
package prettyprinter
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/kubescape/kubescape/v2/core/pkg/resultshandling/printer/v2/prettyprinter/tableprinter/configurationprinter"
|
||||
"github.com/kubescape/kubescape/v2/core/pkg/resultshandling/printer/v2/prettyprinter/tableprinter/imageprinter"
|
||||
"github.com/kubescape/opa-utils/reporthandling/results/v1/reportsummary"
|
||||
)
|
||||
|
||||
var _ MainPrinter = &SummaryPrinter{}
|
||||
|
||||
type SummaryPrinter struct {
|
||||
writer *os.File
|
||||
verboseMode bool
|
||||
summaryTablePrinter configurationprinter.TablePrinter
|
||||
}
|
||||
|
||||
func NewSummaryPrinter(writer *os.File, verboseMode bool) *SummaryPrinter {
|
||||
return &SummaryPrinter{
|
||||
writer: writer,
|
||||
verboseMode: verboseMode,
|
||||
summaryTablePrinter: configurationprinter.NewFrameworkPrinter(verboseMode),
|
||||
}
|
||||
}
|
||||
|
||||
var _ MainPrinter = &RepoPrinter{}
|
||||
|
||||
// PrintImageScanning is a no-op: the summary view does not render image results.
func (sp *SummaryPrinter) PrintImageScanning(*imageprinter.ImageScanSummary) {}
|
||||
|
||||
// PrintNextSteps is a no-op: the summary view prints no follow-up suggestions.
func (sp *SummaryPrinter) PrintNextSteps() {}
|
||||
|
||||
func (sp *SummaryPrinter) getVerboseMode() bool {
|
||||
return sp.verboseMode
|
||||
}
|
||||
|
||||
func (sp *SummaryPrinter) PrintConfigurationsScanning(summaryDetails *reportsummary.SummaryDetails, sortedControlIDs [][]string) {
|
||||
sp.summaryTablePrinter.PrintSummaryTable(sp.writer, summaryDetails, sortedControlIDs)
|
||||
}
|
||||
@@ -0,0 +1,55 @@
|
||||
package prettyprinter
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/kubescape/kubescape/v2/core/cautils"
|
||||
"github.com/kubescape/kubescape/v2/core/pkg/resultshandling/printer/v2/prettyprinter/tableprinter/imageprinter"
|
||||
"github.com/kubescape/opa-utils/reporthandling/results/v1/reportsummary"
|
||||
)
|
||||
|
||||
const (
|
||||
TopPackagesNumber = 5 // number of top packages to display
|
||||
)
|
||||
|
||||
type ImagePrinter struct {
|
||||
writer *os.File
|
||||
imageTablePrinter imageprinter.TablePrinter
|
||||
verboseMode bool
|
||||
}
|
||||
|
||||
func NewImagePrinter(writer *os.File, verboseMode bool) *ImagePrinter {
|
||||
return &ImagePrinter{
|
||||
writer: writer,
|
||||
verboseMode: verboseMode,
|
||||
imageTablePrinter: imageprinter.NewTableWriter(),
|
||||
}
|
||||
}
|
||||
|
||||
var _ MainPrinter = &ImagePrinter{}
|
||||
|
||||
func (ip *ImagePrinter) PrintImageScanning(summary *imageprinter.ImageScanSummary) {
|
||||
ip.PrintImageScanningTable(*summary)
|
||||
printImageScanningSummary(ip.writer, *summary, ip.verboseMode)
|
||||
printTopComponents(ip.writer, *summary)
|
||||
}
|
||||
|
||||
func (ip *ImagePrinter) PrintImageScanningTable(summary imageprinter.ImageScanSummary) {
|
||||
if !ip.verboseMode {
|
||||
summary.CVEs = getFilteredCVEs(summary.CVEs)
|
||||
}
|
||||
|
||||
ip.imageTablePrinter.PrintImageScanningTable(ip.writer, summary)
|
||||
cautils.InfoTextDisplay(ip.writer, "\n")
|
||||
}
|
||||
|
||||
func (ip *ImagePrinter) PrintConfigurationsScanning(summaryDetails *reportsummary.SummaryDetails, sortedControlIDs [][]string) {
|
||||
}
|
||||
|
||||
func (ip *ImagePrinter) PrintNextSteps() {
|
||||
if ip.verboseMode {
|
||||
printNextSteps(ip.writer, []string{CICDSetupText, installHelmText}, true)
|
||||
return
|
||||
}
|
||||
printNextSteps(ip.writer, []string{imageScanVerboseRunText, CICDSetupText, installHelmText}, true)
|
||||
}
|
||||
@@ -0,0 +1,12 @@
|
||||
package prettyprinter
|
||||
|
||||
import (
|
||||
"github.com/kubescape/kubescape/v2/core/pkg/resultshandling/printer/v2/prettyprinter/tableprinter/imageprinter"
|
||||
"github.com/kubescape/opa-utils/reporthandling/results/v1/reportsummary"
|
||||
)
|
||||
|
||||
// MainPrinter renders the final, user-facing sections of a scan report.
// Implementations exist per scan context (cluster, repo, image, summary).
type MainPrinter interface {
|
||||
// PrintConfigurationsScanning renders the configuration-scan results.
PrintConfigurationsScanning(summaryDetails *reportsummary.SummaryDetails, sortedControls [][]string)
|
||||
// PrintImageScanning renders the image-scan results.
PrintImageScanning(imageScanSummary *imageprinter.ImageScanSummary)
|
||||
// PrintNextSteps prints suggested follow-up actions.
PrintNextSteps()
|
||||
}
|
||||
@@ -0,0 +1,83 @@
|
||||
package prettyprinter
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/kubescape/kubescape/v2/core/cautils"
|
||||
"github.com/kubescape/kubescape/v2/core/pkg/resultshandling/printer/v2/prettyprinter/tableprinter/configurationprinter"
|
||||
"github.com/kubescape/kubescape/v2/core/pkg/resultshandling/printer/v2/prettyprinter/tableprinter/imageprinter"
|
||||
"github.com/kubescape/opa-utils/reporthandling"
|
||||
"github.com/kubescape/opa-utils/reporthandling/results/v1/reportsummary"
|
||||
)
|
||||
|
||||
type RepoPrinter struct {
|
||||
writer *os.File
|
||||
inputPatterns []string
|
||||
categoriesTablePrinter configurationprinter.TablePrinter
|
||||
}
|
||||
|
||||
func NewRepoPrinter(writer *os.File, inputPatterns []string) *RepoPrinter {
|
||||
return &RepoPrinter{
|
||||
writer: writer,
|
||||
categoriesTablePrinter: configurationprinter.NewRepoPrinter(inputPatterns),
|
||||
}
|
||||
}
|
||||
|
||||
var _ MainPrinter = &RepoPrinter{}
|
||||
|
||||
func (rp *RepoPrinter) PrintImageScanning(summary *imageprinter.ImageScanSummary) {
|
||||
printImageScanningSummary(rp.writer, *summary, false)
|
||||
printImagesCommands(rp.writer, *summary)
|
||||
printTopComponents(rp.writer, *summary)
|
||||
}
|
||||
|
||||
func (rp *RepoPrinter) PrintConfigurationsScanning(summaryDetails *reportsummary.SummaryDetails, sortedControlIDs [][]string) {
|
||||
rp.categoriesTablePrinter.PrintCategoriesTables(rp.writer, summaryDetails, sortedControlIDs)
|
||||
|
||||
if len(summaryDetails.TopWorkloadsByScore) > 1 {
|
||||
rp.printTopWorkloads(summaryDetails)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (rp *RepoPrinter) PrintNextSteps() {
|
||||
printNextSteps(rp.writer, rp.getNextSteps(), false)
|
||||
}
|
||||
|
||||
func (rp *RepoPrinter) getNextSteps() []string {
|
||||
return []string{
|
||||
configScanVerboseRunText,
|
||||
clusterScanRunText,
|
||||
CICDSetupText,
|
||||
installHelmText,
|
||||
}
|
||||
}
|
||||
|
||||
func (rp *RepoPrinter) printTopWorkloads(summaryDetails *reportsummary.SummaryDetails) {
|
||||
cautils.InfoTextDisplay(rp.writer, getTopWorkloadsTitle(len(summaryDetails.TopWorkloadsByScore)))
|
||||
|
||||
for i, wl := range summaryDetails.TopWorkloadsByScore {
|
||||
ns := wl.GetNamespace()
|
||||
name := wl.GetName()
|
||||
kind := wl.GetKind()
|
||||
cmdPrefix := getWorkloadPrefixForCmd(ns, kind, name)
|
||||
cautils.SimpleDisplay(rp.writer, fmt.Sprintf("%d. %s - '%s'\n", i+1, cmdPrefix, getCallToActionString(rp.getWorkloadScanCommand(ns, kind, name, *wl.GetSource()))))
|
||||
}
|
||||
|
||||
cautils.InfoTextDisplay(rp.writer, "\n")
|
||||
}
|
||||
|
||||
func (rp *RepoPrinter) getWorkloadScanCommand(ns, kind, name string, source reporthandling.Source) string {
|
||||
cmd := fmt.Sprintf("$ kubescape scan workload %s/%s --namespace %s", kind, name, ns)
|
||||
if ns == "" {
|
||||
cmd = fmt.Sprintf("$ kubescape scan workload %s/%s", kind, name)
|
||||
}
|
||||
|
||||
if source.FileType == reporthandling.SourceTypeHelmChart {
|
||||
return fmt.Sprintf("%s --chart-path=%s --file-path=%s", cmd, source.HelmPath, fmt.Sprintf("%s/%s", source.Path, source.RelativePath))
|
||||
|
||||
} else {
|
||||
return fmt.Sprintf("%s --file-path=%s", cmd, fmt.Sprintf("%s/%s", source.Path, source.RelativePath))
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,102 @@
|
||||
package prettyprinter
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/kubescape/opa-utils/reporthandling"
|
||||
)
|
||||
|
||||
func TestRepoScan_getNextSteps(t *testing.T) {
|
||||
repoPrinter := &RepoPrinter{}
|
||||
|
||||
nextSteps := repoPrinter.getNextSteps()
|
||||
|
||||
if len(nextSteps) != 4 {
|
||||
t.Errorf("Expected 4 next steps, got %d", len(nextSteps))
|
||||
}
|
||||
|
||||
if nextSteps[0] != configScanVerboseRunText {
|
||||
t.Errorf("Expected %s, got %s", configScanVerboseRunText, nextSteps[0])
|
||||
}
|
||||
|
||||
if nextSteps[1] != clusterScanRunText {
|
||||
t.Errorf("Expected %s, got %s", clusterScanRunText, nextSteps[1])
|
||||
}
|
||||
|
||||
if nextSteps[2] != CICDSetupText {
|
||||
t.Errorf("Expected %s, got %s", CICDSetupText, nextSteps[2])
|
||||
}
|
||||
|
||||
if nextSteps[3] != installHelmText {
|
||||
t.Errorf("Expected %s, got %s", installHelmText, nextSteps[3])
|
||||
}
|
||||
}
|
||||
|
||||
func TestRepoScan_getWorkloadScanCommand(t *testing.T) {
|
||||
test := []struct {
|
||||
testName string
|
||||
ns string
|
||||
kind string
|
||||
name string
|
||||
source reporthandling.Source
|
||||
want string
|
||||
}{
|
||||
{
|
||||
testName: "file path",
|
||||
ns: "ns",
|
||||
kind: "kind",
|
||||
name: "name",
|
||||
source: reporthandling.Source{
|
||||
Path: "path",
|
||||
RelativePath: "relativePath",
|
||||
},
|
||||
want: "$ kubescape scan workload kind/name --namespace ns --file-path=path/relativePath",
|
||||
},
|
||||
{
|
||||
testName: "helm path",
|
||||
ns: "ns",
|
||||
kind: "kind",
|
||||
name: "name",
|
||||
source: reporthandling.Source{
|
||||
Path: "path",
|
||||
RelativePath: "relativePath",
|
||||
HelmPath: "helmPath",
|
||||
FileType: "Helm Chart",
|
||||
},
|
||||
want: "$ kubescape scan workload kind/name --namespace ns --chart-path=helmPath --file-path=path/relativePath",
|
||||
},
|
||||
{
|
||||
testName: "file path - no namespace",
|
||||
kind: "kind",
|
||||
name: "name",
|
||||
source: reporthandling.Source{
|
||||
Path: "path",
|
||||
RelativePath: "relativePath",
|
||||
},
|
||||
want: "$ kubescape scan workload kind/name --file-path=path/relativePath",
|
||||
},
|
||||
{
|
||||
testName: "helm path - no namespace",
|
||||
kind: "kind",
|
||||
name: "name",
|
||||
source: reporthandling.Source{
|
||||
Path: "path",
|
||||
RelativePath: "relativePath",
|
||||
HelmPath: "helmPath",
|
||||
FileType: "Helm Chart",
|
||||
},
|
||||
want: "$ kubescape scan workload kind/name --chart-path=helmPath --file-path=path/relativePath",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range test {
|
||||
t.Run(tt.testName, func(t *testing.T) {
|
||||
repoPrinter := &RepoPrinter{}
|
||||
|
||||
if got := repoPrinter.getWorkloadScanCommand(tt.ns, tt.kind, tt.name, tt.source); got != tt.want {
|
||||
t.Errorf("in test %s failed, got = %v, want %v", tt.testName, got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
}
|
||||
@@ -0,0 +1,120 @@
|
||||
package configurationprinter
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/kubescape/kubescape/v2/core/cautils"
|
||||
"github.com/kubescape/kubescape/v2/core/pkg/resultshandling/printer/v2/prettyprinter/tableprinter/utils"
|
||||
"github.com/kubescape/opa-utils/reporthandling/apis"
|
||||
"github.com/kubescape/opa-utils/reporthandling/results/v1/reportsummary"
|
||||
"github.com/olekukonko/tablewriter"
|
||||
)
|
||||
|
||||
const (
|
||||
docsPrefix = "https://hub.armosec.io/docs"
|
||||
scanControlPrefix = "$ kubescape scan control"
|
||||
controlNameHeader = "CONTROL NAME"
|
||||
statusHeader = "STATUS"
|
||||
docsHeader = "DOCS"
|
||||
resourcesHeader = "RESOURCES"
|
||||
runHeader = "RUN"
|
||||
)
|
||||
|
||||
// initializes the table headers and column alignments based on the category type
|
||||
func initCategoryTableData(categoryType CategoryType) ([]string, []int) {
|
||||
if categoryType == TypeCounting {
|
||||
return getCategoryCountingTypeHeaders(), getCountingTypeAlignments()
|
||||
}
|
||||
return getCategoryStatusTypeHeaders(), getStatusTypeAlignments()
|
||||
}
|
||||
|
||||
func getCategoryStatusTypeHeaders() []string {
|
||||
headers := make([]string, 3)
|
||||
headers[0] = controlNameHeader
|
||||
headers[1] = statusHeader
|
||||
headers[2] = docsHeader
|
||||
|
||||
return headers
|
||||
}
|
||||
|
||||
func getCategoryCountingTypeHeaders() []string {
|
||||
headers := make([]string, 3)
|
||||
headers[0] = controlNameHeader
|
||||
headers[1] = resourcesHeader
|
||||
headers[2] = runHeader
|
||||
|
||||
return headers
|
||||
}
|
||||
|
||||
func getStatusTypeAlignments() []int {
|
||||
return []int{tablewriter.ALIGN_LEFT, tablewriter.ALIGN_CENTER, tablewriter.ALIGN_CENTER}
|
||||
}
|
||||
|
||||
func getCountingTypeAlignments() []int {
|
||||
return []int{tablewriter.ALIGN_LEFT, tablewriter.ALIGN_CENTER, tablewriter.ALIGN_LEFT}
|
||||
}
|
||||
|
||||
// returns a row for status type table based on the control summary
|
||||
func generateCategoryStatusRow(controlSummary reportsummary.IControlSummary, infoToPrintInfo []utils.InfoStars) []string {
|
||||
|
||||
// show only passed, failed and action required controls
|
||||
status := controlSummary.GetStatus()
|
||||
if !status.IsFailed() && !status.IsSkipped() && !status.IsPassed() {
|
||||
return nil
|
||||
}
|
||||
|
||||
rows := make([]string, 3)
|
||||
|
||||
rows[0] = controlSummary.GetName()
|
||||
if len(controlSummary.GetName()) > 50 {
|
||||
rows[0] = controlSummary.GetName()[:50] + "..."
|
||||
} else {
|
||||
rows[0] = controlSummary.GetName()
|
||||
}
|
||||
|
||||
rows[1] = utils.GetStatusColor(controlSummary.GetStatus().Status())(getStatus(status, controlSummary, infoToPrintInfo))
|
||||
|
||||
rows[2] = getDocsForControl(controlSummary)
|
||||
|
||||
return rows
|
||||
|
||||
}
|
||||
|
||||
func getStatus(status apis.IStatus, controlSummary reportsummary.IControlSummary, infoToPrintInfo []utils.InfoStars) string {
|
||||
// skipped is shown as action required
|
||||
if status.IsSkipped() {
|
||||
return fmt.Sprintf("%s %s", "action required", GetInfoColumn(controlSummary, infoToPrintInfo))
|
||||
}
|
||||
return string(controlSummary.GetStatus().Status())
|
||||
}
|
||||
|
||||
func getCategoryTableWriter(writer io.Writer, headers []string, columnAligments []int) *tablewriter.Table {
|
||||
table := tablewriter.NewWriter(writer)
|
||||
table.SetHeader(headers)
|
||||
table.SetHeaderLine(true)
|
||||
table.SetColumnAlignment(columnAligments)
|
||||
table.SetAutoWrapText(false)
|
||||
table.SetUnicodeHV(tablewriter.Regular, tablewriter.Regular)
|
||||
var headerColors []tablewriter.Colors
|
||||
for range headers {
|
||||
headerColors = append(headerColors, tablewriter.Colors{tablewriter.Bold, tablewriter.FgHiYellowColor})
|
||||
}
|
||||
table.SetHeaderColor(headerColors...)
|
||||
return table
|
||||
}
|
||||
|
||||
func renderSingleCategory(writer io.Writer, categoryName string, table *tablewriter.Table, rows [][]string, infoToPrintInfo []utils.InfoStars) {
|
||||
cautils.InfoTextDisplay(writer, categoryName+"\n")
|
||||
|
||||
table.ClearRows()
|
||||
table.AppendBulk(rows)
|
||||
|
||||
table.Render()
|
||||
|
||||
if len(infoToPrintInfo) > 0 {
|
||||
printCategoryInfo(writer, infoToPrintInfo)
|
||||
}
|
||||
|
||||
cautils.SimpleDisplay(writer, "\n")
|
||||
}
|
||||
@@ -0,0 +1,189 @@
|
||||
package configurationprinter
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/kubescape/kubescape/v2/core/pkg/resultshandling/printer/v2/prettyprinter/tableprinter/utils"
|
||||
"github.com/kubescape/opa-utils/reporthandling/apis"
|
||||
"github.com/kubescape/opa-utils/reporthandling/results/v1/reportsummary"
|
||||
"github.com/olekukonko/tablewriter"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestInitCategoryTableData(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
categoryType CategoryType
|
||||
expectedHeaders []string
|
||||
expectedAlignments []int
|
||||
}{
|
||||
{
|
||||
name: "Test1",
|
||||
categoryType: TypeCounting,
|
||||
expectedHeaders: []string{"CONTROL NAME", "RESOURCES", "RUN"},
|
||||
expectedAlignments: []int{tablewriter.ALIGN_LEFT, tablewriter.ALIGN_CENTER, tablewriter.ALIGN_LEFT},
|
||||
},
|
||||
{
|
||||
name: "Test2",
|
||||
categoryType: TypeStatus,
|
||||
expectedHeaders: []string{"CONTROL NAME", "STATUS", "DOCS"},
|
||||
expectedAlignments: []int{tablewriter.ALIGN_LEFT, tablewriter.ALIGN_CENTER, tablewriter.ALIGN_CENTER},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
headers, alignments := initCategoryTableData(tt.categoryType)
|
||||
if len(headers) != len(tt.expectedHeaders) {
|
||||
t.Errorf("initCategoryTableData() headers = %v, want %v", headers, tt.expectedHeaders)
|
||||
}
|
||||
if len(alignments) != len(tt.expectedAlignments) {
|
||||
t.Errorf("initCategoryTableData() alignments = %v, want %v", alignments, tt.expectedAlignments)
|
||||
}
|
||||
assert.True(t, reflect.DeepEqual(headers, tt.expectedHeaders))
|
||||
assert.True(t, reflect.DeepEqual(alignments, tt.expectedAlignments))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetCategoryStatusTypeHeaders(t *testing.T) {
|
||||
headers := getCategoryStatusTypeHeaders()
|
||||
|
||||
if len(headers) != 3 {
|
||||
t.Errorf("Expected 3 headers, got %d", len(headers))
|
||||
}
|
||||
|
||||
if headers[0] != controlNameHeader {
|
||||
t.Errorf("Expected %s, got %s", controlNameHeader, headers[0])
|
||||
}
|
||||
|
||||
if headers[1] != statusHeader {
|
||||
t.Errorf("Expected %s, got %s", statusHeader, headers[1])
|
||||
}
|
||||
|
||||
if headers[2] != docsHeader {
|
||||
t.Errorf("Expected %s, got %s", docsHeader, headers[2])
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetCategoryCountingTypeHeaders(t *testing.T) {
|
||||
headers := getCategoryCountingTypeHeaders()
|
||||
|
||||
if len(headers) != 3 {
|
||||
t.Errorf("Expected 3 headers, got %d", len(headers))
|
||||
}
|
||||
|
||||
if headers[0] != controlNameHeader {
|
||||
t.Errorf("Expected %s, got %s", controlNameHeader, headers[0])
|
||||
}
|
||||
|
||||
if headers[1] != resourcesHeader {
|
||||
t.Errorf("Expected %s, got %s", resourcesHeader, headers[1])
|
||||
}
|
||||
|
||||
if headers[2] != runHeader {
|
||||
t.Errorf("Expected %s, got %s", runHeader, headers[2])
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetStatusTypeAlignments(t *testing.T) {
|
||||
alignments := getStatusTypeAlignments()
|
||||
|
||||
if len(alignments) != 3 {
|
||||
t.Errorf("Expected 3 alignments, got %d", len(alignments))
|
||||
}
|
||||
|
||||
if alignments[0] != tablewriter.ALIGN_LEFT {
|
||||
t.Errorf("Expected %d, got %d", tablewriter.ALIGN_LEFT, alignments[0])
|
||||
}
|
||||
|
||||
if alignments[1] != tablewriter.ALIGN_CENTER {
|
||||
t.Errorf("Expected %d, got %d", tablewriter.ALIGN_CENTER, alignments[1])
|
||||
}
|
||||
|
||||
if alignments[2] != tablewriter.ALIGN_CENTER {
|
||||
t.Errorf("Expected %d, got %d", tablewriter.ALIGN_CENTER, alignments[2])
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetCountingTypeAlignments(t *testing.T) {
|
||||
alignments := getCountingTypeAlignments()
|
||||
|
||||
if len(alignments) != 3 {
|
||||
t.Errorf("Expected 3 alignments, got %d", len(alignments))
|
||||
}
|
||||
|
||||
if alignments[0] != tablewriter.ALIGN_LEFT {
|
||||
t.Errorf("Expected %d, got %d", tablewriter.ALIGN_LEFT, alignments[0])
|
||||
}
|
||||
|
||||
if alignments[1] != tablewriter.ALIGN_CENTER {
|
||||
t.Errorf("Expected %d, got %d", tablewriter.ALIGN_CENTER, alignments[1])
|
||||
}
|
||||
|
||||
if alignments[2] != tablewriter.ALIGN_LEFT {
|
||||
t.Errorf("Expected %d, got %d", tablewriter.ALIGN_LEFT, alignments[2])
|
||||
}
|
||||
}
|
||||
|
||||
func TestGenerateCategoryStatusRow(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
controlSummary reportsummary.IControlSummary
|
||||
infoToPrintInfo []utils.InfoStars
|
||||
expectedRows []string
|
||||
}{
|
||||
{
|
||||
name: "failed control",
|
||||
controlSummary: &reportsummary.ControlSummary{
|
||||
Name: "test",
|
||||
Status: apis.StatusFailed,
|
||||
ControlID: "ctrlID",
|
||||
},
|
||||
expectedRows: []string{"test", "failed", "https://hub.armosec.io/docs/ctrlid"},
|
||||
},
|
||||
{
|
||||
name: "skipped control",
|
||||
controlSummary: &reportsummary.ControlSummary{
|
||||
Name: "test",
|
||||
Status: apis.StatusSkipped,
|
||||
StatusInfo: apis.StatusInfo{
|
||||
InnerInfo: "testInfo",
|
||||
},
|
||||
ControlID: "ctrlID",
|
||||
},
|
||||
expectedRows: []string{"test", "action required *", "https://hub.armosec.io/docs/ctrlid"},
|
||||
infoToPrintInfo: []utils.InfoStars{
|
||||
{
|
||||
Info: "testInfo",
|
||||
Stars: "*",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "passed control",
|
||||
controlSummary: &reportsummary.ControlSummary{
|
||||
Name: "test",
|
||||
Status: apis.StatusPassed,
|
||||
ControlID: "ctrlID",
|
||||
},
|
||||
expectedRows: []string{"test", "passed", "https://hub.armosec.io/docs/ctrlid"},
|
||||
},
|
||||
{
|
||||
name: "big name",
|
||||
controlSummary: &reportsummary.ControlSummary{
|
||||
Name: "testtesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttest",
|
||||
Status: apis.StatusFailed,
|
||||
ControlID: "ctrlID",
|
||||
},
|
||||
expectedRows: []string{"testtesttesttesttesttesttesttesttesttesttesttestte...", "failed", "https://hub.armosec.io/docs/ctrlid"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
row := generateCategoryStatusRow(tt.controlSummary, tt.infoToPrintInfo)
|
||||
assert.True(t, reflect.DeepEqual(row, tt.expectedRows))
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,86 @@
|
||||
package configurationprinter
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/jwalton/gchalk"
|
||||
"github.com/kubescape/kubescape/v2/core/pkg/resultshandling/printer/v2/prettyprinter/tableprinter/utils"
|
||||
"github.com/kubescape/opa-utils/reporthandling/results/v1/reportsummary"
|
||||
)
|
||||
|
||||
type ClusterPrinter struct{}
|
||||
|
||||
func NewClusterPrinter() *ClusterPrinter {
|
||||
return &ClusterPrinter{}
|
||||
}
|
||||
|
||||
var _ TablePrinter = &ClusterPrinter{}
|
||||
|
||||
func (cp *ClusterPrinter) PrintSummaryTable(writer io.Writer, summaryDetails *reportsummary.SummaryDetails, sortedControlIDs [][]string) {
|
||||
|
||||
}
|
||||
|
||||
func (cp *ClusterPrinter) PrintCategoriesTables(writer io.Writer, summaryDetails *reportsummary.SummaryDetails, sortedControlIDs [][]string) {
|
||||
|
||||
categoriesToCategoryControls := mapCategoryToSummary(summaryDetails.ListControls(), mapClusterControlsToCategories)
|
||||
|
||||
for _, id := range clusterCategoriesDisplayOrder {
|
||||
categoryControl, ok := categoriesToCategoryControls[id]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
cp.renderSingleCategoryTable(categoryControl.CategoryName, mapCategoryToType[id], writer, categoryControl.controlSummaries, utils.MapInfoToPrintInfoFromIface(categoryControl.controlSummaries))
|
||||
}
|
||||
}
|
||||
|
||||
func (cp *ClusterPrinter) renderSingleCategoryTable(categoryName string, categoryType CategoryType, writer io.Writer, controlSummaries []reportsummary.IControlSummary, infoToPrintInfo []utils.InfoStars) {
|
||||
sortControlSummaries(controlSummaries)
|
||||
|
||||
headers, columnAligments := initCategoryTableData(categoryType)
|
||||
|
||||
table := getCategoryTableWriter(writer, headers, columnAligments)
|
||||
|
||||
var rows [][]string
|
||||
for _, ctrls := range controlSummaries {
|
||||
var row []string
|
||||
if categoryType == TypeCounting {
|
||||
row = cp.generateCountingCategoryRow(ctrls)
|
||||
} else {
|
||||
row = generateCategoryStatusRow(ctrls, infoToPrintInfo)
|
||||
}
|
||||
if len(row) > 0 {
|
||||
rows = append(rows, row)
|
||||
}
|
||||
}
|
||||
|
||||
if len(rows) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
renderSingleCategory(writer, categoryName, table, rows, infoToPrintInfo)
|
||||
|
||||
}
|
||||
|
||||
func (cp *ClusterPrinter) generateCountingCategoryRow(controlSummary reportsummary.IControlSummary) []string {
|
||||
|
||||
row := make([]string, 3)
|
||||
|
||||
row[0] = controlSummary.GetName()
|
||||
|
||||
failedResources := controlSummary.NumberOfResources().Failed()
|
||||
if failedResources > 0 {
|
||||
row[1] = string(gchalk.WithYellow().Bold(fmt.Sprintf("%d", failedResources)))
|
||||
} else {
|
||||
row[1] = fmt.Sprintf("%d", failedResources)
|
||||
}
|
||||
|
||||
row[2] = cp.generateTableNextSteps(controlSummary)
|
||||
|
||||
return row
|
||||
}
|
||||
|
||||
func (cp *ClusterPrinter) generateTableNextSteps(controlSummary reportsummary.IControlSummary) string {
|
||||
return fmt.Sprintf("%s %s -v", scanControlPrefix, controlSummary.GetID())
|
||||
}
|
||||
@@ -0,0 +1,90 @@
|
||||
package configurationprinter
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/kubescape/opa-utils/reporthandling/results/v1/reportsummary"
|
||||
)
|
||||
|
||||
func TestClusterScan_GenerateCountingCategoryRow(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
controlSummary reportsummary.IControlSummary
|
||||
expectedRow []string
|
||||
}{
|
||||
{
|
||||
name: "failed resources",
|
||||
controlSummary: &reportsummary.ControlSummary{
|
||||
ControlID: "ctrl1",
|
||||
Name: "ctrl1",
|
||||
StatusCounters: reportsummary.StatusCounters{
|
||||
FailedResources: 5,
|
||||
PassedResources: 3,
|
||||
SkippedResources: 2,
|
||||
},
|
||||
},
|
||||
expectedRow: []string{"ctrl1", "5", "$ kubescape scan control ctrl1 -v"},
|
||||
},
|
||||
{
|
||||
name: "passed resources",
|
||||
controlSummary: &reportsummary.ControlSummary{
|
||||
ControlID: "ctrl2",
|
||||
Name: "ctrl2",
|
||||
StatusCounters: reportsummary.StatusCounters{
|
||||
PassedResources: 3,
|
||||
},
|
||||
},
|
||||
expectedRow: []string{"ctrl2", "0", "$ kubescape scan control ctrl2 -v"},
|
||||
},
|
||||
}
|
||||
|
||||
clusterPrinter := NewClusterPrinter()
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
row := clusterPrinter.generateCountingCategoryRow(test.controlSummary)
|
||||
|
||||
if len(row) != len(test.expectedRow) {
|
||||
t.Errorf("expected row length %d, got %d", len(test.expectedRow), len(row))
|
||||
}
|
||||
|
||||
for i := range row {
|
||||
if row[i] != test.expectedRow[i] {
|
||||
t.Errorf("expected row %v, got %v", test.expectedRow, row)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestClusterScan_GenerateTableNextSteps(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
controlSummary reportsummary.IControlSummary
|
||||
expectedNextSteps string
|
||||
}{
|
||||
{
|
||||
name: "with id",
|
||||
controlSummary: &reportsummary.ControlSummary{
|
||||
ControlID: "ctrl1",
|
||||
},
|
||||
expectedNextSteps: "$ kubescape scan control ctrl1 -v",
|
||||
}, {
|
||||
name: "empty id",
|
||||
controlSummary: &reportsummary.ControlSummary{},
|
||||
expectedNextSteps: "$ kubescape scan control -v",
|
||||
},
|
||||
}
|
||||
|
||||
clusterPrinter := NewClusterPrinter()
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
nextSteps := clusterPrinter.generateTableNextSteps(test.controlSummary)
|
||||
|
||||
if nextSteps != test.expectedNextSteps {
|
||||
t.Errorf("expected next steps %s, got %s", test.expectedNextSteps, nextSteps)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,148 @@
|
||||
package configurationprinter
|
||||
|
||||
import (
|
||||
"github.com/kubescape/opa-utils/reporthandling/apis"
|
||||
"github.com/kubescape/opa-utils/reporthandling/results/v1/reportsummary"
|
||||
)
|
||||
|
||||
type CategoryControls struct {
|
||||
CategoryName string
|
||||
controlSummaries []reportsummary.IControlSummary
|
||||
Status apis.ScanningStatus
|
||||
}
|
||||
|
||||
type CategoryType string
|
||||
|
||||
const (
|
||||
TypeCounting CategoryType = "COUNTING"
|
||||
TypeStatus CategoryType = "STATUS"
|
||||
|
||||
// Categories to show are hardcoded by ID, so their names are not important. We also want full control over the categories and their order, so a new release of the security checks will not affect the output
|
||||
|
||||
// cluster scan categories
|
||||
controlPlaneCategoryID = "Cat-1"
|
||||
accessControlCategoryID = "Cat-2"
|
||||
secretsCategoryID = "Cat-3"
|
||||
networkCategoryID = "Cat-4"
|
||||
workloadsCategoryID = "Cat-5"
|
||||
|
||||
// workload scan categories
|
||||
supplyChainCategoryID = "Cat-6"
|
||||
resourceManagementCategoryID = "Cat-7"
|
||||
storageCategoryID = "Cat-8"
|
||||
nodeEscapeCategoryID = "Cat-9"
|
||||
)
|
||||
|
||||
var clusterCategoriesDisplayOrder = []string{
|
||||
controlPlaneCategoryID,
|
||||
accessControlCategoryID,
|
||||
secretsCategoryID,
|
||||
networkCategoryID,
|
||||
workloadsCategoryID,
|
||||
}
|
||||
|
||||
var repoCategoriesDisplayOrder = []string{
|
||||
workloadsCategoryID,
|
||||
accessControlCategoryID,
|
||||
secretsCategoryID,
|
||||
networkCategoryID,
|
||||
}
|
||||
|
||||
var workloadCategoriesDisplayOrder = []string{
|
||||
supplyChainCategoryID,
|
||||
resourceManagementCategoryID,
|
||||
storageCategoryID,
|
||||
secretsCategoryID,
|
||||
networkCategoryID,
|
||||
nodeEscapeCategoryID,
|
||||
}
|
||||
|
||||
// map categories to table type. Each table type has a different display
|
||||
var mapCategoryToType = map[string]CategoryType{
|
||||
controlPlaneCategoryID: TypeStatus,
|
||||
accessControlCategoryID: TypeCounting,
|
||||
secretsCategoryID: TypeCounting,
|
||||
networkCategoryID: TypeCounting,
|
||||
workloadsCategoryID: TypeCounting,
|
||||
}
|
||||
|
||||
var mapClusterControlsToCategories = map[string]string{
|
||||
"C-0066": controlPlaneCategoryID,
|
||||
"C-0088": controlPlaneCategoryID,
|
||||
"C-0067": controlPlaneCategoryID,
|
||||
"C-0005": controlPlaneCategoryID,
|
||||
"C-0262": controlPlaneCategoryID,
|
||||
|
||||
"C-0015": accessControlCategoryID,
|
||||
"C-0002": accessControlCategoryID,
|
||||
"C-0007": accessControlCategoryID,
|
||||
"C-0063": accessControlCategoryID,
|
||||
"C-0036": accessControlCategoryID,
|
||||
"C-0039": accessControlCategoryID,
|
||||
"C-0035": accessControlCategoryID,
|
||||
"C-0188": accessControlCategoryID,
|
||||
"C-0187": accessControlCategoryID,
|
||||
|
||||
"C-0012": secretsCategoryID,
|
||||
|
||||
"C-0260": networkCategoryID,
|
||||
"C-0256": networkCategoryID,
|
||||
|
||||
"C-0038": workloadsCategoryID,
|
||||
"C-0041": workloadsCategoryID,
|
||||
"C-0048": workloadsCategoryID,
|
||||
"C-0057": workloadsCategoryID,
|
||||
"C-0013": workloadsCategoryID,
|
||||
}
|
||||
|
||||
var mapWorkloadControlsToCategories = map[string]string{
|
||||
"C-0078": supplyChainCategoryID,
|
||||
"C-0236": supplyChainCategoryID,
|
||||
"C-0237": supplyChainCategoryID,
|
||||
|
||||
"C-0004": resourceManagementCategoryID,
|
||||
"C-0050": resourceManagementCategoryID,
|
||||
|
||||
"C-0045": storageCategoryID,
|
||||
"C-0048": storageCategoryID,
|
||||
"C-0257": storageCategoryID,
|
||||
|
||||
"C-0207": secretsCategoryID,
|
||||
"C-0034": secretsCategoryID,
|
||||
"C-0012": secretsCategoryID,
|
||||
|
||||
"C-0041": networkCategoryID,
|
||||
"C-0260": networkCategoryID,
|
||||
"C-0044": networkCategoryID,
|
||||
|
||||
"C-0038": nodeEscapeCategoryID,
|
||||
"C-0046": nodeEscapeCategoryID,
|
||||
"C-0013": nodeEscapeCategoryID,
|
||||
"C-0016": nodeEscapeCategoryID,
|
||||
"C-0017": nodeEscapeCategoryID,
|
||||
"C-0055": nodeEscapeCategoryID,
|
||||
"C-0057": nodeEscapeCategoryID,
|
||||
}
|
||||
|
||||
var mapRepoControlsToCategories = map[string]string{
|
||||
"C-0015": accessControlCategoryID,
|
||||
"C-0002": accessControlCategoryID,
|
||||
"C-0007": accessControlCategoryID,
|
||||
"C-0063": accessControlCategoryID,
|
||||
"C-0036": accessControlCategoryID,
|
||||
"C-0039": accessControlCategoryID,
|
||||
"C-0035": accessControlCategoryID,
|
||||
"C-0188": accessControlCategoryID,
|
||||
"C-0187": accessControlCategoryID,
|
||||
|
||||
"C-0012": secretsCategoryID,
|
||||
|
||||
"C-0260": networkCategoryID,
|
||||
"C-0256": networkCategoryID,
|
||||
|
||||
"C-0038": workloadsCategoryID,
|
||||
"C-0041": workloadsCategoryID,
|
||||
"C-0048": workloadsCategoryID,
|
||||
"C-0057": workloadsCategoryID,
|
||||
"C-0013": workloadsCategoryID,
|
||||
}
|
||||
@@ -0,0 +1,110 @@
|
||||
package configurationprinter
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
|
||||
"github.com/kubescape/kubescape/v2/core/cautils"
|
||||
"github.com/kubescape/kubescape/v2/core/pkg/resultshandling/printer/v2/prettyprinter/tableprinter/utils"
|
||||
"github.com/kubescape/opa-utils/reporthandling/results/v1/reportsummary"
|
||||
"github.com/olekukonko/tablewriter"
|
||||
)
|
||||
|
||||
type FrameworkPrinter struct {
|
||||
verboseMode bool
|
||||
}
|
||||
|
||||
func NewFrameworkPrinter(verboseMode bool) *FrameworkPrinter {
|
||||
return &FrameworkPrinter{
|
||||
verboseMode: verboseMode,
|
||||
}
|
||||
}
|
||||
|
||||
var _ TablePrinter = &FrameworkPrinter{}
|
||||
|
||||
func (fp *FrameworkPrinter) getVerboseMode() bool {
|
||||
return fp.verboseMode
|
||||
}
|
||||
|
||||
func (fp *FrameworkPrinter) PrintSummaryTable(writer io.Writer, summaryDetails *reportsummary.SummaryDetails, sortedControlIDs [][]string) {
|
||||
if summaryDetails.NumberOfControls().All() == 0 {
|
||||
fmt.Fprintf(writer, "\nKubescape did not scan any of the resources, make sure you are scanning valid kubernetes manifests (Deployments, Pods, etc.)\n")
|
||||
return
|
||||
}
|
||||
cautils.InfoTextDisplay(writer, "\n"+ControlCountersForSummary(summaryDetails.NumberOfControls())+"\n")
|
||||
cautils.InfoTextDisplay(writer, renderSeverityCountersSummary(summaryDetails.GetResourcesSeverityCounters())+"\n\n")
|
||||
|
||||
summaryTable := tablewriter.NewWriter(writer)
|
||||
|
||||
summaryTable.SetAutoWrapText(false)
|
||||
summaryTable.SetHeaderLine(true)
|
||||
summaryTable.SetColumnAlignment(GetColumnsAlignments())
|
||||
summaryTable.SetUnicodeHV(tablewriter.Regular, tablewriter.Regular)
|
||||
|
||||
printAll := fp.getVerboseMode()
|
||||
if summaryDetails.NumberOfResources().Failed() == 0 {
|
||||
// if there are no failed controls, print the resource table and detailed information
|
||||
printAll = true
|
||||
}
|
||||
|
||||
dataRows := [][]string{}
|
||||
|
||||
infoToPrintInfo := utils.MapInfoToPrintInfo(summaryDetails.Controls)
|
||||
for i := len(sortedControlIDs) - 1; i >= 0; i-- {
|
||||
for _, c := range sortedControlIDs[i] {
|
||||
row := GenerateRow(summaryDetails.Controls.GetControl(reportsummary.EControlCriteriaID, c), infoToPrintInfo, printAll)
|
||||
if len(row) > 0 {
|
||||
dataRows = append(dataRows, row)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
short := utils.CheckShortTerminalWidth(dataRows, GetControlTableHeaders(false))
|
||||
if short {
|
||||
summaryTable.SetRowLine(true)
|
||||
dataRows = shortFormatRow(dataRows)
|
||||
} else {
|
||||
summaryTable.SetColumnAlignment(GetColumnsAlignments())
|
||||
}
|
||||
summaryTable.SetHeader(GetControlTableHeaders(short))
|
||||
summaryTable.SetFooter(GenerateFooter(summaryDetails, short))
|
||||
|
||||
var headerColors []tablewriter.Colors
|
||||
for range dataRows[0] {
|
||||
headerColors = append(headerColors, tablewriter.Colors{tablewriter.Bold, tablewriter.FgHiYellowColor})
|
||||
}
|
||||
summaryTable.SetHeaderColor(headerColors...)
|
||||
|
||||
summaryTable.AppendBulk(dataRows)
|
||||
summaryTable.Render()
|
||||
|
||||
// When scanning controls the framework list will be empty
|
||||
cautils.InfoTextDisplay(writer, utils.FrameworksScoresToString(summaryDetails.ListFrameworks()))
|
||||
|
||||
utils.PrintInfo(writer, infoToPrintInfo)
|
||||
}
|
||||
|
||||
func shortFormatRow(dataRows [][]string) [][]string {
|
||||
rows := [][]string{}
|
||||
for _, dataRow := range dataRows {
|
||||
rows = append(rows, []string{fmt.Sprintf("Severity"+strings.Repeat(" ", 11)+": %+v\nControl Name"+strings.Repeat(" ", 7)+": %+v\nFailed Resources"+strings.Repeat(" ", 3)+": %+v\nAll Resources"+strings.Repeat(" ", 6)+": %+v\n%% Compliance-Score"+strings.Repeat(" ", 1)+": %+v", dataRow[summaryColumnSeverity], dataRow[summaryColumnName], dataRow[summaryColumnCounterFailed], dataRow[summaryColumnCounterAll], dataRow[summaryColumnComplianceScore])})
|
||||
}
|
||||
return rows
|
||||
}
|
||||
|
||||
func (fp *FrameworkPrinter) PrintCategoriesTables(writer io.Writer, summaryDetails *reportsummary.SummaryDetails, sortedControlIDs [][]string) {
|
||||
|
||||
}
|
||||
|
||||
func renderSeverityCountersSummary(counters reportsummary.ISeverityCounters) string {
|
||||
critical := counters.NumberOfCriticalSeverity()
|
||||
high := counters.NumberOfHighSeverity()
|
||||
medium := counters.NumberOfMediumSeverity()
|
||||
low := counters.NumberOfLowSeverity()
|
||||
|
||||
return fmt.Sprintf(
|
||||
"Failed Resources by Severity: Critical — %d, High — %d, Medium — %d, Low — %d",
|
||||
critical, high, medium, low,
|
||||
)
|
||||
}
|
||||
@@ -0,0 +1,12 @@
|
||||
package configurationprinter
|
||||
|
||||
import (
|
||||
"io"
|
||||
|
||||
"github.com/kubescape/opa-utils/reporthandling/results/v1/reportsummary"
|
||||
)
|
||||
|
||||
// TablePrinter renders scan results as console tables: a per-control summary
// table and a set of per-category tables. Implementations exist for cluster,
// repo and framework scan flavors.
type TablePrinter interface {
	PrintCategoriesTables(writer io.Writer, summaryDetails *reportsummary.SummaryDetails, sortedControlIDs [][]string)
	PrintSummaryTable(writer io.Writer, summaryDetails *reportsummary.SummaryDetails, sortedControlIDs [][]string)
}
|
||||
@@ -0,0 +1,113 @@
|
||||
package configurationprinter
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
|
||||
"github.com/jwalton/gchalk"
|
||||
"github.com/kubescape/kubescape/v2/core/pkg/resultshandling/printer/v2/prettyprinter/tableprinter/utils"
|
||||
"github.com/kubescape/opa-utils/reporthandling"
|
||||
"github.com/kubescape/opa-utils/reporthandling/apis"
|
||||
"github.com/kubescape/opa-utils/reporthandling/results/v1/reportsummary"
|
||||
)
|
||||
|
||||
type RepoPrinter struct {
|
||||
inputPatterns []string
|
||||
}
|
||||
|
||||
func NewRepoPrinter(inputPatterns []string) *RepoPrinter {
|
||||
return &RepoPrinter{
|
||||
inputPatterns: inputPatterns,
|
||||
}
|
||||
}
|
||||
|
||||
var _ TablePrinter = &RepoPrinter{}
|
||||
|
||||
func (rp *RepoPrinter) PrintSummaryTable(writer io.Writer, summaryDetails *reportsummary.SummaryDetails, sortedControlIDs [][]string) {
|
||||
|
||||
}
|
||||
|
||||
func (rp *RepoPrinter) PrintCategoriesTables(writer io.Writer, summaryDetails *reportsummary.SummaryDetails, sortedControlIDs [][]string) {
|
||||
|
||||
categoriesToCategoryControls := mapCategoryToSummary(summaryDetails.ListControls(), mapRepoControlsToCategories)
|
||||
|
||||
for _, id := range repoCategoriesDisplayOrder {
|
||||
categoryControl, ok := categoriesToCategoryControls[id]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
if categoryControl.Status != apis.StatusFailed {
|
||||
continue
|
||||
}
|
||||
|
||||
rp.renderSingleCategoryTable(categoryControl.CategoryName, mapCategoryToType[id], writer, categoryControl.controlSummaries, utils.MapInfoToPrintInfoFromIface(categoryControl.controlSummaries))
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (rp *RepoPrinter) renderSingleCategoryTable(categoryName string, categoryType CategoryType, writer io.Writer, controlSummaries []reportsummary.IControlSummary, infoToPrintInfo []utils.InfoStars) {
|
||||
sortControlSummaries(controlSummaries)
|
||||
|
||||
headers, columnAligments := initCategoryTableData(categoryType)
|
||||
|
||||
table := getCategoryTableWriter(writer, headers, columnAligments)
|
||||
|
||||
var rows [][]string
|
||||
for _, ctrls := range controlSummaries {
|
||||
if ctrls.NumberOfResources().Failed() == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
var row []string
|
||||
if categoryType == TypeCounting {
|
||||
row = rp.generateCountingCategoryRow(ctrls, rp.inputPatterns)
|
||||
} else {
|
||||
row = generateCategoryStatusRow(ctrls, infoToPrintInfo)
|
||||
}
|
||||
if len(row) > 0 {
|
||||
rows = append(rows, row)
|
||||
}
|
||||
}
|
||||
|
||||
if len(rows) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
renderSingleCategory(writer, categoryName, table, rows, infoToPrintInfo)
|
||||
}
|
||||
|
||||
func (rp *RepoPrinter) generateCountingCategoryRow(controlSummary reportsummary.IControlSummary, inputPatterns []string) []string {
|
||||
rows := make([]string, 3)
|
||||
|
||||
rows[0] = controlSummary.GetName()
|
||||
|
||||
failedResources := controlSummary.NumberOfResources().Failed()
|
||||
if failedResources > 0 {
|
||||
rows[1] = string(gchalk.WithYellow().Bold(fmt.Sprintf("%d", failedResources)))
|
||||
} else {
|
||||
rows[1] = fmt.Sprintf("%d", failedResources)
|
||||
}
|
||||
|
||||
rows[2] = rp.generateTableNextSteps(controlSummary, inputPatterns)
|
||||
|
||||
return rows
|
||||
}
|
||||
|
||||
func (rp *RepoPrinter) getWorkloadScanCommand(ns, kind, name string, source reporthandling.Source) string {
|
||||
cmd := fmt.Sprintf("$ kubescape scan workload %s/%s/%s", ns, kind, name)
|
||||
if ns == "" {
|
||||
cmd = fmt.Sprintf("$ kubescape scan workload %s/%s", kind, name)
|
||||
}
|
||||
if source.FileType == "Helm" {
|
||||
return fmt.Sprintf("%s --chart-path=%s", cmd, source.RelativePath)
|
||||
|
||||
} else {
|
||||
return fmt.Sprintf("%s --file-path=%s", cmd, source.RelativePath)
|
||||
}
|
||||
}
|
||||
|
||||
func (rp *RepoPrinter) generateTableNextSteps(controlSummary reportsummary.IControlSummary, inputPatterns []string) string {
|
||||
return fmt.Sprintf("$ kubescape scan control %s %s -v", controlSummary.GetID(), strings.Join(inputPatterns, ","))
|
||||
}
|
||||
@@ -0,0 +1,102 @@
|
||||
package configurationprinter
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/kubescape/opa-utils/reporthandling/results/v1/reportsummary"
|
||||
)
|
||||
|
||||
func TestRepoScan_GenerateCountingCategoryRow(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
controlSummary reportsummary.ControlSummary
|
||||
expectedRow []string
|
||||
inputPatterns []string
|
||||
}{
|
||||
{
|
||||
name: "multiple files",
|
||||
controlSummary: reportsummary.ControlSummary{
|
||||
ControlID: "ctrl1",
|
||||
Name: "ctrl1",
|
||||
StatusCounters: reportsummary.StatusCounters{
|
||||
FailedResources: 5,
|
||||
PassedResources: 3,
|
||||
SkippedResources: 2,
|
||||
},
|
||||
},
|
||||
inputPatterns: []string{"file.yaml", "file2.yaml"},
|
||||
expectedRow: []string{"ctrl1", "5", "$ kubescape scan control ctrl1 file.yaml,file2.yaml -v"},
|
||||
},
|
||||
{
|
||||
name: "one file",
|
||||
controlSummary: reportsummary.ControlSummary{
|
||||
ControlID: "ctrl1",
|
||||
Name: "ctrl1",
|
||||
StatusCounters: reportsummary.StatusCounters{
|
||||
FailedResources: 5,
|
||||
PassedResources: 3,
|
||||
SkippedResources: 2,
|
||||
},
|
||||
},
|
||||
inputPatterns: []string{"file.yaml"},
|
||||
expectedRow: []string{"ctrl1", "5", "$ kubescape scan control ctrl1 file.yaml -v"},
|
||||
},
|
||||
}
|
||||
|
||||
repoPrinter := NewRepoPrinter(nil)
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
row := repoPrinter.generateCountingCategoryRow(&test.controlSummary, test.inputPatterns)
|
||||
|
||||
if len(row) != len(test.expectedRow) {
|
||||
t.Errorf("expected row length %d, got %d", len(test.expectedRow), len(row))
|
||||
}
|
||||
|
||||
for i := range row {
|
||||
if row[i] != test.expectedRow[i] {
|
||||
t.Errorf("expected row %v, got %v", test.expectedRow, row)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestRepoScan_GenerateTableNextSteps(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
controlSummary reportsummary.ControlSummary
|
||||
expectedNextSteps string
|
||||
inputPatterns []string
|
||||
}{
|
||||
{
|
||||
name: "single file",
|
||||
controlSummary: reportsummary.ControlSummary{
|
||||
ControlID: "ctrl1",
|
||||
},
|
||||
inputPatterns: []string{"file.yaml"},
|
||||
expectedNextSteps: "$ kubescape scan control ctrl1 file.yaml -v",
|
||||
},
|
||||
{
|
||||
name: "multiple files",
|
||||
controlSummary: reportsummary.ControlSummary{
|
||||
ControlID: "ctrl1",
|
||||
},
|
||||
inputPatterns: []string{"file.yaml", "file2.yaml"},
|
||||
expectedNextSteps: "$ kubescape scan control ctrl1 file.yaml,file2.yaml -v",
|
||||
},
|
||||
}
|
||||
|
||||
repoPrinter := NewRepoPrinter(nil)
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
nextSteps := repoPrinter.generateTableNextSteps(&test.controlSummary, test.inputPatterns)
|
||||
|
||||
if nextSteps != test.expectedNextSteps {
|
||||
t.Errorf("expected next steps %s, got %s", test.expectedNextSteps, nextSteps)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,110 @@
|
||||
package configurationprinter
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/kubescape/kubescape/v2/core/cautils"
|
||||
"github.com/kubescape/kubescape/v2/core/pkg/resultshandling/printer/v2/prettyprinter/tableprinter/utils"
|
||||
"github.com/kubescape/opa-utils/reporthandling/apis"
|
||||
"github.com/kubescape/opa-utils/reporthandling/results/v1/reportsummary"
|
||||
"github.com/olekukonko/tablewriter"
|
||||
)
|
||||
|
||||
const (
|
||||
summaryColumnSeverity = iota
|
||||
summaryColumnName = iota
|
||||
summaryColumnCounterFailed = iota
|
||||
summaryColumnCounterAll = iota
|
||||
summaryColumnComplianceScore = iota
|
||||
_summaryRowLen = iota
|
||||
)
|
||||
|
||||
func ControlCountersForSummary(counters reportsummary.ICounters) string {
|
||||
return fmt.Sprintf("Controls: %d (Failed: %d, Passed: %d, Action Required: %d)", counters.All(), counters.Failed(), counters.Passed(), counters.Skipped())
|
||||
}
|
||||
|
||||
func GetSeverityColumn(controlSummary reportsummary.IControlSummary) string {
|
||||
return utils.GetColor(apis.ControlSeverityToInt(controlSummary.GetScoreFactor()))(apis.ControlSeverityToString(controlSummary.GetScoreFactor()))
|
||||
}
|
||||
|
||||
func GetControlTableHeaders(short bool) []string {
|
||||
var headers []string
|
||||
if short {
|
||||
headers = make([]string, 1)
|
||||
headers[0] = "CONTROLS"
|
||||
} else {
|
||||
headers = make([]string, _summaryRowLen)
|
||||
headers[summaryColumnName] = "CONTROL NAME"
|
||||
headers[summaryColumnCounterFailed] = "FAILED RESOURCES"
|
||||
headers[summaryColumnCounterAll] = "ALL RESOURCES"
|
||||
headers[summaryColumnSeverity] = "SEVERITY"
|
||||
headers[summaryColumnComplianceScore] = "% COMPLIANCE-SCORE"
|
||||
}
|
||||
return headers
|
||||
}
|
||||
|
||||
func GetColumnsAlignments() []int {
|
||||
alignments := make([]int, _summaryRowLen)
|
||||
alignments[summaryColumnName] = tablewriter.ALIGN_LEFT
|
||||
alignments[summaryColumnCounterFailed] = tablewriter.ALIGN_CENTER
|
||||
alignments[summaryColumnCounterAll] = tablewriter.ALIGN_CENTER
|
||||
alignments[summaryColumnSeverity] = tablewriter.ALIGN_LEFT
|
||||
alignments[summaryColumnComplianceScore] = tablewriter.ALIGN_CENTER
|
||||
return alignments
|
||||
}
|
||||
|
||||
func GenerateRow(controlSummary reportsummary.IControlSummary, infoToPrintInfo []utils.InfoStars, verbose bool) []string {
|
||||
row := make([]string, _summaryRowLen)
|
||||
|
||||
// ignore passed results
|
||||
if !verbose && (controlSummary.GetStatus().IsPassed()) {
|
||||
return []string{}
|
||||
}
|
||||
|
||||
row[summaryColumnSeverity] = GetSeverityColumn(controlSummary)
|
||||
if len(controlSummary.GetName()) > 50 {
|
||||
row[summaryColumnName] = controlSummary.GetName()[:50] + "..."
|
||||
} else {
|
||||
row[summaryColumnName] = controlSummary.GetName()
|
||||
}
|
||||
row[summaryColumnCounterFailed] = fmt.Sprintf("%d", controlSummary.NumberOfResources().Failed())
|
||||
row[summaryColumnCounterAll] = fmt.Sprintf("%d", controlSummary.NumberOfResources().All())
|
||||
row[summaryColumnComplianceScore] = GetComplianceScoreColumn(controlSummary, infoToPrintInfo)
|
||||
|
||||
return row
|
||||
}
|
||||
|
||||
func GetComplianceScoreColumn(controlSummary reportsummary.IControlSummary, infoToPrintInfo []utils.InfoStars) string {
|
||||
if controlSummary.GetStatus().IsSkipped() {
|
||||
return fmt.Sprintf("%s %s", "Action Required", GetInfoColumn(controlSummary, infoToPrintInfo))
|
||||
}
|
||||
return fmt.Sprintf("%d", cautils.Float32ToInt(controlSummary.GetComplianceScore())) + "%"
|
||||
}
|
||||
|
||||
func GetInfoColumn(controlSummary reportsummary.IControlSummary, infoToPrintInfo []utils.InfoStars) string {
|
||||
for i := range infoToPrintInfo {
|
||||
if infoToPrintInfo[i].Info == controlSummary.GetStatus().Info() {
|
||||
return infoToPrintInfo[i].Stars
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func GenerateFooter(summaryDetails *reportsummary.SummaryDetails, short bool) []string {
|
||||
var row []string
|
||||
if short {
|
||||
row = make([]string, 1)
|
||||
row[0] = fmt.Sprintf("Resource Summary"+strings.Repeat(" ", 0)+"\n\nFailed Resources"+strings.Repeat(" ", 1)+": %d\nAll Resources"+strings.Repeat(" ", 4)+": %d\n%% Compliance-Score"+strings.Repeat(" ", 4)+": %.2f%%", summaryDetails.NumberOfResources().Failed(), summaryDetails.NumberOfResources().All(), summaryDetails.ComplianceScore)
|
||||
} else {
|
||||
// Severity | Control name | failed resources | all resources | % success
|
||||
row = make([]string, _summaryRowLen)
|
||||
row[summaryColumnName] = "Resource Summary"
|
||||
row[summaryColumnCounterFailed] = fmt.Sprintf("%d", summaryDetails.NumberOfResources().Failed())
|
||||
row[summaryColumnCounterAll] = fmt.Sprintf("%d", summaryDetails.NumberOfResources().All())
|
||||
row[summaryColumnSeverity] = " "
|
||||
row[summaryColumnComplianceScore] = fmt.Sprintf("%.2f%s", summaryDetails.ComplianceScore, "%")
|
||||
}
|
||||
|
||||
return row
|
||||
}
|
||||
@@ -0,0 +1,106 @@
|
||||
package configurationprinter
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/kubescape/kubescape/v2/core/cautils"
|
||||
"github.com/kubescape/kubescape/v2/core/pkg/resultshandling/printer/v2/prettyprinter/tableprinter/utils"
|
||||
"github.com/kubescape/opa-utils/reporthandling/apis"
|
||||
"github.com/kubescape/opa-utils/reporthandling/results/v1/reportsummary"
|
||||
)
|
||||
|
||||
// returns map of category ID to category controls (name and controls)
|
||||
// controls will be on the map only if the are in the mapClusterControlsToCategories map
|
||||
func mapCategoryToSummary(controlSummaries []reportsummary.IControlSummary, mapDisplayCtrlIDToCategory map[string]string) map[string]CategoryControls {
|
||||
|
||||
mapCategoriesToCtrlSummary := map[string][]reportsummary.IControlSummary{}
|
||||
// helper map to get the category name
|
||||
mapCategoryIDToName := make(map[string]string)
|
||||
|
||||
for i := range controlSummaries {
|
||||
// check if we need to print this control
|
||||
category, ok := mapDisplayCtrlIDToCategory[controlSummaries[i].GetID()]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
if controlSummaries[i].GetCategory() == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// the category on the map can be either category or subcategory, so we need to check both
|
||||
if controlSummaries[i].GetCategory().ID == category {
|
||||
if _, ok := mapCategoriesToCtrlSummary[controlSummaries[i].GetCategory().ID]; !ok {
|
||||
mapCategoryIDToName[controlSummaries[i].GetCategory().ID] = controlSummaries[i].GetCategory().Name // set category name
|
||||
mapCategoriesToCtrlSummary[controlSummaries[i].GetCategory().ID] = []reportsummary.IControlSummary{}
|
||||
|
||||
}
|
||||
mapCategoriesToCtrlSummary[controlSummaries[i].GetCategory().ID] = append(mapCategoriesToCtrlSummary[controlSummaries[i].GetCategory().ID], controlSummaries[i])
|
||||
continue
|
||||
}
|
||||
|
||||
if controlSummaries[i].GetCategory().SubCategory == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if controlSummaries[i].GetCategory().SubCategory.ID == category {
|
||||
if _, ok := mapCategoriesToCtrlSummary[controlSummaries[i].GetCategory().SubCategory.ID]; !ok {
|
||||
mapCategoryIDToName[controlSummaries[i].GetCategory().SubCategory.ID] = controlSummaries[i].GetCategory().SubCategory.Name // set category name
|
||||
mapCategoriesToCtrlSummary[controlSummaries[i].GetCategory().SubCategory.ID] = []reportsummary.IControlSummary{}
|
||||
}
|
||||
mapCategoriesToCtrlSummary[controlSummaries[i].GetCategory().SubCategory.ID] = append(mapCategoriesToCtrlSummary[controlSummaries[i].GetCategory().SubCategory.ID], controlSummaries[i])
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
mapCategoryToControls := buildCategoryToControlsMap(mapCategoriesToCtrlSummary, mapCategoryIDToName)
|
||||
|
||||
return mapCategoryToControls
|
||||
}
|
||||
|
||||
// returns map of category ID to category controls (name and controls)
|
||||
func buildCategoryToControlsMap(mapCategoriesToCtrlSummary map[string][]reportsummary.IControlSummary, mapCategoryIDToName map[string]string) map[string]CategoryControls {
|
||||
mapCategoryToControls := make(map[string]CategoryControls)
|
||||
for categoryID, ctrls := range mapCategoriesToCtrlSummary {
|
||||
status := apis.StatusPassed
|
||||
for _, ctrl := range ctrls {
|
||||
if ctrl.GetStatus().Status() == apis.StatusFailed {
|
||||
status = apis.StatusFailed
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
categoryName := mapCategoryIDToName[categoryID]
|
||||
mapCategoryToControls[categoryID] = CategoryControls{
|
||||
CategoryName: categoryName,
|
||||
controlSummaries: ctrls,
|
||||
Status: status,
|
||||
}
|
||||
}
|
||||
return mapCategoryToControls
|
||||
}
|
||||
|
||||
// returns doc link for control
|
||||
func getDocsForControl(controlSummary reportsummary.IControlSummary) string {
|
||||
return fmt.Sprintf("%s/%s", docsPrefix, strings.ToLower(controlSummary.GetID()))
|
||||
}
|
||||
|
||||
// returns run command with verbose for control
|
||||
func getRunCommandForControl(controlSummary reportsummary.IControlSummary) string {
|
||||
return fmt.Sprintf("%s %s -v", scanControlPrefix, controlSummary.GetID())
|
||||
}
|
||||
|
||||
func sortControlSummaries(controlSummaries []reportsummary.IControlSummary) {
|
||||
sort.Slice(controlSummaries, func(i, j int) bool {
|
||||
return controlSummaries[i].GetName() < controlSummaries[j].GetName()
|
||||
})
|
||||
}
|
||||
|
||||
func printCategoryInfo(writer io.Writer, infoToPrintInfo []utils.InfoStars) {
|
||||
for i := range infoToPrintInfo {
|
||||
cautils.InfoDisplay(writer, fmt.Sprintf("%s %s\n", infoToPrintInfo[i].Stars, infoToPrintInfo[i].Info))
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,598 @@
|
||||
package configurationprinter
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/kubescape/opa-utils/reporthandling"
|
||||
"github.com/kubescape/opa-utils/reporthandling/apis"
|
||||
"github.com/kubescape/opa-utils/reporthandling/results/v1/reportsummary"
|
||||
)
|
||||
|
||||
func TestMapCategoryToSummary(t *testing.T) {
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
ctrlSummaries map[string]reportsummary.ControlSummary
|
||||
mapDisplayCtrlIDToCategory map[string]string
|
||||
expected map[string]CategoryControls
|
||||
}{
|
||||
{
|
||||
name: "controls mapped to right categories",
|
||||
ctrlSummaries: map[string]reportsummary.ControlSummary{
|
||||
"controlName1": {
|
||||
ControlID: "ctrlID1",
|
||||
Status: apis.StatusFailed,
|
||||
Category: &reporthandling.Category{
|
||||
Name: "category1",
|
||||
ID: "catID1",
|
||||
},
|
||||
},
|
||||
"controlName2": {
|
||||
ControlID: "ctrlID2",
|
||||
Status: apis.StatusFailed,
|
||||
Category: &reporthandling.Category{
|
||||
Name: "category1",
|
||||
ID: "catID1",
|
||||
},
|
||||
},
|
||||
"controlName3": {
|
||||
ControlID: "ctrlID3",
|
||||
Status: apis.StatusFailed,
|
||||
Category: &reporthandling.Category{
|
||||
Name: "category2",
|
||||
ID: "catID2",
|
||||
},
|
||||
},
|
||||
},
|
||||
mapDisplayCtrlIDToCategory: map[string]string{
|
||||
"ctrlID1": "catID1",
|
||||
"ctrlID2": "catID1",
|
||||
"ctrlID3": "catID2",
|
||||
},
|
||||
expected: map[string]CategoryControls{
|
||||
"catID1": {
|
||||
CategoryName: "category1",
|
||||
controlSummaries: []reportsummary.IControlSummary{
|
||||
&reportsummary.ControlSummary{
|
||||
ControlID: "ctrlID1",
|
||||
},
|
||||
&reportsummary.ControlSummary{
|
||||
ControlID: "ctrlID2",
|
||||
},
|
||||
},
|
||||
},
|
||||
"catID2": {
|
||||
CategoryName: "category2",
|
||||
controlSummaries: []reportsummary.IControlSummary{
|
||||
&reportsummary.ControlSummary{
|
||||
ControlID: "ctrlID3",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "empty display map",
|
||||
ctrlSummaries: map[string]reportsummary.ControlSummary{
|
||||
"controlName1": {
|
||||
ControlID: "ctrlID1",
|
||||
Status: apis.StatusFailed,
|
||||
Category: &reporthandling.Category{
|
||||
Name: "category1",
|
||||
ID: "catID1",
|
||||
},
|
||||
},
|
||||
"controlName2": {
|
||||
ControlID: "ctrlID2",
|
||||
Status: apis.StatusFailed,
|
||||
Category: &reporthandling.Category{
|
||||
Name: "category1",
|
||||
ID: "catID1",
|
||||
},
|
||||
},
|
||||
"controlName3": {
|
||||
ControlID: "ctrlID3",
|
||||
Status: apis.StatusFailed,
|
||||
Category: &reporthandling.Category{
|
||||
Name: "category2",
|
||||
ID: "catID2",
|
||||
},
|
||||
},
|
||||
},
|
||||
mapDisplayCtrlIDToCategory: map[string]string{},
|
||||
expected: map[string]CategoryControls{},
|
||||
},
|
||||
{
|
||||
name: "controls not in map are not mapped",
|
||||
ctrlSummaries: map[string]reportsummary.ControlSummary{
|
||||
"controlName1": {
|
||||
ControlID: "ctrlID1",
|
||||
Status: apis.StatusFailed,
|
||||
Category: &reporthandling.Category{
|
||||
Name: "category1",
|
||||
ID: "catID1",
|
||||
},
|
||||
},
|
||||
"controlName2": {
|
||||
ControlID: "ctrlID2",
|
||||
Status: apis.StatusFailed,
|
||||
Category: &reporthandling.Category{
|
||||
Name: "category1",
|
||||
ID: "catID1",
|
||||
},
|
||||
},
|
||||
"controlName3": {
|
||||
ControlID: "ctrlID3",
|
||||
Status: apis.StatusFailed,
|
||||
Category: &reporthandling.Category{
|
||||
Name: "category2",
|
||||
ID: "catID2",
|
||||
},
|
||||
},
|
||||
},
|
||||
mapDisplayCtrlIDToCategory: map[string]string{
|
||||
"ctrlID3": "catID2",
|
||||
},
|
||||
expected: map[string]CategoryControls{
|
||||
"catID2": {
|
||||
CategoryName: "category2",
|
||||
controlSummaries: []reportsummary.IControlSummary{
|
||||
&reportsummary.ControlSummary{
|
||||
ControlID: "ctrlID3",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "controls mapped to right sub-categories",
|
||||
ctrlSummaries: map[string]reportsummary.ControlSummary{
|
||||
"controlName1": {
|
||||
ControlID: "ctrlID1",
|
||||
Status: apis.StatusFailed,
|
||||
Category: &reporthandling.Category{
|
||||
Name: "category1",
|
||||
ID: "catID1",
|
||||
SubCategory: &reporthandling.SubCategory{
|
||||
Name: "subCategory1",
|
||||
ID: "subCatID1",
|
||||
},
|
||||
},
|
||||
},
|
||||
"controlName2": {
|
||||
ControlID: "ctrlID2",
|
||||
Status: apis.StatusFailed,
|
||||
Category: &reporthandling.Category{
|
||||
Name: "category1",
|
||||
ID: "catID1",
|
||||
SubCategory: &reporthandling.SubCategory{
|
||||
Name: "subCategory1",
|
||||
ID: "subCatID1",
|
||||
},
|
||||
},
|
||||
},
|
||||
"controlName3": {
|
||||
ControlID: "ctrlID3",
|
||||
Status: apis.StatusFailed,
|
||||
Category: &reporthandling.Category{
|
||||
Name: "category2",
|
||||
ID: "catID2",
|
||||
SubCategory: &reporthandling.SubCategory{
|
||||
Name: "subCategory2",
|
||||
ID: "subCatID2",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
mapDisplayCtrlIDToCategory: map[string]string{
|
||||
"ctrlID1": "subCatID1",
|
||||
"ctrlID2": "subCatID1",
|
||||
"ctrlID3": "subCatID2",
|
||||
},
|
||||
expected: map[string]CategoryControls{
|
||||
"subCatID1": {
|
||||
CategoryName: "subCategory1",
|
||||
controlSummaries: []reportsummary.IControlSummary{
|
||||
&reportsummary.ControlSummary{
|
||||
ControlID: "ctrlID1",
|
||||
},
|
||||
&reportsummary.ControlSummary{
|
||||
ControlID: "ctrlID2",
|
||||
},
|
||||
},
|
||||
},
|
||||
"subCatID2": {
|
||||
CategoryName: "subCategory2",
|
||||
controlSummaries: []reportsummary.IControlSummary{
|
||||
&reportsummary.ControlSummary{
|
||||
ControlID: "ctrlID3",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "controls mapped to categories and sub-categories",
|
||||
ctrlSummaries: map[string]reportsummary.ControlSummary{
|
||||
"controlName1": {
|
||||
ControlID: "ctrlID1",
|
||||
Status: apis.StatusFailed,
|
||||
Category: &reporthandling.Category{
|
||||
Name: "category1",
|
||||
ID: "catID1",
|
||||
SubCategory: &reporthandling.SubCategory{
|
||||
Name: "subCategory1",
|
||||
ID: "subCatID1",
|
||||
},
|
||||
},
|
||||
},
|
||||
"controlName2": {
|
||||
ControlID: "ctrlID2",
|
||||
Status: apis.StatusFailed,
|
||||
Category: &reporthandling.Category{
|
||||
Name: "category1",
|
||||
ID: "catID1",
|
||||
SubCategory: &reporthandling.SubCategory{
|
||||
Name: "subCategory1",
|
||||
ID: "subCatID1",
|
||||
},
|
||||
},
|
||||
},
|
||||
"controlName3": {
|
||||
ControlID: "ctrlID3",
|
||||
Status: apis.StatusFailed,
|
||||
Category: &reporthandling.Category{
|
||||
Name: "category2",
|
||||
ID: "catID2",
|
||||
SubCategory: &reporthandling.SubCategory{
|
||||
Name: "subCategory2",
|
||||
ID: "subCatID2",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
mapDisplayCtrlIDToCategory: map[string]string{
|
||||
"ctrlID1": "catID1",
|
||||
"ctrlID2": "subCatID1",
|
||||
"ctrlID3": "subCatID2",
|
||||
},
|
||||
expected: map[string]CategoryControls{
|
||||
"catID1": {
|
||||
CategoryName: "category1",
|
||||
controlSummaries: []reportsummary.IControlSummary{
|
||||
&reportsummary.ControlSummary{
|
||||
ControlID: "ctrlID1",
|
||||
},
|
||||
},
|
||||
},
|
||||
"subCatID1": {
|
||||
CategoryName: "subCategory1",
|
||||
controlSummaries: []reportsummary.IControlSummary{
|
||||
&reportsummary.ControlSummary{
|
||||
ControlID: "ctrlID2",
|
||||
},
|
||||
},
|
||||
},
|
||||
"subCatID2": {
|
||||
CategoryName: "subCategory2",
|
||||
controlSummaries: []reportsummary.IControlSummary{
|
||||
&reportsummary.ControlSummary{
|
||||
ControlID: "ctrlID3",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "nil category",
|
||||
ctrlSummaries: map[string]reportsummary.ControlSummary{
|
||||
"controlName1": {
|
||||
ControlID: "ctrlID1",
|
||||
Status: apis.StatusFailed,
|
||||
}},
|
||||
mapDisplayCtrlIDToCategory: map[string]string{
|
||||
"ctrlID1": "catID1",
|
||||
},
|
||||
expected: map[string]CategoryControls{},
|
||||
},
|
||||
{
|
||||
name: "nil sub category",
|
||||
ctrlSummaries: map[string]reportsummary.ControlSummary{
|
||||
"controlName1": {
|
||||
ControlID: "ctrlID1",
|
||||
Status: apis.StatusFailed,
|
||||
Category: &reporthandling.Category{
|
||||
Name: "category1",
|
||||
ID: "catID1",
|
||||
},
|
||||
}},
|
||||
mapDisplayCtrlIDToCategory: map[string]string{
|
||||
"ctrlID1": "subCatID1",
|
||||
},
|
||||
expected: map[string]CategoryControls{},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
summaryDetails := reportsummary.SummaryDetails{
|
||||
Controls: test.ctrlSummaries,
|
||||
}
|
||||
|
||||
actual := mapCategoryToSummary(summaryDetails.ListControls(), test.mapDisplayCtrlIDToCategory)
|
||||
|
||||
if len(actual) != len(test.expected) {
|
||||
t.Errorf("expected %d categories, got %d", len(test.expected), len(actual))
|
||||
}
|
||||
|
||||
for categoryID, category := range actual {
|
||||
expectedCategory, ok := test.expected[categoryID]
|
||||
if !ok {
|
||||
t.Errorf("unexpected category %s", categoryID)
|
||||
}
|
||||
|
||||
if category.CategoryName != expectedCategory.CategoryName {
|
||||
t.Errorf("expected category name %s, got %s", test.expected[category.CategoryName].CategoryName, category.CategoryName)
|
||||
}
|
||||
|
||||
if len(category.controlSummaries) != len(expectedCategory.controlSummaries) {
|
||||
t.Errorf("expected %d controls, got %d", len(test.expected[category.CategoryName].controlSummaries), len(category.controlSummaries))
|
||||
}
|
||||
|
||||
for i := range category.controlSummaries {
|
||||
found := false
|
||||
for j := range expectedCategory.controlSummaries {
|
||||
if category.controlSummaries[i].GetID() == expectedCategory.controlSummaries[j].GetID() {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
t.Errorf("unexpected control %s", category.controlSummaries[i].GetID())
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildCategoryToControlsMap(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
mapCategoriesToCtrlSummary map[string][]reportsummary.ControlSummary
|
||||
mapCategoryIDToName map[string]string
|
||||
expected map[string]CategoryControls
|
||||
}{
|
||||
{
|
||||
name: "build map of categories to controls",
|
||||
mapCategoriesToCtrlSummary: map[string][]reportsummary.ControlSummary{
|
||||
"catID1": {
|
||||
{
|
||||
ControlID: "ctrlID1",
|
||||
},
|
||||
},
|
||||
"catID2": {
|
||||
{
|
||||
ControlID: "ctrlID2",
|
||||
},
|
||||
},
|
||||
"catID3": {
|
||||
{
|
||||
ControlID: "ctrlID3",
|
||||
},
|
||||
{
|
||||
ControlID: "ctrlID4",
|
||||
},
|
||||
},
|
||||
},
|
||||
mapCategoryIDToName: map[string]string{
|
||||
"catID1": "category1",
|
||||
"catID2": "category2",
|
||||
"catID3": "category3",
|
||||
},
|
||||
expected: map[string]CategoryControls{
|
||||
"catID1": {
|
||||
CategoryName: "category1",
|
||||
controlSummaries: []reportsummary.IControlSummary{
|
||||
&reportsummary.ControlSummary{
|
||||
ControlID: "ctrlID1",
|
||||
},
|
||||
},
|
||||
},
|
||||
"catID2": {
|
||||
CategoryName: "category2",
|
||||
controlSummaries: []reportsummary.IControlSummary{
|
||||
&reportsummary.ControlSummary{
|
||||
ControlID: "ctrlID2",
|
||||
},
|
||||
},
|
||||
},
|
||||
"catID3": {
|
||||
CategoryName: "category3",
|
||||
controlSummaries: []reportsummary.IControlSummary{
|
||||
&reportsummary.ControlSummary{
|
||||
ControlID: "ctrlID3",
|
||||
},
|
||||
&reportsummary.ControlSummary{
|
||||
ControlID: "ctrlID4",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "build map of categories to controls with empty map",
|
||||
mapCategoriesToCtrlSummary: map[string][]reportsummary.ControlSummary{},
|
||||
mapCategoryIDToName: map[string]string{},
|
||||
expected: map[string]CategoryControls{},
|
||||
},
|
||||
{
|
||||
name: "two categories with same name",
|
||||
mapCategoriesToCtrlSummary: map[string][]reportsummary.ControlSummary{
|
||||
"catID1": {
|
||||
{
|
||||
ControlID: "ctrlID1",
|
||||
},
|
||||
},
|
||||
"catID2": {
|
||||
{
|
||||
ControlID: "ctrlID2",
|
||||
},
|
||||
},
|
||||
"catID3": {
|
||||
{
|
||||
ControlID: "ctrlID3",
|
||||
},
|
||||
},
|
||||
},
|
||||
mapCategoryIDToName: map[string]string{
|
||||
"catID1": "category1",
|
||||
"catID2": "category1",
|
||||
"catID3": "category2",
|
||||
},
|
||||
expected: map[string]CategoryControls{
|
||||
"catID1": {
|
||||
CategoryName: "category1",
|
||||
controlSummaries: []reportsummary.IControlSummary{
|
||||
&reportsummary.ControlSummary{
|
||||
ControlID: "ctrlID1",
|
||||
},
|
||||
},
|
||||
},
|
||||
"catID2": {
|
||||
CategoryName: "category1",
|
||||
controlSummaries: []reportsummary.IControlSummary{
|
||||
&reportsummary.ControlSummary{
|
||||
ControlID: "ctrlID2",
|
||||
},
|
||||
},
|
||||
},
|
||||
"catID3": {
|
||||
CategoryName: "category2",
|
||||
controlSummaries: []reportsummary.IControlSummary{
|
||||
&reportsummary.ControlSummary{
|
||||
ControlID: "ctrlID3",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
|
||||
ctrlSummaries := make(map[string][]reportsummary.IControlSummary, 0)
|
||||
for id, summaries := range test.mapCategoriesToCtrlSummary {
|
||||
for _, summary := range summaries {
|
||||
if _, ok := ctrlSummaries[id]; !ok {
|
||||
ctrlSummaries[id] = []reportsummary.IControlSummary{}
|
||||
}
|
||||
ctrlSummaries[id] = append(ctrlSummaries[id], &summary)
|
||||
}
|
||||
}
|
||||
|
||||
actual := buildCategoryToControlsMap(ctrlSummaries, test.mapCategoryIDToName)
|
||||
|
||||
if len(actual) != len(test.expected) {
|
||||
t.Errorf("expected %d categories, got %d", len(test.expected), len(actual))
|
||||
}
|
||||
|
||||
for categoryID, category := range actual {
|
||||
expectedCategory, ok := test.expected[categoryID]
|
||||
if !ok {
|
||||
t.Errorf("unexpected category %s", categoryID)
|
||||
}
|
||||
|
||||
if category.CategoryName != expectedCategory.CategoryName {
|
||||
t.Errorf("expected category name %s, got %s", test.expected[category.CategoryName].CategoryName, category.CategoryName)
|
||||
}
|
||||
|
||||
if len(category.controlSummaries) != len(expectedCategory.controlSummaries) {
|
||||
t.Errorf("expected %d controls, got %d", len(test.expected[category.CategoryName].controlSummaries), len(category.controlSummaries))
|
||||
}
|
||||
|
||||
for i := range category.controlSummaries {
|
||||
found := false
|
||||
for j := range expectedCategory.controlSummaries {
|
||||
if category.controlSummaries[i].GetID() == expectedCategory.controlSummaries[j].GetID() {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
t.Errorf("unexpected control %s", category.controlSummaries[i].GetID())
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetDocsForControl(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
controlSummary reportsummary.IControlSummary
|
||||
expectedDocsLink string
|
||||
}{
|
||||
{
|
||||
name: "control with uppercase ID",
|
||||
controlSummary: &reportsummary.ControlSummary{
|
||||
ControlID: "ctrlID1",
|
||||
},
|
||||
expectedDocsLink: "https://hub.armosec.io/docs/ctrlid1",
|
||||
},
|
||||
{
|
||||
name: "control with lowercase ID",
|
||||
controlSummary: &reportsummary.ControlSummary{
|
||||
ControlID: "ctrlid1",
|
||||
},
|
||||
expectedDocsLink: "https://hub.armosec.io/docs/ctrlid1",
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
actual := getDocsForControl(test.controlSummary)
|
||||
|
||||
if actual != test.expectedDocsLink {
|
||||
t.Errorf("expected %s, got %s", test.expectedDocsLink, actual)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetRunCommandForControl(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
controlSummary reportsummary.IControlSummary
|
||||
expectedRunLink string
|
||||
}{
|
||||
{
|
||||
name: "control with uppercase ID",
|
||||
controlSummary: &reportsummary.ControlSummary{
|
||||
ControlID: "ctrlID1",
|
||||
},
|
||||
expectedRunLink: "$ kubescape scan control ctrlID1 -v",
|
||||
},
|
||||
{
|
||||
name: "control with lowercase ID",
|
||||
controlSummary: &reportsummary.ControlSummary{
|
||||
ControlID: "ctrlid1",
|
||||
},
|
||||
expectedRunLink: "$ kubescape scan control ctrlid1 -v",
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
actualLink := getRunCommandForControl(test.controlSummary)
|
||||
|
||||
if actualLink != test.expectedRunLink {
|
||||
t.Errorf("expected %s, got %s", test.expectedRunLink, actualLink)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user